diff --git a/.gn b/.gn index 08f9da9187..d6f84df080 100644 --- a/.gn +++ b/.gn @@ -20,33 +20,11 @@ script_executable = "python3" # in the source tree, e.g. for third party source trees. secondary_source = "//build/secondary/" -# These are the targets to check headers for by default. The files in targets -# matching these patterns (see "gn help label_pattern" for format) will have +# These are the targets to skip header checking by default. The files in targets +# matching these patterns (see "gn help label_pattern" for format) will not have # their includes checked for proper dependencies when you run either # "gn check" or "gn gen --check". -check_targets = [ - "//api/*", - "//audio/*", - "//backup/*", - "//call/*", - "//common_audio/*", - "//common_video/*", - "//examples/*", - "//logging/*", - "//media/*", - "//modules/*", - "//net/*", - "//p2p/*", - "//pc/*", - "//rtc_base/*", - "//rtc_tools/*", - "//sdk/*", - "//stats/*", - "//system_wrappers/*", - "//test/*", - "//video/*", - "//third_party/libyuv/*", -] +no_check_targets = [ "//third_party/icu/*" ] # These are the list of GN files that run exec_script. This whitelist exists # to force additional review for new uses of exec_script, which is strongly @@ -66,7 +44,7 @@ default_args = { mac_sdk_min = "10.12" - ios_deployment_target = "10.0" + ios_deployment_target = "12.0" # The SDK API level, in contrast, is set by build/android/AndroidManifest.xml. android32_ndk_api_level = 16 diff --git a/BUILD.gn b/BUILD.gn index e60d7dd0bd..bc51df7c07 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -435,10 +435,6 @@ config("common_config") { config("common_objc") { frameworks = [ "Foundation.framework" ] - - if (rtc_use_metal_rendering) { - defines = [ "RTC_SUPPORTS_METAL" ] - } } if (!build_with_chromium) { @@ -520,6 +516,10 @@ if (!build_with_chromium) { rtc_executable("webrtc_lib_link_test") { testonly = true + # This target is used for checking to link, so do not check dependencies + # on gn check. + check_includes = false # no-presubmit-check TODO(bugs.webrtc.org/12785) + sources = [ "webrtc_lib_link_test.cc" ] deps = [ # NOTE: Don't add deps here. If this test fails to link, it means you diff --git a/DEPS b/DEPS index b0196982d6..c24608a98a 100644 --- a/DEPS +++ b/DEPS @@ -1,43 +1,49 @@ # This file contains dependencies for WebRTC. gclient_gn_args_file = 'src/build/config/gclient_args.gni' +gclient_gn_args = [ + 'generate_location_tags', +] vars = { # By default, we should check out everything needed to run on the main # chromium waterfalls. More info at: crbug.com/570091. 'checkout_configuration': 'default', 'checkout_instrumented_libraries': 'checkout_linux and checkout_configuration == "default"', - 'chromium_revision': '108d11241ed510ef31b4e14aa2da0fc884df0405', + 'chromium_revision': '6d8828f6a6eea769a05fa1c0b7acf10aca631d4a', + + # Keep the Chromium default of generating location tags. + 'generate_location_tags': True, } deps = { # TODO(kjellander): Move this to be Android-only once the libevent dependency # in base/third_party/libevent is solved. 
'src/base': - 'https://chromium.googlesource.com/chromium/src/base@fb9171ae3b21f36981c7808f104f826af15136fb', + 'https://chromium.googlesource.com/chromium/src/base@e1acc6a30942360d4789d6c245cf7933e7e9bbec', 'src/build': - 'https://chromium.googlesource.com/chromium/src/build@4036cf1b17581f5668b487a25e252d56e0321a7f', + 'https://chromium.googlesource.com/chromium/src/build@826926008327af276adbaafcfa92b525eb5bf326', 'src/buildtools': - 'https://chromium.googlesource.com/chromium/src/buildtools@20b1d0fc13ebaa263a1248f08814f523a86e6bed', + 'https://chromium.googlesource.com/chromium/src/buildtools@2500c1d8f3a20a66a7cbafe3f69079a2edb742dd', # Gradle 6.6.1. Used for testing Android Studio project generation for WebRTC. 'src/examples/androidtests/third_party/gradle': { 'url': 'https://chromium.googlesource.com/external/github.com/gradle/gradle.git@f2d1fb54a951d8b11d25748e4711bec8d128d7e3', 'condition': 'checkout_android', }, 'src/ios': { - 'url': 'https://chromium.googlesource.com/chromium/src/ios@254885a5e6d14ce0a02c889bc2765c53746826fd', + 'url': 'https://chromium.googlesource.com/chromium/src/ios@695a3541172406518e45c377048956a3e5270d7c', 'condition': 'checkout_ios', }, 'src/testing': - 'https://chromium.googlesource.com/chromium/src/testing@7bf52eb391229fa358da2062f7cb017363e3e02f', + 'https://chromium.googlesource.com/chromium/src/testing@d749d1b98b475ea15face1c9d2311ed6b8e4b91f', 'src/third_party': - 'https://chromium.googlesource.com/chromium/src/third_party@b29fa1975a1e144dcd92472f855335f68a8897e4', + 'https://chromium.googlesource.com/chromium/src/third_party@c1d40d8b399db4c5ebab5e5022a002dca5b3dbb2', 'src/buildtools/linux64': { 'packages': [ { 'package': 'gn/gn/linux-amd64', - 'version': 'git_revision:39a87c0b36310bdf06b692c098f199a0d97fc810', + 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', } ], 'dep_type': 'cipd', @@ -47,7 +53,7 @@ deps = { 'packages': [ { 'package': 'gn/gn/mac-${{arch}}', - 'version': 'git_revision:39a87c0b36310bdf06b692c098f199a0d97fc810', + 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', } ], 'dep_type': 'cipd', @@ -57,7 +63,7 @@ deps = { 'packages': [ { 'package': 'gn/gn/windows-amd64', - 'version': 'git_revision:39a87c0b36310bdf06b692c098f199a0d97fc810', + 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', } ], 'dep_type': 'cipd', @@ -67,11 +73,11 @@ deps = { 'src/buildtools/clang_format/script': 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@99803d74e35962f63a775f29477882afd4d57d94', 'src/buildtools/third_party/libc++/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@8fa87946779682841e21e2da977eccfb6cb3bded', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'src/buildtools/third_party/libc++abi/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@767de317f97343db64af048e3d198ab8b10fee5d', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@cb34896ebd62f93f708ff9aad26159cf11dde6f4', 'src/buildtools/third_party/libunwind/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@7846d256355e40273f7cc192c8f5893e8665a1f9', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@e7ac0f84fc2f2f8bd2ad151a7348e7120d77648a', 'src/tools/clang/dsymutil': { 'packages': [ @@ -118,22 +124,22 
@@ deps = { }, 'src/third_party/boringssl/src': - 'https://boringssl.googlesource.com/boringssl.git@ddecaabdc8c950d1417ed69785ac17c3400bae4c', + 'https://boringssl.googlesource.com/boringssl.git@a10017c548b0805eb98e7847c37370dbd37cd8d6', 'src/third_party/breakpad/breakpad': - 'https://chromium.googlesource.com/breakpad/breakpad.git@3bea2815bfea6e641d50aad15bde2c494ef8f34b', + 'https://chromium.googlesource.com/breakpad/breakpad.git@b95c4868b10f69e642666742233aede1eb653012', 'src/third_party/catapult': - 'https://chromium.googlesource.com/catapult.git@c1e1d559b46476584ec0eb1d83bd7f43fa5a1b36', + 'https://chromium.googlesource.com/catapult.git@3345f09ed65020a999e108ea37d30b49c87e14ed', 'src/third_party/ced/src': { 'url': 'https://chromium.googlesource.com/external/github.com/google/compact_enc_det.git@ba412eaaacd3186085babcd901679a48863c7dd5', }, 'src/third_party/colorama/src': 'https://chromium.googlesource.com/external/colorama.git@799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', 'src/third_party/crc32c/src': - 'https://chromium.googlesource.com/external/github.com/google/crc32c.git@5998f8451548244de8cde7fab387a550e7c4497d', + 'https://chromium.googlesource.com/external/github.com/google/crc32c.git@fa5ade41ee480003d9c5af6f43567ba22e4e17e6', 'src/third_party/depot_tools': - 'https://chromium.googlesource.com/chromium/tools/depot_tools.git@6b0a611c2c692684f94c0c3629f793feebd16b39', + 'https://chromium.googlesource.com/chromium/tools/depot_tools.git@a806594b95a39141fdbf1f359087a44ffb2deaaf', 'src/third_party/ffmpeg': - 'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@280d5fd0df8b4284ad040bd29deb3241bd6dfc4a', + 'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@05c195662f0527913811827ba253cb93758ea4c0', 'src/third_party/findbugs': { 'url': 'https://chromium.googlesource.com/chromium/deps/findbugs.git@4275d9ac8610db6b1bc9a5e887f97e41b33fac67', 'condition': 'checkout_android', @@ -144,11 +150,11 @@ deps = { 'condition': 'checkout_linux', }, 'src/third_party/freetype/src': - 'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@1bc801b0d6ae70683c0a7952e949d2b3edff76e2', + 'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@d3dc2da9b27af5b90575d62989389cc65fe7977c', 'src/third_party/harfbuzz-ng/src': - 'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@b37f03f16b39d397a626f097858e9ae550234ca0', + 'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@cc9bb294919e846ef8a0731b5e9f304f95ef3bb8', 'src/third_party/google_benchmark/src': { - 'url': 'https://chromium.googlesource.com/external/github.com/google/benchmark.git@ffe1342eb2faa7d2e7c35b4db2ccf99fab81ec20', + 'url': 'https://chromium.googlesource.com/external/github.com/google/benchmark.git@e991355c02b93fe17713efe04cbc2e278e00fdbd', }, # WebRTC-only dependency (not present in Chromium). 
'src/third_party/gtest-parallel': @@ -164,9 +170,9 @@ deps = { 'dep_type': 'cipd', }, 'src/third_party/googletest/src': - 'https://chromium.googlesource.com/external/github.com/google/googletest.git@23ef29555ef4789f555f1ba8c51b4c52975f0907', + 'https://chromium.googlesource.com/external/github.com/google/googletest.git@4ec4cd23f486bf70efcc5d2caa40f24368f752e3', 'src/third_party/icu': { - 'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@f022e298b4f4a782486bb6d5ce6589c998b51fe2', + 'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@b9dfc58bf9b02ea0365509244aca13841322feb0', }, 'src/third_party/jdk': { 'packages': [ @@ -198,19 +204,19 @@ deps = { 'src/third_party/libFuzzer/src': 'https://chromium.googlesource.com/chromium/llvm-project/compiler-rt/lib/fuzzer.git@debe7d2d1982e540fbd6bd78604bf001753f9e74', 'src/third_party/libjpeg_turbo': - 'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@b7bef8c05b7cdb1a038ae271a2c2b6647af4c879', + 'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@e9e400e0af31baf72d235655850bc00e55b6c145', 'src/third_party/libsrtp': - 'https://chromium.googlesource.com/chromium/deps/libsrtp.git@7990ca64c616b150a9cb4714601c4a3b0c84fe91', + 'https://chromium.googlesource.com/chromium/deps/libsrtp.git@5b7c744eb8310250ccc534f3f86a2015b3887a0a', 'src/third_party/libaom/source/libaom': - 'https://aomedia.googlesource.com/aom.git@4d1ace0ad32403cc204005f930ccb516150459e3', + 'https://aomedia.googlesource.com/aom.git@aba245dde334bd51a20940eb009fa46b6ffd4511', 'src/third_party/libunwindstack': { - 'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@aab2c874731396232739889ebe8d9e122b9bc448', + 'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@b34a0059a648f179ef05da2c0927f564bdaea2b3', 'condition': 'checkout_android', }, 'src/third_party/perfetto': - 'https://android.googlesource.com/platform/external/perfetto.git@7d6375fd3e2f91b5880195a9c02de2334a3fa0d4', + 'https://android.googlesource.com/platform/external/perfetto.git@aecbd80f576686b67e29bdfae8c9c03bb9ce1996', 'src/third_party/libvpx/source/libvpx': - 'https://chromium.googlesource.com/webm/libvpx.git@61edec1efbea1c02d71857e2aff9426d9cd2df4e', + 'https://chromium.googlesource.com/webm/libvpx.git@eebc5cd487a89c51ba148f6d6ac45779970f72d7', 'src/third_party/libyuv': 'https://chromium.googlesource.com/libyuv/libyuv.git@49ebc996aa8c4bdf89c1b5ea461eb677234c61cc', 'src/third_party/lss': { @@ -224,7 +230,7 @@ deps = { # Used by boringssl. 'src/third_party/nasm': { - 'url': 'https://chromium.googlesource.com/chromium/deps/nasm.git@19f3fad68da99277b2882939d3b2fa4c4b8d51d9' + 'url': 'https://chromium.googlesource.com/chromium/deps/nasm.git@e9be5fd6d723a435ca2da162f9e0ffcb688747c1' }, 'src/third_party/openh264/src': @@ -233,7 +239,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/r8', - 'version': 'gXyBDv_fM87KnLcxvF5AGV5lwnm-JXIALYH8zrzdoaMC', + 'version': 'Nu_mvQJe34CotIXadFlA3w732CJ9EvQGuVs4udcZedAC', }, ], 'condition': 'checkout_android', @@ -258,14 +264,14 @@ deps = { 'condition': 'checkout_android', }, 'src/third_party/usrsctp/usrsctplib': - 'https://chromium.googlesource.com/external/github.com/sctplab/usrsctp@22ba62ffe79c3881581ab430368bf3764d9533eb', + 'https://chromium.googlesource.com/external/github.com/sctplab/usrsctp@1ade45cbadfd19298d2c47dc538962d4425ad2dd', # Dependency used by libjpeg-turbo. 
'src/third_party/yasm/binaries': { 'url': 'https://chromium.googlesource.com/chromium/deps/yasm/binaries.git@52f9b3f4b0aa06da24ef8b123058bb61ee468881', 'condition': 'checkout_win', }, 'src/tools': - 'https://chromium.googlesource.com/chromium/src/tools@6124b15fd147213b7aa1627cd89e1aa4bbc4ac7a', + 'https://chromium.googlesource.com/chromium/src/tools@1a00526b21d46b8b86f13add37003fd33885f32b', 'src/tools/swarming_client': 'https://chromium.googlesource.com/infra/luci/client-py.git@a32a1607f6093d338f756c7e7c7b4333b0c50c9c', @@ -366,7 +372,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/androidx', - 'version': '5wEAJbMDQJnCxXbN6hMn66IR4akg1G25HQtc_8_7Vz0C', + 'version': '-umIXLPTAdxRy2iaK4QFSeOf4t7PAKglJP7ggvWhfRwC', }, ], 'condition': 'checkout_android', @@ -460,31 +466,26 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/turbine', - 'version': '_iPtB_ThhxlMOt2TsYqVppwriEEn0mp-NUNRwDwYLUAC', + 'version': 'Om6yIEXgJxuqghErK29h9RcMH6VaymMbxwScwXmcN6EC', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/turbine/src': { - 'url': 'https://chromium.googlesource.com/external/github.com/google/turbine.git' + '@' + '3c31e67ae25b5e43713fd868e3a9b535ff6298af', - 'condition': 'checkout_android', - }, - 'src/tools/luci-go': { 'packages': [ { 'package': 'infra/tools/luci/isolate/${{platform}}', - 'version': 'git_revision:22d464e2f8f3bd2bd33f69fe819326d63f881008', + 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', }, { 'package': 'infra/tools/luci/isolated/${{platform}}', - 'version': 'git_revision:22d464e2f8f3bd2bd33f69fe819326d63f881008', + 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', }, { 'package': 'infra/tools/luci/swarming/${{platform}}', - 'version': 'git_revision:22d464e2f8f3bd2bd33f69fe819326d63f881008', + 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', }, ], 'dep_type': 'cipd', @@ -1006,7 +1007,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_github_ben_manes_caffeine_caffeine', - 'version': 'version:2@2.8.0.cr0', + 'version': 'version:2@2.8.8.cr0', }, ], 'condition': 'checkout_android', @@ -1270,7 +1271,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_material_material', - 'version': 'version:2@1.2.0-alpha06.cr0', + 'version': 'version:2@1.4.0-rc01.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_android_play_core': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core', + 'version': 'version:2@1.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1402,7 +1414,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotation', - 'version': 'version:2@2.4.0.cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -1413,7 +1425,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotations', - 'version': 'version:2@2.4.0.cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -1424,7 +1436,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_check_api', - 'version': 'version:2@2.4.0.cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -1435,7 +1447,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_core', - 'version': 'version:2@2.4.0.cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -1446,7 +1458,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_type_annotations', - 'version': 'version:2@2.4.0.cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -1717,6 +1729,17 @@ deps = { 'dep_type': 'cipd', }, + 'src/third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils', + 'version': 'version:2@4.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + 'src/third_party/android_deps/libs/javax_annotation_javax_annotation_api': { 'packages': [ { @@ -1996,7 +2019,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual', - 'version': 'version:2@3.5.0.cr0', + 'version': 'version:2@3.8.0.cr0', }, ], 'condition': 'checkout_android', @@ -2007,7 +2030,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow_shaded', - 'version': 'version:2@3.1.2.cr0', + 'version': 'version:2@3.11.0.cr0', }, ], 'condition': 'checkout_android', @@ -2058,6 +2081,17 @@ deps = { 'dep_type': 'cipd', }, + 'src/third_party/android_deps/libs/org_eclipse_jgit_org_eclipse_jgit': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_eclipse_jgit_org_eclipse_jgit', + 'version': 'version:2@4.4.1.201607150455-r.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + 'src/third_party/android_deps/libs/org_jetbrains_annotations': { 'packages': [ { @@ -2073,7 +2107,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib', - 'version': 'version:2@1.4.32.cr0', + 'version': 'version:2@1.5.10.cr0', }, ], 'condition': 'checkout_android', @@ -2084,7 +2118,29 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common', - 'version': 'version:2@1.4.32.cr0', + 'version': 'version:2@1.5.10.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7', + 'version': 'version:2@1.5.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk8': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk8', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', @@ -2095,7 +2151,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_android', - 'version': 'version:2@1.4.3.cr0', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', @@ -2106,7 +2162,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_core_jvm', - 'version': 'version:2@1.4.3.cr0', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', @@ -2322,17 +2378,6 @@ deps = { 'dep_type': 'cipd', }, - 
'src/third_party/android_deps/libs/org_threeten_threeten_extra': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_threeten_threeten_extra', - 'version': 'version:2@1.5.0.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - # === ANDROID_DEPS Generated Code End === } diff --git a/PRESUBMIT.py b/PRESUBMIT.py index e475f22d77..21875f61af 100755 --- a/PRESUBMIT.py +++ b/PRESUBMIT.py @@ -146,9 +146,9 @@ def VerifyNativeApiHeadersListIsValid(input_api, output_api): if non_existing_paths: return [ output_api.PresubmitError( - 'Directories to native API headers have changed which has made the ' - 'list in PRESUBMIT.py outdated.\nPlease update it to the current ' - 'location of our native APIs.', non_existing_paths) + 'Directories to native API headers have changed which has made ' + 'the list in PRESUBMIT.py outdated.\nPlease update it to the ' + 'current location of our native APIs.', non_existing_paths) ] return [] @@ -212,10 +212,10 @@ def CheckNoIOStreamInHeaders(input_api, output_api, source_file_filter): if len(files): return [ output_api.PresubmitError( - 'Do not #include in header files, since it inserts static ' - + - 'initialization into every file including the header. Instead, ' - + '#include . See http://crbug.com/94794', files) + 'Do not #include in header files, since it inserts ' + 'static initialization into every file including the header. ' + 'Instead, #include . See http://crbug.com/94794', + files) ] return [] @@ -237,15 +237,15 @@ def CheckNoPragmaOnce(input_api, output_api, source_file_filter): return [ output_api.PresubmitError( 'Do not use #pragma once in header files.\n' - 'See http://www.chromium.org/developers/coding-style#TOC-File-headers', + 'See http://www.chromium.org/developers/coding-style' + '#TOC-File-headers', files) ] return [] - -def CheckNoFRIEND_TEST( +def CheckNoFRIEND_TEST(# pylint: disable=invalid-name input_api, - output_api, # pylint: disable=invalid-name + output_api, source_file_filter): """Make sure that gtest's FRIEND_TEST() macro is not used, the FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be @@ -263,9 +263,9 @@ def CheckNoFRIEND_TEST( return [] return [ output_api.PresubmitPromptWarning( - 'WebRTC\'s code should not use ' - 'gtest\'s FRIEND_TEST() macro. Include testsupport/gtest_prod_util.h and ' - 'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems)) + 'WebRTC\'s code should not use gtest\'s FRIEND_TEST() macro. ' + 'Include testsupport/gtest_prod_util.h and use ' + 'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems)) ] @@ -346,9 +346,9 @@ def CheckNoSourcesAbove(input_api, gn_files, output_api): if violating_gn_files: return [ output_api.PresubmitError( - 'Referencing source files above the directory of the GN file is not ' - 'allowed. Please introduce new GN targets in the proper location ' - 'instead.\n' + 'Referencing source files above the directory of the GN file ' + 'is not allowed. Please introduce new GN targets in the proper ' + 'location instead.\n' 'Invalid source entries:\n' '%s\n' 'Violating GN files:' % '\n'.join(violating_source_entries), @@ -407,9 +407,9 @@ def _MoreThanOneSourceUsed(*sources_lists): gn_file_content = input_api.ReadFile(gn_file) for target_match in TARGET_RE.finditer(gn_file_content): # list_of_sources is a list of tuples of the form - # (c_files, cc_files, objc_files) that keeps track of all the sources - # defined in a target. 
A GN target can have more that on definition of - # sources (since it supports if/else statements). + # (c_files, cc_files, objc_files) that keeps track of all the + # sources defined in a target. A GN target can have more that + # on definition of sources (since it supports if/else statements). # E.g.: # rtc_static_library("foo") { # if (is_win) { @@ -454,7 +454,8 @@ def _MoreThanOneSourceUsed(*sources_lists): return [ output_api.PresubmitError( 'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n' - 'Please create a separate target for each collection of sources.\n' + 'Please create a separate target for each collection of ' + 'sources.\n' 'Mixed sources: \n' '%s\n' 'Violating GN files:\n%s\n' % @@ -476,8 +477,8 @@ def CheckNoPackageBoundaryViolations(input_api, gn_files, output_api): if errors: return [ output_api.PresubmitError( - 'There are package boundary violations in the following GN files:', - long_text='\n\n'.join(str(err) for err in errors)) + 'There are package boundary violations in the following GN ' + 'files:', long_text='\n\n'.join(str(err) for err in errors)) ] return [] @@ -491,7 +492,7 @@ def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api, error_formatter=_ReportFileAndLine): - """Make sure that warning suppression flags are not added wihtout a reason.""" + """Ensure warning suppression flags are not added wihtout a reason.""" msg = ('Usage of //build/config/clang:extra_warnings is discouraged ' 'in WebRTC.\n' 'If you are not adding this code (e.g. you are just moving ' @@ -674,7 +675,8 @@ def CheckGnGen(input_api, output_api): if errors: return [ output_api.PresubmitPromptWarning( - 'Some #includes do not match the build dependency graph. Please run:\n' + 'Some #includes do not match the build dependency graph. ' + 'Please run:\n' ' gn gen --check ', long_text='\n\n'.join(errors)) ] @@ -729,18 +731,20 @@ def CheckUnwantedDependencies(input_api, output_api, source_file_filter): if error_descriptions: results.append( output_api.PresubmitError( - 'You added one or more #includes that violate checkdeps rules.\n' - 'Check that the DEPS files in these locations contain valid rules.\n' - 'See https://cs.chromium.org/chromium/src/buildtools/checkdeps/ for ' - 'more details about checkdeps.', error_descriptions)) + 'You added one or more #includes that violate checkdeps rules.' + '\nCheck that the DEPS files in these locations contain valid ' + 'rules.\nSee ' + 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' + 'for more details about checkdeps.', error_descriptions)) if warning_descriptions: results.append( output_api.PresubmitPromptOrNotify( - 'You added one or more #includes of files that are temporarily\n' - 'allowed but being removed. Can you avoid introducing the\n' - '#include? See relevant DEPS file(s) for details and contacts.\n' - 'See https://cs.chromium.org/chromium/src/buildtools/checkdeps/ for ' - 'more details about checkdeps.', warning_descriptions)) + 'You added one or more #includes of files that are temporarily' + '\nallowed but being removed. Can you avoid introducing the\n' + '#include? See relevant DEPS file(s) for details and contacts.' + '\nSee ' + 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' + 'for more details about checkdeps.', warning_descriptions)) return results @@ -787,9 +791,10 @@ def CheckChangeHasBugField(input_api, output_api): else: return [ output_api.PresubmitError( - 'The "Bug: [bug number]" footer is mandatory. 
Please create a bug and ' - 'reference it using either of:\n' - ' * https://bugs.webrtc.org - reference it using Bug: webrtc:XXXX\n' + 'The "Bug: [bug number]" footer is mandatory. Please create a ' + 'bug and reference it using either of:\n' + ' * https://bugs.webrtc.org - reference it using Bug: ' + 'webrtc:XXXX\n' ' * https://crbug.com - reference it using Bug: chromium:XXXXXX' ) ] @@ -911,10 +916,19 @@ def CommonChecks(input_api, output_api): results.extend( input_api.canned_checks.CheckLicense(input_api, output_api, _LicenseHeader(input_api))) + + # TODO(bugs.webrtc.org/12114): Delete this filter and run pylint on + # all python files. This is a temporary solution. + python_file_filter = lambda f: (f.LocalPath().endswith('.py') and + source_file_filter(f)) + python_changed_files = [f.LocalPath() for f in input_api.AffectedFiles( + file_filter=python_file_filter)] + results.extend( input_api.canned_checks.RunPylint( input_api, output_api, + files_to_check=python_changed_files, files_to_skip=( r'^base[\\\/].*\.py$', r'^build[\\\/].*\.py$', @@ -932,12 +946,13 @@ def CommonChecks(input_api, output_api): pylintrc='pylintrc')) # TODO(nisse): talk/ is no more, so make below checks simpler? - # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function since - # we need to have different license checks in talk/ and webrtc/ directories. + # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function + # since we need to have different license checks + # in talk/ and webrtc/directories. # Instead, hand-picked checks are included below. - # .m and .mm files are ObjC files. For simplicity we will consider .h files in - # ObjC subdirectories ObjC headers. + # .m and .mm files are ObjC files. For simplicity we will consider + # .h files in ObjC subdirectories ObjC headers. objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') # Skip long-lines check for DEPS and GN files. 
build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') @@ -1163,9 +1178,9 @@ def CheckAbslMemoryInclude(input_api, output_api, source_file_filter): if len(files): return [ output_api.PresubmitError( - 'Please include "absl/memory/memory.h" header for absl::WrapUnique.\n' - 'This header may or may not be included transitively depending on the ' - 'C++ standard version.', files) + 'Please include "absl/memory/memory.h" header for ' + 'absl::WrapUnique.\nThis header may or may not be included ' + 'transitively depending on the C++ standard version.', files) ] return [] @@ -1343,13 +1358,15 @@ def CheckAddedDepsHaveTargetApprovals(input_api, output_api): if input_api.tbr: return [ output_api.PresubmitNotifyResult( - '--tbr was specified, skipping OWNERS check for DEPS additions' + '--tbr was specified, skipping OWNERS check for DEPS ' + 'additions' ) ] if input_api.dry_run: return [ output_api.PresubmitNotifyResult( - 'This is a dry run, skipping OWNERS check for DEPS additions' + 'This is a dry run, skipping OWNERS check for DEPS ' + 'additions' ) ] if not input_api.change.issue: @@ -1393,8 +1410,8 @@ def StripDeps(path): if unapproved_dependencies: output_list = [ output( - 'You need LGTM from owners of depends-on paths in DEPS that were ' - 'modified in this CL:\n %s' % + 'You need LGTM from owners of depends-on paths in DEPS that ' + ' were modified in this CL:\n %s' % '\n '.join(sorted(unapproved_dependencies))) ] suggested_owners = input_api.owners_client.SuggestOwners( diff --git a/api/BUILD.gn b/api/BUILD.gn index 00542f3076..c775a1a871 100644 --- a/api/BUILD.gn +++ b/api/BUILD.gn @@ -29,7 +29,10 @@ rtc_source_set("call_api") { rtc_source_set("callfactory_api") { visibility = [ "*" ] sources = [ "call/call_factory_interface.h" ] - deps = [ "../rtc_base/system:rtc_export" ] + deps = [ + "../call:rtp_interfaces", + "../rtc_base/system:rtc_export", + ] } if (!build_with_chromium) { @@ -135,14 +138,8 @@ rtc_library("libjingle_peerconnection_api") { "jsep_ice_candidate.cc", "jsep_ice_candidate.h", "jsep_session_description.h", - "media_stream_proxy.h", - "media_stream_track_proxy.h", - "peer_connection_factory_proxy.h", "peer_connection_interface.cc", "peer_connection_interface.h", - "peer_connection_proxy.h", - "proxy.cc", - "proxy.h", "rtp_receiver_interface.cc", "rtp_receiver_interface.h", "rtp_sender_interface.cc", @@ -157,7 +154,7 @@ rtc_library("libjingle_peerconnection_api") { "stats_types.h", "turn_customizer.h", "uma_metrics.h", - "video_track_source_proxy.h", + "video_track_source_proxy_factory.h", ] deps = [ ":array_view", @@ -178,6 +175,7 @@ rtc_library("libjingle_peerconnection_api") { ":rtp_transceiver_direction", ":scoped_refptr", ":sequence_checker", + "../call:rtp_interfaces", "../rtc_base:network_constants", "adaptation:resource_adaptation_api", "audio:audio_mixer_api", @@ -1059,6 +1057,7 @@ if (rtc_include_tests) { ":time_controller", "../call", "../call:call_interfaces", + "../call:rtp_interfaces", "../test/time_controller", ] } @@ -1103,6 +1102,7 @@ if (rtc_include_tests) { "units:time_delta", "units:timestamp", "units:units_unittests", + "video:rtp_video_frame_assembler_unittests", "video:video_unittests", ] } diff --git a/api/DEPS b/api/DEPS index 1d3d43f258..cdd17e9909 100644 --- a/api/DEPS +++ b/api/DEPS @@ -42,6 +42,11 @@ include_rules = [ specific_include_rules = { # Some internal headers are allowed even in API headers: + + "call_factory_interface\.h": [ + "+call/rtp_transport_controller_send_factory_interface.h", + ], + ".*\.h": [ "+rtc_base/checks.h", 
"+rtc_base/system/rtc_export.h", @@ -126,6 +131,7 @@ specific_include_rules = { ], "peer_connection_interface\.h": [ + "+call/rtp_transport_controller_send_factory_interface.h", "+media/base/media_config.h", "+media/base/media_engine.h", "+p2p/base/port.h", @@ -188,7 +194,6 @@ specific_include_rules = { "stats_types\.h": [ "+rtc_base/constructor_magic.h", "+rtc_base/ref_count.h", - "+rtc_base/string_encode.h", "+rtc_base/thread_checker.h", ], diff --git a/api/OWNERS b/api/OWNERS index e18667970b..6ffb2588aa 100644 --- a/api/OWNERS +++ b/api/OWNERS @@ -11,15 +11,4 @@ per-file peer_connection*=hbos@webrtc.org per-file DEPS=mbonadei@webrtc.org -# Please keep this list in sync with Chromium's //base/metrics/OWNERS and -# send a CL when you notice any difference. -# Even if people in the list below cannot formally grant +1 on WebRTC, it -# is good to get their LGTM before sending the CL to one of the folder OWNERS. -per-file uma_metrics.h=asvitkine@chromium.org -per-file uma_metrics.h=bcwhite@chromium.org -per-file uma_metrics.h=caitlinfischer@google.com -per-file uma_metrics.h=holte@chromium.org -per-file uma_metrics.h=isherman@chromium.org -per-file uma_metrics.h=jwd@chromium.org -per-file uma_metrics.h=mpearson@chromium.org -per-file uma_metrics.h=rkaplow@chromium.org +per-file uma_metrics.h=kron@webrtc.org diff --git a/api/audio/echo_canceller3_config.cc b/api/audio/echo_canceller3_config.cc index 5f1923e90f..b38d6b5b7e 100644 --- a/api/audio/echo_canceller3_config.cc +++ b/api/audio/echo_canceller3_config.cc @@ -153,7 +153,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) { res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000); res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f); - res = res & Limit(&c->filter.coarse_reset_hangover_blocks, 0, 2500); + res = res & Limit(&c->filter.coarse_reset_hangover_blocks, 0, 250000); res = res & Limit(&c->erle.min, 1.f, 100000.f); res = res & Limit(&c->erle.max_l, 1.f, 100000.f); @@ -229,6 +229,12 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) { res = res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f); + res = res & Limit(&c->suppressor.last_permanent_lf_smoothing_band, 0, 64); + res = res & Limit(&c->suppressor.last_lf_smoothing_band, 0, 64); + res = res & Limit(&c->suppressor.last_lf_band, 0, 63); + res = res & + Limit(&c->suppressor.first_hf_band, c->suppressor.last_lf_band + 1, 64); + res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold, 0.f, 1000000.f); res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold, diff --git a/api/audio/echo_canceller3_config.h b/api/audio/echo_canceller3_config.h index d4a04cd2ed..087e8da439 100644 --- a/api/audio/echo_canceller3_config.h +++ b/api/audio/echo_canceller3_config.h @@ -194,6 +194,12 @@ struct RTC_EXPORT EchoCanceller3Config { 2.0f, 0.25f); + bool lf_smoothing_during_initial_phase = true; + int last_permanent_lf_smoothing_band = 0; + int last_lf_smoothing_band = 5; + int last_lf_band = 5; + int first_hf_band = 8; + struct DominantNearendDetection { float enr_threshold = .25f; float enr_exit_threshold = 10.f; diff --git a/api/audio/echo_canceller3_config_json.cc b/api/audio/echo_canceller3_config_json.cc index 39713a1fb4..263599c538 100644 --- a/api/audio/echo_canceller3_config_json.cc +++ b/api/audio/echo_canceller3_config_json.cc @@ -11,6 +11,7 @@ #include +#include #include #include @@ -156,9 +157,14 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, 
*parsing_successful = true; Json::Value root; - bool success = Json::Reader().parse(std::string(json_string), root); + Json::CharReaderBuilder builder; + std::string error_message; + std::unique_ptr reader(builder.newCharReader()); + bool success = + reader->parse(json_string.data(), json_string.data() + json_string.size(), + &root, &error_message); if (!success) { - RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << json_string; + RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << error_message; *parsing_successful = false; return; } @@ -341,6 +347,15 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, &cfg.suppressor.nearend_tuning.max_dec_factor_lf); } + ReadParam(section, "lf_smoothing_during_initial_phase", + &cfg.suppressor.lf_smoothing_during_initial_phase); + ReadParam(section, "last_permanent_lf_smoothing_band", + &cfg.suppressor.last_permanent_lf_smoothing_band); + ReadParam(section, "last_lf_smoothing_band", + &cfg.suppressor.last_lf_smoothing_band); + ReadParam(section, "last_lf_band", &cfg.suppressor.last_lf_band); + ReadParam(section, "first_hf_band", &cfg.suppressor.first_hf_band); + if (rtc::GetValueFromJsonObject(section, "dominant_nearend_detection", &subsection)) { ReadParam(subsection, "enr_threshold", @@ -651,6 +666,16 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { ost << "\"max_dec_factor_lf\": " << config.suppressor.nearend_tuning.max_dec_factor_lf; ost << "},"; + ost << "\"lf_smoothing_during_initial_phase\": " + << (config.suppressor.lf_smoothing_during_initial_phase ? "true" + : "false") + << ","; + ost << "\"last_permanent_lf_smoothing_band\": " + << config.suppressor.last_permanent_lf_smoothing_band << ","; + ost << "\"last_lf_smoothing_band\": " + << config.suppressor.last_lf_smoothing_band << ","; + ost << "\"last_lf_band\": " << config.suppressor.last_lf_band << ","; + ost << "\"first_hf_band\": " << config.suppressor.first_hf_band << ","; ost << "\"dominant_nearend_detection\": {"; ost << "\"enr_threshold\": " << config.suppressor.dominant_nearend_detection.enr_threshold << ","; diff --git a/api/audio_codecs/audio_decoder.cc b/api/audio_codecs/audio_decoder.cc index 97cda27a03..4b18b4ab52 100644 --- a/api/audio_codecs/audio_decoder.cc +++ b/api/audio_codecs/audio_decoder.cc @@ -162,7 +162,7 @@ AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) { case 2: return kComfortNoise; default: - assert(false); + RTC_NOTREACHED(); return kSpeech; } } diff --git a/api/candidate.cc b/api/candidate.cc index c857f89c3c..d5fe3a0672 100644 --- a/api/candidate.cc +++ b/api/candidate.cc @@ -12,6 +12,7 @@ #include "rtc_base/helpers.h" #include "rtc_base/ip_address.h" +#include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" namespace cricket { @@ -129,9 +130,21 @@ Candidate Candidate::ToSanitizedCopy(bool use_hostname_address, bool filter_related_address) const { Candidate copy(*this); if (use_hostname_address) { - rtc::SocketAddress hostname_only_addr(address().hostname(), - address().port()); - copy.set_address(hostname_only_addr); + rtc::IPAddress ip; + if (address().hostname().empty()) { + // IP needs to be redacted, but no hostname available. + rtc::SocketAddress redacted_addr("redacted-ip.invalid", address().port()); + copy.set_address(redacted_addr); + } else if (IPFromString(address().hostname(), &ip)) { + // The hostname is an IP literal, and needs to be redacted too. 
+ rtc::SocketAddress redacted_addr("redacted-literal.invalid", + address().port()); + copy.set_address(redacted_addr); + } else { + rtc::SocketAddress hostname_only_addr(address().hostname(), + address().port()); + copy.set_address(hostname_only_addr); + } } if (filter_related_address) { copy.set_related_address( diff --git a/api/g3doc/threading_design.md b/api/g3doc/threading_design.md index 868c433abc..20c3539b22 100644 --- a/api/g3doc/threading_design.md +++ b/api/g3doc/threading_design.md @@ -37,7 +37,7 @@ and sequenced task queues. At the moment, the API does not give any guarantee on which thread* the callbacks and events are called on. So it's best to write all callback and event handlers like this (pseudocode): -
+```
 void ObserverClass::Handler(event) {
   if (!called_on_client_thread()) {
     dispatch_to_client_thread(bind(handler(event)));
@@ -45,7 +45,7 @@ void ObserverClass::Handler(event) {
   }
   // Process event, we're now on the right thread
 }
-
+``` In the future, the implementation may change to always call the callbacks and event handlers on the client thread. diff --git a/api/neteq/neteq.h b/api/neteq/neteq.h index 9781377ca8..ea7079e369 100644 --- a/api/neteq/neteq.h +++ b/api/neteq/neteq.h @@ -214,11 +214,15 @@ class NetEq { // |data_| in |audio_frame| is not written, but should be interpreted as being // all zeros. For testing purposes, an override can be supplied in the // |action_override| argument, which will cause NetEq to take this action - // next, instead of the action it would normally choose. + // next, instead of the action it would normally choose. An optional output + // argument for fetching the current sample rate can be provided, which + // will return the same value as last_output_sample_rate_hz() but will avoid + // additional synchronization. // Returns kOK on success, or kFail in case of an error. virtual int GetAudio( AudioFrame* audio_frame, bool* muted, + int* current_sample_rate_hz = nullptr, absl::optional action_override = absl::nullopt) = 0; // Replaces the current set of decoders with the given one. diff --git a/api/peer_connection_interface.h b/api/peer_connection_interface.h index 892e84e192..5499b7d87c 100644 --- a/api/peer_connection_interface.h +++ b/api/peer_connection_interface.h @@ -118,6 +118,7 @@ #include "api/transport/webrtc_key_value_config.h" #include "api/turn_customizer.h" #include "api/video/video_bitrate_allocator_factory.h" +#include "call/rtp_transport_controller_send_factory_interface.h" #include "media/base/media_config.h" #include "media/base/media_engine.h" // TODO(bugs.webrtc.org/7447): We plan to provide a way to let applications @@ -918,9 +919,24 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // Also, calling CreateDataChannel is the only way to get a data "m=" section // in SDP, so it should be done before CreateOffer is called, if the // application plans to use data channels. + virtual RTCErrorOr> + CreateDataChannelOrError(const std::string& label, + const DataChannelInit* config) { + return RTCError(RTCErrorType::INTERNAL_ERROR, "dummy function called"); + } + // TODO(crbug.com/788659): Remove "virtual" below and default implementation + // above once mock in Chrome is fixed. + ABSL_DEPRECATED("Use CreateDataChannelOrError") virtual rtc::scoped_refptr CreateDataChannel( const std::string& label, - const DataChannelInit* config) = 0; + const DataChannelInit* config) { + auto result = CreateDataChannelOrError(label, config); + if (!result.ok()) { + return nullptr; + } else { + return result.MoveValue(); + } + } // NOTE: For the following 6 methods, it's only safe to dereference the // SessionDescriptionInterface on signaling_thread() (for example, calling @@ -1090,13 +1106,11 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // playout of the underlying audio device but starts a task which will poll // for audio data every 10ms to ensure that audio processing happens and the // audio statistics are updated. - // TODO(henrika): deprecate and remove this. virtual void SetAudioPlayout(bool playout) {} // Enable/disable recording of transmitted audio streams. Enabled by default. // Note that even if recording is enabled, streams will only be recorded if // the appropriate SDP is also applied. - // TODO(henrika): deprecate and remove this. virtual void SetAudioRecording(bool recording) {} // Looks up the DtlsTransport associated with a MID value. 
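[Editor note, illustrative only; not part of the CL above] The peer_connection_interface.h hunk above introduces `CreateDataChannelOrError()` and deprecates the nullptr-returning `CreateDataChannel()`, which now forwards to the new method. Below is a minimal caller-side sketch of the migration; `CreateChatChannel` and the "chat" label are hypothetical, and the snippet assumes an already-constructed peer connection.

```cpp
#include "api/peer_connection_interface.h"
#include "rtc_base/logging.h"

// Hypothetical helper showing the new error-returning data channel API.
rtc::scoped_refptr<webrtc::DataChannelInterface> CreateChatChannel(
    webrtc::PeerConnectionInterface* peer_connection) {
  webrtc::DataChannelInit config;
  config.ordered = true;
  auto result = peer_connection->CreateDataChannelOrError("chat", &config);
  if (!result.ok()) {
    // RTCErrorOr carries the failure reason; the deprecated API could only
    // signal failure with a bare nullptr.
    RTC_LOG(LS_ERROR) << "CreateDataChannel failed: "
                      << result.error().message();
    return nullptr;
  }
  return result.MoveValue();
}
```

Callers that keep using the deprecated `CreateDataChannel()` get equivalent behavior via the default implementation in the hunk above, which forwards to `CreateDataChannelOrError()` and discards the error detail.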
@@ -1385,6 +1399,8 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final { std::unique_ptr neteq_factory; std::unique_ptr sctp_factory; std::unique_ptr trials; + std::unique_ptr + transport_controller_send_factory; }; // PeerConnectionFactoryInterface is the factory interface used for creating diff --git a/api/rtc_event_log/rtc_event.cc b/api/rtc_event_log/rtc_event.cc index 81e6a4e6da..631188b915 100644 --- a/api/rtc_event_log/rtc_event.cc +++ b/api/rtc_event_log/rtc_event.cc @@ -14,6 +14,6 @@ namespace webrtc { -RtcEvent::RtcEvent() : timestamp_us_(rtc::TimeMicros()) {} +RtcEvent::RtcEvent() : timestamp_us_(rtc::TimeMillis() * 1000) {} } // namespace webrtc diff --git a/api/rtp_parameters.cc b/api/rtp_parameters.cc index 8a18f8983f..5ce6780753 100644 --- a/api/rtp_parameters.cc +++ b/api/rtp_parameters.cc @@ -170,63 +170,121 @@ bool RtpExtension::IsSupportedForVideo(absl::string_view uri) { } bool RtpExtension::IsEncryptionSupported(absl::string_view uri) { - return uri == webrtc::RtpExtension::kAudioLevelUri || - uri == webrtc::RtpExtension::kTimestampOffsetUri || -#if !defined(ENABLE_EXTERNAL_AUTH) - // TODO(jbauch): Figure out a way to always allow "kAbsSendTimeUri" - // here and filter out later if external auth is really used in - // srtpfilter. External auth is used by Chromium and replaces the - // extension header value of "kAbsSendTimeUri", so it must not be - // encrypted (which can't be done by Chromium). - uri == webrtc::RtpExtension::kAbsSendTimeUri || + return +#if defined(ENABLE_EXTERNAL_AUTH) + // TODO(jbauch): Figure out a way to always allow "kAbsSendTimeUri" + // here and filter out later if external auth is really used in + // srtpfilter. External auth is used by Chromium and replaces the + // extension header value of "kAbsSendTimeUri", so it must not be + // encrypted (which can't be done by Chromium). + uri != webrtc::RtpExtension::kAbsSendTimeUri && #endif - uri == webrtc::RtpExtension::kAbsoluteCaptureTimeUri || - uri == webrtc::RtpExtension::kVideoRotationUri || - uri == webrtc::RtpExtension::kTransportSequenceNumberUri || - uri == webrtc::RtpExtension::kTransportSequenceNumberV2Uri || - uri == webrtc::RtpExtension::kPlayoutDelayUri || - uri == webrtc::RtpExtension::kVideoContentTypeUri || - uri == webrtc::RtpExtension::kMidUri || - uri == webrtc::RtpExtension::kRidUri || - uri == webrtc::RtpExtension::kRepairedRidUri || - uri == webrtc::RtpExtension::kVideoLayersAllocationUri; + uri != webrtc::RtpExtension::kEncryptHeaderExtensionsUri; } -const RtpExtension* RtpExtension::FindHeaderExtensionByUri( +// Returns whether a header extension with the given URI exists. +// Note: This does not differentiate between encrypted and non-encrypted +// extensions, so use with care! +static bool HeaderExtensionWithUriExists( const std::vector& extensions, absl::string_view uri) { for (const auto& extension : extensions) { if (extension.uri == uri) { + return true; + } + } + return false; +} + +const RtpExtension* RtpExtension::FindHeaderExtensionByUri( + const std::vector& extensions, + absl::string_view uri, + Filter filter) { + const webrtc::RtpExtension* fallback_extension = nullptr; + for (const auto& extension : extensions) { + if (extension.uri != uri) { + continue; + } + + switch (filter) { + case kDiscardEncryptedExtension: + // We only accept an unencrypted extension. + if (!extension.encrypt) { + return &extension; + } + break; + + case kPreferEncryptedExtension: + // We prefer an encrypted extension but we can fall back to an + // unencrypted extension. 
+ if (extension.encrypt) { + return &extension; + } else { + fallback_extension = &extension; + } + break; + + case kRequireEncryptedExtension: + // We only accept an encrypted extension. + if (extension.encrypt) { + return &extension; + } + break; + } + } + + // Returning fallback extension (if any) + return fallback_extension; +} + +const RtpExtension* RtpExtension::FindHeaderExtensionByUri( + const std::vector& extensions, + absl::string_view uri) { + return FindHeaderExtensionByUri(extensions, uri, kPreferEncryptedExtension); +} + +const RtpExtension* RtpExtension::FindHeaderExtensionByUriAndEncryption( + const std::vector& extensions, + absl::string_view uri, + bool encrypt) { + for (const auto& extension : extensions) { + if (extension.uri == uri && extension.encrypt == encrypt) { return &extension; } } return nullptr; } -std::vector RtpExtension::FilterDuplicateNonEncrypted( - const std::vector& extensions) { +const std::vector RtpExtension::DeduplicateHeaderExtensions( + const std::vector& extensions, + Filter filter) { std::vector filtered; - for (auto extension = extensions.begin(); extension != extensions.end(); - ++extension) { - if (extension->encrypt) { - filtered.push_back(*extension); - continue; - } - // Only add non-encrypted extension if no encrypted with the same URI - // is also present... - if (std::any_of(extension + 1, extensions.end(), - [&](const RtpExtension& check) { - return extension->uri == check.uri; - })) { - continue; + // If we do not discard encrypted extensions, add them first + if (filter != kDiscardEncryptedExtension) { + for (const auto& extension : extensions) { + if (!extension.encrypt) { + continue; + } + if (!HeaderExtensionWithUriExists(filtered, extension.uri)) { + filtered.push_back(extension); + } } + } - // ...and has not been added before. - if (!FindHeaderExtensionByUri(filtered, extension->uri)) { - filtered.push_back(*extension); + // If we do not require encrypted extensions, add missing, non-encrypted + // extensions. + if (filter != kRequireEncryptedExtension) { + for (const auto& extension : extensions) { + if (extension.encrypt) { + continue; + } + if (!HeaderExtensionWithUriExists(filtered, extension.uri)) { + filtered.push_back(extension); + } } } + return filtered; } } // namespace webrtc diff --git a/api/rtp_parameters.h b/api/rtp_parameters.h index 7fe9f2bc83..a098bad6b0 100644 --- a/api/rtp_parameters.h +++ b/api/rtp_parameters.h @@ -246,6 +246,18 @@ struct RTC_EXPORT RtpHeaderExtensionCapability { // RTP header extension, see RFC8285. struct RTC_EXPORT RtpExtension { + enum Filter { + // Encrypted extensions will be ignored and only non-encrypted extensions + // will be considered. + kDiscardEncryptedExtension, + // Encrypted extensions will be preferred but will fall back to + // non-encrypted extensions if necessary. + kPreferEncryptedExtension, + // Encrypted extensions will be required, so any non-encrypted extensions + // will be discarded. + kRequireEncryptedExtension, + }; + RtpExtension(); RtpExtension(absl::string_view uri, int id); RtpExtension(absl::string_view uri, int id, bool encrypt); @@ -260,17 +272,28 @@ struct RTC_EXPORT RtpExtension { // Return "true" if the given RTP header extension URI may be encrypted. static bool IsEncryptionSupported(absl::string_view uri); - // Returns the named header extension if found among all extensions, - // nullptr otherwise. + // Returns the header extension with the given URI or nullptr if not found. 
+ static const RtpExtension* FindHeaderExtensionByUri( + const std::vector& extensions, + absl::string_view uri, + Filter filter); + ABSL_DEPRECATED( + "Use RtpExtension::FindHeaderExtensionByUri with filter argument") static const RtpExtension* FindHeaderExtensionByUri( const std::vector& extensions, absl::string_view uri); - // Return a list of RTP header extensions with the non-encrypted extensions - // removed if both the encrypted and non-encrypted extension is present for - // the same URI. - static std::vector FilterDuplicateNonEncrypted( - const std::vector& extensions); + // Returns the header extension with the given URI and encrypt parameter, + // if found, otherwise nullptr. + static const RtpExtension* FindHeaderExtensionByUriAndEncryption( + const std::vector& extensions, + absl::string_view uri, + bool encrypt); + + // Returns a list of extensions where any extension URI is unique. + static const std::vector DeduplicateHeaderExtensions( + const std::vector& extensions, + Filter filter); // Encryption of Header Extensions, see RFC 6904 for details: // https://tools.ietf.org/html/rfc6904 @@ -357,6 +380,11 @@ struct RTC_EXPORT RtpExtension { static constexpr char kVideoFrameTrackingIdUri[] = "http://www.webrtc.org/experiments/rtp-hdrext/video-frame-tracking-id"; + // Header extension for Mixer-to-Client audio levels per CSRC as defined in + // https://tools.ietf.org/html/rfc6465 + static constexpr char kCsrcAudioLevelsUri[] = + "urn:ietf:params:rtp-hdrext:csrc-audio-level"; + // Inclusive min and max IDs for two-byte header extensions and one-byte // header extensions, per RFC8285 Section 4.2-4.3. static constexpr int kMinId = 1; diff --git a/api/rtp_parameters_unittest.cc b/api/rtp_parameters_unittest.cc index 5928cbda63..51ad426748 100644 --- a/api/rtp_parameters_unittest.cc +++ b/api/rtp_parameters_unittest.cc @@ -23,28 +23,249 @@ static const RtpExtension kExtension1(kExtensionUri1, 1); static const RtpExtension kExtension1Encrypted(kExtensionUri1, 10, true); static const RtpExtension kExtension2(kExtensionUri2, 2); -TEST(RtpExtensionTest, FilterDuplicateNonEncrypted) { +TEST(RtpExtensionTest, DeduplicateHeaderExtensions) { std::vector extensions; std::vector filtered; + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1}, filtered); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); + + extensions.clear(); extensions.push_back(kExtension1); extensions.push_back(kExtension1Encrypted); - filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1}, filtered); + + 
extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); EXPECT_EQ(1u, filtered.size()); EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); extensions.clear(); extensions.push_back(kExtension1Encrypted); extensions.push_back(kExtension1); - filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); EXPECT_EQ(1u, filtered.size()); EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); extensions.clear(); extensions.push_back(kExtension1); extensions.push_back(kExtension2); - filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(2u, filtered.size()); + EXPECT_EQ(extensions, filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); EXPECT_EQ(2u, filtered.size()); EXPECT_EQ(extensions, filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + EXPECT_EQ(0u, filtered.size()); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + extensions.push_back(kExtension1Encrypted); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(2u, filtered.size()); + EXPECT_EQ((std::vector{kExtension1, kExtension2}), filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); + EXPECT_EQ(2u, filtered.size()); + EXPECT_EQ((std::vector{kExtension1Encrypted, kExtension2}), + filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ((std::vector{kExtension1Encrypted}), filtered); +} + +TEST(RtpExtensionTest, FindHeaderExtensionByUriAndEncryption) { + std::vector extensions; + + extensions.clear(); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, false)); + + extensions.clear(); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, false)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, true)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri2, false)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, false)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri2, false)); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, true)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri2, true)); +} + +TEST(RtpExtensionTest, FindHeaderExtensionByUri) { + std::vector extensions; + + extensions.clear(); + 
EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + EXPECT_EQ(kExtension2, 
*RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kRequireEncryptedExtension)); } } // namespace webrtc diff --git a/api/rtp_receiver_interface.h b/api/rtp_receiver_interface.h index d2645eda8c..327c9f2fee 100644 --- a/api/rtp_receiver_interface.h +++ b/api/rtp_receiver_interface.h @@ -22,7 +22,6 @@ #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" -#include "api/proxy.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" #include "api/transport/rtp/rtp_source.h" @@ -119,36 +118,6 @@ class RTC_EXPORT RtpReceiverInterface : public rtc::RefCountInterface { ~RtpReceiverInterface() override = default; }; -// Define proxy for RtpReceiverInterface. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. -BEGIN_PROXY_MAP(RtpReceiver) -PROXY_PRIMARY_THREAD_DESTRUCTOR() -BYPASS_PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) -PROXY_CONSTMETHOD0(std::vector, stream_ids) -PROXY_CONSTMETHOD0(std::vector>, - streams) -BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) -BYPASS_PROXY_CONSTMETHOD0(std::string, id) -PROXY_SECONDARY_CONSTMETHOD0(RtpParameters, GetParameters) -PROXY_METHOD1(void, SetObserver, RtpReceiverObserverInterface*) -PROXY_SECONDARY_METHOD1(void, - SetJitterBufferMinimumDelay, - absl::optional) -PROXY_SECONDARY_CONSTMETHOD0(std::vector, GetSources) -// TODO(bugs.webrtc.org/12772): Remove. -PROXY_SECONDARY_METHOD1(void, - SetFrameDecryptor, - rtc::scoped_refptr) -// TODO(bugs.webrtc.org/12772): Remove. 
-PROXY_SECONDARY_CONSTMETHOD0(rtc::scoped_refptr, - GetFrameDecryptor) -PROXY_SECONDARY_METHOD1(void, - SetDepacketizerToDecoderFrameTransformer, - rtc::scoped_refptr) -END_PROXY_MAP() - } // namespace webrtc #endif // API_RTP_RECEIVER_INTERFACE_H_ diff --git a/api/rtp_sender_interface.h b/api/rtp_sender_interface.h index dd93792a07..9ffad68644 100644 --- a/api/rtp_sender_interface.h +++ b/api/rtp_sender_interface.h @@ -23,7 +23,6 @@ #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" -#include "api/proxy.h" #include "api/rtc_error.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" @@ -101,33 +100,6 @@ class RTC_EXPORT RtpSenderInterface : public rtc::RefCountInterface { ~RtpSenderInterface() override = default; }; -// Define proxy for RtpSenderInterface. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. -BEGIN_PRIMARY_PROXY_MAP(RtpSender) -PROXY_PRIMARY_THREAD_DESTRUCTOR() -PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) -PROXY_CONSTMETHOD0(uint32_t, ssrc) -BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) -BYPASS_PROXY_CONSTMETHOD0(std::string, id) -PROXY_CONSTMETHOD0(std::vector, stream_ids) -PROXY_CONSTMETHOD0(std::vector, init_send_encodings) -PROXY_CONSTMETHOD0(RtpParameters, GetParameters) -PROXY_METHOD1(RTCError, SetParameters, const RtpParameters&) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, GetDtmfSender) -PROXY_METHOD1(void, - SetFrameEncryptor, - rtc::scoped_refptr) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, - GetFrameEncryptor) -PROXY_METHOD1(void, SetStreams, const std::vector&) -PROXY_METHOD1(void, - SetEncoderToPacketizerFrameTransformer, - rtc::scoped_refptr) -END_PROXY_MAP() - } // namespace webrtc #endif // API_RTP_SENDER_INTERFACE_H_ diff --git a/api/stats/rtc_stats.h b/api/stats/rtc_stats.h index 5de5b7fbb0..9290e803fa 100644 --- a/api/stats/rtc_stats.h +++ b/api/stats/rtc_stats.h @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -237,6 +238,9 @@ class RTCStatsMemberInterface { kSequenceUint64, // std::vector kSequenceDouble, // std::vector kSequenceString, // std::vector + + kMapStringUint64, // std::map + kMapStringDouble, // std::map }; virtual ~RTCStatsMemberInterface() {} @@ -363,6 +367,13 @@ class RTCStatsMember : public RTCStatsMemberInterface { T value_; }; +namespace rtc_stats_internal { + +typedef std::map MapStringUint64; +typedef std::map MapStringDouble; + +} // namespace rtc_stats_internal + #define WEBRTC_DECLARE_RTCSTATSMEMBER(T) \ template <> \ RTC_EXPORT RTCStatsMemberInterface::Type RTCStatsMember::StaticType(); \ @@ -391,6 +402,8 @@ WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); +WEBRTC_DECLARE_RTCSTATSMEMBER(rtc_stats_internal::MapStringUint64); +WEBRTC_DECLARE_RTCSTATSMEMBER(rtc_stats_internal::MapStringDouble); // Using inheritance just so that it's obvious from the member's declaration // whether it's standardized or not. 
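For illustration (outside the patch): with the two map-valued member types added above, a consumer that walks stats members generically can branch on the new Type values. A minimal sketch, assuming the existing type(), is_defined(), cast_to<>() and ValueToString() helpers on RTCStatsMemberInterface:

#include <map>
#include <string>

#include "api/stats/rtc_stats.h"
#include "rtc_base/logging.h"

void LogStatsMember(const webrtc::RTCStatsMemberInterface& member) {
  using webrtc::RTCStatsMember;
  using webrtc::RTCStatsMemberInterface;
  if (!member.is_defined()) {
    return;  // Undefined members carry no value to print.
  }
  if (member.type() == RTCStatsMemberInterface::kMapStringDouble) {
    // Pull out the typed map and log each entry on its own line.
    const std::map<std::string, double>& map =
        *member.cast_to<RTCStatsMember<std::map<std::string, double>>>();
    for (const auto& entry : map) {
      RTC_LOG(LS_INFO) << entry.first << ": " << entry.second;
    }
  } else {
    RTC_LOG(LS_INFO) << member.ValueToString();
  }
}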
@@ -455,6 +468,10 @@ extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) RTCNonStandardStatsMember>; extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) RTCNonStandardStatsMember>; +extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) + RTCNonStandardStatsMember>; +extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) + RTCNonStandardStatsMember>; } // namespace webrtc diff --git a/api/stats/rtcstats_objects.h b/api/stats/rtcstats_objects.h index 60ff8c29fe..2030380918 100644 --- a/api/stats/rtcstats_objects.h +++ b/api/stats/rtcstats_objects.h @@ -13,6 +13,7 @@ #include +#include #include #include #include @@ -501,8 +502,6 @@ class RTC_EXPORT RTCInboundRTPStreamStats final // FIR and PLI counts are only defined for |media_type == "video"|. RTCStatsMember fir_count; RTCStatsMember pli_count; - // TODO(hbos): NACK count should be collected by |RTCStatsCollector| for both - // audio and video but is only defined in the "video" case. crbug.com/657856 RTCStatsMember nack_count; RTCStatsMember qp_sum; }; @@ -542,10 +541,8 @@ class RTC_EXPORT RTCOutboundRTPStreamStats final : public RTCRTPStreamStats { // implement it for audio as well. RTCStatsMember total_packet_send_delay; // Enum type RTCQualityLimitationReason - // TODO(https://crbug.com/webrtc/10686): Also expose - // qualityLimitationDurations. Requires RTCStatsMember support for - // "record", see https://crbug.com/webrtc/10685. RTCStatsMember quality_limitation_reason; + RTCStatsMember> quality_limitation_durations; // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges RTCStatsMember quality_limitation_resolution_changes; // https://henbos.github.io/webrtc-provisional-stats/#dom-rtcoutboundrtpstreamstats-contenttype @@ -556,8 +553,6 @@ class RTC_EXPORT RTCOutboundRTPStreamStats final : public RTCRTPStreamStats { // FIR and PLI counts are only defined for |media_type == "video"|. RTCStatsMember fir_count; RTCStatsMember pli_count; - // TODO(hbos): NACK count should be collected by |RTCStatsCollector| for both - // audio and video but is only defined in the "video" case. crbug.com/657856 RTCStatsMember nack_count; RTCStatsMember qp_sum; }; @@ -630,6 +625,8 @@ class RTC_EXPORT RTCAudioSourceStats final : public RTCMediaSourceStats { RTCStatsMember audio_level; RTCStatsMember total_audio_energy; RTCStatsMember total_samples_duration; + RTCStatsMember echo_return_loss; + RTCStatsMember echo_return_loss_enhancement; }; // https://w3c.github.io/webrtc-stats/#dom-rtcvideosourcestats diff --git a/api/stats_types.cc b/api/stats_types.cc index 7dcbd134a1..6fdc7e85a5 100644 --- a/api/stats_types.cc +++ b/api/stats_types.cc @@ -15,6 +15,7 @@ #include "absl/algorithm/container.h" #include "rtc_base/checks.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/string_encode.h" // TODO(tommi): Could we have a static map of value name -> expected type // and use this to RTC_DCHECK on correct usage (somewhat strongly typed values)? 
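For illustration (outside the patch): the quality_limitation_durations member added to RTCOutboundRTPStreamStats above is the first user of the map-valued members. A sketch of how a stats collector might fill it in; the id, timestamp and durations are invented, and the constructor is assumed to follow the usual RTCStats (id, timestamp_us) pattern:

#include <map>
#include <string>

#include "api/stats/rtcstats_objects.h"

webrtc::RTCOutboundRTPStreamStats MakeExampleOutboundStats() {
  // Example id and timestamp; real values come from the stats collector.
  webrtc::RTCOutboundRTPStreamStats stats("RTCOutboundRTPVideoStream_1",
                                          /*timestamp_us=*/1234567890);
  std::map<std::string, double> durations;
  durations["none"] = 12.0;      // Seconds spent unconstrained.
  durations["cpu"] = 3.5;        // Seconds limited by encoder CPU.
  durations["bandwidth"] = 0.5;  // Seconds limited by available bandwidth.
  stats.quality_limitation_durations = durations;
  return stats;
}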
diff --git a/api/stats_types.h b/api/stats_types.h index f910b4a164..d032462da6 100644 --- a/api/stats_types.h +++ b/api/stats_types.h @@ -24,7 +24,6 @@ #include "api/sequence_checker.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ref_count.h" -#include "rtc_base/string_encode.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { diff --git a/api/test/DEPS b/api/test/DEPS index 2784fcbcd4..329076830c 100644 --- a/api/test/DEPS +++ b/api/test/DEPS @@ -8,9 +8,6 @@ specific_include_rules = { "dummy_peer_connection\.h": [ "+rtc_base/ref_counted_object.h", ], - "fake_constraints\.h": [ - "+rtc_base/string_encode.h", - ], "neteq_factory_with_codecs\.h": [ "+system_wrappers/include/clock.h", ], diff --git a/api/test/create_time_controller.cc b/api/test/create_time_controller.cc index a2c0cb713f..f7faeaab42 100644 --- a/api/test/create_time_controller.cc +++ b/api/test/create_time_controller.cc @@ -13,6 +13,8 @@ #include #include "call/call.h" +#include "call/rtp_transport_config.h" +#include "call/rtp_transport_controller_send_factory_interface.h" #include "test/time_controller/external_time_controller.h" #include "test/time_controller/simulated_time_controller.h" @@ -40,8 +42,13 @@ std::unique_ptr CreateTimeControllerBasedCallFactory( time_controller_->CreateProcessThread("CallModules"), [this]() { module_thread_ = nullptr; }); } + + RtpTransportConfig transportConfig = config.ExtractTransportConfig(); + return Call::Create(config, time_controller_->GetClock(), module_thread_, - time_controller_->CreateProcessThread("Pacer")); + config.rtp_transport_controller_send_factory->Create( + transportConfig, time_controller_->GetClock(), + time_controller_->CreateProcessThread("Pacer"))); } private: diff --git a/api/test/dummy_peer_connection.h b/api/test/dummy_peer_connection.h index 4d17aeddd0..80ae20c3c7 100644 --- a/api/test/dummy_peer_connection.h +++ b/api/test/dummy_peer_connection.h @@ -114,10 +114,10 @@ class DummyPeerConnection : public PeerConnectionInterface { } void ClearStatsCache() override {} - rtc::scoped_refptr CreateDataChannel( + RTCErrorOr> CreateDataChannelOrError( const std::string& label, const DataChannelInit* config) override { - return nullptr; + return RTCError(RTCErrorType::INTERNAL_ERROR, "Dummy function called"); } const SessionDescriptionInterface* local_description() const override { diff --git a/api/test/mock_peerconnectioninterface.h b/api/test/mock_peerconnectioninterface.h index be34df0b32..b5d94238c8 100644 --- a/api/test/mock_peerconnectioninterface.h +++ b/api/test/mock_peerconnectioninterface.h @@ -100,8 +100,8 @@ class MockPeerConnectionInterface GetSctpTransport, (), (const override)); - MOCK_METHOD(rtc::scoped_refptr, - CreateDataChannel, + MOCK_METHOD(RTCErrorOr>, + CreateDataChannelOrError, (const std::string&, const DataChannelInit*), (override)); MOCK_METHOD(const SessionDescriptionInterface*, diff --git a/api/test/videocodec_test_stats.cc b/api/test/videocodec_test_stats.cc index b2f88a4661..b973dc2d12 100644 --- a/api/test/videocodec_test_stats.cc +++ b/api/test/videocodec_test_stats.cc @@ -24,71 +24,91 @@ VideoCodecTestStats::FrameStatistics::FrameStatistics(size_t frame_number, std::string VideoCodecTestStats::FrameStatistics::ToString() const { rtc::StringBuilder ss; - ss << "frame_number " << frame_number; - ss << " decoded_width " << decoded_width; - ss << " decoded_height " << decoded_height; - ss << " spatial_idx " << spatial_idx; - ss << " temporal_idx " << temporal_idx; - ss << " inter_layer_predicted " << 
inter_layer_predicted; - ss << " non_ref_for_inter_layer_pred " << non_ref_for_inter_layer_pred; - ss << " frame_type " << static_cast(frame_type); - ss << " length_bytes " << length_bytes; - ss << " qp " << qp; - ss << " psnr " << psnr; - ss << " psnr_y " << psnr_y; - ss << " psnr_u " << psnr_u; - ss << " psnr_v " << psnr_v; - ss << " ssim " << ssim; - ss << " encode_time_us " << encode_time_us; - ss << " decode_time_us " << decode_time_us; - ss << " rtp_timestamp " << rtp_timestamp; - ss << " target_bitrate_kbps " << target_bitrate_kbps; - ss << " target_framerate_fps " << target_framerate_fps; + for (const auto& entry : ToMap()) { + if (ss.size() > 0) { + ss << " "; + } + ss << entry.first << " " << entry.second; + } return ss.Release(); } +std::map VideoCodecTestStats::FrameStatistics::ToMap() + const { + std::map map; + map["frame_number"] = std::to_string(frame_number); + map["decoded_width"] = std::to_string(decoded_width); + map["decoded_height"] = std::to_string(decoded_height); + map["spatial_idx"] = std::to_string(spatial_idx); + map["temporal_idx"] = std::to_string(temporal_idx); + map["inter_layer_predicted"] = std::to_string(inter_layer_predicted); + map["non_ref_for_inter_layer_pred"] = + std::to_string(non_ref_for_inter_layer_pred); + map["frame_type"] = std::to_string(static_cast(frame_type)); + map["length_bytes"] = std::to_string(length_bytes); + map["qp"] = std::to_string(qp); + map["psnr"] = std::to_string(psnr); + map["psnr_y"] = std::to_string(psnr_y); + map["psnr_u"] = std::to_string(psnr_u); + map["psnr_v"] = std::to_string(psnr_v); + map["ssim"] = std::to_string(ssim); + map["encode_time_us"] = std::to_string(encode_time_us); + map["decode_time_us"] = std::to_string(decode_time_us); + map["rtp_timestamp"] = std::to_string(rtp_timestamp); + map["target_bitrate_kbps"] = std::to_string(target_bitrate_kbps); + map["target_framerate_fps"] = std::to_string(target_framerate_fps); + return map; +} + std::string VideoCodecTestStats::VideoStatistics::ToString( std::string prefix) const { rtc::StringBuilder ss; - ss << prefix << "target_bitrate_kbps: " << target_bitrate_kbps; - ss << "\n" << prefix << "input_framerate_fps: " << input_framerate_fps; - ss << "\n" << prefix << "spatial_idx: " << spatial_idx; - ss << "\n" << prefix << "temporal_idx: " << temporal_idx; - ss << "\n" << prefix << "width: " << width; - ss << "\n" << prefix << "height: " << height; - ss << "\n" << prefix << "length_bytes: " << length_bytes; - ss << "\n" << prefix << "bitrate_kbps: " << bitrate_kbps; - ss << "\n" << prefix << "framerate_fps: " << framerate_fps; - ss << "\n" << prefix << "enc_speed_fps: " << enc_speed_fps; - ss << "\n" << prefix << "dec_speed_fps: " << dec_speed_fps; - ss << "\n" << prefix << "avg_delay_sec: " << avg_delay_sec; - ss << "\n" - << prefix << "max_key_frame_delay_sec: " << max_key_frame_delay_sec; - ss << "\n" - << prefix << "max_delta_frame_delay_sec: " << max_delta_frame_delay_sec; - ss << "\n" - << prefix << "time_to_reach_target_bitrate_sec: " - << time_to_reach_target_bitrate_sec; - ss << "\n" - << prefix << "avg_key_frame_size_bytes: " << avg_key_frame_size_bytes; - ss << "\n" - << prefix << "avg_delta_frame_size_bytes: " << avg_delta_frame_size_bytes; - ss << "\n" << prefix << "avg_qp: " << avg_qp; - ss << "\n" << prefix << "avg_psnr: " << avg_psnr; - ss << "\n" << prefix << "min_psnr: " << min_psnr; - ss << "\n" << prefix << "avg_ssim: " << avg_ssim; - ss << "\n" << prefix << "min_ssim: " << min_ssim; - ss << "\n" << prefix << "num_input_frames: " << 
num_input_frames; - ss << "\n" << prefix << "num_encoded_frames: " << num_encoded_frames; - ss << "\n" << prefix << "num_decoded_frames: " << num_decoded_frames; - ss << "\n" - << prefix - << "num_dropped_frames: " << num_input_frames - num_encoded_frames; - ss << "\n" << prefix << "num_key_frames: " << num_key_frames; - ss << "\n" << prefix << "num_spatial_resizes: " << num_spatial_resizes; - ss << "\n" << prefix << "max_nalu_size_bytes: " << max_nalu_size_bytes; + for (const auto& entry : ToMap()) { + if (ss.size() > 0) { + ss << "\n"; + } + ss << prefix << entry.first << ": " << entry.second; + } return ss.Release(); } +std::map VideoCodecTestStats::VideoStatistics::ToMap() + const { + std::map map; + map["target_bitrate_kbps"] = std::to_string(target_bitrate_kbps); + map["input_framerate_fps"] = std::to_string(input_framerate_fps); + map["spatial_idx"] = std::to_string(spatial_idx); + map["temporal_idx"] = std::to_string(temporal_idx); + map["width"] = std::to_string(width); + map["height"] = std::to_string(height); + map["length_bytes"] = std::to_string(length_bytes); + map["bitrate_kbps"] = std::to_string(bitrate_kbps); + map["framerate_fps"] = std::to_string(framerate_fps); + map["enc_speed_fps"] = std::to_string(enc_speed_fps); + map["dec_speed_fps"] = std::to_string(dec_speed_fps); + map["avg_delay_sec"] = std::to_string(avg_delay_sec); + map["max_key_frame_delay_sec"] = std::to_string(max_key_frame_delay_sec); + map["max_delta_frame_delay_sec"] = std::to_string(max_delta_frame_delay_sec); + map["time_to_reach_target_bitrate_sec"] = + std::to_string(time_to_reach_target_bitrate_sec); + map["avg_key_frame_size_bytes"] = std::to_string(avg_key_frame_size_bytes); + map["avg_delta_frame_size_bytes"] = + std::to_string(avg_delta_frame_size_bytes); + map["avg_qp"] = std::to_string(avg_qp); + map["avg_psnr"] = std::to_string(avg_psnr); + map["min_psnr"] = std::to_string(min_psnr); + map["avg_ssim"] = std::to_string(avg_ssim); + map["min_ssim"] = std::to_string(min_ssim); + map["num_input_frames"] = std::to_string(num_input_frames); + map["num_encoded_frames"] = std::to_string(num_encoded_frames); + map["num_decoded_frames"] = std::to_string(num_decoded_frames); + map["num_dropped_frames"] = + std::to_string(num_input_frames - num_encoded_frames); + map["num_key_frames"] = std::to_string(num_key_frames); + map["num_spatial_resizes"] = std::to_string(num_spatial_resizes); + map["max_nalu_size_bytes"] = std::to_string(max_nalu_size_bytes); + return map; +} + } // namespace test } // namespace webrtc diff --git a/api/test/videocodec_test_stats.h b/api/test/videocodec_test_stats.h index df1aed73aa..02a18a71d9 100644 --- a/api/test/videocodec_test_stats.h +++ b/api/test/videocodec_test_stats.h @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -33,6 +34,9 @@ class VideoCodecTestStats { std::string ToString() const; + // Returns name -> value text map of frame statistics. + std::map ToMap() const; + size_t frame_number = 0; size_t rtp_timestamp = 0; @@ -78,6 +82,9 @@ class VideoCodecTestStats { struct VideoStatistics { std::string ToString(std::string prefix) const; + // Returns name -> value text map of video statistics. 
+ std::map ToMap() const; + size_t target_bitrate_kbps = 0; float input_framerate_fps = 0.0f; diff --git a/api/transport/data_channel_transport_interface.h b/api/transport/data_channel_transport_interface.h index 550fabaacd..2b2f5d2e6d 100644 --- a/api/transport/data_channel_transport_interface.h +++ b/api/transport/data_channel_transport_interface.h @@ -88,7 +88,7 @@ class DataChannelSink { // Callback issued when the data channel becomes unusable (closed). // TODO(https://crbug.com/webrtc/10360): Make pure virtual when all // consumers updated. - virtual void OnTransportClosed() {} + virtual void OnTransportClosed(RTCError error) {} }; // Transport for data channels. diff --git a/api/transport/network_types.cc b/api/transport/network_types.cc index 88b67b3a47..7451940151 100644 --- a/api/transport/network_types.cc +++ b/api/transport/network_types.cc @@ -48,7 +48,7 @@ std::vector TransportPacketsFeedback::ReceivedWithSendInfo() const { std::vector res; for (const PacketResult& fb : packet_feedbacks) { - if (fb.receive_time.IsFinite()) { + if (fb.IsReceived()) { res.push_back(fb); } } @@ -58,7 +58,7 @@ std::vector TransportPacketsFeedback::ReceivedWithSendInfo() std::vector TransportPacketsFeedback::LostWithSendInfo() const { std::vector res; for (const PacketResult& fb : packet_feedbacks) { - if (fb.receive_time.IsPlusInfinity()) { + if (!fb.IsReceived()) { res.push_back(fb); } } @@ -74,7 +74,7 @@ std::vector TransportPacketsFeedback::SortedByReceiveTime() const { std::vector res; for (const PacketResult& fb : packet_feedbacks) { - if (fb.receive_time.IsFinite()) { + if (fb.IsReceived()) { res.push_back(fb); } } diff --git a/api/transport/network_types.h b/api/transport/network_types.h index 38a8917f1e..4e96b0f12e 100644 --- a/api/transport/network_types.h +++ b/api/transport/network_types.h @@ -158,6 +158,8 @@ struct PacketResult { PacketResult(const PacketResult&); ~PacketResult(); + inline bool IsReceived() const { return !receive_time.IsPlusInfinity(); } + SentPacket sent_packet; Timestamp receive_time = Timestamp::PlusInfinity(); }; diff --git a/api/uma_metrics.h b/api/uma_metrics.h index 3e0deb0093..a975b82aeb 100644 --- a/api/uma_metrics.h +++ b/api/uma_metrics.h @@ -202,6 +202,17 @@ enum BundlePolicyUsage { kBundlePolicyUsageMax }; +// Metrics for provisional answers as described in +// https://datatracker.ietf.org/doc/html/rfc8829#section-4.1.10.1 +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. +enum ProvisionalAnswerUsage { + kProvisionalAnswerNotUsed = 0, + kProvisionalAnswerLocal = 1, + kProvisionalAnswerRemote = 2, + kProvisionalAnswerMax +}; + // When adding new metrics please consider using the style described in // https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md#usage // instead of the legacy enums used above. 
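For illustration (outside the patch): the new PacketResult::IsReceived() helper replaces the direct comparisons of receive_time against plus-infinity at the call sites changed above. A caller-side sketch; the loss-rate function itself is hypothetical:

#include "api/transport/network_types.h"

// Fraction of packets in a feedback report that were never received.
double LossRate(const webrtc::TransportPacketsFeedback& feedback) {
  if (feedback.packet_feedbacks.empty())
    return 0.0;
  int lost = 0;
  for (const webrtc::PacketResult& result : feedback.packet_feedbacks) {
    if (!result.IsReceived())
      ++lost;
  }
  return static_cast<double>(lost) / feedback.packet_feedbacks.size();
}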
diff --git a/api/video/BUILD.gn b/api/video/BUILD.gn index 1a832486a9..ec90bc137e 100644 --- a/api/video/BUILD.gn +++ b/api/video/BUILD.gn @@ -143,6 +143,41 @@ rtc_library("encoded_frame") { deps = [ "../../modules/video_coding:encoded_frame" ] } +rtc_library("rtp_video_frame_assembler") { + visibility = [ "*" ] + sources = [ + "rtp_video_frame_assembler.cc", + "rtp_video_frame_assembler.h", + ] + + deps = [ + ":encoded_frame", + "../../modules/rtp_rtcp:rtp_rtcp", + "../../modules/rtp_rtcp:rtp_rtcp_format", + "../../modules/video_coding:video_coding", + "../../rtc_base:logging", + ] + + absl_deps = [ + "//third_party/abseil-cpp/absl/container:inlined_vector", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("rtp_video_frame_assembler_unittests") { + testonly = true + sources = [ "rtp_video_frame_assembler_unittests.cc" ] + + deps = [ + ":rtp_video_frame_assembler", + "..:array_view", + "../../modules/rtp_rtcp:rtp_packetizer_av1_test_helper", + "../../modules/rtp_rtcp:rtp_rtcp", + "../../modules/rtp_rtcp:rtp_rtcp_format", + "../../test:test_support", + ] +} + rtc_source_set("video_codec_constants") { visibility = [ "*" ] sources = [ "video_codec_constants.h" ] diff --git a/api/video/DEPS b/api/video/DEPS index 1cb8ad83cb..cf6770dce0 100644 --- a/api/video/DEPS +++ b/api/video/DEPS @@ -40,4 +40,8 @@ specific_include_rules = { "video_stream_encoder_create.cc": [ "+video/video_stream_encoder.h", ], + + "rtp_video_frame_assembler.h": [ + "+modules/rtp_rtcp/source/rtp_packet_received.h", + ], } diff --git a/api/video/nv12_buffer.cc b/api/video/nv12_buffer.cc index 974620ba27..37d688b88b 100644 --- a/api/video/nv12_buffer.cc +++ b/api/video/nv12_buffer.cc @@ -144,11 +144,10 @@ void NV12Buffer::CropAndScaleFrom(const NV12BufferInterface& src, const uint8_t* uv_plane = src.DataUV() + src.StrideUV() * uv_offset_y + uv_offset_x * 2; - // kFilterBox is unsupported in libyuv, so using kFilterBilinear instead. int res = libyuv::NV12Scale(y_plane, src.StrideY(), uv_plane, src.StrideUV(), crop_width, crop_height, MutableDataY(), StrideY(), MutableDataUV(), StrideUV(), width(), - height(), libyuv::kFilterBilinear); + height(), libyuv::kFilterBox); RTC_DCHECK_EQ(res, 0); } diff --git a/api/video/rtp_video_frame_assembler.cc b/api/video/rtp_video_frame_assembler.cc new file mode 100644 index 0000000000..8f3d04c30b --- /dev/null +++ b/api/video/rtp_video_frame_assembler.cc @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "api/video/rtp_video_frame_assembler.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/container/inlined_vector.h" +#include "absl/types/optional.h" +#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h" +#include "modules/video_coding/frame_object.h" +#include "modules/video_coding/packet_buffer.h" +#include "modules/video_coding/rtp_frame_reference_finder.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { +std::unique_ptr CreateDepacketizer( + RtpVideoFrameAssembler::PayloadFormat payload_format) { + switch (payload_format) { + case RtpVideoFrameAssembler::kRaw: + return std::make_unique(); + case RtpVideoFrameAssembler::kH264: + return std::make_unique(); + case RtpVideoFrameAssembler::kVp8: + return std::make_unique(); + case RtpVideoFrameAssembler::kVp9: + return std::make_unique(); + case RtpVideoFrameAssembler::kAv1: + return std::make_unique(); + case RtpVideoFrameAssembler::kGeneric: + return std::make_unique(); + } + RTC_NOTREACHED(); + return nullptr; +} +} // namespace + +class RtpVideoFrameAssembler::Impl { + public: + explicit Impl(std::unique_ptr depacketizer); + ~Impl() = default; + + FrameVector InsertPacket(const RtpPacketReceived& packet); + + private: + using RtpFrameVector = + absl::InlinedVector, 3>; + + RtpFrameVector AssembleFrames( + video_coding::PacketBuffer::InsertResult insert_result); + FrameVector FindReferences(RtpFrameVector frames); + FrameVector UpdateWithPadding(uint16_t seq_num); + bool ParseDependenciesDescriptorExtension(const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header); + bool ParseGenericDescriptorExtension(const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header); + void ClearOldData(uint16_t incoming_seq_num); + + std::unique_ptr video_structure_; + SeqNumUnwrapper frame_id_unwrapper_; + absl::optional video_structure_frame_id_; + std::unique_ptr depacketizer_; + video_coding::PacketBuffer packet_buffer_; + RtpFrameReferenceFinder reference_finder_; +}; + +RtpVideoFrameAssembler::Impl::Impl( + std::unique_ptr depacketizer) + : depacketizer_(std::move(depacketizer)), + packet_buffer_(/*start_buffer_size=*/2048, /*max_buffer_size=*/2048) {} + +RtpVideoFrameAssembler::FrameVector RtpVideoFrameAssembler::Impl::InsertPacket( + const RtpPacketReceived& rtp_packet) { + absl::optional parsed_payload = + depacketizer_->Parse(rtp_packet.PayloadBuffer()); + + if (parsed_payload == absl::nullopt) { + return {}; + } + + if (parsed_payload->video_payload.size() == 0) { + ClearOldData(rtp_packet.SequenceNumber()); + return UpdateWithPadding(rtp_packet.SequenceNumber()); + } + + if (rtp_packet.HasExtension()) { + if (!ParseDependenciesDescriptorExtension(rtp_packet, + parsed_payload->video_header)) { + return {}; + } + } else if (rtp_packet.HasExtension()) { + if (!ParseGenericDescriptorExtension(rtp_packet, + parsed_payload->video_header)) { + return {}; + } + } + + parsed_payload->video_header.is_last_packet_in_frame |= 
rtp_packet.Marker(); + + auto packet = std::make_unique( + rtp_packet, parsed_payload->video_header); + packet->video_payload = std::move(parsed_payload->video_payload); + + ClearOldData(rtp_packet.SequenceNumber()); + return FindReferences( + AssembleFrames(packet_buffer_.InsertPacket(std::move(packet)))); +} + +void RtpVideoFrameAssembler::Impl::ClearOldData(uint16_t incoming_seq_num) { + constexpr uint16_t kOldSeqNumThreshold = 2000; + uint16_t old_seq_num = incoming_seq_num - kOldSeqNumThreshold; + packet_buffer_.ClearTo(old_seq_num); + reference_finder_.ClearTo(old_seq_num); +} + +RtpVideoFrameAssembler::Impl::RtpFrameVector +RtpVideoFrameAssembler::Impl::AssembleFrames( + video_coding::PacketBuffer::InsertResult insert_result) { + video_coding::PacketBuffer::Packet* first_packet = nullptr; + std::vector> payloads; + RtpFrameVector result; + + for (auto& packet : insert_result.packets) { + if (packet->is_first_packet_in_frame()) { + first_packet = packet.get(); + payloads.clear(); + } + payloads.emplace_back(packet->video_payload); + + if (packet->is_last_packet_in_frame()) { + rtc::scoped_refptr bitstream = + depacketizer_->AssembleFrame(payloads); + + if (!bitstream) { + continue; + } + + const video_coding::PacketBuffer::Packet& last_packet = *packet; + result.push_back(std::make_unique( + first_packet->seq_num, // + last_packet.seq_num, // + last_packet.marker_bit, // + /*times_nacked=*/0, // + /*first_packet_received_time=*/0, // + /*last_packet_received_time=*/0, // + first_packet->timestamp, // + /*ntp_time_ms=*/0, // + /*timing=*/VideoSendTiming(), // + first_packet->payload_type, // + first_packet->codec(), // + last_packet.video_header.rotation, // + last_packet.video_header.content_type, // + first_packet->video_header, // + last_packet.video_header.color_space, // + /*packet_infos=*/RtpPacketInfos(), // + std::move(bitstream))); + } + } + + return result; +} + +RtpVideoFrameAssembler::FrameVector +RtpVideoFrameAssembler::Impl::FindReferences(RtpFrameVector frames) { + FrameVector res; + for (auto& frame : frames) { + auto complete_frames = reference_finder_.ManageFrame(std::move(frame)); + for (std::unique_ptr& complete_frame : complete_frames) { + res.push_back(std::move(complete_frame)); + } + } + return res; +} + +RtpVideoFrameAssembler::FrameVector +RtpVideoFrameAssembler::Impl::UpdateWithPadding(uint16_t seq_num) { + auto res = + FindReferences(AssembleFrames(packet_buffer_.InsertPadding(seq_num))); + auto ref_finder_update = reference_finder_.PaddingReceived(seq_num); + + res.insert(res.end(), std::make_move_iterator(ref_finder_update.begin()), + std::make_move_iterator(ref_finder_update.end())); + + return res; +} + +bool RtpVideoFrameAssembler::Impl::ParseDependenciesDescriptorExtension( + const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header) { + webrtc::DependencyDescriptor dependency_descriptor; + + if (!rtp_packet.GetExtension( + video_structure_.get(), &dependency_descriptor)) { + // Descriptor is either malformed, or the template referenced is not in + // the `video_structure_` currently being held. + // TODO(bugs.webrtc.org/10342): Improve packet reordering behavior. 
+ RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc() + << " Failed to parse dependency descriptor."; + return false; + } + + if (dependency_descriptor.attached_structure != nullptr && + !dependency_descriptor.first_packet_in_frame) { + RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc() + << "Invalid dependency descriptor: structure " + "attached to non first packet of a frame."; + return false; + } + + video_header.is_first_packet_in_frame = + dependency_descriptor.first_packet_in_frame; + video_header.is_last_packet_in_frame = + dependency_descriptor.last_packet_in_frame; + + int64_t frame_id = + frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number); + auto& generic_descriptor_info = video_header.generic.emplace(); + generic_descriptor_info.frame_id = frame_id; + generic_descriptor_info.spatial_index = + dependency_descriptor.frame_dependencies.spatial_id; + generic_descriptor_info.temporal_index = + dependency_descriptor.frame_dependencies.temporal_id; + + for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) { + generic_descriptor_info.dependencies.push_back(frame_id - fdiff); + } + for (int cdiff : dependency_descriptor.frame_dependencies.chain_diffs) { + generic_descriptor_info.chain_diffs.push_back(frame_id - cdiff); + } + generic_descriptor_info.decode_target_indications = + dependency_descriptor.frame_dependencies.decode_target_indications; + if (dependency_descriptor.resolution) { + video_header.width = dependency_descriptor.resolution->Width(); + video_header.height = dependency_descriptor.resolution->Height(); + } + if (dependency_descriptor.active_decode_targets_bitmask.has_value()) { + generic_descriptor_info.active_decode_targets = + *dependency_descriptor.active_decode_targets_bitmask; + } + + // FrameDependencyStructure is sent in the dependency descriptor of the first + // packet of a key frame and is required to parse all subsequent packets until + // the next key frame. + if (dependency_descriptor.attached_structure) { + RTC_DCHECK(dependency_descriptor.first_packet_in_frame); + if (video_structure_frame_id_ > frame_id) { + RTC_LOG(LS_WARNING) + << "Arrived key frame with id " << frame_id << " and structure id " + << dependency_descriptor.attached_structure->structure_id + << " is older than the latest received key frame with id " + << *video_structure_frame_id_ << " and structure id " + << video_structure_->structure_id; + return false; + } + video_structure_ = std::move(dependency_descriptor.attached_structure); + video_structure_frame_id_ = frame_id; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + } else { + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + } + return true; +} + +bool RtpVideoFrameAssembler::Impl::ParseGenericDescriptorExtension( + const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header) { + RtpGenericFrameDescriptor generic_frame_descriptor; + if (!rtp_packet.GetExtension( + &generic_frame_descriptor)) { + return false; + } + + video_header.is_first_packet_in_frame = + generic_frame_descriptor.FirstPacketInSubFrame(); + video_header.is_last_packet_in_frame = + generic_frame_descriptor.LastPacketInSubFrame(); + + if (generic_frame_descriptor.FirstPacketInSubFrame()) { + video_header.frame_type = + generic_frame_descriptor.FrameDependenciesDiffs().empty() + ? 
VideoFrameType::kVideoFrameKey + : VideoFrameType::kVideoFrameDelta; + + auto& generic_descriptor_info = video_header.generic.emplace(); + int64_t frame_id = + frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId()); + generic_descriptor_info.frame_id = frame_id; + generic_descriptor_info.spatial_index = + generic_frame_descriptor.SpatialLayer(); + generic_descriptor_info.temporal_index = + generic_frame_descriptor.TemporalLayer(); + for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) { + generic_descriptor_info.dependencies.push_back(frame_id - fdiff); + } + } + video_header.width = generic_frame_descriptor.Width(); + video_header.height = generic_frame_descriptor.Height(); + return true; +} + +RtpVideoFrameAssembler::RtpVideoFrameAssembler(PayloadFormat payload_format) + : impl_(std::make_unique(CreateDepacketizer(payload_format))) {} + +RtpVideoFrameAssembler::~RtpVideoFrameAssembler() = default; + +RtpVideoFrameAssembler::FrameVector RtpVideoFrameAssembler::InsertPacket( + const RtpPacketReceived& packet) { + return impl_->InsertPacket(packet); +} + +} // namespace webrtc diff --git a/api/video/rtp_video_frame_assembler.h b/api/video/rtp_video_frame_assembler.h new file mode 100644 index 0000000000..353942bdc8 --- /dev/null +++ b/api/video/rtp_video_frame_assembler.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_RTP_VIDEO_FRAME_ASSEMBLER_H_ +#define API_VIDEO_RTP_VIDEO_FRAME_ASSEMBLER_H_ + +#include +#include + +#include "absl/container/inlined_vector.h" +#include "api/video/encoded_frame.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" + +namespace webrtc { +// The RtpVideoFrameAssembler takes RtpPacketReceived and assembles them into +// complete frames. A frame is considered complete when all packets of the frame +// has been received, the bitstream data has successfully extracted, an ID has +// been assigned, and all dependencies are known. Frame IDs are strictly +// monotonic in decode order, dependencies are expressed as frame IDs. +class RtpVideoFrameAssembler { + public: + // FrameVector is just a vector-like type of std::unique_ptr. + // The vector type may change without notice. + using FrameVector = absl::InlinedVector, 3>; + enum PayloadFormat { kRaw, kH264, kVp8, kVp9, kAv1, kGeneric }; + + explicit RtpVideoFrameAssembler(PayloadFormat payload_format); + RtpVideoFrameAssembler(const RtpVideoFrameAssembler& other) = delete; + RtpVideoFrameAssembler& operator=(const RtpVideoFrameAssembler& other) = + delete; + ~RtpVideoFrameAssembler(); + + // Typically when a packet is inserted zero or one frame is completed. In the + // case of RTP packets being inserted out of order then sometime multiple + // frames could be completed from a single packet, hence the 'FrameVector' + // return type. 
+ FrameVector InsertPacket(const RtpPacketReceived& packet); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace webrtc + +#endif // API_VIDEO_RTP_VIDEO_FRAME_ASSEMBLER_H_ diff --git a/api/video/rtp_video_frame_assembler_unittests.cc b/api/video/rtp_video_frame_assembler_unittests.cc new file mode 100644 index 0000000000..916a83cd73 --- /dev/null +++ b/api/video/rtp_video_frame_assembler_unittests.cc @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "api/array_view.h" +#include "api/video/rtp_video_frame_assembler.h" +#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_format.h" +#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::ElementsAreArray; +using ::testing::Eq; +using ::testing::IsEmpty; +using ::testing::Matches; +using ::testing::SizeIs; +using ::testing::UnorderedElementsAre; +using ::testing::UnorderedElementsAreArray; +using PayloadFormat = RtpVideoFrameAssembler::PayloadFormat; + +class PacketBuilder { + public: + explicit PacketBuilder(PayloadFormat format) + : format_(format), packet_to_send_(&extension_manager_) {} + + PacketBuilder& WithSeqNum(uint16_t seq_num) { + seq_num_ = seq_num; + return *this; + } + + PacketBuilder& WithPayload(rtc::ArrayView payload) { + payload_.assign(payload.begin(), payload.end()); + return *this; + } + + PacketBuilder& WithVideoHeader(const RTPVideoHeader& video_header) { + video_header_ = video_header; + return *this; + } + + template + PacketBuilder& WithExtension(int id, const Args&... 
args) { + extension_manager_.Register(id); + packet_to_send_.IdentifyExtensions(extension_manager_); + packet_to_send_.SetExtension(std::forward(args)...); + return *this; + } + + RtpPacketReceived Build() { + auto packetizer = + RtpPacketizer::Create(GetVideoCodecType(), payload_, {}, video_header_); + packetizer->NextPacket(&packet_to_send_); + packet_to_send_.SetSequenceNumber(seq_num_); + + RtpPacketReceived received(&extension_manager_); + received.Parse(packet_to_send_.Buffer()); + return received; + } + + private: + absl::optional GetVideoCodecType() { + switch (format_) { + case PayloadFormat::kRaw: { + return absl::nullopt; + } + case PayloadFormat::kH264: { + return kVideoCodecH264; + } + case PayloadFormat::kVp8: { + return kVideoCodecVP8; + } + case PayloadFormat::kVp9: { + return kVideoCodecVP9; + } + case PayloadFormat::kAv1: { + return kVideoCodecAV1; + } + case PayloadFormat::kGeneric: { + return kVideoCodecGeneric; + } + } + RTC_NOTREACHED(); + return absl::nullopt; + } + + const RtpVideoFrameAssembler::PayloadFormat format_; + uint16_t seq_num_ = 0; + std::vector payload_; + RTPVideoHeader video_header_; + RtpPacketReceived::ExtensionManager extension_manager_; + RtpPacketToSend packet_to_send_; +}; + +void AppendFrames(RtpVideoFrameAssembler::FrameVector from, + RtpVideoFrameAssembler::FrameVector& to) { + to.insert(to.end(), std::make_move_iterator(from.begin()), + std::make_move_iterator(from.end())); +} + +rtc::ArrayView References(const std::unique_ptr& frame) { + return rtc::MakeArrayView(frame->references, frame->num_references); +} + +rtc::ArrayView Payload(const std::unique_ptr& frame) { + return rtc::ArrayView(*frame->GetEncodedData()); +} + +TEST(RtpVideoFrameAssembler, Vp8Packetization) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kVp8); + + // When sending VP8 over RTP parts of the payload is actually inspected at the + // RTP level. It just so happen that the initial 'V' sets the keyframe bit + // (0x01) to the correct value. 
+ uint8_t kKeyframePayload[] = "Vp8Keyframe"; + ASSERT_EQ(kKeyframePayload[0] & 0x01, 0); + + uint8_t kDeltaframePayload[] = "SomeFrame"; + ASSERT_EQ(kDeltaframePayload[0] & 0x01, 1); + + RtpVideoFrameAssembler::FrameVector frames; + + RTPVideoHeader video_header; + auto& vp8_header = + video_header.video_type_header.emplace(); + + vp8_header.pictureId = 10; + vp8_header.tl0PicIdx = 0; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp8) + .WithPayload(kKeyframePayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + vp8_header.pictureId = 11; + vp8_header.tl0PicIdx = 1; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp8) + .WithPayload(kDeltaframePayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(10)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kKeyframePayload)); + + EXPECT_THAT(frames[1]->Id(), Eq(11)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kDeltaframePayload)); +} + +TEST(RtpVideoFrameAssembler, Vp9Packetization) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kVp9); + RtpVideoFrameAssembler::FrameVector frames; + + uint8_t kPayload[] = "SomePayload"; + + RTPVideoHeader video_header; + auto& vp9_header = + video_header.video_type_header.emplace(); + vp9_header.InitRTPVideoHeaderVP9(); + + vp9_header.picture_id = 10; + vp9_header.tl0_pic_idx = 0; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp9) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + vp9_header.picture_id = 11; + vp9_header.tl0_pic_idx = 1; + vp9_header.inter_pic_predicted = true; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp9) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(10)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(11)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); +} + +TEST(RtpVideoFrameAssembler, Av1Packetization) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kAv1); + RtpVideoFrameAssembler::FrameVector frames; + + auto kKeyframePayload = + BuildAv1Frame({Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3}), + Av1Obu(kAv1ObuTypeFrame).WithPayload({4, 5, 6})}); + + auto kDeltaframePayload = + BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame).WithPayload({7, 8, 9})}); + + RTPVideoHeader video_header; + + video_header.frame_type = VideoFrameType::kVideoFrameKey; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kAv1) + .WithPayload(kKeyframePayload) + .WithVideoHeader(video_header) + .WithSeqNum(20) + .Build()), + frames); + + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kAv1) + .WithPayload(kDeltaframePayload) + .WithSeqNum(21) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(20)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kKeyframePayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(21)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kDeltaframePayload)); + EXPECT_THAT(References(frames[1]), 
UnorderedElementsAre(20)); +} + +TEST(RtpVideoFrameAssembler, RawPacketizationDependencyDescriptorExtension) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kRaw); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + FrameDependencyStructure dependency_structure; + dependency_structure.num_decode_targets = 1; + dependency_structure.num_chains = 1; + dependency_structure.decode_target_protected_by_chain.push_back(0); + dependency_structure.templates.push_back( + FrameDependencyTemplate().S(0).T(0).Dtis("S").ChainDiffs({0})); + dependency_structure.templates.push_back( + FrameDependencyTemplate().S(0).T(0).Dtis("S").ChainDiffs({10}).FrameDiffs( + {10})); + + DependencyDescriptor dependency_descriptor; + + dependency_descriptor.frame_number = 10; + dependency_descriptor.frame_dependencies = dependency_structure.templates[0]; + dependency_descriptor.attached_structure = + std::make_unique(dependency_structure); + AppendFrames(assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension( + 1, dependency_structure, dependency_descriptor) + .Build()), + frames); + + dependency_descriptor.frame_number = 20; + dependency_descriptor.frame_dependencies = dependency_structure.templates[1]; + dependency_descriptor.attached_structure.reset(); + AppendFrames(assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension( + 1, dependency_structure, dependency_descriptor) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(10)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(20)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); +} + +TEST(RtpVideoFrameAssembler, RawPacketizationGenericDescriptor00Extension) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kRaw); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + RtpGenericFrameDescriptor generic; + + generic.SetFirstPacketInSubFrame(true); + generic.SetLastPacketInSubFrame(true); + generic.SetFrameId(100); + AppendFrames( + assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension(1, generic) + .Build()), + frames); + + generic.SetFrameId(102); + generic.AddFrameDependencyDiff(2); + AppendFrames( + assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension(1, generic) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(100)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(102)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(100)); +} + +TEST(RtpVideoFrameAssembler, RawPacketizationGenericPayloadDescriptor) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + RTPVideoHeader video_header; + + video_header.frame_type = VideoFrameType::kVideoFrameKey; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(123) + .Build()), + frames); + + video_header.frame_type = 
VideoFrameType::kVideoFrameDelta; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(124) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(123)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(124)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(123)); +} + +TEST(RtpVideoFrameAssembler, Padding) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + RTPVideoHeader video_header; + + video_header.frame_type = VideoFrameType::kVideoFrameKey; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(123) + .Build()), + frames); + + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(125) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(1)); + + EXPECT_THAT(frames[0]->Id(), Eq(123)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + // Padding packets have no bitstream data. An easy way to generate one is to + // build a normal packet and then simply remove the bitstream portion of the + // payload. + RtpPacketReceived padding_packet = PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(124) + .Build(); + // The payload descriptor is one byte, keep it. + padding_packet.SetPayloadSize(1); + + AppendFrames(assembler.InsertPacket(padding_packet), frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[1]->Id(), Eq(125)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(123)); +} + +TEST(RtpVideoFrameAssembler, ClearOldPackets) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + + // If we don't have a payload the packet will be counted as a padding packet. 
+ uint8_t kPayload[] = "DontCare"; + + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(1)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(2000) + .Build()), + SizeIs(1)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(0)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(1) + .Build()), + SizeIs(1)); +} + +TEST(RtpVideoFrameAssembler, ClearOldPacketsWithPadding) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + uint8_t kPayload[] = "DontCare"; + + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(1)); + + // Padding packets have no bitstream data. An easy way to generate one is to + // build a normal packet and then simply remove the bitstream portion of the + // payload. + RtpPacketReceived padding_packet = PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(2000) + .Build(); + // The payload descriptor is one byte, keep it. + padding_packet.SetPayloadSize(1); + EXPECT_THAT(assembler.InsertPacket(padding_packet), SizeIs(0)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(0)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(1) + .Build()), + SizeIs(1)); +} + +} // namespace +} // namespace webrtc diff --git a/api/video/video_timing.h b/api/video/video_timing.h index fbd92254a0..80320daa83 100644 --- a/api/video/video_timing.h +++ b/api/video/video_timing.h @@ -41,7 +41,7 @@ struct VideoSendTiming { uint16_t pacer_exit_delta_ms; uint16_t network_timestamp_delta_ms; uint16_t network2_timestamp_delta_ms; - uint8_t flags; + uint8_t flags = TimingFrameFlags::kInvalid; }; // Used to report precise timings of a 'timing frames'. 
Contains all important diff --git a/api/video_codecs/video_encoder.cc b/api/video_codecs/video_encoder.cc index 486200bc82..a7e9d7487c 100644 --- a/api/video_codecs/video_encoder.cc +++ b/api/video_codecs/video_encoder.cc @@ -135,8 +135,17 @@ std::string VideoEncoder::EncoderInfo::ToString() const { << ", is_hardware_accelerated = " << is_hardware_accelerated << ", has_internal_source = " << has_internal_source << ", fps_allocation = ["; + size_t num_spatial_layer_with_fps_allocation = 0; + for (size_t i = 0; i < kMaxSpatialLayers; ++i) { + if (!fps_allocation[i].empty()) { + num_spatial_layer_with_fps_allocation = i + 1; + } + } bool first = true; - for (size_t i = 0; i < fps_allocation->size(); ++i) { + for (size_t i = 0; i < num_spatial_layer_with_fps_allocation; ++i) { + if (fps_allocation[i].empty()) { + break; + } if (!first) { oss << ", "; } diff --git a/api/video_codecs/video_encoder_software_fallback_wrapper.cc b/api/video_codecs/video_encoder_software_fallback_wrapper.cc index be79c42464..bcce9dcd93 100644 --- a/api/video_codecs/video_encoder_software_fallback_wrapper.cc +++ b/api/video_codecs/video_encoder_software_fallback_wrapper.cc @@ -25,6 +25,7 @@ #include "api/video/video_frame.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" +#include "media/base/video_common.h" #include "modules/video_coding/include/video_error_codes.h" #include "modules/video_coding/utility/simulcast_utility.h" #include "rtc_base/checks.h" @@ -417,6 +418,13 @@ VideoEncoder::EncoderInfo VideoEncoderSoftwareFallbackWrapper::GetEncoderInfo() EncoderInfo info = IsFallbackActive() ? fallback_encoder_info : default_encoder_info; + info.requested_resolution_alignment = cricket::LeastCommonMultiple( + fallback_encoder_info.requested_resolution_alignment, + default_encoder_info.requested_resolution_alignment); + info.apply_alignment_to_all_simulcast_layers = + fallback_encoder_info.apply_alignment_to_all_simulcast_layers || + default_encoder_info.apply_alignment_to_all_simulcast_layers; + if (fallback_params_.has_value()) { const auto settings = (encoder_state_ == EncoderState::kForcedFallback) ? fallback_encoder_info.scaling_settings diff --git a/api/video_codecs/vp9_profile.cc b/api/video_codecs/vp9_profile.cc index d69f566e10..5e2bd53a86 100644 --- a/api/video_codecs/vp9_profile.cc +++ b/api/video_codecs/vp9_profile.cc @@ -47,7 +47,6 @@ absl::optional StringToVP9Profile(const std::string& str) { default: return absl::nullopt; } - return absl::nullopt; } absl::optional ParseSdpForVP9Profile( diff --git a/api/video_track_source_proxy_factory.h b/api/video_track_source_proxy_factory.h new file mode 100644 index 0000000000..974720d50b --- /dev/null +++ b/api/video_track_source_proxy_factory.h @@ -0,0 +1,28 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_VIDEO_TRACK_SOURCE_PROXY_FACTORY_H_ +#define API_VIDEO_TRACK_SOURCE_PROXY_FACTORY_H_ + +#include "api/media_stream_interface.h" + +namespace webrtc { + +// Creates a proxy source for |source| which makes sure the real +// VideoTrackSourceInterface implementation is destroyed on the signaling thread +// and marshals calls to |worker_thread| and |signaling_thread|. +rtc::scoped_refptr<VideoTrackSourceInterface> RTC_EXPORT +CreateVideoTrackSourceProxy(rtc::Thread* signaling_thread, +                            rtc::Thread* worker_thread, + VideoTrackSourceInterface* source); + +} // namespace webrtc + +#endif // API_VIDEO_TRACK_SOURCE_PROXY_FACTORY_H_ diff --git a/audio/BUILD.gn b/audio/BUILD.gn index ccbf9fd2e3..200f9f4038 100644 --- a/audio/BUILD.gn +++ b/audio/BUILD.gn @@ -95,6 +95,7 @@ rtc_library("audio") { "../rtc_base/experiments:field_trial_parser", "../rtc_base/synchronization:mutex", "../rtc_base/system:no_unique_address", + "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:to_queued_task", "../system_wrappers", "../system_wrappers:field_trial", @@ -139,6 +140,7 @@ if (rtc_include_tests) { "mock_voe_channel_proxy.h", "remix_resample_unittest.cc", "test/audio_stats_test.cc", + "test/nack_test.cc", ] deps = [ ":audio", @@ -151,6 +153,7 @@ if (rtc_include_tests) { "../api/audio_codecs:audio_codecs_api", "../api/audio_codecs/opus:audio_decoder_opus", "../api/audio_codecs/opus:audio_encoder_opus", + "../api/crypto:frame_decryptor_interface", "../api/rtc_event_log", "../api/task_queue:default_task_queue_factory", "../api/units:time_delta", diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc index 467647be5e..f243fa67db 100644 --- a/audio/audio_receive_stream.cc +++ b/audio/audio_receive_stream.cc @@ -18,6 +18,7 @@ #include "api/audio_codecs/audio_format.h" #include "api/call/audio_sink.h" #include "api/rtp_parameters.h" +#include "api/sequence_checker.h" #include "audio/audio_send_stream.h" #include "audio/audio_state.h" #include "audio/channel_receive.h" @@ -69,7 +70,6 @@ namespace { std::unique_ptr<voe::ChannelReceiveInterface> CreateChannelReceive( Clock* clock, webrtc::AudioState* audio_state, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, const webrtc::AudioReceiveStream::Config& config, RtcEventLog* event_log) { @@ -77,48 +77,43 @@ std::unique_ptr<voe::ChannelReceiveInterface> CreateChannelReceive( internal::AudioState* internal_audio_state = static_cast<internal::AudioState*>(audio_state); return voe::CreateChannelReceive( - clock, module_process_thread, neteq_factory, - internal_audio_state->audio_device_module(), config.rtcp_send_transport, - event_log, config.rtp.local_ssrc, config.rtp.remote_ssrc, - config.jitter_buffer_max_packets, config.jitter_buffer_fast_accelerate, - config.jitter_buffer_min_delay_ms, + clock, neteq_factory, internal_audio_state->audio_device_module(), + config.rtcp_send_transport, event_log, config.rtp.local_ssrc, + config.rtp.remote_ssrc, config.jitter_buffer_max_packets, + config.jitter_buffer_fast_accelerate, config.jitter_buffer_min_delay_ms, config.jitter_buffer_enable_rtx_handling, config.decoder_factory, - config.codec_pair_id, config.frame_decryptor, config.crypto_options, - std::move(config.frame_transformer)); + config.codec_pair_id, std::move(config.frame_decryptor), + config.crypto_options, std::move(config.frame_transformer)); } } // namespace AudioReceiveStream::AudioReceiveStream( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, const

webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, webrtc::RtcEventLog* event_log) : AudioReceiveStream(clock, - receiver_controller, packet_router, config, audio_state, event_log, CreateChannelReceive(clock, audio_state.get(), - module_process_thread, neteq_factory, config, event_log)) {} AudioReceiveStream::AudioReceiveStream( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, webrtc::RtcEventLog* event_log, std::unique_ptr channel_receive) - : audio_state_(audio_state), + : config_(config), + audio_state_(audio_state), source_tracker_(clock), channel_receive_(std::move(channel_receive)) { RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc; @@ -127,7 +122,8 @@ AudioReceiveStream::AudioReceiveStream( RTC_DCHECK(audio_state_); RTC_DCHECK(channel_receive_); - RTC_DCHECK(receiver_controller); + packet_sequence_checker_.Detach(); + RTC_DCHECK(packet_router); // Configure bandwidth estimation. channel_receive_->RegisterReceiverCongestionControlObjects(packet_router); @@ -137,10 +133,14 @@ AudioReceiveStream::AudioReceiveStream( // be updated. channel_receive_->SetSourceTracker(&source_tracker_); - // Register with transport. - rtp_stream_receiver_ = receiver_controller->CreateReceiver( - config.rtp.remote_ssrc, channel_receive_.get()); - ConfigureStream(this, config, true); + // Complete configuration. + // TODO(solenberg): Config NACK history window (which is a packet count), + // using the actual packet size for the configured codec. + channel_receive_->SetNACKStatus(config.rtp.nack.rtp_history_ms != 0, + config.rtp.nack.rtp_history_ms / 20); + channel_receive_->SetReceiveCodecs(config.decoder_map); + // `frame_transformer` and `frame_decryptor` have been given to + // `channel_receive_` already. } AudioReceiveStream::~AudioReceiveStream() { @@ -151,10 +151,43 @@ AudioReceiveStream::~AudioReceiveStream() { channel_receive_->ResetReceiverCongestionControlObjects(); } -void AudioReceiveStream::Reconfigure( +void AudioReceiveStream::RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK(!rtp_stream_receiver_); + rtp_stream_receiver_ = receiver_controller->CreateReceiver( + config_.rtp.remote_ssrc, channel_receive_.get()); +} + +void AudioReceiveStream::UnregisterFromTransport() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_stream_receiver_.reset(); +} + +void AudioReceiveStream::ReconfigureForTesting( const webrtc::AudioReceiveStream::Config& config) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - ConfigureStream(this, config, false); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + + // SSRC can't be changed mid-stream. + RTC_DCHECK_EQ(config_.rtp.remote_ssrc, config.rtp.remote_ssrc); + RTC_DCHECK_EQ(config_.rtp.local_ssrc, config.rtp.local_ssrc); + + // Configuration parameters which cannot be changed. + RTC_DCHECK_EQ(config_.rtcp_send_transport, config.rtcp_send_transport); + // Decoder factory cannot be changed because it is configured at + // voe::Channel construction time. + RTC_DCHECK_EQ(config_.decoder_factory, config.decoder_factory); + + // TODO(solenberg): Config NACK history window (which is a packet count), + // using the actual packet size for the configured codec. 
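For reference, the conversion the TODO above refers to is the hard-coded "history in milliseconds divided by 20" that this patch keeps using in SetNACKStatus and SetUseTransportCcAndNackHistory. A tiny sketch of that assumption, with the 20 ms packet duration named explicitly:

// Assumes 20 ms audio packets, matching the "/ 20" in this patch; the TODO is
// precisely about replacing this with the configured codec's packet duration.
constexpr int kAssumedPacketDurationMs = 20;

constexpr int NackHistoryPackets(int rtp_history_ms) {
  return rtp_history_ms / kAssumedPacketDurationMs;
}

// The unit test passes a 320 ms history and expects SetNACKStatus(true, 16).
static_assert(NackHistoryPackets(320) == 16, "320 ms at 20 ms per packet");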
+ RTC_DCHECK_EQ(config_.rtp.nack.rtp_history_ms, config.rtp.nack.rtp_history_ms) + << "Use SetUseTransportCcAndNackHistory"; + + RTC_DCHECK(config_.decoder_map == config.decoder_map) << "Use SetDecoderMap"; + RTC_DCHECK_EQ(config_.frame_transformer, config.frame_transformer) + << "Use SetDepacketizerToDecoderFrameTransformer"; + + config_ = config; } void AudioReceiveStream::Start() { @@ -182,6 +215,49 @@ bool AudioReceiveStream::IsRunning() const { return playing_; } +void AudioReceiveStream::SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr frame_transformer) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + channel_receive_->SetDepacketizerToDecoderFrameTransformer( + std::move(frame_transformer)); +} + +void AudioReceiveStream::SetDecoderMap( + std::map decoder_map) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + config_.decoder_map = std::move(decoder_map); + channel_receive_->SetReceiveCodecs(config_.decoder_map); +} + +void AudioReceiveStream::SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_GE(history_ms, 0); + config_.rtp.transport_cc = use_transport_cc; + if (config_.rtp.nack.rtp_history_ms != history_ms) { + config_.rtp.nack.rtp_history_ms = history_ms; + // TODO(solenberg): Config NACK history window (which is a packet count), + // using the actual packet size for the configured codec. + channel_receive_->SetNACKStatus(history_ms != 0, history_ms / 20); + } +} + +void AudioReceiveStream::SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) { + // TODO(bugs.webrtc.org/11993): This is called via WebRtcAudioReceiveStream, + // expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + channel_receive_->SetFrameDecryptor(std::move(frame_decryptor)); +} + +void AudioReceiveStream::SetRtpExtensions( + std::vector extensions) { + // TODO(bugs.webrtc.org/11993): This is called via WebRtcAudioReceiveStream, + // expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + config_.rtp.extensions = std::move(extensions); +} + webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats( bool get_and_clear_legacy_stats) const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); @@ -202,6 +278,7 @@ webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats( call_stats.header_and_padding_bytes_rcvd; stats.packets_rcvd = call_stats.packetsReceived; stats.packets_lost = call_stats.cumulativeLost; + stats.nacks_sent = call_stats.nacks_sent; stats.capture_start_ntp_time_ms = call_stats.capture_start_ntp_time_ms_; stats.last_packet_received_timestamp_ms = call_stats.last_packet_received_timestamp_ms; @@ -351,8 +428,7 @@ bool AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) { } void AudioReceiveStream::AssociateSendStream(AudioSendStream* send_stream) { - // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. - RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); channel_receive_->SetAssociatedSendChannel( send_stream ? 
send_stream->GetChannel() : nullptr); associated_send_stream_ = send_stream; @@ -366,6 +442,24 @@ void AudioReceiveStream::DeliverRtcp(const uint8_t* packet, size_t length) { channel_receive_->ReceivedRTCPPacket(packet, length); } +void AudioReceiveStream::SetSyncGroup(const std::string& sync_group) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + config_.sync_group = sync_group; +} + +void AudioReceiveStream::SetLocalSsrc(uint32_t local_ssrc) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + // TODO(tommi): Consider storing local_ssrc in one place. + config_.rtp.local_ssrc = local_ssrc; + channel_receive_->OnLocalSsrcChange(local_ssrc); +} + +uint32_t AudioReceiveStream::local_ssrc() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK_EQ(config_.rtp.local_ssrc, channel_receive_->GetLocalSsrc()); + return config_.rtp.local_ssrc; +} + const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); return config_; @@ -373,9 +467,7 @@ const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const { const AudioSendStream* AudioReceiveStream::GetAssociatedSendStreamForTesting() const { - // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread or - // remove test method and |associated_send_stream_| variable. - RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); return associated_send_stream_; } @@ -384,50 +476,5 @@ internal::AudioState* AudioReceiveStream::audio_state() const { RTC_DCHECK(audio_state); return audio_state; } - -void AudioReceiveStream::ConfigureStream(AudioReceiveStream* stream, - const Config& new_config, - bool first_time) { - RTC_LOG(LS_INFO) << "AudioReceiveStream::ConfigureStream: " - << new_config.ToString(); - RTC_DCHECK(stream); - const auto& channel_receive = stream->channel_receive_; - const auto& old_config = stream->config_; - - // Configuration parameters which cannot be changed. - RTC_DCHECK(first_time || - old_config.rtp.remote_ssrc == new_config.rtp.remote_ssrc); - RTC_DCHECK(first_time || - old_config.rtcp_send_transport == new_config.rtcp_send_transport); - // Decoder factory cannot be changed because it is configured at - // voe::Channel construction time. - RTC_DCHECK(first_time || - old_config.decoder_factory == new_config.decoder_factory); - - if (!first_time) { - // SSRC can't be changed mid-stream. - RTC_DCHECK_EQ(old_config.rtp.local_ssrc, new_config.rtp.local_ssrc); - RTC_DCHECK_EQ(old_config.rtp.remote_ssrc, new_config.rtp.remote_ssrc); - } - - // TODO(solenberg): Config NACK history window (which is a packet count), - // using the actual packet size for the configured codec. 
- if (first_time || old_config.rtp.nack.rtp_history_ms != - new_config.rtp.nack.rtp_history_ms) { - channel_receive->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0, - new_config.rtp.nack.rtp_history_ms / 20); - } - if (first_time || old_config.decoder_map != new_config.decoder_map) { - channel_receive->SetReceiveCodecs(new_config.decoder_map); - } - - if (first_time || - old_config.frame_transformer != new_config.frame_transformer) { - channel_receive->SetDepacketizerToDecoderFrameTransformer( - new_config.frame_transformer); - } - - stream->config_ = new_config; -} } // namespace internal } // namespace webrtc diff --git a/audio/audio_receive_stream.h b/audio/audio_receive_stream.h index a8438c252a..61ebc2719f 100644 --- a/audio/audio_receive_stream.h +++ b/audio/audio_receive_stream.h @@ -11,7 +11,9 @@ #ifndef AUDIO_AUDIO_RECEIVE_STREAM_H_ #define AUDIO_AUDIO_RECEIVE_STREAM_H_ +#include #include +#include #include #include "api/audio/audio_mixer.h" @@ -22,6 +24,7 @@ #include "call/audio_receive_stream.h" #include "call/syncable.h" #include "modules/rtp_rtcp/source/source_tracker.h" +#include "rtc_base/system/no_unique_address.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -44,9 +47,7 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, public Syncable { public: AudioReceiveStream(Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, @@ -54,7 +55,6 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, // For unit tests, which need to supply a mock channel receive. AudioReceiveStream( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, @@ -65,13 +65,36 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, AudioReceiveStream(const AudioReceiveStream&) = delete; AudioReceiveStream& operator=(const AudioReceiveStream&) = delete; + // Destruction happens on the worker thread. Prior to destruction the caller + // must ensure that a registration with the transport has been cleared. See + // `RegisterWithTransport` for details. + // TODO(tommi): As a further improvement to this, performing the full + // destruction on the network thread could be made the default. ~AudioReceiveStream() override; + // Called on the network thread to register/unregister with the network + // transport. + void RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller); + // If registration has previously been done (via `RegisterWithTransport`) then + // `UnregisterFromTransport` must be called prior to destruction, on the + // network thread. + void UnregisterFromTransport(); + // webrtc::AudioReceiveStream implementation. 
- void Reconfigure(const webrtc::AudioReceiveStream::Config& config) override; void Start() override; void Stop() override; + const RtpConfig& rtp_config() const override { return config_.rtp; } bool IsRunning() const override; + void SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr frame_transformer) + override; + void SetDecoderMap(std::map decoder_map) override; + void SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) override; + void SetFrameDecryptor(rtc::scoped_refptr + frame_decryptor) override; + void SetRtpExtensions(std::vector extensions) override; webrtc::AudioReceiveStream::Stats GetStats( bool get_and_clear_legacy_stats) const override; @@ -98,26 +121,48 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, void AssociateSendStream(AudioSendStream* send_stream); void DeliverRtcp(const uint8_t* packet, size_t length); + + void SetSyncGroup(const std::string& sync_group); + + void SetLocalSsrc(uint32_t local_ssrc); + + uint32_t local_ssrc() const; + + uint32_t remote_ssrc() const { + // The remote_ssrc member variable of config_ will never change and can be + // considered const. + return config_.rtp.remote_ssrc; + } + const webrtc::AudioReceiveStream::Config& config() const; const AudioSendStream* GetAssociatedSendStreamForTesting() const; - private: - static void ConfigureStream(AudioReceiveStream* stream, - const Config& new_config, - bool first_time); + // TODO(tommi): Remove this method. + void ReconfigureForTesting(const webrtc::AudioReceiveStream::Config& config); + private: AudioState* audio_state() const; - SequenceChecker worker_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_; + // TODO(bugs.webrtc.org/11993): This checker conceptually represents + // operations that belong to the network thread. The Call class is currently + // moving towards handling network packets on the network thread and while + // that work is ongoing, this checker may in practice represent the worker + // thread, but still serves as a mechanism of grouping together concepts + // that belong to the network thread. Once the packets are fully delivered + // on the network thread, this comment will be deleted. 
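A reduced illustration of the checker pattern described in the comment above: state that belongs to the packet/network context is grouped under one SequenceChecker, annotated with RTC_GUARDED_BY so static thread-safety analysis and the debug-time RTC_DCHECK_RUN_ON checks agree on who may touch it. The class below is hypothetical and assumes the usual WebRTC headers; it is not part of the patch.

#include "api/sequence_checker.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"

class PacketSequenceStateSketch {
 public:
  PacketSequenceStateSketch() {
    // Detach so the first sequence that calls OnPacket() binds the checker,
    // mirroring packet_sequence_checker_.Detach() in the constructor above.
    packet_sequence_checker_.Detach();
  }

  void OnPacket() {
    RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
    ++packets_received_;  // Only ever touched on the packet sequence.
  }

 private:
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker packet_sequence_checker_;
  int packets_received_ RTC_GUARDED_BY(packet_sequence_checker_) = 0;
};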
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; webrtc::AudioReceiveStream::Config config_; rtc::scoped_refptr audio_state_; SourceTracker source_tracker_; const std::unique_ptr channel_receive_; - AudioSendStream* associated_send_stream_ = nullptr; + AudioSendStream* associated_send_stream_ + RTC_GUARDED_BY(packet_sequence_checker_) = nullptr; bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false; - std::unique_ptr rtp_stream_receiver_; + std::unique_ptr rtp_stream_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); }; } // namespace internal } // namespace webrtc diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc index 72244dd84a..fb5f1cb876 100644 --- a/audio/audio_receive_stream_unittest.cc +++ b/audio/audio_receive_stream_unittest.cc @@ -104,8 +104,6 @@ struct ConfigHelper { .WillRepeatedly(Invoke([](const std::map& codecs) { EXPECT_THAT(codecs, ::testing::IsEmpty()); })); - EXPECT_CALL(*channel_receive_, SetDepacketizerToDecoderFrameTransformer(_)) - .Times(1); EXPECT_CALL(*channel_receive_, SetSourceTracker(_)); stream_config_.rtp.local_ssrc = kLocalSsrc; @@ -121,11 +119,12 @@ struct ConfigHelper { } std::unique_ptr CreateAudioReceiveStream() { - return std::unique_ptr( - new internal::AudioReceiveStream( - Clock::GetRealTimeClock(), &rtp_stream_receiver_controller_, - &packet_router_, stream_config_, audio_state_, &event_log_, - std::unique_ptr(channel_receive_))); + auto ret = std::make_unique( + Clock::GetRealTimeClock(), &packet_router_, stream_config_, + audio_state_, &event_log_, + std::unique_ptr(channel_receive_)); + ret->RegisterWithTransport(&rtp_stream_receiver_controller_); + return ret; } AudioReceiveStream::Config& config() { return stream_config_; } @@ -199,6 +198,7 @@ TEST(AudioReceiveStreamTest, ConstructDestruct) { for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(use_null_audio_processing); auto recv_stream = helper.CreateAudioReceiveStream(); + recv_stream->UnregisterFromTransport(); } } @@ -212,6 +212,7 @@ TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) { ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size())) .WillOnce(Return()); recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size()); + recv_stream->UnregisterFromTransport(); } } @@ -276,6 +277,7 @@ TEST(AudioReceiveStreamTest, GetStats) { EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_, stats.capture_start_ntp_time_ms); EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms); + recv_stream->UnregisterFromTransport(); } } @@ -286,6 +288,7 @@ TEST(AudioReceiveStreamTest, SetGain) { EXPECT_CALL(*helper.channel_receive(), SetChannelOutputVolumeScaling(FloatEq(0.765f))); recv_stream->SetGain(0.765f); + recv_stream->UnregisterFromTransport(); } } @@ -317,14 +320,9 @@ TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) { // Stop stream before it is being destructed. 
recv_stream2->Stop(); - } -} -TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - auto recv_stream = helper.CreateAudioReceiveStream(); - recv_stream->Reconfigure(helper.config()); + recv_stream1->UnregisterFromTransport(); + recv_stream2->UnregisterFromTransport(); } } @@ -334,20 +332,32 @@ TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) { auto recv_stream = helper.CreateAudioReceiveStream(); auto new_config = helper.config(); - new_config.rtp.nack.rtp_history_ms = 300 + 20; + new_config.rtp.extensions.clear(); new_config.rtp.extensions.push_back( RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1)); new_config.rtp.extensions.push_back( RtpExtension(RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId + 1)); - new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1)); MockChannelReceive& channel_receive = *helper.channel_receive(); - EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1); + + // TODO(tommi, nisse): This applies new extensions to the internal config, + // but there's nothing that actually verifies that the changes take effect. + // In fact Call manages the extensions separately in Call::ReceiveRtpConfig + // and changing this config value (there seem to be a few copies), doesn't + // affect that logic. + recv_stream->ReconfigureForTesting(new_config); + + new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1)); EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map)); + recv_stream->SetDecoderMap(new_config.decoder_map); + + EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1); + recv_stream->SetUseTransportCcAndNackHistory(new_config.rtp.transport_cc, + 300 + 20); - recv_stream->Reconfigure(new_config); + recv_stream->UnregisterFromTransport(); } } @@ -361,14 +371,20 @@ TEST(AudioReceiveStreamTest, ReconfigureWithFrameDecryptor) { rtc::make_ref_counted()); new_config_0.frame_decryptor = mock_frame_decryptor_0; - recv_stream->Reconfigure(new_config_0); + // TODO(tommi): While this changes the internal config value, it doesn't + // actually change what frame_decryptor is used. WebRtcAudioReceiveStream + // recreates the whole instance in order to change this value. + // So, it's not clear if changing this post initialization needs to be + // supported. 
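The test updates around here all pair CreateAudioReceiveStream() with an explicit UnregisterFromTransport() before destruction, which is the new lifecycle introduced by this patch. The sketch below loosely mirrors the unit-test helper; the function itself and its parameters are assumed to be supplied by the caller, and it only illustrates the ordering: construct, register, use, unregister, destroy.

#include <memory>
#include <utility>

#include "audio/audio_receive_stream.h"

void ReceiveStreamLifetimeSketch(
    webrtc::Clock* clock,
    webrtc::PacketRouter* packet_router,
    const webrtc::AudioReceiveStream::Config& config,
    rtc::scoped_refptr<webrtc::AudioState> audio_state,
    webrtc::RtcEventLog* event_log,
    std::unique_ptr<webrtc::voe::ChannelReceiveInterface> channel_receive,
    webrtc::RtpStreamReceiverControllerInterface* receiver_controller) {
  auto stream = std::make_unique<webrtc::internal::AudioReceiveStream>(
      clock, packet_router, config, audio_state, event_log,
      std::move(channel_receive));
  // Registration now happens explicitly, on the network thread.
  stream->RegisterWithTransport(receiver_controller);
  stream->Start();
  // ... deliver packets, query stats ...
  stream->Stop();
  // Required before destruction whenever RegisterWithTransport() was called.
  stream->UnregisterFromTransport();
  stream.reset();
}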
+ recv_stream->ReconfigureForTesting(new_config_0); auto new_config_1 = helper.config(); rtc::scoped_refptr mock_frame_decryptor_1( rtc::make_ref_counted()); new_config_1.frame_decryptor = mock_frame_decryptor_1; new_config_1.crypto_options.sframe.require_frame_encryption = true; - recv_stream->Reconfigure(new_config_1); + recv_stream->ReconfigureForTesting(new_config_1); + recv_stream->UnregisterFromTransport(); } } diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc index b769569fd5..62dd53d337 100644 --- a/audio/audio_send_stream.cc +++ b/audio/audio_send_stream.cc @@ -102,7 +102,6 @@ AudioSendStream::AudioSendStream( const webrtc::AudioSendStream::Config& config, const rtc::scoped_refptr& audio_state, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, RtpTransportControllerSendInterface* rtp_transport, BitrateAllocatorInterface* bitrate_allocator, RtcEventLog* event_log, @@ -119,7 +118,6 @@ AudioSendStream::AudioSendStream( voe::CreateChannelSend( clock, task_queue_factory, - module_process_thread, config.send_transport, rtcp_rtt_stats, event_log, @@ -142,7 +140,7 @@ AudioSendStream::AudioSendStream( const absl::optional& suspended_rtp_state, std::unique_ptr channel_send) : clock_(clock), - worker_queue_(rtp_transport->GetWorkerQueue()), + rtp_transport_queue_(rtp_transport->GetWorkerQueue()), allocate_audio_without_feedback_( field_trial::IsEnabled("WebRTC-Audio-ABWENoTWCC")), enable_audio_alr_probing_( @@ -160,7 +158,7 @@ AudioSendStream::AudioSendStream( rtp_rtcp_module_(channel_send_->GetRtpRtcp()), suspended_rtp_state_(suspended_rtp_state) { RTC_LOG(LS_INFO) << "AudioSendStream: " << config.rtp.ssrc; - RTC_DCHECK(worker_queue_); + RTC_DCHECK(rtp_transport_queue_); RTC_DCHECK(audio_state_); RTC_DCHECK(channel_send_); RTC_DCHECK(bitrate_allocator_); @@ -182,7 +180,7 @@ AudioSendStream::~AudioSendStream() { // Blocking call to synchronize state with worker queue to ensure that there // are no pending tasks left that keeps references to audio. rtc::Event thread_sync_event; - worker_queue_->PostTask([&] { thread_sync_event.Set(); }); + rtp_transport_queue_->PostTask([&] { thread_sync_event.Set(); }); thread_sync_event.Wait(rtc::Event::kForever); } @@ -500,6 +498,8 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats( stats.report_block_datas = std::move(call_stats.report_block_datas); + stats.nacks_rcvd = call_stats.nacks_rcvd; + return stats; } @@ -517,7 +517,7 @@ void AudioSendStream::DeliverRtcp(const uint8_t* packet, size_t length) { } uint32_t AudioSendStream::OnBitrateUpdated(BitrateAllocationUpdate update) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); // Pick a target bitrate between the constraints. 
Overrules the allocator if // it 1) allocated a bitrate of zero to disable the stream or 2) allocated a @@ -855,9 +855,10 @@ void AudioSendStream::ConfigureBitrateObserver() { if (allocation_settings_.priority_bitrate_raw) priority_bitrate = *allocation_settings_.priority_bitrate_raw; - worker_queue_->PostTask([this, constraints, priority_bitrate, - config_bitrate_priority = config_.bitrate_priority] { - RTC_DCHECK_RUN_ON(worker_queue_); + rtp_transport_queue_->PostTask([this, constraints, priority_bitrate, + config_bitrate_priority = + config_.bitrate_priority] { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); bitrate_allocator_->AddObserver( this, MediaStreamAllocationConfig{ @@ -872,8 +873,8 @@ void AudioSendStream::ConfigureBitrateObserver() { void AudioSendStream::RemoveBitrateObserver() { registered_with_allocator_ = false; rtc::Event thread_sync_event; - worker_queue_->PostTask([this, &thread_sync_event] { - RTC_DCHECK_RUN_ON(worker_queue_); + rtp_transport_queue_->PostTask([this, &thread_sync_event] { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); bitrate_allocator_->RemoveObserver(this); thread_sync_event.Set(); }); @@ -940,8 +941,8 @@ void AudioSendStream::UpdateCachedTargetAudioBitrateConstraints() { if (!new_constraints.has_value()) { return; } - worker_queue_->PostTask([this, new_constraints]() { - RTC_DCHECK_RUN_ON(worker_queue_); + rtp_transport_queue_->PostTask([this, new_constraints]() { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); cached_constraints_ = new_constraints; }); } diff --git a/audio/audio_send_stream.h b/audio/audio_send_stream.h index 25346ae373..e0b15dc0c9 100644 --- a/audio/audio_send_stream.h +++ b/audio/audio_send_stream.h @@ -58,7 +58,6 @@ class AudioSendStream final : public webrtc::AudioSendStream, const webrtc::AudioSendStream::Config& config, const rtc::scoped_refptr& audio_state, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, RtpTransportControllerSendInterface* rtp_transport, BitrateAllocatorInterface* bitrate_allocator, RtcEventLog* event_log, @@ -165,7 +164,7 @@ class AudioSendStream final : public webrtc::AudioSendStream, SequenceChecker worker_thread_checker_; SequenceChecker pacer_thread_checker_; rtc::RaceChecker audio_capture_race_checker_; - rtc::TaskQueue* worker_queue_; + rtc::TaskQueue* rtp_transport_queue_; const bool allocate_audio_without_feedback_; const bool force_no_audio_feedback_ = allocate_audio_without_feedback_; @@ -189,10 +188,10 @@ class AudioSendStream final : public webrtc::AudioSendStream, webrtc::voe::AudioLevel audio_level_ RTC_GUARDED_BY(audio_level_lock_); BitrateAllocatorInterface* const bitrate_allocator_ - RTC_GUARDED_BY(worker_queue_); - // Constrains cached to be accessed from |worker_queue_|. + RTC_GUARDED_BY(rtp_transport_queue_); + // Constrains cached to be accessed from |rtp_transport_queue_|. 
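The members below are guarded by the renamed rtp_transport_queue_, and the destructor change earlier in audio_send_stream.cc flushes that queue before tearing the object down. A standalone sketch of that flush pattern, assuming the queue outlives the call:

#include "rtc_base/event.h"
#include "rtc_base/task_queue.h"

// Post a no-op task and block until it runs; anything queued behind it can no
// longer reference the object about to be destroyed.
void FlushTaskQueue(rtc::TaskQueue* queue) {
  rtc::Event done;
  queue->PostTask([&done] { done.Set(); });
  done.Wait(rtc::Event::kForever);
}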
absl::optional - cached_constraints_ RTC_GUARDED_BY(worker_queue_) = absl::nullopt; + cached_constraints_ RTC_GUARDED_BY(rtp_transport_queue_) = absl::nullopt; RtpTransportControllerSendInterface* const rtp_transport_; RtpRtcpInterface* const rtp_rtcp_module_; diff --git a/audio/channel_receive.cc b/audio/channel_receive.cc index fd33dbdf24..57269cd193 100644 --- a/audio/channel_receive.cc +++ b/audio/channel_receive.cc @@ -10,8 +10,6 @@ #include "audio/channel_receive.h" -#include - #include #include #include @@ -23,6 +21,7 @@ #include "api/frame_transformer_interface.h" #include "api/rtc_event_log/rtc_event_log.h" #include "api/sequence_checker.h" +#include "api/task_queue/task_queue_base.h" #include "audio/audio_level.h" #include "audio/channel_receive_frame_transformer_delegate.h" #include "audio/channel_send.h" @@ -48,6 +47,9 @@ #include "rtc_base/numerics/safe_minmax.h" #include "rtc_base/race_checker.h" #include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/time_utils.h" #include "system_wrappers/include/metrics.h" @@ -79,12 +81,12 @@ AudioCodingModule::Config AcmConfig( return acm_config; } -class ChannelReceive : public ChannelReceiveInterface { +class ChannelReceive : public ChannelReceiveInterface, + public RtcpPacketTypeCounterObserver { public: // Used for receive streams. ChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, @@ -175,44 +177,55 @@ class ChannelReceive : public ChannelReceiveInterface { rtc::scoped_refptr frame_transformer) override; + void SetFrameDecryptor(rtc::scoped_refptr + frame_decryptor) override; + + void OnLocalSsrcChange(uint32_t local_ssrc) override; + uint32_t GetLocalSsrc() const override; + + void RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) override; + private: void ReceivePacket(const uint8_t* packet, size_t packet_length, - const RTPHeader& header); + const RTPHeader& header) + RTC_RUN_ON(worker_thread_checker_); int ResendPackets(const uint16_t* sequence_numbers, int length); - void UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms); + void UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms) + RTC_RUN_ON(worker_thread_checker_); int GetRtpTimestampRateHz() const; int64_t GetRTT() const; void OnReceivedPayloadData(rtc::ArrayView payload, - const RTPHeader& rtpHeader); + const RTPHeader& rtpHeader) + RTC_RUN_ON(worker_thread_checker_); void InitFrameTransformerDelegate( - rtc::scoped_refptr frame_transformer); - - bool Playing() const { - MutexLock lock(&playing_lock_); - return playing_; - } + rtc::scoped_refptr frame_transformer) + RTC_RUN_ON(worker_thread_checker_); // Thread checkers document and lock usage of some methods to specific threads // we know about. The goal is to eventually split up voe::ChannelReceive into // parts with single-threaded semantics, and thereby reduce the need for // locks. - SequenceChecker worker_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker network_thread_checker_; + + TaskQueueBase* const worker_thread_; + ScopedTaskSafety worker_safety_; // Methods accessed from audio and video threads are checked for sequential- // only access. 
We don't necessarily own and control these threads, so thread // checkers cannot be used. E.g. Chromium may transfer "ownership" from one // audio thread to another, but access is still sequential. rtc::RaceChecker audio_thread_race_checker_; - rtc::RaceChecker video_capture_thread_race_checker_; Mutex callback_mutex_; Mutex volume_settings_mutex_; - mutable Mutex playing_lock_; - bool playing_ RTC_GUARDED_BY(&playing_lock_) = false; + bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false; RtcEventLog* const event_log_; @@ -226,11 +239,10 @@ class ChannelReceive : public ChannelReceiveInterface { // Info for GetSyncInfo is updated on network or worker thread, and queried on // the worker thread. - mutable Mutex sync_info_lock_; absl::optional last_received_rtp_timestamp_ - RTC_GUARDED_BY(&sync_info_lock_); + RTC_GUARDED_BY(&worker_thread_checker_); absl::optional last_received_rtp_system_time_ms_ - RTC_GUARDED_BY(&sync_info_lock_); + RTC_GUARDED_BY(&worker_thread_checker_); // The AcmReceiver is thread safe, using its own lock. acm2::AcmReceiver acm_receiver_; @@ -243,15 +255,14 @@ class ChannelReceive : public ChannelReceiveInterface { // Timestamp of the audio pulled from NetEq. absl::optional jitter_buffer_playout_timestamp_; - mutable Mutex video_sync_lock_; - uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_); + uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(worker_thread_checker_); absl::optional playout_timestamp_rtp_time_ms_ - RTC_GUARDED_BY(video_sync_lock_); - uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_); + RTC_GUARDED_BY(worker_thread_checker_); + uint32_t playout_delay_ms_ RTC_GUARDED_BY(worker_thread_checker_); absl::optional playout_timestamp_ntp_ - RTC_GUARDED_BY(video_sync_lock_); + RTC_GUARDED_BY(worker_thread_checker_); absl::optional playout_timestamp_ntp_time_ms_ - RTC_GUARDED_BY(video_sync_lock_); + RTC_GUARDED_BY(worker_thread_checker_); mutable Mutex ts_stats_lock_; @@ -262,33 +273,47 @@ class ChannelReceive : public ChannelReceiveInterface { // frame. int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_); - ProcessThread* const module_process_thread_; AudioDeviceModule* _audioDeviceModulePtr; float _outputGain RTC_GUARDED_BY(volume_settings_mutex_); const ChannelSendInterface* associated_send_channel_ - RTC_GUARDED_BY(worker_thread_checker_); + RTC_GUARDED_BY(network_thread_checker_); PacketRouter* packet_router_ = nullptr; SequenceChecker construction_thread_; // E2EE Audio Frame Decryption - rtc::scoped_refptr frame_decryptor_; + rtc::scoped_refptr frame_decryptor_ + RTC_GUARDED_BY(worker_thread_checker_); webrtc::CryptoOptions crypto_options_; - webrtc::AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_; + webrtc::AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_ + RTC_GUARDED_BY(worker_thread_checker_); webrtc::CaptureClockOffsetUpdater capture_clock_offset_updater_; rtc::scoped_refptr frame_transformer_delegate_; + + // Counter that's used to control the frequency of reporting histograms + // from the `GetAudioFrameWithInfo` callback. + int audio_frame_interval_count_ RTC_GUARDED_BY(audio_thread_race_checker_) = + 0; + // Controls how many callbacks we let pass by before reporting callback stats. + // A value of 100 means 100 callbacks, each one of which represents 10ms worth + // of data, so the stats reporting frequency will be 1Hz (modulo failures). 
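The arithmetic behind the "1Hz" claim above, written out:

// Each GetAudioFrameWithInfo() callback covers 10 ms of audio, so reporting
// every 100 callbacks means one report per 100 * 10 ms = 1000 ms, i.e. ~1 Hz.
constexpr int kAssumedCallbackIntervalMs = 10;
constexpr int kReportingIntervalCallbacks = 100;
static_assert(kReportingIntervalCallbacks * kAssumedCallbackIntervalMs == 1000,
              "Histograms are reported roughly once per second.");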
+ constexpr static int kHistogramReportingInterval = 100; + + mutable Mutex rtcp_counter_mutex_; + RtcpPacketTypeCounter rtcp_packet_type_counter_ + RTC_GUARDED_BY(rtcp_counter_mutex_); }; void ChannelReceive::OnReceivedPayloadData( rtc::ArrayView payload, const RTPHeader& rtpHeader) { - if (!Playing()) { + if (!playing_) { // Avoid inserting into NetEQ when we are not playing. Count the // packet as discarded. @@ -331,18 +356,20 @@ void ChannelReceive::InitFrameTransformerDelegate( rtc::scoped_refptr frame_transformer) { RTC_DCHECK(frame_transformer); RTC_DCHECK(!frame_transformer_delegate_); + RTC_DCHECK(worker_thread_->IsCurrent()); // Pass a callback to ChannelReceive::OnReceivedPayloadData, to be called by // the delegate to receive transformed audio. ChannelReceiveFrameTransformerDelegate::ReceiveFrameCallback receive_audio_callback = [this](rtc::ArrayView packet, const RTPHeader& header) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); OnReceivedPayloadData(packet, header); }; frame_transformer_delegate_ = rtc::make_ref_counted( std::move(receive_audio_callback), std::move(frame_transformer), - rtc::Thread::Current()); + worker_thread_); frame_transformer_delegate_->Init(); } @@ -453,17 +480,21 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo( } audio_frame->packet_infos_ = RtpPacketInfos(packet_infos); - { - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs", - acm_receiver_.TargetDelayMs()); - const int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs(); - MutexLock lock(&video_sync_lock_); - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs", - jitter_buffer_delay + playout_delay_ms_); - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs", - jitter_buffer_delay); - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs", - playout_delay_ms_); + ++audio_frame_interval_count_; + if (audio_frame_interval_count_ >= kHistogramReportingInterval) { + audio_frame_interval_count_ = 0; + worker_thread_->PostTask(ToQueuedTask(worker_safety_, [this]() { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs", + acm_receiver_.TargetDelayMs()); + const int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs(); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs", + jitter_buffer_delay + playout_delay_ms_); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs", + jitter_buffer_delay); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs", + playout_delay_ms_); + })); } return muted ? 
AudioMixer::Source::AudioFrameInfo::kMuted @@ -483,7 +514,6 @@ void ChannelReceive::SetSourceTracker(SourceTracker* source_tracker) { ChannelReceive::ChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, @@ -499,7 +529,8 @@ ChannelReceive::ChannelReceive( rtc::scoped_refptr frame_decryptor, const webrtc::CryptoOptions& crypto_options, rtc::scoped_refptr frame_transformer) - : event_log_(rtc_event_log), + : worker_thread_(TaskQueueBase::Current()), + event_log_(rtc_event_log), rtp_receive_statistics_(ReceiveStatistics::Create(clock)), remote_ssrc_(remote_ssrc), acm_receiver_(AcmConfig(neteq_factory, @@ -515,16 +546,16 @@ ChannelReceive::ChannelReceive( rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), capture_start_rtp_time_stamp_(-1), capture_start_ntp_time_ms_(-1), - module_process_thread_(module_process_thread), _audioDeviceModulePtr(audio_device_module), _outputGain(1.0f), associated_send_channel_(nullptr), frame_decryptor_(frame_decryptor), crypto_options_(crypto_options), absolute_capture_time_interpolator_(clock) { - RTC_DCHECK(module_process_thread_); RTC_DCHECK(audio_device_module); + network_thread_checker_.Detach(); + acm_receiver_.ResetInitialDelay(); acm_receiver_.SetMinimumDelay(0); acm_receiver_.SetMaximumDelay(0); @@ -541,6 +572,7 @@ ChannelReceive::ChannelReceive( configuration.receive_statistics = rtp_receive_statistics_.get(); configuration.event_log = event_log_; configuration.local_media_ssrc = local_ssrc; + configuration.rtcp_packet_type_counter_observer = this; if (frame_transformer) InitFrameTransformerDelegate(std::move(frame_transformer)); @@ -551,19 +583,10 @@ ChannelReceive::ChannelReceive( // Ensure that RTCP is enabled for the created channel. rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound); - - // TODO(tommi): This should be an implementation detail of ModuleRtpRtcpImpl2 - // and the pointer to the process thread should be there (which also localizes - // the problem of getting rid of that dependency). - module_process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); } ChannelReceive::~ChannelReceive() { - RTC_DCHECK(construction_thread_.IsCurrent()); - - // Unregister the module before stopping playout etc, to match the order - // things were set up in the ctor. - module_process_thread_->DeRegisterModule(rtp_rtcp_.get()); + RTC_DCHECK_RUN_ON(&construction_thread_); // Resets the delegate's callback to ChannelReceive::OnReceivedPayloadData. 
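The histogram change in GetAudioFrameWithInfo() above uses the worker_safety_ flag with ToQueuedTask so that a task posted from the audio path is silently dropped if the channel is destroyed first. A hypothetical, reduced stand-in for that pattern (not the patch's class), assuming the usual task_utils headers:

#include "api/task_queue/task_queue_base.h"
#include "rtc_base/task_utils/pending_task_safety_flag.h"
#include "rtc_base/task_utils/to_queued_task.h"

class StatsPosterSketch {
 public:
  explicit StatsPosterSketch(webrtc::TaskQueueBase* worker_thread)
      : worker_thread_(worker_thread) {}

  // May be called from the audio thread; the work is marshalled to the worker.
  void PostReport() {
    worker_thread_->PostTask(webrtc::ToQueuedTask(safety_, [this] {
      // Runs on |worker_thread_|, and only if |this| is still alive.
      ++reports_posted_;
    }));
  }

 private:
  webrtc::TaskQueueBase* const worker_thread_;
  webrtc::ScopedTaskSafety safety_;  // Invalidated when |this| is destroyed.
  int reports_posted_ = 0;
};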
if (frame_transformer_delegate_) @@ -580,13 +603,11 @@ void ChannelReceive::SetSink(AudioSinkInterface* sink) { void ChannelReceive::StartPlayout() { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - MutexLock lock(&playing_lock_); playing_ = true; } void ChannelReceive::StopPlayout() { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - MutexLock lock(&playing_lock_); playing_ = false; _outputAudioLevel.ResetLevelFullRange(); } @@ -614,11 +635,8 @@ void ChannelReceive::OnRtpPacket(const RtpPacketReceived& packet) { // UpdatePlayoutTimestamp and int64_t now_ms = rtc::TimeMillis(); - { - MutexLock lock(&sync_info_lock_); - last_received_rtp_timestamp_ = packet.Timestamp(); - last_received_rtp_system_time_ms_ = now_ms; - } + last_received_rtp_timestamp_ = packet.Timestamp(); + last_received_rtp_system_time_ms_ = now_ms; // Store playout timestamp for the received RTP packet UpdatePlayoutTimestamp(false, now_ms); @@ -651,7 +669,7 @@ void ChannelReceive::ReceivePacket(const uint8_t* packet, size_t packet_length, const RTPHeader& header) { const uint8_t* payload = packet + header.headerLength; - assert(packet_length >= header.headerLength); + RTC_DCHECK_GE(packet_length, header.headerLength); size_t payload_length = packet_length - header.headerLength; size_t payload_data_length = payload_length - header.paddingLength; @@ -811,6 +829,11 @@ CallReceiveStatistics ChannelReceive::GetRTCPStatistics() const { stats.last_packet_received_timestamp_ms = absl::nullopt; } + { + MutexLock lock(&rtcp_counter_mutex_); + stats.nacks_sent = rtcp_packet_type_counter_.nack_packets; + } + // Timestamps. { MutexLock lock(&ts_stats_lock_); @@ -855,10 +878,19 @@ int ChannelReceive::ResendPackets(const uint16_t* sequence_numbers, return rtp_rtcp_->SendNACK(sequence_numbers, length); } +void ChannelReceive::RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) { + if (ssrc != remote_ssrc_) { + return; + } + MutexLock lock(&rtcp_counter_mutex_); + rtcp_packet_type_counter_ = packet_counter; +} + void ChannelReceive::SetAssociatedSendChannel( const ChannelSendInterface* channel) { - // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. - RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_RUN_ON(&network_thread_checker_); associated_send_channel_ = channel; } @@ -867,11 +899,33 @@ void ChannelReceive::SetDepacketizerToDecoderFrameTransformer( RTC_DCHECK_RUN_ON(&worker_thread_checker_); // Depending on when the channel is created, the transformer might be set // twice. Don't replace the delegate if it was already initialized. - if (!frame_transformer || frame_transformer_delegate_) + if (!frame_transformer || frame_transformer_delegate_) { + RTC_NOTREACHED() << "Not setting the transformer?"; return; + } + InitFrameTransformerDelegate(std::move(frame_transformer)); } +void ChannelReceive::SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + frame_decryptor_ = std::move(frame_decryptor); +} + +void ChannelReceive::OnLocalSsrcChange(uint32_t local_ssrc) { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + rtp_rtcp_->SetLocalSsrc(local_ssrc); +} + +uint32_t ChannelReceive::GetLocalSsrc() const { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. 
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_); + return rtp_rtcp_->local_media_ssrc(); +} + NetworkStatistics ChannelReceive::GetNetworkStatistics( bool get_and_clear_legacy_stats) const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); @@ -889,14 +943,8 @@ AudioDecodingCallStats ChannelReceive::GetDecodingCallStatistics() const { uint32_t ChannelReceive::GetDelayEstimate() const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - - uint32_t playout_delay; - { - MutexLock lock(&video_sync_lock_); - playout_delay = playout_delay_ms_; - } // Return the current jitter buffer delay + playout delay. - return acm_receiver_.FilteredCurrentDelayMs() + playout_delay; + return acm_receiver_.FilteredCurrentDelayMs() + playout_delay_ms_; } bool ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) { @@ -918,21 +966,17 @@ bool ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) { bool ChannelReceive::GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp, int64_t* time_ms) const { - RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_); - { - MutexLock lock(&video_sync_lock_); - if (!playout_timestamp_rtp_time_ms_) - return false; - *rtp_timestamp = playout_timestamp_rtp_; - *time_ms = playout_timestamp_rtp_time_ms_.value(); - return true; - } + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + if (!playout_timestamp_rtp_time_ms_) + return false; + *rtp_timestamp = playout_timestamp_rtp_; + *time_ms = playout_timestamp_rtp_time_ms_.value(); + return true; } void ChannelReceive::SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, int64_t time_ms) { - RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_); - MutexLock lock(&video_sync_lock_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); playout_timestamp_ntp_ = ntp_timestamp_ms; playout_timestamp_ntp_time_ms_ = time_ms; } @@ -940,7 +984,6 @@ void ChannelReceive::SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, absl::optional ChannelReceive::GetCurrentEstimatedPlayoutNtpTimestampMs(int64_t now_ms) const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - MutexLock lock(&video_sync_lock_); if (!playout_timestamp_ntp_ || !playout_timestamp_ntp_time_ms_) return absl::nullopt; @@ -970,24 +1013,19 @@ absl::optional ChannelReceive::GetSyncInfo() const { return absl::nullopt; } - { - MutexLock lock(&sync_info_lock_); - if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) { - return absl::nullopt; - } - info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; - info.latest_receive_time_ms = *last_received_rtp_system_time_ms_; + if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) { + return absl::nullopt; } + info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; + info.latest_receive_time_ms = *last_received_rtp_system_time_ms_; int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs(); - { - MutexLock lock(&video_sync_lock_); - info.current_delay_ms = jitter_buffer_delay + playout_delay_ms_; - } + info.current_delay_ms = jitter_buffer_delay + playout_delay_ms_; return info; } +// RTC_RUN_ON(worker_thread_checker_) void ChannelReceive::UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms) { // TODO(bugs.webrtc.org/11993): Expect to be called exclusively on the // network thread. Once that's done, we won't need video_sync_lock_. @@ -1014,14 +1052,11 @@ void ChannelReceive::UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms) { // Remove the playout delay. 
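The unit math behind "Remove the playout delay" just below: an RTP timestamp advances GetRtpTimestampRateHz() ticks per second, so a delay in milliseconds maps to delay_ms * (rate_hz / 1000) timestamp units. The 48 kHz rate here is only an example (typical for Opus), not a value taken from the patch.

#include <cstdint>

constexpr int kExampleRtpRateHz = 48000;

constexpr uint32_t DelayMsToRtpTicks(int delay_ms) {
  return static_cast<uint32_t>(delay_ms) * (kExampleRtpRateHz / 1000);
}

static_assert(DelayMsToRtpTicks(80) == 3840, "80 ms at 48 kHz is 3840 ticks");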
playout_timestamp -= (delay_ms * (GetRtpTimestampRateHz() / 1000)); - { - MutexLock lock(&video_sync_lock_); - if (!rtcp && playout_timestamp != playout_timestamp_rtp_) { - playout_timestamp_rtp_ = playout_timestamp; - playout_timestamp_rtp_time_ms_ = now_ms; - } - playout_delay_ms_ = delay_ms; + if (!rtcp && playout_timestamp != playout_timestamp_rtp_) { + playout_timestamp_rtp_ = playout_timestamp; + playout_timestamp_rtp_time_ms_ = now_ms; } + playout_delay_ms_ = delay_ms; } int ChannelReceive::GetRtpTimestampRateHz() const { @@ -1039,7 +1074,7 @@ int ChannelReceive::GetRtpTimestampRateHz() const { } int64_t ChannelReceive::GetRTT() const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_RUN_ON(&network_thread_checker_); std::vector report_blocks = rtp_rtcp_->GetLatestReportBlockData(); @@ -1065,7 +1100,6 @@ int64_t ChannelReceive::GetRTT() const { std::unique_ptr CreateChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, @@ -1082,12 +1116,11 @@ std::unique_ptr CreateChannelReceive( const webrtc::CryptoOptions& crypto_options, rtc::scoped_refptr frame_transformer) { return std::make_unique( - clock, module_process_thread, neteq_factory, audio_device_module, - rtcp_send_transport, rtc_event_log, local_ssrc, remote_ssrc, - jitter_buffer_max_packets, jitter_buffer_fast_playout, - jitter_buffer_min_delay_ms, jitter_buffer_enable_rtx_handling, - decoder_factory, codec_pair_id, frame_decryptor, crypto_options, - std::move(frame_transformer)); + clock, neteq_factory, audio_device_module, rtcp_send_transport, + rtc_event_log, local_ssrc, remote_ssrc, jitter_buffer_max_packets, + jitter_buffer_fast_playout, jitter_buffer_min_delay_ms, + jitter_buffer_enable_rtx_handling, decoder_factory, codec_pair_id, + std::move(frame_decryptor), crypto_options, std::move(frame_transformer)); } } // namespace voe diff --git a/audio/channel_receive.h b/audio/channel_receive.h index c55968b55f..deec49feaf 100644 --- a/audio/channel_receive.h +++ b/audio/channel_receive.h @@ -44,7 +44,6 @@ namespace webrtc { class AudioDeviceModule; class FrameDecryptorInterface; class PacketRouter; -class ProcessThread; class RateLimiter; class ReceiveStatistics; class RtcEventLog; @@ -58,6 +57,7 @@ struct CallReceiveStatistics { int64_t payload_bytes_rcvd = 0; int64_t header_and_padding_bytes_rcvd = 0; int packetsReceived; + uint32_t nacks_sent = 0; // The capture NTP time (in local timebase) of the first played out audio // frame. 
int64_t capture_start_ntp_time_ms_; @@ -159,11 +159,16 @@ class ChannelReceiveInterface : public RtpPacketSinkInterface { virtual void SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) = 0; + + virtual void SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) = 0; + + virtual void OnLocalSsrcChange(uint32_t local_ssrc) = 0; + virtual uint32_t GetLocalSsrc() const = 0; }; std::unique_ptr CreateChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, diff --git a/audio/channel_receive_frame_transformer_delegate.cc b/audio/channel_receive_frame_transformer_delegate.cc index 261afbb100..7e617df780 100644 --- a/audio/channel_receive_frame_transformer_delegate.cc +++ b/audio/channel_receive_frame_transformer_delegate.cc @@ -47,7 +47,7 @@ class TransformableAudioFrame : public TransformableAudioFrameInterface { ChannelReceiveFrameTransformerDelegate::ChannelReceiveFrameTransformerDelegate( ReceiveFrameCallback receive_frame_callback, rtc::scoped_refptr frame_transformer, - rtc::Thread* channel_receive_thread) + TaskQueueBase* channel_receive_thread) : receive_frame_callback_(receive_frame_callback), frame_transformer_(std::move(frame_transformer)), channel_receive_thread_(channel_receive_thread) {} diff --git a/audio/channel_receive_frame_transformer_delegate.h b/audio/channel_receive_frame_transformer_delegate.h index 0af748e37f..f59834d24e 100644 --- a/audio/channel_receive_frame_transformer_delegate.h +++ b/audio/channel_receive_frame_transformer_delegate.h @@ -32,7 +32,7 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback { ChannelReceiveFrameTransformerDelegate( ReceiveFrameCallback receive_frame_callback, rtc::scoped_refptr frame_transformer, - rtc::Thread* channel_receive_thread); + TaskQueueBase* channel_receive_thread); // Registers |this| as callback for |frame_transformer_|, to get the // transformed frames. @@ -67,7 +67,7 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback { RTC_GUARDED_BY(sequence_checker_); rtc::scoped_refptr frame_transformer_ RTC_GUARDED_BY(sequence_checker_); - rtc::Thread* channel_receive_thread_; + TaskQueueBase* const channel_receive_thread_; }; } // namespace webrtc diff --git a/audio/channel_send.cc b/audio/channel_send.cc index 47afc7982b..06e9238ce8 100644 --- a/audio/channel_send.cc +++ b/audio/channel_send.cc @@ -60,8 +60,9 @@ class TransportSequenceNumberProxy; class VoERtcpObserver; class ChannelSend : public ChannelSendInterface, - public AudioPacketizationCallback { // receive encoded - // packets from the ACM + public AudioPacketizationCallback, // receive encoded + // packets from the ACM + public RtcpPacketTypeCounterObserver { public: // TODO(nisse): Make OnUplinkPacketLossRate public, and delete friend // declaration. @@ -69,7 +70,6 @@ class ChannelSend : public ChannelSendInterface, ChannelSend(Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -151,6 +151,11 @@ class ChannelSend : public ChannelSendInterface, rtc::scoped_refptr frame_transformer) override; + // RtcpPacketTypeCounterObserver. 
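Both ChannelReceive and ChannelSend adopt the same observer pattern in this patch: the RTP/RTCP module reports per-SSRC packet-type counters, the channel keeps the latest snapshot for its own SSRC under a small mutex, and the stats getters read the NACK count from it. A reduced stand-in (the class itself is hypothetical):

#include <cstdint>

#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"

class NackCounterSketch : public webrtc::RtcpPacketTypeCounterObserver {
 public:
  explicit NackCounterSketch(uint32_t ssrc) : ssrc_(ssrc) {}

  void RtcpPacketTypesCounterUpdated(
      uint32_t ssrc,
      const webrtc::RtcpPacketTypeCounter& packet_counter) override {
    if (ssrc != ssrc_)
      return;  // Counters for other streams are not ours to report.
    webrtc::MutexLock lock(&mutex_);
    counter_ = packet_counter;
  }

  uint32_t nack_packets() const {
    webrtc::MutexLock lock(&mutex_);
    return counter_.nack_packets;
  }

 private:
  const uint32_t ssrc_;
  mutable webrtc::Mutex mutex_;
  webrtc::RtcpPacketTypeCounter counter_ RTC_GUARDED_BY(mutex_);
};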
+ void RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) override; + private: // From AudioPacketizationCallback in the ACM int32_t SendData(AudioFrameType frameType, @@ -180,7 +185,6 @@ class ChannelSend : public ChannelSendInterface, // voe::Channel into parts with single-threaded semantics, and thereby reduce // the need for locks. SequenceChecker worker_thread_checker_; - SequenceChecker module_process_thread_checker_; // Methods accessed from audio and video threads are checked for sequential- // only access. We don't necessarily own and control these threads, so thread // checkers cannot be used. E.g. Chromium may transfer "ownership" from one @@ -189,6 +193,7 @@ class ChannelSend : public ChannelSendInterface, mutable Mutex volume_settings_mutex_; + const uint32_t ssrc_; bool sending_ RTC_GUARDED_BY(&worker_thread_checker_) = false; RtcEventLog* const event_log_; @@ -200,7 +205,6 @@ class ChannelSend : public ChannelSendInterface, uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_); // uses - ProcessThread* const _moduleProcessThreadPtr; RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_); bool input_mute_ RTC_GUARDED_BY(volume_settings_mutex_); bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_); @@ -242,6 +246,10 @@ class ChannelSend : public ChannelSendInterface, rtc::TaskQueue encoder_queue_; const bool fixing_timestamp_stall_; + + mutable Mutex rtcp_counter_mutex_; + RtcpPacketTypeCounter rtcp_packet_type_counter_ + RTC_GUARDED_BY(rtcp_counter_mutex_); }; const int kTelephoneEventAttenuationdB = 10; @@ -445,7 +453,6 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, ChannelSend::ChannelSend( Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -456,10 +463,10 @@ ChannelSend::ChannelSend( uint32_t ssrc, rtc::scoped_refptr frame_transformer, TransportFeedbackObserver* feedback_observer) - : event_log_(rtc_event_log), + : ssrc_(ssrc), + event_log_(rtc_event_log), _timeStamp(0), // This is just an offset, RTP module will add it's own // random offset - _moduleProcessThreadPtr(module_process_thread), input_mute_(false), previous_frame_muted_(false), _includeAudioLevelIndication(false), @@ -475,9 +482,6 @@ ChannelSend::ChannelSend( TaskQueueFactory::Priority::NORMAL)), fixing_timestamp_stall_( !field_trial::IsDisabled("WebRTC-Audio-FixTimestampStall")) { - RTC_DCHECK(module_process_thread); - module_process_thread_checker_.Detach(); - audio_coding_.reset(AudioCodingModule::Create(AudioCodingModule::Config())); RtpRtcpInterface::Configuration configuration; @@ -495,6 +499,7 @@ ChannelSend::ChannelSend( retransmission_rate_limiter_.get(); configuration.extmap_allow_mixed = extmap_allow_mixed; configuration.rtcp_report_interval_ms = rtcp_report_interval_ms; + configuration.rtcp_packet_type_counter_observer = this; configuration.local_media_ssrc = ssrc; @@ -504,8 +509,6 @@ ChannelSend::ChannelSend( rtp_sender_audio_ = std::make_unique(configuration.clock, rtp_rtcp_->RtpSender()); - _moduleProcessThreadPtr->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); - // Ensure that RTCP is enabled by default for the created channel. 
rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound); @@ -525,9 +528,6 @@ ChannelSend::~ChannelSend() { StopSend(); int error = audio_coding_->RegisterTransportCallback(NULL); RTC_DCHECK_EQ(0, error); - - if (_moduleProcessThreadPtr) - _moduleProcessThreadPtr->DeRegisterModule(rtp_rtcp_.get()); } void ChannelSend::StartSend() { @@ -790,9 +790,24 @@ CallSendStatistics ChannelSend::GetRTCPStatistics() const { stats.retransmitted_packets_sent = rtp_stats.retransmitted.packets; stats.report_block_datas = rtp_rtcp_->GetLatestReportBlockData(); + { + MutexLock lock(&rtcp_counter_mutex_); + stats.nacks_rcvd = rtcp_packet_type_counter_.nack_packets; + } + return stats; } +void ChannelSend::RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) { + if (ssrc != ssrc_) { + return; + } + MutexLock lock(&rtcp_counter_mutex_); + rtcp_packet_type_counter_ = packet_counter; +} + void ChannelSend::ProcessAndEncodeAudio( std::unique_ptr audio_frame) { RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_); @@ -858,7 +873,6 @@ ANAStats ChannelSend::GetANAStatistics() const { } RtpRtcpInterface* ChannelSend::GetRtpRtcp() const { - RTC_DCHECK(module_process_thread_checker_.IsCurrent()); return rtp_rtcp_.get(); } @@ -930,7 +944,6 @@ void ChannelSend::InitFrameTransformerDelegate( std::unique_ptr CreateChannelSend( Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -942,10 +955,10 @@ std::unique_ptr CreateChannelSend( rtc::scoped_refptr frame_transformer, TransportFeedbackObserver* feedback_observer) { return std::make_unique( - clock, task_queue_factory, module_process_thread, rtp_transport, - rtcp_rtt_stats, rtc_event_log, frame_encryptor, crypto_options, - extmap_allow_mixed, rtcp_report_interval_ms, ssrc, - std::move(frame_transformer), feedback_observer); + clock, task_queue_factory, rtp_transport, rtcp_rtt_stats, rtc_event_log, + frame_encryptor, crypto_options, extmap_allow_mixed, + rtcp_report_interval_ms, ssrc, std::move(frame_transformer), + feedback_observer); } } // namespace voe diff --git a/audio/channel_send.h b/audio/channel_send.h index 2e23ef5d2d..67391af956 100644 --- a/audio/channel_send.h +++ b/audio/channel_send.h @@ -28,7 +28,6 @@ namespace webrtc { class FrameEncryptorInterface; -class ProcessThread; class RtcEventLog; class RtpTransportControllerSendInterface; @@ -46,6 +45,7 @@ struct CallSendStatistics { // ReportBlockData represents the latest Report Block that was received for // that pair. std::vector report_block_datas; + uint32_t nacks_rcvd; }; // See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details. 
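Note on the channel_send.cc/channel_send.h changes above: received NACK counts are surfaced in CallSendStatistics::nacks_rcvd by making ChannelSend an RtcpPacketTypeCounterObserver (registered via configuration.rtcp_packet_type_counter_observer = this) and caching the latest counter under rtcp_counter_mutex_. The following is a minimal standalone sketch of that observer pattern, not part of the patch; the class name StatsSink is hypothetical and the header paths are assumed from the WebRTC tree.

#include <cstdint>

#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"  // RtcpPacketTypeCounter(Observer).
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"

// Caches the most recent RTCP packet-type counter for one SSRC so that a
// stats getter running on another thread can read e.g. the NACK count.
class StatsSink : public webrtc::RtcpPacketTypeCounterObserver {
 public:
  explicit StatsSink(uint32_t ssrc) : ssrc_(ssrc) {}

  void RtcpPacketTypesCounterUpdated(
      uint32_t ssrc,
      const webrtc::RtcpPacketTypeCounter& packet_counter) override {
    if (ssrc != ssrc_)
      return;  // Ignore updates that belong to other streams.
    webrtc::MutexLock lock(&mutex_);
    counter_ = packet_counter;
  }

  uint32_t nacks_received() const {
    webrtc::MutexLock lock(&mutex_);
    return counter_.nack_packets;
  }

 private:
  const uint32_t ssrc_;
  mutable webrtc::Mutex mutex_;
  webrtc::RtcpPacketTypeCounter counter_ RTC_GUARDED_BY(mutex_);
};

As in the patch, such an observer would be handed to the RTP/RTCP module through RtpRtcpInterface::Configuration::rtcp_packet_type_counter_observer before the module is created, and the cached value read when statistics are requested.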
@@ -126,7 +126,6 @@ class ChannelSendInterface { std::unique_ptr CreateChannelSend( Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, diff --git a/audio/mock_voe_channel_proxy.h b/audio/mock_voe_channel_proxy.h index 7f140d400d..ea2a2ac3f0 100644 --- a/audio/mock_voe_channel_proxy.h +++ b/audio/mock_voe_channel_proxy.h @@ -17,6 +17,7 @@ #include #include +#include "api/crypto/frame_decryptor_interface.h" #include "api/test/mock_frame_encryptor.h" #include "audio/channel_receive.h" #include "audio/channel_send.h" @@ -98,6 +99,13 @@ class MockChannelReceive : public voe::ChannelReceiveInterface { SetDepacketizerToDecoderFrameTransformer, (rtc::scoped_refptr frame_transformer), (override)); + MOCK_METHOD( + void, + SetFrameDecryptor, + (rtc::scoped_refptr frame_decryptor), + (override)); + MOCK_METHOD(void, OnLocalSsrcChange, (uint32_t local_ssrc), (override)); + MOCK_METHOD(uint32_t, GetLocalSsrc, (), (const, override)); }; class MockChannelSend : public voe::ChannelSendInterface { diff --git a/audio/test/audio_end_to_end_test.cc b/audio/test/audio_end_to_end_test.cc index 896b0f2dae..0d8529a913 100644 --- a/audio/test/audio_end_to_end_test.cc +++ b/audio/test/audio_end_to_end_test.cc @@ -92,6 +92,8 @@ void AudioEndToEndTest::ModifyAudioConfigs( {{"stereo", "1"}}); send_config->send_codec_spec = AudioSendStream::Config::SendCodecSpec( test::CallTest::kAudioSendPayloadType, kDefaultFormat); + send_config->min_bitrate_bps = 32000; + send_config->max_bitrate_bps = 32000; } void AudioEndToEndTest::OnAudioStreamsCreated( diff --git a/audio/test/nack_test.cc b/audio/test/nack_test.cc new file mode 100644 index 0000000000..13cfe74a28 --- /dev/null +++ b/audio/test/nack_test.cc @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio/test/audio_end_to_end_test.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+using NackTest = CallTest;
+
+TEST_F(NackTest, ShouldNackInLossyNetwork) {
+  class NackTest : public AudioEndToEndTest {
+   public:
+    const int kTestDurationMs = 2000;
+    const int64_t kRttMs = 30;
+    const int64_t kLossPercent = 30;
+    const int kNackHistoryMs = 1000;
+
+    BuiltInNetworkBehaviorConfig GetNetworkPipeConfig() const override {
+      BuiltInNetworkBehaviorConfig pipe_config;
+      pipe_config.queue_delay_ms = kRttMs / 2;
+      pipe_config.loss_percent = kLossPercent;
+      return pipe_config;
+    }
+
+    void ModifyAudioConfigs(
+        AudioSendStream::Config* send_config,
+        std::vector<AudioReceiveStream::Config>* receive_configs) override {
+      ASSERT_EQ(receive_configs->size(), 1U);
+      (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackHistoryMs;
+      AudioEndToEndTest::ModifyAudioConfigs(send_config, receive_configs);
+    }
+
+    void PerformTest() override { SleepMs(kTestDurationMs); }
+
+    void OnStreamsStopped() override {
+      AudioReceiveStream::Stats recv_stats =
+          receive_stream()->GetStats(/*get_and_clear_legacy_stats=*/true);
+      EXPECT_GT(recv_stats.nacks_sent, 0U);
+      AudioSendStream::Stats send_stats = send_stream()->GetStats();
+      EXPECT_GT(send_stats.retransmitted_packets_sent, 0U);
+      EXPECT_GT(send_stats.nacks_rcvd, 0U);
+    }
+  } test;
+
+  RunBaseTest(&test);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/audio/voip/audio_channel.cc b/audio/voip/audio_channel.cc
index d11e6d79f9..b4a50eec12 100644
--- a/audio/voip/audio_channel.cc
+++ b/audio/voip/audio_channel.cc
@@ -32,12 +32,10 @@ AudioChannel::AudioChannel(
     Transport* transport,
     uint32_t local_ssrc,
     TaskQueueFactory* task_queue_factory,
-    ProcessThread* process_thread,
     AudioMixer* audio_mixer,
     rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
-    : audio_mixer_(audio_mixer), process_thread_(process_thread) {
+    : audio_mixer_(audio_mixer) {
   RTC_DCHECK(task_queue_factory);
-  RTC_DCHECK(process_thread);
   RTC_DCHECK(audio_mixer);
 
   Clock* clock = Clock::GetRealTimeClock();
@@ -56,9 +54,6 @@ AudioChannel::AudioChannel(
   rtp_rtcp_->SetSendingMediaStatus(false);
   rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound);
 
-  // ProcessThread periodically services RTP stack for RTCP.
-  process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);
-
   ingress_ = std::make_unique<AudioIngress>(rtp_rtcp_.get(), clock,
                                             receive_statistics_.get(),
                                             std::move(decoder_factory));
@@ -80,12 +75,10 @@ AudioChannel::~AudioChannel() {
   audio_mixer_->RemoveSource(ingress_.get());
 
-  // AudioEgress could hold current global TaskQueueBase that we need to clear
-  // before ProcessThread::DeRegisterModule.
+  // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |egress_|
+  // here.
egress_.reset(); ingress_.reset(); - - process_thread_->DeRegisterModule(rtp_rtcp_.get()); } bool AudioChannel::StartSend() { diff --git a/audio/voip/audio_channel.h b/audio/voip/audio_channel.h index 7b9fa6f74e..7338d9faab 100644 --- a/audio/voip/audio_channel.h +++ b/audio/voip/audio_channel.h @@ -22,7 +22,6 @@ #include "audio/voip/audio_egress.h" #include "audio/voip/audio_ingress.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" -#include "modules/utility/include/process_thread.h" #include "rtc_base/ref_count.h" namespace webrtc { @@ -35,7 +34,6 @@ class AudioChannel : public rtc::RefCountInterface { AudioChannel(Transport* transport, uint32_t local_ssrc, TaskQueueFactory* task_queue_factory, - ProcessThread* process_thread, AudioMixer* audio_mixer, rtc::scoped_refptr decoder_factory); ~AudioChannel() override; @@ -120,9 +118,6 @@ class AudioChannel : public rtc::RefCountInterface { // Synchronization is handled internally by AudioMixer. AudioMixer* audio_mixer_; - // Synchronization is handled internally by ProcessThread. - ProcessThread* process_thread_; - // Listed in order for safe destruction of AudioChannel object. // Synchronization for these are handled internally. std::unique_ptr receive_statistics_; diff --git a/audio/voip/test/audio_channel_unittest.cc b/audio/voip/test/audio_channel_unittest.cc index f99d163022..a4f518c5bd 100644 --- a/audio/voip/test/audio_channel_unittest.cc +++ b/audio/voip/test/audio_channel_unittest.cc @@ -17,7 +17,6 @@ #include "modules/audio_mixer/audio_mixer_impl.h" #include "modules/audio_mixer/sine_wave_generator.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/utility/include/process_thread.h" #include "rtc_base/logging.h" #include "test/gmock.h" #include "test/gtest.h" @@ -43,7 +42,6 @@ class AudioChannelTest : public ::testing::Test { AudioChannelTest() : fake_clock_(kStartTime), wave_generator_(1000.0, kAudioLevel) { task_queue_factory_ = std::make_unique(&task_queue_); - process_thread_ = ProcessThread::Create("ModuleProcessThread"); audio_mixer_ = AudioMixerImpl::Create(); encoder_factory_ = CreateBuiltinAudioEncoderFactory(); decoder_factory_ = CreateBuiltinAudioDecoderFactory(); @@ -66,8 +64,8 @@ class AudioChannelTest : public ::testing::Test { // simplify network routing logic. 
rtc::scoped_refptr audio_channel = rtc::make_ref_counted( - &transport_, ssrc, task_queue_factory_.get(), process_thread_.get(), - audio_mixer_.get(), decoder_factory_); + &transport_, ssrc, task_queue_factory_.get(), audio_mixer_.get(), + decoder_factory_); audio_channel->SetEncoder(kPcmuPayload, kPcmuFormat, encoder_factory_->MakeAudioEncoder( kPcmuPayload, kPcmuFormat, absl::nullopt)); @@ -95,7 +93,6 @@ class AudioChannelTest : public ::testing::Test { rtc::scoped_refptr audio_mixer_; rtc::scoped_refptr decoder_factory_; rtc::scoped_refptr encoder_factory_; - std::unique_ptr process_thread_; rtc::scoped_refptr audio_channel_; }; diff --git a/audio/voip/test/voip_core_unittest.cc b/audio/voip/test/voip_core_unittest.cc index 0d407601a3..896d0d98bb 100644 --- a/audio/voip/test/voip_core_unittest.cc +++ b/audio/voip/test/voip_core_unittest.cc @@ -14,7 +14,6 @@ #include "api/task_queue/default_task_queue_factory.h" #include "modules/audio_device/include/mock_audio_device.h" #include "modules/audio_processing/include/mock_audio_processing.h" -#include "modules/utility/include/mock/mock_process_thread.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -41,20 +40,15 @@ class VoipCoreTest : public ::testing::Test { rtc::scoped_refptr audio_processing = rtc::make_ref_counted>(); - auto process_thread = std::make_unique>(); - // Hold the pointer to use for testing. - process_thread_ = process_thread.get(); - voip_core_ = std::make_unique( std::move(encoder_factory), std::move(decoder_factory), CreateDefaultTaskQueueFactory(), audio_device_, - std::move(audio_processing), std::move(process_thread)); + std::move(audio_processing)); } std::unique_ptr voip_core_; NiceMock transport_; rtc::scoped_refptr audio_device_; - NiceMock* process_thread_; }; // Validate expected API calls that involves with VoipCore. Some verification is @@ -192,31 +186,5 @@ TEST_F(VoipCoreTest, StopSendAndPlayoutWithoutStarting) { EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); } -// This tests correctness on ProcessThread usage where we expect the first/last -// channel creation/release triggers its Start/Stop method once only. 
-TEST_F(VoipCoreTest, TestProcessThreadOperation) { - EXPECT_CALL(*process_thread_, Start); - EXPECT_CALL(*process_thread_, RegisterModule).Times(2); - - auto channel_one = voip_core_->CreateChannel(&transport_, 0xdeadc0de); - auto channel_two = voip_core_->CreateChannel(&transport_, 0xdeadbeef); - - EXPECT_CALL(*process_thread_, Stop); - EXPECT_CALL(*process_thread_, DeRegisterModule).Times(2); - - EXPECT_EQ(voip_core_->ReleaseChannel(channel_one), VoipResult::kOk); - EXPECT_EQ(voip_core_->ReleaseChannel(channel_two), VoipResult::kOk); - - EXPECT_CALL(*process_thread_, Start); - EXPECT_CALL(*process_thread_, RegisterModule); - - auto channel_three = voip_core_->CreateChannel(&transport_, absl::nullopt); - - EXPECT_CALL(*process_thread_, Stop); - EXPECT_CALL(*process_thread_, DeRegisterModule); - - EXPECT_EQ(voip_core_->ReleaseChannel(channel_three), VoipResult::kOk); -} - } // namespace } // namespace webrtc diff --git a/audio/voip/voip_core.cc b/audio/voip/voip_core.cc index 67ae4c6521..fd66379f4a 100644 --- a/audio/voip/voip_core.cc +++ b/audio/voip/voip_core.cc @@ -41,18 +41,12 @@ VoipCore::VoipCore(rtc::scoped_refptr encoder_factory, rtc::scoped_refptr decoder_factory, std::unique_ptr task_queue_factory, rtc::scoped_refptr audio_device_module, - rtc::scoped_refptr audio_processing, - std::unique_ptr process_thread) { + rtc::scoped_refptr audio_processing) { encoder_factory_ = std::move(encoder_factory); decoder_factory_ = std::move(decoder_factory); task_queue_factory_ = std::move(task_queue_factory); audio_device_module_ = std::move(audio_device_module); audio_processing_ = std::move(audio_processing); - process_thread_ = std::move(process_thread); - - if (!process_thread_) { - process_thread_ = ProcessThread::Create("ModuleProcessThread"); - } audio_mixer_ = AudioMixerImpl::Create(); // AudioTransportImpl depends on audio mixer and audio processing instances. @@ -138,19 +132,13 @@ ChannelId VoipCore::CreateChannel(Transport* transport, } rtc::scoped_refptr channel = - rtc::make_ref_counted( - transport, local_ssrc.value(), task_queue_factory_.get(), - process_thread_.get(), audio_mixer_.get(), decoder_factory_); - - // Check if we need to start the process thread. - bool start_process_thread = false; + rtc::make_ref_counted(transport, local_ssrc.value(), + task_queue_factory_.get(), + audio_mixer_.get(), decoder_factory_); { MutexLock lock(&lock_); - // Start process thread if the channel is the first one. - start_process_thread = channels_.empty(); - channel_id = static_cast(next_channel_id_); channels_[channel_id] = channel; next_channel_id_++; @@ -162,10 +150,6 @@ ChannelId VoipCore::CreateChannel(Transport* transport, // Set ChannelId in audio channel for logging/debugging purpose. channel->SetId(channel_id); - if (start_process_thread) { - process_thread_->Start(); - } - return channel_id; } @@ -194,9 +178,9 @@ VoipResult VoipCore::ReleaseChannel(ChannelId channel_id) { } if (no_channels_after_release) { - // Release audio channel first to have it DeRegisterModule first. + // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |channel| + // here. channel = nullptr; - process_thread_->Stop(); // Make sure to stop playout on ADM if it is playing. 
if (audio_device_module_->Playing()) { diff --git a/audio/voip/voip_core.h b/audio/voip/voip_core.h index b7c1f2947f..359e07272d 100644 --- a/audio/voip/voip_core.h +++ b/audio/voip/voip_core.h @@ -33,7 +33,6 @@ #include "modules/audio_device/include/audio_device.h" #include "modules/audio_mixer/audio_mixer_impl.h" #include "modules/audio_processing/include/audio_processing.h" -#include "modules/utility/include/process_thread.h" #include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -61,8 +60,7 @@ class VoipCore : public VoipEngine, rtc::scoped_refptr decoder_factory, std::unique_ptr task_queue_factory, rtc::scoped_refptr audio_device_module, - rtc::scoped_refptr audio_processing, - std::unique_ptr process_thread = nullptr); + rtc::scoped_refptr audio_processing); ~VoipCore() override = default; // Implements VoipEngine interfaces. @@ -160,10 +158,6 @@ class VoipCore : public VoipEngine, // Synchronization is handled internally by AudioDeviceModule. rtc::scoped_refptr audio_device_module_; - // Synchronization is handled internally by ProcessThread. - // Must be placed before |channels_| for proper destruction. - std::unique_ptr process_thread_; - Mutex lock_; // Member to track a next ChannelId for new AudioChannel. diff --git a/build_overrides/build.gni b/build_overrides/build.gni index c21069535b..137b6a40b2 100644 --- a/build_overrides/build.gni +++ b/build_overrides/build.gni @@ -20,11 +20,11 @@ checkout_google_benchmark = true asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc" lsan_suppressions_file = "//tools_webrtc/sanitizers/lsan_suppressions_webrtc.cc" tsan_suppressions_file = "//tools_webrtc/sanitizers/tsan_suppressions_webrtc.cc" -msan_blacklist_path = +msan_ignorelist_path = rebase_path("//tools_webrtc/msan/suppressions.txt", root_build_dir) -ubsan_blacklist_path = +ubsan_ignorelist_path = rebase_path("//tools_webrtc/ubsan/suppressions.txt", root_build_dir) -ubsan_vptr_blacklist_path = +ubsan_vptr_ignorelist_path = rebase_path("//tools_webrtc/ubsan/vptr_suppressions.txt", root_build_dir) # For Chromium, Android 32-bit non-component, non-clang builds hit a 4GiB size diff --git a/call/BUILD.gn b/call/BUILD.gn index 9f0cd037f3..638eb0b910 100644 --- a/call/BUILD.gn +++ b/call/BUILD.gn @@ -35,8 +35,10 @@ rtc_library("call_interfaces") { if (!build_with_mozilla) { sources += [ "audio_send_stream.cc" ] } + deps = [ ":audio_sender_interface", + ":receive_stream_interface", ":rtp_interfaces", ":video_stream_api", "../api:fec_controller_api", @@ -51,7 +53,6 @@ rtc_library("call_interfaces") { "../api/audio:audio_frame_processor", "../api/audio:audio_mixer_api", "../api/audio_codecs:audio_codecs_api", - "../api/crypto:frame_decryptor_interface", "../api/crypto:frame_encryptor_interface", "../api/crypto:options", "../api/neteq:neteq_api", @@ -59,7 +60,6 @@ rtc_library("call_interfaces") { "../api/transport:bitrate_settings", "../api/transport:network_control", "../api/transport:webrtc_key_value_config", - "../api/transport/rtp:rtp_source", "../modules/async_audio_processing", "../modules/audio_device", "../modules/audio_processing", @@ -98,22 +98,29 @@ rtc_library("rtp_interfaces") { "rtp_config.h", "rtp_packet_sink_interface.h", "rtp_stream_receiver_controller_interface.h", + "rtp_transport_config.h", + "rtp_transport_controller_send_factory_interface.h", "rtp_transport_controller_send_interface.h", ] deps = [ "../api:array_view", "../api:fec_controller_api", "../api:frame_transformer_interface", + "../api:network_state_predictor_api", "../api:rtp_headers", 
"../api:rtp_parameters", "../api/crypto:options", "../api/rtc_event_log", "../api/transport:bitrate_settings", + "../api/transport:network_control", + "../api/transport:webrtc_key_value_config", "../api/units:timestamp", "../common_video:frame_counts", "../modules/rtp_rtcp:rtp_rtcp_format", + "../modules/utility", "../rtc_base:checks", "../rtc_base:rtc_base_approved", + "../rtc_base:rtc_task_queue", ] absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", @@ -140,6 +147,8 @@ rtc_library("rtp_receiver") { "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:checks", "../rtc_base:rtc_base_approved", + "../rtc_base/containers:flat_map", + "../rtc_base/containers:flat_set", ] absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } @@ -150,6 +159,7 @@ rtc_library("rtp_sender") { "rtp_payload_params.h", "rtp_transport_controller_send.cc", "rtp_transport_controller_send.h", + "rtp_transport_controller_send_factory.h", "rtp_video_sender.cc", "rtp_video_sender.h", "rtp_video_sender_interface.h", @@ -313,6 +323,17 @@ rtc_library("call") { ] } +rtc_source_set("receive_stream_interface") { + sources = [ "receive_stream.h" ] + deps = [ + "../api:frame_transformer_interface", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api/crypto:frame_decryptor_interface", + "../api/transport/rtp:rtp_source", + ] +} + rtc_library("video_stream_api") { sources = [ "video_receive_stream.cc", @@ -321,6 +342,7 @@ rtc_library("video_stream_api") { "video_send_stream.h", ] deps = [ + ":receive_stream_interface", ":rtp_interfaces", "../api:frame_transformer_interface", "../api:rtp_headers", @@ -328,10 +350,8 @@ rtc_library("video_stream_api") { "../api:scoped_refptr", "../api:transport_api", "../api/adaptation:resource_adaptation_api", - "../api/crypto:frame_decryptor_interface", "../api/crypto:frame_encryptor_interface", "../api/crypto:options", - "../api/transport/rtp:rtp_source", "../api/video:recordable_encoded_frame", "../api/video:video_frame", "../api/video:video_rtp_headers", diff --git a/call/adaptation/video_stream_adapter.cc b/call/adaptation/video_stream_adapter.cc index 6620eff311..64e1a77786 100644 --- a/call/adaptation/video_stream_adapter.cc +++ b/call/adaptation/video_stream_adapter.cc @@ -416,8 +416,10 @@ VideoStreamAdapter::AdaptIfFpsDiffInsufficient( const VideoStreamInputState& input_state, const RestrictionsWithCounters& restrictions) const { RTC_DCHECK_EQ(degradation_preference_, DegradationPreference::BALANCED); + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); absl::optional min_fps_diff = - balanced_settings_.MinFpsDiff(input_state.frame_size_pixels().value()); + balanced_settings_.MinFpsDiff(frame_size_pixels); if (current_restrictions_.counters.fps_adaptations < restrictions.counters.fps_adaptations && min_fps_diff && input_state.frames_per_second() > 0) { @@ -502,9 +504,10 @@ VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseFramerate( if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) { max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second()); } else if (degradation_preference_ == DegradationPreference::BALANCED) { - max_frame_rate = - balanced_settings_.MinFps(input_state.video_codec_type(), - input_state.frame_size_pixels().value()); + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); + max_frame_rate = balanced_settings_.MinFps(input_state.video_codec_type(), + 
frame_size_pixels); } else { RTC_NOTREACHED(); max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second()); @@ -561,12 +564,21 @@ VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseFramerate( if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) { max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second()); } else if (degradation_preference_ == DegradationPreference::BALANCED) { - max_frame_rate = - balanced_settings_.MaxFps(input_state.video_codec_type(), - input_state.frame_size_pixels().value()); + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); + max_frame_rate = balanced_settings_.MaxFps(input_state.video_codec_type(), + frame_size_pixels); + // Temporary fix for cases when there are fewer framerate adaptation steps + // up than down. Make number of down/up steps equal. + if (max_frame_rate == std::numeric_limits::max() && + current_restrictions.counters.fps_adaptations > 1) { + // Do not unrestrict framerate to allow additional adaptation up steps. + RTC_LOG(LS_INFO) << "Modifying framerate due to remaining fps count."; + max_frame_rate -= current_restrictions.counters.fps_adaptations; + } // In BALANCED, the max_frame_rate must be checked before proceeding. This // is because the MaxFps might be the current Fps and so the balanced - // settings may want to scale up the resolution.= + // settings may want to scale up the resolution. if (!CanIncreaseFrameRateTo(max_frame_rate, current_restrictions.restrictions)) { return Adaptation::Status::kLimitReached; diff --git a/call/audio_receive_stream.h b/call/audio_receive_stream.h index 6f74492927..8403e6bea0 100644 --- a/call/audio_receive_stream.h +++ b/call/audio_receive_stream.h @@ -20,17 +20,14 @@ #include "api/audio_codecs/audio_decoder_factory.h" #include "api/call/transport.h" #include "api/crypto/crypto_options.h" -#include "api/crypto/frame_decryptor_interface.h" -#include "api/frame_transformer_interface.h" #include "api/rtp_parameters.h" -#include "api/scoped_refptr.h" -#include "api/transport/rtp/rtp_source.h" +#include "call/receive_stream.h" #include "call/rtp_config.h" namespace webrtc { class AudioSinkInterface; -class AudioReceiveStream { +class AudioReceiveStream : public MediaReceiveStream { public: struct Stats { Stats(); @@ -42,6 +39,7 @@ class AudioReceiveStream { uint64_t fec_packets_received = 0; uint64_t fec_packets_discarded = 0; uint32_t packets_lost = 0; + uint32_t nacks_sent = 0; std::string codec_name; absl::optional codec_payload_type; uint32_t jitter_ms = 0; @@ -106,29 +104,14 @@ class AudioReceiveStream { std::string ToString() const; // Receive-stream specific RTP settings. - struct Rtp { + struct Rtp : public RtpConfig { Rtp(); ~Rtp(); std::string ToString() const; - // Synchronization source (stream identifier) to be received. - uint32_t remote_ssrc = 0; - - // Sender SSRC used for sending RTCP (such as receiver reports). - uint32_t local_ssrc = 0; - - // Enable feedback for send side bandwidth estimation. - // See - // https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions - // for details. - bool transport_cc = false; - // See NackConfig for description. NackConfig nack; - - // RTP header extensions used for the received stream. 
- std::vector extensions; } rtp; Transport* rtcp_send_transport = nullptr; @@ -157,22 +140,26 @@ class AudioReceiveStream { // An optional custom frame decryptor that allows the entire frame to be // decrypted in whatever way the caller choses. This is not required by // default. + // TODO(tommi): Remove this member variable from the struct. It's not + // a part of the AudioReceiveStream state but rather a pass through + // variable. rtc::scoped_refptr frame_decryptor; // An optional frame transformer used by insertable streams to transform // encoded frames. + // TODO(tommi): Remove this member variable from the struct. It's not + // a part of the AudioReceiveStream state but rather a pass through + // variable. rtc::scoped_refptr frame_transformer; }; - // Reconfigure the stream according to the Configuration. - virtual void Reconfigure(const Config& config) = 0; - - // Starts stream activity. - // When a stream is active, it can receive, process and deliver packets. - virtual void Start() = 0; - // Stops stream activity. - // When a stream is stopped, it can't receive, process or deliver packets. - virtual void Stop() = 0; + // Methods that support reconfiguring the stream post initialization. + virtual void SetDecoderMap(std::map decoder_map) = 0; + virtual void SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) = 0; + // Set/change the rtp header extensions. Must be called on the packet + // delivery thread. + virtual void SetRtpExtensions(std::vector extensions) = 0; // Returns true if the stream has been started. virtual bool IsRunning() const = 0; @@ -202,8 +189,6 @@ class AudioReceiveStream { // Returns current value of base minimum delay in milliseconds. virtual int GetBaseMinimumPlayoutDelayMs() const = 0; - virtual std::vector GetSources() const = 0; - protected: virtual ~AudioReceiveStream() {} }; diff --git a/call/audio_send_stream.cc b/call/audio_send_stream.cc index 9d25b77ba6..916336b929 100644 --- a/call/audio_send_stream.cc +++ b/call/audio_send_stream.cc @@ -12,7 +12,6 @@ #include -#include "rtc_base/string_encode.h" #include "rtc_base/strings/audio_format_to_string.h" #include "rtc_base/strings/string_builder.h" diff --git a/call/audio_send_stream.h b/call/audio_send_stream.h index d21dff4889..e084d4219d 100644 --- a/call/audio_send_stream.h +++ b/call/audio_send_stream.h @@ -70,6 +70,7 @@ class AudioSendStream : public AudioSender { // per-pair the ReportBlockData represents the latest Report Block that was // received for that pair. 
std::vector report_block_datas; + uint32_t nacks_rcvd = 0; }; struct Config { diff --git a/call/call.cc b/call/call.cc index a9ae07b60a..fb1d7cd3bc 100644 --- a/call/call.cc +++ b/call/call.cc @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -33,6 +34,7 @@ #include "call/receive_time_calculator.h" #include "call/rtp_stream_receiver_controller.h" #include "call/rtp_transport_controller_send.h" +#include "call/rtp_transport_controller_send_factory.h" #include "call/version.h" #include "logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h" #include "logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h" @@ -45,7 +47,7 @@ #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/fec_controller_default.h" #include "rtc_base/checks.h" @@ -79,12 +81,10 @@ bool SendPeriodicFeedback(const std::vector& extensions) { return true; } -// TODO(nisse): This really begs for a shared context struct. -bool UseSendSideBwe(const std::vector& extensions, - bool transport_cc) { - if (!transport_cc) +bool UseSendSideBwe(const ReceiveStream::RtpConfig& rtp) { + if (!rtp.transport_cc) return false; - for (const auto& extension : extensions) { + for (const auto& extension : rtp.extensions) { if (extension.uri == RtpExtension::kTransportSequenceNumberUri || extension.uri == RtpExtension::kTransportSequenceNumberV2Uri) return true; @@ -92,18 +92,6 @@ bool UseSendSideBwe(const std::vector& extensions, return false; } -bool UseSendSideBwe(const VideoReceiveStream::Config& config) { - return UseSendSideBwe(config.rtp.extensions, config.rtp.transport_cc); -} - -bool UseSendSideBwe(const AudioReceiveStream::Config& config) { - return UseSendSideBwe(config.rtp.extensions, config.rtp.transport_cc); -} - -bool UseSendSideBwe(const FlexfecReceiveStream::Config& config) { - return UseSendSideBwe(config.rtp_header_extensions, config.transport_cc); -} - const int* FindKeyByValue(const std::map& m, int v) { for (const auto& kv : m) { if (kv.second == v) @@ -156,11 +144,6 @@ std::unique_ptr CreateRtcLogStreamConfig( return rtclog_config; } -bool IsRtcp(const uint8_t* packet, size_t length) { - RtpUtility::RtpHeaderParser rtp_parser(packet, length); - return rtp_parser.RTCP(); -} - TaskQueueBase* GetCurrentTaskQueueOrThread() { TaskQueueBase* current = TaskQueueBase::Current(); if (!current) @@ -168,34 +151,6 @@ TaskQueueBase* GetCurrentTaskQueueOrThread() { return current; } -// Called from the destructor of Call to report the collected send histograms. 
-void UpdateSendHistograms(Timestamp now, - Timestamp first_sent_packet, - AvgCounter& estimated_send_bitrate_kbps_counter, - AvgCounter& pacer_bitrate_kbps_counter) { - TimeDelta elapsed = now - first_sent_packet; - if (elapsed.seconds() < metrics::kMinRunTimeInSeconds) - return; - - const int kMinRequiredPeriodicSamples = 5; - AggregatedStats send_bitrate_stats = - estimated_send_bitrate_kbps_counter.ProcessAndGetStats(); - if (send_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.EstimatedSendBitrateInKbps", - send_bitrate_stats.average); - RTC_LOG(LS_INFO) << "WebRTC.Call.EstimatedSendBitrateInKbps, " - << send_bitrate_stats.ToString(); - } - AggregatedStats pacer_bitrate_stats = - pacer_bitrate_kbps_counter.ProcessAndGetStats(); - if (pacer_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.PacerBitrateInKbps", - pacer_bitrate_stats.average); - RTC_LOG(LS_INFO) << "WebRTC.Call.PacerBitrateInKbps, " - << pacer_bitrate_stats.ToString(); - } -} - } // namespace namespace internal { @@ -300,10 +255,6 @@ class Call final : public webrtc::Call, DeliveryStatus DeliverPacket(MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) override; - void DeliverPacketAsync(MediaType media_type, - rtc::CopyOnWriteBuffer packet, - int64_t packet_time_us, - PacketCallback callback) override; // Implements RecoveredPacketReceiver. void OnRecoveredPacket(const uint8_t* packet, size_t length) override; @@ -313,6 +264,12 @@ class Call final : public webrtc::Call, void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) override; + void OnLocalSsrcUpdated(webrtc::AudioReceiveStream& stream, + uint32_t local_ssrc) override; + + void OnUpdateSyncGroup(webrtc::AudioReceiveStream& stream, + const std::string& sync_group) override; + void OnSentPacket(const rtc::SentPacket& sent_packet) override; // Implements TargetTransferRateObserver, @@ -325,45 +282,96 @@ class Call final : public webrtc::Call, void SetClientBitratePreferences(const BitrateSettings& preferences) override; private: - DeliveryStatus DeliverRtcp(MediaType media_type, - const uint8_t* packet, - size_t length) - RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); + // Thread-compatible class that collects received packet stats and exposes + // them as UMA histograms on destruction. 
+ class ReceiveStats { + public: + explicit ReceiveStats(Clock* clock); + ~ReceiveStats(); + + void AddReceivedRtcpBytes(int bytes); + void AddReceivedAudioBytes(int bytes, webrtc::Timestamp arrival_time); + void AddReceivedVideoBytes(int bytes, webrtc::Timestamp arrival_time); + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + RateCounter received_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + RateCounter received_audio_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + RateCounter received_video_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + RateCounter received_rtcp_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional first_received_rtp_audio_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional last_received_rtp_audio_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional first_received_rtp_video_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional last_received_rtp_video_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + }; + + // Thread-compatible class that collects sent packet stats and exposes + // them as UMA histograms on destruction, provided SetFirstPacketTime was + // called with a non-empty packet timestamp before the destructor. + class SendStats { + public: + explicit SendStats(Clock* clock); + ~SendStats(); + + void SetFirstPacketTime(absl::optional first_sent_packet_time); + void PauseSendAndPacerBitrateCounters(); + void AddTargetBitrateSample(uint32_t target_bitrate_bps); + void SetMinAllocatableRate(BitrateAllocationLimits limits); + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker destructor_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + Clock* const clock_ RTC_GUARDED_BY(destructor_sequence_checker_); + AvgCounter estimated_send_bitrate_kbps_counter_ + RTC_GUARDED_BY(sequence_checker_); + AvgCounter pacer_bitrate_kbps_counter_ RTC_GUARDED_BY(sequence_checker_); + uint32_t min_allocated_send_bitrate_bps_ RTC_GUARDED_BY(sequence_checker_){ + 0}; + absl::optional first_sent_packet_time_ + RTC_GUARDED_BY(destructor_sequence_checker_); + }; + + void DeliverRtcp(MediaType media_type, rtc::CopyOnWriteBuffer packet) + RTC_RUN_ON(network_thread_); DeliveryStatus DeliverRtp(MediaType media_type, rtc::CopyOnWriteBuffer packet, - int64_t packet_time_us) - RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); - void ConfigureSync(const std::string& sync_group) - RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); + int64_t packet_time_us) RTC_RUN_ON(worker_thread_); + void ConfigureSync(const std::string& sync_group) RTC_RUN_ON(worker_thread_); void NotifyBweOfReceivedPacket(const RtpPacketReceived& packet, MediaType media_type) - RTC_SHARED_LOCKS_REQUIRED(worker_thread_); + RTC_RUN_ON(worker_thread_); - void UpdateReceiveHistograms(); void UpdateAggregateNetworkState(); // Ensure that necessary process threads are started, and any required // callbacks have been registered. 
- void EnsureStarted() RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); - - rtc::TaskQueue* send_transport_queue() const { - return transport_send_ptr_->GetWorkerQueue(); - } + void EnsureStarted() RTC_RUN_ON(worker_thread_); Clock* const clock_; TaskQueueFactory* const task_queue_factory_; TaskQueueBase* const worker_thread_; TaskQueueBase* const network_thread_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker send_transport_sequence_checker_; const int num_cpu_cores_; const rtc::scoped_refptr module_process_thread_; const std::unique_ptr call_stats_; const std::unique_ptr bitrate_allocator_; - Call::Config config_; + const Call::Config config_ RTC_GUARDED_BY(worker_thread_); + // Maps to config_.trials, can be used from any thread via `trials()`. + const WebRtcKeyValueConfig& trials_; - NetworkState audio_network_state_; - NetworkState video_network_state_; + NetworkState audio_network_state_ RTC_GUARDED_BY(worker_thread_); + NetworkState video_network_state_ RTC_GUARDED_BY(worker_thread_); // TODO(bugs.webrtc.org/11993): Move aggregate_network_up_ over to the // network thread. bool aggregate_network_up_ RTC_GUARDED_BY(worker_thread_); @@ -381,39 +389,17 @@ class Call final : public webrtc::Call, // TODO(nisse): Should eventually be injected at creation, // with a single object in the bundled case. - RtpStreamReceiverController audio_receiver_controller_; - RtpStreamReceiverController video_receiver_controller_; + RtpStreamReceiverController audio_receiver_controller_ + RTC_GUARDED_BY(worker_thread_); + RtpStreamReceiverController video_receiver_controller_ + RTC_GUARDED_BY(worker_thread_); // This extra map is used for receive processing which is // independent of media type. - // TODO(nisse): In the RTP transport refactoring, we should have a - // single mapping from ssrc to a more abstract receive stream, with - // accessor methods for all configuration we need at this level. - struct ReceiveRtpConfig { - explicit ReceiveRtpConfig(const webrtc::AudioReceiveStream::Config& config) - : extensions(config.rtp.extensions), - use_send_side_bwe(UseSendSideBwe(config)) {} - explicit ReceiveRtpConfig(const webrtc::VideoReceiveStream::Config& config) - : extensions(config.rtp.extensions), - use_send_side_bwe(UseSendSideBwe(config)) {} - explicit ReceiveRtpConfig(const FlexfecReceiveStream::Config& config) - : extensions(config.rtp_header_extensions), - use_send_side_bwe(UseSendSideBwe(config)) {} - - // Registered RTP header extensions for each stream. Note that RTP header - // extensions are negotiated per track ("m= line") in the SDP, but we have - // no notion of tracks at the Call level. We therefore store the RTP header - // extensions per SSRC instead, which leads to some storage overhead. - const RtpHeaderExtensionMap extensions; - // Set if both RTP extension the RTCP feedback message needed for - // send side BWE are negotiated. - const bool use_send_side_bwe; - }; - // TODO(bugs.webrtc.org/11993): Move receive_rtp_config_ over to the // network thread. - std::map receive_rtp_config_ + std::map receive_rtp_config_ RTC_GUARDED_BY(worker_thread_); // Audio and Video send streams are owned by the client that creates them. @@ -422,6 +408,10 @@ class Call final : public webrtc::Call, std::map video_send_ssrcs_ RTC_GUARDED_BY(worker_thread_); std::set video_send_streams_ RTC_GUARDED_BY(worker_thread_); + // True if |video_send_streams_| is empty, false if not. The atomic variable + // is used to decide UMA send statistics behavior and enables avoiding a + // PostTask(). 
+ std::atomic video_send_streams_empty_{true}; // Each forwarder wraps an adaptation resource that was added to the call. std::vector> @@ -435,49 +425,41 @@ class Call final : public webrtc::Call, RtpPayloadStateMap suspended_video_payload_states_ RTC_GUARDED_BY(worker_thread_); - webrtc::RtcEventLog* event_log_; - - // The following members are only accessed (exclusively) from one thread and - // from the destructor, and therefore doesn't need any explicit - // synchronization. - RateCounter received_bytes_per_second_counter_; - RateCounter received_audio_bytes_per_second_counter_; - RateCounter received_video_bytes_per_second_counter_; - RateCounter received_rtcp_bytes_per_second_counter_; - absl::optional first_received_rtp_audio_ms_; - absl::optional last_received_rtp_audio_ms_; - absl::optional first_received_rtp_video_ms_; - absl::optional last_received_rtp_video_ms_; - - uint32_t last_bandwidth_bps_ RTC_GUARDED_BY(worker_thread_); - // TODO(holmer): Remove this lock once BitrateController no longer calls - // OnNetworkChanged from multiple threads. - uint32_t min_allocated_send_bitrate_bps_ RTC_GUARDED_BY(worker_thread_); - uint32_t configured_max_padding_bitrate_bps_ RTC_GUARDED_BY(worker_thread_); - AvgCounter estimated_send_bitrate_kbps_counter_ - RTC_GUARDED_BY(worker_thread_); - AvgCounter pacer_bitrate_kbps_counter_ RTC_GUARDED_BY(worker_thread_); + webrtc::RtcEventLog* const event_log_; + + // TODO(bugs.webrtc.org/11993) ready to move stats access to the network + // thread. + ReceiveStats receive_stats_ RTC_GUARDED_BY(worker_thread_); + SendStats send_stats_ RTC_GUARDED_BY(send_transport_sequence_checker_); + // |last_bandwidth_bps_| and |configured_max_padding_bitrate_bps_| being + // atomic avoids a PostTask. The variables are used for stats gathering. + std::atomic last_bandwidth_bps_{0}; + std::atomic configured_max_padding_bitrate_bps_{0}; ReceiveSideCongestionController receive_side_cc_; const std::unique_ptr receive_time_calculator_; const std::unique_ptr video_send_delay_stats_; - const int64_t start_ms_; + const Timestamp start_of_call_; // Note that |task_safety_| needs to be at a greater scope than the task queue // owned by |transport_send_| since calls might arrive on the network thread // while Call is being deleted and the task queue is being torn down. - ScopedTaskSafety task_safety_; + const ScopedTaskSafety task_safety_; // Caches transport_send_.get(), to avoid racing with destructor. // Note that this is declared before transport_send_ to ensure that it is not // invalidated until no more tasks can be running on the transport_send_ task // queue. - RtpTransportControllerSendInterface* const transport_send_ptr_; + // For more details on the background of this member variable, see: + // https://webrtc-review.googlesource.com/c/src/+/63023/9/call/call.cc + // https://bugs.chromium.org/p/chromium/issues/detail?id=992640 + RtpTransportControllerSendInterface* const transport_send_ptr_ + RTC_GUARDED_BY(send_transport_sequence_checker_); // Declared last since it will issue callbacks from a task queue. Declaring it // last ensures that it is destroyed first and any running tasks are finished. 
- std::unique_ptr transport_send_; + const std::unique_ptr transport_send_; bool is_started_ RTC_GUARDED_BY(worker_thread_) = false; @@ -502,11 +484,6 @@ Call* Call::Create(const Call::Config& config) { rtc::scoped_refptr call_thread = SharedModuleThread::Create(ProcessThread::Create("ModuleProcessThread"), nullptr); - return Create(config, std::move(call_thread)); -} - -Call* Call::Create(const Call::Config& config, - rtc::scoped_refptr call_thread) { return Create(config, Clock::GetRealTimeClock(), std::move(call_thread), ProcessThread::Create("PacerThread")); } @@ -516,15 +493,28 @@ Call* Call::Create(const Call::Config& config, rtc::scoped_refptr call_thread, std::unique_ptr pacer_thread) { RTC_DCHECK(config.task_queue_factory); + + RtpTransportControllerSendFactory transport_controller_factory_; + + RtpTransportConfig transportConfig = config.ExtractTransportConfig(); + return new internal::Call( clock, config, - std::make_unique( - clock, config.event_log, config.network_state_predictor_factory, - config.network_controller_factory, config.bitrate_config, - std::move(pacer_thread), config.task_queue_factory, config.trials), + transport_controller_factory_.Create(transportConfig, clock, + std::move(pacer_thread)), std::move(call_thread), config.task_queue_factory); } +Call* Call::Create(const Call::Config& config, + Clock* clock, + rtc::scoped_refptr call_thread, + std::unique_ptr + transportControllerSend) { + RTC_DCHECK(config.task_queue_factory); + return new internal::Call(clock, config, std::move(transportControllerSend), + std::move(call_thread), config.task_queue_factory); +} + class SharedModuleThread::Impl { public: Impl(std::unique_ptr process_thread, @@ -629,6 +619,157 @@ VideoSendStream* Call::CreateVideoSendStream( namespace internal { +Call::ReceiveStats::ReceiveStats(Clock* clock) + : received_bytes_per_second_counter_(clock, nullptr, false), + received_audio_bytes_per_second_counter_(clock, nullptr, false), + received_video_bytes_per_second_counter_(clock, nullptr, false), + received_rtcp_bytes_per_second_counter_(clock, nullptr, false) { + sequence_checker_.Detach(); +} + +void Call::ReceiveStats::AddReceivedRtcpBytes(int bytes) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (received_bytes_per_second_counter_.HasSample()) { + // First RTP packet has been received. 
+ received_bytes_per_second_counter_.Add(static_cast(bytes)); + received_rtcp_bytes_per_second_counter_.Add(static_cast(bytes)); + } +} + +void Call::ReceiveStats::AddReceivedAudioBytes(int bytes, + webrtc::Timestamp arrival_time) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + received_bytes_per_second_counter_.Add(bytes); + received_audio_bytes_per_second_counter_.Add(bytes); + if (!first_received_rtp_audio_timestamp_) + first_received_rtp_audio_timestamp_ = arrival_time; + last_received_rtp_audio_timestamp_ = arrival_time; +} + +void Call::ReceiveStats::AddReceivedVideoBytes(int bytes, + webrtc::Timestamp arrival_time) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + received_bytes_per_second_counter_.Add(bytes); + received_video_bytes_per_second_counter_.Add(bytes); + if (!first_received_rtp_video_timestamp_) + first_received_rtp_video_timestamp_ = arrival_time; + last_received_rtp_video_timestamp_ = arrival_time; +} + +Call::ReceiveStats::~ReceiveStats() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (first_received_rtp_audio_timestamp_) { + RTC_HISTOGRAM_COUNTS_100000( + "WebRTC.Call.TimeReceivingAudioRtpPacketsInSeconds", + (*last_received_rtp_audio_timestamp_ - + *first_received_rtp_audio_timestamp_) + .seconds()); + } + if (first_received_rtp_video_timestamp_) { + RTC_HISTOGRAM_COUNTS_100000( + "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds", + (*last_received_rtp_video_timestamp_ - + *first_received_rtp_video_timestamp_) + .seconds()); + } + const int kMinRequiredPeriodicSamples = 5; + AggregatedStats video_bytes_per_sec = + received_video_bytes_per_second_counter_.GetStats(); + if (video_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.VideoBitrateReceivedInKbps", + video_bytes_per_sec.average * 8 / 1000); + RTC_LOG(LS_INFO) << "WebRTC.Call.VideoBitrateReceivedInBps, " + << video_bytes_per_sec.ToStringWithMultiplier(8); + } + AggregatedStats audio_bytes_per_sec = + received_audio_bytes_per_second_counter_.GetStats(); + if (audio_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.AudioBitrateReceivedInKbps", + audio_bytes_per_sec.average * 8 / 1000); + RTC_LOG(LS_INFO) << "WebRTC.Call.AudioBitrateReceivedInBps, " + << audio_bytes_per_sec.ToStringWithMultiplier(8); + } + AggregatedStats rtcp_bytes_per_sec = + received_rtcp_bytes_per_second_counter_.GetStats(); + if (rtcp_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.RtcpBitrateReceivedInBps", + rtcp_bytes_per_sec.average * 8); + RTC_LOG(LS_INFO) << "WebRTC.Call.RtcpBitrateReceivedInBps, " + << rtcp_bytes_per_sec.ToStringWithMultiplier(8); + } + AggregatedStats recv_bytes_per_sec = + received_bytes_per_second_counter_.GetStats(); + if (recv_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.BitrateReceivedInKbps", + recv_bytes_per_sec.average * 8 / 1000); + RTC_LOG(LS_INFO) << "WebRTC.Call.BitrateReceivedInBps, " + << recv_bytes_per_sec.ToStringWithMultiplier(8); + } +} + +Call::SendStats::SendStats(Clock* clock) + : clock_(clock), + estimated_send_bitrate_kbps_counter_(clock, nullptr, true), + pacer_bitrate_kbps_counter_(clock, nullptr, true) { + destructor_sequence_checker_.Detach(); + sequence_checker_.Detach(); +} + +Call::SendStats::~SendStats() { + RTC_DCHECK_RUN_ON(&destructor_sequence_checker_); + if (!first_sent_packet_time_) + return; + + TimeDelta elapsed = clock_->CurrentTime() - *first_sent_packet_time_; + if 
(elapsed.seconds() < metrics::kMinRunTimeInSeconds) + return; + + const int kMinRequiredPeriodicSamples = 5; + AggregatedStats send_bitrate_stats = + estimated_send_bitrate_kbps_counter_.ProcessAndGetStats(); + if (send_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.EstimatedSendBitrateInKbps", + send_bitrate_stats.average); + RTC_LOG(LS_INFO) << "WebRTC.Call.EstimatedSendBitrateInKbps, " + << send_bitrate_stats.ToString(); + } + AggregatedStats pacer_bitrate_stats = + pacer_bitrate_kbps_counter_.ProcessAndGetStats(); + if (pacer_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.PacerBitrateInKbps", + pacer_bitrate_stats.average); + RTC_LOG(LS_INFO) << "WebRTC.Call.PacerBitrateInKbps, " + << pacer_bitrate_stats.ToString(); + } +} + +void Call::SendStats::SetFirstPacketTime( + absl::optional first_sent_packet_time) { + RTC_DCHECK_RUN_ON(&destructor_sequence_checker_); + first_sent_packet_time_ = first_sent_packet_time; +} + +void Call::SendStats::PauseSendAndPacerBitrateCounters() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + estimated_send_bitrate_kbps_counter_.ProcessAndPause(); + pacer_bitrate_kbps_counter_.ProcessAndPause(); +} + +void Call::SendStats::AddTargetBitrateSample(uint32_t target_bitrate_bps) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + estimated_send_bitrate_kbps_counter_.Add(target_bitrate_bps / 1000); + // Pacer bitrate may be higher than bitrate estimate if enforcing min + // bitrate. + uint32_t pacer_bitrate_bps = + std::max(target_bitrate_bps, min_allocated_send_bitrate_bps_); + pacer_bitrate_kbps_counter_.Add(pacer_bitrate_bps / 1000); +} + +void Call::SendStats::SetMinAllocatableRate(BitrateAllocationLimits limits) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + min_allocated_send_bitrate_bps_ = limits.min_allocatable_rate.bps(); +} + Call::Call(Clock* clock, const Call::Config& config, std::unique_ptr transport_send, @@ -646,19 +787,13 @@ Call::Call(Clock* clock, call_stats_(new CallStats(clock_, worker_thread_)), bitrate_allocator_(new BitrateAllocator(this)), config_(config), + trials_(*config.trials), audio_network_state_(kNetworkDown), video_network_state_(kNetworkDown), aggregate_network_up_(false), event_log_(config.event_log), - received_bytes_per_second_counter_(clock_, nullptr, true), - received_audio_bytes_per_second_counter_(clock_, nullptr, true), - received_video_bytes_per_second_counter_(clock_, nullptr, true), - received_rtcp_bytes_per_second_counter_(clock_, nullptr, true), - last_bandwidth_bps_(0), - min_allocated_send_bitrate_bps_(0), - configured_max_padding_bitrate_bps_(0), - estimated_send_bitrate_kbps_counter_(clock_, nullptr, true), - pacer_bitrate_kbps_counter_(clock_, nullptr, true), + receive_stats_(clock_), + send_stats_(clock_), receive_side_cc_(clock, absl::bind_front(&PacketRouter::SendCombinedRtcpPacket, transport_send->packet_router()), @@ -667,7 +802,7 @@ Call::Call(Clock* clock, /*network_state_estimator=*/nullptr), receive_time_calculator_(ReceiveTimeCalculator::CreateFromFieldTrial()), video_send_delay_stats_(new SendDelayStats(clock_)), - start_ms_(clock_->TimeInMilliseconds()), + start_of_call_(clock_->CurrentTime()), transport_send_ptr_(transport_send.get()), transport_send_(std::move(transport_send)) { RTC_DCHECK(config.event_log != nullptr); @@ -675,6 +810,8 @@ Call::Call(Clock* clock, RTC_DCHECK(network_thread_); RTC_DCHECK(worker_thread_->IsCurrent()); + send_transport_sequence_checker_.Detach(); + // Do not remove this 
call; it is here to convince the compiler that the // WebRTC source timestamp string needs to be in the final binary. LoadWebRTCVersionInRegister(); @@ -700,24 +837,11 @@ Call::~Call() { receive_side_cc_.GetRemoteBitrateEstimator(true)); module_process_thread_->process_thread()->DeRegisterModule(&receive_side_cc_); call_stats_->DeregisterStatsObserver(&receive_side_cc_); + send_stats_.SetFirstPacketTime(transport_send_->GetFirstPacketTime()); - absl::optional first_sent_packet_time = - transport_send_->GetFirstPacketTime(); - - Timestamp now = clock_->CurrentTime(); - - // Only update histograms after process threads have been shut down, so that - // they won't try to concurrently update stats. - if (first_sent_packet_time) { - UpdateSendHistograms(now, *first_sent_packet_time, - estimated_send_bitrate_kbps_counter_, - pacer_bitrate_kbps_counter_); - } - - UpdateReceiveHistograms(); - - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.LifetimeInSeconds", - (now.ms() - start_ms_) / 1000); + RTC_HISTOGRAM_COUNTS_100000( + "WebRTC.Call.LifetimeInSeconds", + (clock_->CurrentTime() - start_of_call_).seconds()); } void Call::EnsureStarted() { @@ -730,10 +854,10 @@ void Call::EnsureStarted() { // This call seems to kick off a number of things, so probably better left // off being kicked off on request rather than in the ctor. - transport_send_ptr_->RegisterTargetTransferRateObserver(this); + transport_send_->RegisterTargetTransferRateObserver(this); module_process_thread_->EnsureStarted(); - transport_send_ptr_->EnsureStarted(); + transport_send_->EnsureStarted(); } void Call::SetClientBitratePreferences(const BitrateSettings& preferences) { @@ -741,52 +865,6 @@ void Call::SetClientBitratePreferences(const BitrateSettings& preferences) { GetTransportControllerSend()->SetClientBitratePreferences(preferences); } -void Call::UpdateReceiveHistograms() { - if (first_received_rtp_audio_ms_) { - RTC_HISTOGRAM_COUNTS_100000( - "WebRTC.Call.TimeReceivingAudioRtpPacketsInSeconds", - (*last_received_rtp_audio_ms_ - *first_received_rtp_audio_ms_) / 1000); - } - if (first_received_rtp_video_ms_) { - RTC_HISTOGRAM_COUNTS_100000( - "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds", - (*last_received_rtp_video_ms_ - *first_received_rtp_video_ms_) / 1000); - } - const int kMinRequiredPeriodicSamples = 5; - AggregatedStats video_bytes_per_sec = - received_video_bytes_per_second_counter_.GetStats(); - if (video_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.VideoBitrateReceivedInKbps", - video_bytes_per_sec.average * 8 / 1000); - RTC_LOG(LS_INFO) << "WebRTC.Call.VideoBitrateReceivedInBps, " - << video_bytes_per_sec.ToStringWithMultiplier(8); - } - AggregatedStats audio_bytes_per_sec = - received_audio_bytes_per_second_counter_.GetStats(); - if (audio_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.AudioBitrateReceivedInKbps", - audio_bytes_per_sec.average * 8 / 1000); - RTC_LOG(LS_INFO) << "WebRTC.Call.AudioBitrateReceivedInBps, " - << audio_bytes_per_sec.ToStringWithMultiplier(8); - } - AggregatedStats rtcp_bytes_per_sec = - received_rtcp_bytes_per_second_counter_.GetStats(); - if (rtcp_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.RtcpBitrateReceivedInBps", - rtcp_bytes_per_sec.average * 8); - RTC_LOG(LS_INFO) << "WebRTC.Call.RtcpBitrateReceivedInBps, " - << rtcp_bytes_per_sec.ToStringWithMultiplier(8); - } - AggregatedStats recv_bytes_per_sec = - 
received_bytes_per_second_counter_.GetStats(); - if (recv_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.BitrateReceivedInKbps", - recv_bytes_per_sec.average * 8 / 1000); - RTC_LOG(LS_INFO) << "WebRTC.Call.BitrateReceivedInBps, " - << recv_bytes_per_sec.ToStringWithMultiplier(8); - } -} - PacketReceiver* Call::Receiver() { return this; } @@ -810,9 +888,8 @@ webrtc::AudioSendStream* Call::CreateAudioSendStream( AudioSendStream* send_stream = new AudioSendStream( clock_, config, config_.audio_state, task_queue_factory_, - module_process_thread_->process_thread(), transport_send_ptr_, - bitrate_allocator_.get(), event_log_, call_stats_->AsRtcpRttStats(), - suspended_rtp_state); + transport_send_.get(), bitrate_allocator_.get(), event_log_, + call_stats_->AsRtcpRttStats(), suspended_rtp_state); RTC_DCHECK(audio_send_ssrcs_.find(config.rtp.ssrc) == audio_send_ssrcs_.end()); audio_send_ssrcs_[config.rtp.ssrc] = send_stream; @@ -820,7 +897,7 @@ webrtc::AudioSendStream* Call::CreateAudioSendStream( // TODO(bugs.webrtc.org/11993): call AssociateSendStream and // UpdateAggregateNetworkState asynchronously on the network thread. for (AudioReceiveStream* stream : audio_receive_streams_) { - if (stream->config().rtp.local_ssrc == config.rtp.ssrc) { + if (stream->local_ssrc() == config.rtp.ssrc) { stream->AssociateSendStream(send_stream); } } @@ -848,7 +925,7 @@ void Call::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) { // TODO(bugs.webrtc.org/11993): call AssociateSendStream and // UpdateAggregateNetworkState asynchronously on the network thread. for (AudioReceiveStream* stream : audio_receive_streams_) { - if (stream->config().rtp.local_ssrc == ssrc) { + if (stream->local_ssrc() == ssrc) { stream->AssociateSendStream(nullptr); } } @@ -866,20 +943,20 @@ webrtc::AudioReceiveStream* Call::CreateAudioReceiveStream( event_log_->Log(std::make_unique( CreateRtcLogStreamConfig(config))); - // TODO(bugs.webrtc.org/11993): Move the registration between |receive_stream| - // and |audio_receiver_controller_| out of AudioReceiveStream construction and - // set it up asynchronously on the network thread (the registration and - // |audio_receiver_controller_| need to live on the network thread). AudioReceiveStream* receive_stream = new AudioReceiveStream( - clock_, &audio_receiver_controller_, transport_send_ptr_->packet_router(), - module_process_thread_->process_thread(), config_.neteq_factory, config, + clock_, transport_send_->packet_router(), config_.neteq_factory, config, config_.audio_state, event_log_); + audio_receive_streams_.insert(receive_stream); + + // TODO(bugs.webrtc.org/11993): Make the registration on the network thread + // (asynchronously). The registration and `audio_receiver_controller_` need + // to live on the network thread. + receive_stream->RegisterWithTransport(&audio_receiver_controller_); // TODO(bugs.webrtc.org/11993): Update the below on the network thread. // We could possibly set up the audio_receiver_controller_ association up // as part of the async setup. 
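// Illustrative sketch (not part of the patch): a minimal, self-contained mock of the
// register/unregister-with-transport lifecycle the hunks above introduce for the
// receive streams. The Fake* types are hypothetical stand-ins, not WebRTC classes;
// they only demonstrate the required ordering: register after construction,
// unregister before destruction, both on the network thread per the TODOs above.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

class FakeReceiverController {
 public:
  void AddSink(uint32_t ssrc, void* sink) { sinks_[ssrc] = sink; }
  void RemoveSink(const void* sink) {
    for (auto it = sinks_.begin(); it != sinks_.end();) {
      if (it->second == sink) {
        it = sinks_.erase(it);
      } else {
        ++it;
      }
    }
  }
  std::size_t sink_count() const { return sinks_.size(); }

 private:
  std::map<uint32_t, void*> sinks_;
};

class FakeReceiveStream {
 public:
  explicit FakeReceiveStream(uint32_t remote_ssrc) : remote_ssrc_(remote_ssrc) {}

  // Mirrors RegisterWithTransport(): attach to the demuxing controller.
  void RegisterWithTransport(FakeReceiverController* controller) {
    controller_ = controller;
    controller_->AddSink(remote_ssrc_, this);
  }

  // Mirrors UnregisterFromTransport(): must run before the stream is deleted.
  void UnregisterFromTransport() {
    if (controller_ != nullptr) {
      controller_->RemoveSink(this);
      controller_ = nullptr;
    }
  }

 private:
  const uint32_t remote_ssrc_;
  FakeReceiverController* controller_ = nullptr;
};

int main() {
  FakeReceiverController controller;
  FakeReceiveStream stream(/*remote_ssrc=*/0x1234);
  stream.RegisterWithTransport(&controller);
  assert(controller.sink_count() == 1u);
  stream.UnregisterFromTransport();  // Required before destruction.
  assert(controller.sink_count() == 0u);
  return 0;
}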
- receive_rtp_config_.emplace(config.rtp.remote_ssrc, ReceiveRtpConfig(config)); - audio_receive_streams_.insert(receive_stream); + receive_rtp_config_.emplace(config.rtp.remote_ssrc, receive_stream); ConfigureSync(config.sync_group); @@ -900,20 +977,22 @@ void Call::DestroyAudioReceiveStream( webrtc::internal::AudioReceiveStream* audio_receive_stream = static_cast(receive_stream); + // TODO(bugs.webrtc.org/11993): Access the map, rtp config, call ConfigureSync + // and UpdateAggregateNetworkState on the network thread. The call to + // `UnregisterFromTransport` should also happen on the network thread. + audio_receive_stream->UnregisterFromTransport(); + + uint32_t ssrc = audio_receive_stream->remote_ssrc(); const AudioReceiveStream::Config& config = audio_receive_stream->config(); - uint32_t ssrc = config.rtp.remote_ssrc; - receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config)) + receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config.rtp)) ->RemoveStream(ssrc); - // TODO(bugs.webrtc.org/11993): Access the map, rtp config, call ConfigureSync - // and UpdateAggregateNetworkState on the network thread. audio_receive_streams_.erase(audio_receive_stream); - const std::string& sync_group = audio_receive_stream->config().sync_group; - const auto it = sync_stream_mapping_.find(sync_group); + const auto it = sync_stream_mapping_.find(config.sync_group); if (it != sync_stream_mapping_.end() && it->second == audio_receive_stream) { sync_stream_mapping_.erase(it); - ConfigureSync(sync_group); + ConfigureSync(config.sync_group); } receive_rtp_config_.erase(ssrc); @@ -947,8 +1026,8 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( std::vector ssrcs = config.rtp.ssrcs; VideoSendStream* send_stream = new VideoSendStream( - clock_, num_cpu_cores_, module_process_thread_->process_thread(), - task_queue_factory_, call_stats_->AsRtcpRttStats(), transport_send_ptr_, + clock_, num_cpu_cores_, task_queue_factory_, + call_stats_->AsRtcpRttStats(), transport_send_.get(), bitrate_allocator_.get(), video_send_delay_stats_.get(), event_log_, std::move(config), std::move(encoder_config), suspended_video_send_ssrcs_, suspended_video_payload_states_, std::move(fec_controller)); @@ -958,6 +1037,8 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( video_send_ssrcs_[ssrc] = send_stream; } video_send_streams_.insert(send_stream); + video_send_streams_empty_.store(false, std::memory_order_relaxed); + // Forward resources that were previously added to the call to the new stream. 
for (const auto& resource_forwarder : adaptation_resource_forwarders_) { resource_forwarder->OnCreateVideoSendStream(send_stream); @@ -971,6 +1052,7 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( webrtc::VideoSendStream* Call::CreateVideoSendStream( webrtc::VideoSendStream::Config config, VideoEncoderConfig encoder_config) { + RTC_DCHECK_RUN_ON(worker_thread_); if (config_.fec_controller_factory) { RTC_LOG(LS_INFO) << "External FEC Controller will be used."; } @@ -987,9 +1069,12 @@ void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) { RTC_DCHECK(send_stream != nullptr); RTC_DCHECK_RUN_ON(worker_thread_); - send_stream->Stop(); - - VideoSendStream* send_stream_impl = nullptr; + VideoSendStream* send_stream_impl = + static_cast(send_stream); + VideoSendStream::RtpStateMap rtp_states; + VideoSendStream::RtpPayloadStateMap rtp_payload_states; + send_stream_impl->StopPermanentlyAndGetRtpStates(&rtp_states, + &rtp_payload_states); auto it = video_send_ssrcs_.begin(); while (it != video_send_ssrcs_.end()) { @@ -1000,18 +1085,15 @@ void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) { ++it; } } + // Stop forwarding resources to the stream being destroyed. for (const auto& resource_forwarder : adaptation_resource_forwarders_) { resource_forwarder->OnDestroyVideoSendStream(send_stream_impl); } video_send_streams_.erase(send_stream_impl); + if (video_send_streams_.empty()) + video_send_streams_empty_.store(true, std::memory_order_relaxed); - RTC_CHECK(send_stream_impl != nullptr); - - VideoSendStream::RtpStateMap rtp_states; - VideoSendStream::RtpPayloadStateMap rtp_payload_states; - send_stream_impl->StopPermanentlyAndGetRtpStates(&rtp_states, - &rtp_payload_states); for (const auto& kv : rtp_states) { suspended_video_send_ssrcs_[kv.first] = kv.second; } @@ -1020,6 +1102,8 @@ void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) { } UpdateAggregateNetworkState(); + // TODO(tommi): consider deleting on the same thread as runs + // StopPermanentlyAndGetRtpStates. delete send_stream_impl; } @@ -1038,10 +1122,12 @@ webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream( // and set it up asynchronously on the network thread (the registration and // |video_receiver_controller_| need to live on the network thread). VideoReceiveStream2* receive_stream = new VideoReceiveStream2( - task_queue_factory_, worker_thread_, &video_receiver_controller_, - num_cpu_cores_, transport_send_ptr_->packet_router(), - std::move(configuration), module_process_thread_->process_thread(), + task_queue_factory_, this, num_cpu_cores_, + transport_send_->packet_router(), std::move(configuration), call_stats_.get(), clock_, new VCMTiming(clock_)); + // TODO(bugs.webrtc.org/11993): Set this up asynchronously on the network + // thread. + receive_stream->RegisterWithTransport(&video_receiver_controller_); const webrtc::VideoReceiveStream::Config& config = receive_stream->config(); if (config.rtp.rtx_ssrc) { @@ -1049,9 +1135,9 @@ webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream( // stream. Since the transport_send_cc negotiation is per payload // type, we may get an incorrect value for the rtx stream, but // that is unlikely to matter in practice. 
- receive_rtp_config_.emplace(config.rtp.rtx_ssrc, ReceiveRtpConfig(config)); + receive_rtp_config_.emplace(config.rtp.rtx_ssrc, receive_stream); } - receive_rtp_config_.emplace(config.rtp.remote_ssrc, ReceiveRtpConfig(config)); + receive_rtp_config_.emplace(config.rtp.remote_ssrc, receive_stream); video_receive_streams_.insert(receive_stream); ConfigureSync(config.sync_group); @@ -1069,6 +1155,9 @@ void Call::DestroyVideoReceiveStream( RTC_DCHECK(receive_stream != nullptr); VideoReceiveStream2* receive_stream_impl = static_cast(receive_stream); + // TODO(bugs.webrtc.org/11993): Unregister on the network thread. + receive_stream_impl->UnregisterFromTransport(); + const VideoReceiveStream::Config& config = receive_stream_impl->config(); // Remove all ssrcs pointing to a receive stream. As RTX retransmits on a @@ -1080,7 +1169,7 @@ void Call::DestroyVideoReceiveStream( video_receive_streams_.erase(receive_stream_impl); ConfigureSync(config.sync_group); - receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config)) + receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config.rtp)) ->RemoveStream(config.rtp.remote_ssrc); UpdateAggregateNetworkState(); @@ -1103,12 +1192,15 @@ FlexfecReceiveStream* Call::CreateFlexfecReceiveStream( // OnRtpPacket until the constructor is finished and the object is // in a valid state, since OnRtpPacket runs on the same thread. receive_stream = new FlexfecReceiveStreamImpl( - clock_, &video_receiver_controller_, config, recovered_packet_receiver, - call_stats_->AsRtcpRttStats(), module_process_thread_->process_thread()); + clock_, config, recovered_packet_receiver, call_stats_->AsRtcpRttStats()); - RTC_DCHECK(receive_rtp_config_.find(config.remote_ssrc) == + // TODO(bugs.webrtc.org/11993): Set this up asynchronously on the network + // thread. + receive_stream->RegisterWithTransport(&video_receiver_controller_); + + RTC_DCHECK(receive_rtp_config_.find(config.rtp.remote_ssrc) == receive_rtp_config_.end()); - receive_rtp_config_.emplace(config.remote_ssrc, ReceiveRtpConfig(config)); + receive_rtp_config_.emplace(config.rtp.remote_ssrc, receive_stream); // TODO(brandtr): Store config in RtcEventLog here. @@ -1119,15 +1211,19 @@ void Call::DestroyFlexfecReceiveStream(FlexfecReceiveStream* receive_stream) { TRACE_EVENT0("webrtc", "Call::DestroyFlexfecReceiveStream"); RTC_DCHECK_RUN_ON(worker_thread_); + FlexfecReceiveStreamImpl* receive_stream_impl = + static_cast(receive_stream); + // TODO(bugs.webrtc.org/11993): Unregister on the network thread. + receive_stream_impl->UnregisterFromTransport(); + RTC_DCHECK(receive_stream != nullptr); - const FlexfecReceiveStream::Config& config = receive_stream->GetConfig(); - uint32_t ssrc = config.remote_ssrc; - receive_rtp_config_.erase(ssrc); + const FlexfecReceiveStream::RtpConfig& rtp = receive_stream->rtp_config(); + receive_rtp_config_.erase(rtp.remote_ssrc); // Remove all SSRCs pointing to the FlexfecReceiveStreamImpl to be // destroyed. 
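// Illustrative sketch (assumption-labeled stand-in types, not WebRTC classes): the
// hunks above change receive_rtp_config_ so SSRCs map directly to the owning
// receive stream instead of to a copied ReceiveRtpConfig snapshot, and both the
// media SSRC and the RTX SSRC resolve to the same stream. Per-packet lookups then
// read the stream's current RTP settings via rtp_config().
#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct FakeRtpConfig {
  uint32_t remote_ssrc = 0;
  bool transport_cc = false;
  std::vector<std::string> extensions;  // Extension URIs, simplified to strings.
};

class FakeReceiveStream {
 public:
  explicit FakeReceiveStream(FakeRtpConfig config) : config_(std::move(config)) {}
  const FakeRtpConfig& rtp_config() const { return config_; }

 private:
  FakeRtpConfig config_;
};

int main() {
  FakeRtpConfig config;
  config.remote_ssrc = 11111;
  config.transport_cc = true;
  config.extensions = {"urn:ietf:params:rtp-hdrext:sdes:mid"};
  FakeReceiveStream video_stream(config);

  // Mirrors the two emplace() calls above for config.rtp.remote_ssrc and
  // config.rtp.rtx_ssrc: both keys point at the same stream object.
  std::map<uint32_t, FakeReceiveStream*> receive_rtp_config;
  receive_rtp_config.emplace(11111u, &video_stream);  // media SSRC
  receive_rtp_config.emplace(22222u, &video_stream);  // RTX SSRC

  auto it = receive_rtp_config.find(22222u);
  assert(it != receive_rtp_config.end());
  // Per-packet code reads extensions (and transport_cc) through the stream.
  assert(it->second->rtp_config().extensions.size() == 1u);
  return 0;
}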
- receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config)) - ->RemoveStream(ssrc); + receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(rtp)) + ->RemoveStream(rtp.remote_ssrc); delete receive_stream; } @@ -1143,7 +1239,7 @@ void Call::AddAdaptationResource(rtc::scoped_refptr resource) { } RtpTransportControllerSendInterface* Call::GetTransportControllerSend() { - return transport_send_ptr_; + return transport_send_.get(); } Call::Stats Call::GetStats() const { @@ -1153,7 +1249,7 @@ Call::Stats Call::GetStats() const { // TODO(srte): It is unclear if we only want to report queues if network is // available. stats.pacer_delay_ms = - aggregate_network_up_ ? transport_send_ptr_->GetPacerQueuingDelayMs() : 0; + aggregate_network_up_ ? transport_send_->GetPacerQueuingDelayMs() : 0; stats.rtt_ms = call_stats_->LastProcessedRtt(); @@ -1163,14 +1259,16 @@ Call::Stats Call::GetStats() const { receive_side_cc_.GetRemoteBitrateEstimator(false)->LatestEstimate( &ssrcs, &recv_bandwidth); stats.recv_bandwidth_bps = recv_bandwidth; - stats.send_bandwidth_bps = last_bandwidth_bps_; - stats.max_padding_bitrate_bps = configured_max_padding_bitrate_bps_; + stats.send_bandwidth_bps = + last_bandwidth_bps_.load(std::memory_order_relaxed); + stats.max_padding_bitrate_bps = + configured_max_padding_bitrate_bps_.load(std::memory_order_relaxed); return stats; } const WebRtcKeyValueConfig& Call::trials() const { - return *config_.trials; + return trials_; } TaskQueueBase* Call::network_thread() const { @@ -1252,7 +1350,28 @@ void Call::UpdateAggregateNetworkState() { } aggregate_network_up_ = aggregate_network_up; - transport_send_ptr_->OnNetworkAvailability(aggregate_network_up); + transport_send_->OnNetworkAvailability(aggregate_network_up); +} + +void Call::OnLocalSsrcUpdated(webrtc::AudioReceiveStream& stream, + uint32_t local_ssrc) { + RTC_DCHECK_RUN_ON(worker_thread_); + webrtc::internal::AudioReceiveStream& receive_stream = + static_cast(stream); + + receive_stream.SetLocalSsrc(local_ssrc); + auto it = audio_send_ssrcs_.find(local_ssrc); + receive_stream.AssociateSendStream(it != audio_send_ssrcs_.end() ? it->second + : nullptr); +} + +void Call::OnUpdateSyncGroup(webrtc::AudioReceiveStream& stream, + const std::string& sync_group) { + RTC_DCHECK_RUN_ON(worker_thread_); + webrtc::internal::AudioReceiveStream& receive_stream = + static_cast(stream); + receive_stream.SetSyncGroup(sync_group); + ConfigureSync(sync_group); } void Call::OnSentPacket(const rtc::SentPacket& sent_packet) { @@ -1264,56 +1383,47 @@ void Call::OnSentPacket(const rtc::SentPacket& sent_packet) { // implementations that either just do a PostTask or use locking. video_send_delay_stats_->OnSentPacket(sent_packet.packet_id, clock_->TimeInMilliseconds()); - transport_send_ptr_->OnSentPacket(sent_packet); + transport_send_->OnSentPacket(sent_packet); } void Call::OnStartRateUpdate(DataRate start_rate) { - RTC_DCHECK_RUN_ON(send_transport_queue()); + RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_); bitrate_allocator_->UpdateStartRate(start_rate.bps()); } void Call::OnTargetTransferRate(TargetTransferRate msg) { - RTC_DCHECK_RUN_ON(send_transport_queue()); + RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_); uint32_t target_bitrate_bps = msg.target_rate.bps(); // For controlling the rate of feedback messages. 
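// Illustrative sketch of the association step in Call::OnLocalSsrcUpdated() earlier
// in this hunk: when a receive stream's local SSRC changes, the matching audio send
// stream (if any) is looked up by that SSRC and re-associated. The Fake* types are
// hypothetical stand-ins used only to show the map lookup.
#include <cassert>
#include <cstdint>
#include <map>

struct FakeAudioSendStream {};

class FakeAudioReceiveStream {
 public:
  void SetLocalSsrc(uint32_t ssrc) { local_ssrc_ = ssrc; }
  void AssociateSendStream(FakeAudioSendStream* send_stream) {
    associated_send_stream_ = send_stream;
  }
  FakeAudioSendStream* associated_send_stream() const {
    return associated_send_stream_;
  }

 private:
  uint32_t local_ssrc_ = 0;
  FakeAudioSendStream* associated_send_stream_ = nullptr;
};

void OnLocalSsrcUpdated(
    const std::map<uint32_t, FakeAudioSendStream*>& audio_send_ssrcs,
    FakeAudioReceiveStream& receive_stream,
    uint32_t local_ssrc) {
  receive_stream.SetLocalSsrc(local_ssrc);
  auto it = audio_send_ssrcs.find(local_ssrc);
  receive_stream.AssociateSendStream(
      it != audio_send_ssrcs.end() ? it->second : nullptr);
}

int main() {
  FakeAudioSendStream send_stream;
  std::map<uint32_t, FakeAudioSendStream*> audio_send_ssrcs = {{42u, &send_stream}};
  FakeAudioReceiveStream receive_stream;

  OnLocalSsrcUpdated(audio_send_ssrcs, receive_stream, 42u);
  assert(receive_stream.associated_send_stream() == &send_stream);

  OnLocalSsrcUpdated(audio_send_ssrcs, receive_stream, 43u);  // No matching sender.
  assert(receive_stream.associated_send_stream() == nullptr);
  return 0;
}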
receive_side_cc_.OnBitrateChanged(target_bitrate_bps); bitrate_allocator_->OnNetworkEstimateChanged(msg); - worker_thread_->PostTask( - ToQueuedTask(task_safety_, [this, target_bitrate_bps]() { - RTC_DCHECK_RUN_ON(worker_thread_); - last_bandwidth_bps_ = target_bitrate_bps; - - // Ignore updates if bitrate is zero (the aggregate network state is - // down) or if we're not sending video. - if (target_bitrate_bps == 0 || video_send_streams_.empty()) { - estimated_send_bitrate_kbps_counter_.ProcessAndPause(); - pacer_bitrate_kbps_counter_.ProcessAndPause(); - return; - } + last_bandwidth_bps_.store(target_bitrate_bps, std::memory_order_relaxed); - estimated_send_bitrate_kbps_counter_.Add(target_bitrate_bps / 1000); - // Pacer bitrate may be higher than bitrate estimate if enforcing min - // bitrate. - uint32_t pacer_bitrate_bps = - std::max(target_bitrate_bps, min_allocated_send_bitrate_bps_); - pacer_bitrate_kbps_counter_.Add(pacer_bitrate_bps / 1000); - })); + // Ignore updates if bitrate is zero (the aggregate network state is + // down) or if we're not sending video. + // Using |video_send_streams_empty_| is racy but as the caller can't + // reasonably expect synchronize with changes in |video_send_streams_| (being + // on |send_transport_sequence_checker|), we can avoid a PostTask this way. + if (target_bitrate_bps == 0 || + video_send_streams_empty_.load(std::memory_order_relaxed)) { + send_stats_.PauseSendAndPacerBitrateCounters(); + } else { + send_stats_.AddTargetBitrateSample(target_bitrate_bps); + } } void Call::OnAllocationLimitsChanged(BitrateAllocationLimits limits) { - RTC_DCHECK_RUN_ON(send_transport_queue()); + RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_); transport_send_ptr_->SetAllocatedSendBitrateLimits(limits); - - worker_thread_->PostTask(ToQueuedTask(task_safety_, [this, limits]() { - RTC_DCHECK_RUN_ON(worker_thread_); - min_allocated_send_bitrate_bps_ = limits.min_allocatable_rate.bps(); - configured_max_padding_bitrate_bps_ = limits.max_padding_rate.bps(); - })); + send_stats_.SetMinAllocatableRate(limits); + configured_max_padding_bitrate_bps_.store(limits.max_padding_rate.bps(), + std::memory_order_relaxed); } +// RTC_RUN_ON(worker_thread_) void Call::ConfigureSync(const std::string& sync_group) { // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. // Set sync only if there was no previous one. @@ -1365,9 +1475,8 @@ void Call::ConfigureSync(const std::string& sync_group) { } } -PacketReceiver::DeliveryStatus Call::DeliverRtcp(MediaType media_type, - const uint8_t* packet, - size_t length) { +// RTC_RUN_ON(network_thread_) +void Call::DeliverRtcp(MediaType media_type, rtc::CopyOnWriteBuffer packet) { TRACE_EVENT0("webrtc", "Call::DeliverRtcp"); // TODO(bugs.webrtc.org/11993): This DCHECK is here just to maintain the @@ -1382,46 +1491,39 @@ PacketReceiver::DeliveryStatus Call::DeliverRtcp(MediaType media_type, // This way we'll also know more about the context of the packet. RTC_DCHECK_EQ(media_type, MediaType::ANY); - // TODO(pbos): Make sure it's a valid packet. - // Return DELIVERY_UNKNOWN_SSRC if it can be determined that - // there's no receiver of the packet. - if (received_bytes_per_second_counter_.HasSample()) { - // First RTP packet has been received. 
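// Illustrative sketch of the lock-free pattern used in OnTargetTransferRate()
// above: the worker thread keeps an atomic "no video send streams" flag up to
// date, and the send transport sequence reads it with relaxed ordering to choose
// between pausing the bitrate counters and adding a sample, avoiding a PostTask.
// The globals and counters below are simplified stand-ins, not the real Call
// members.
#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<bool> video_send_streams_empty{true};
std::atomic<uint32_t> last_bandwidth_bps{0};
int paused_count = 0;
int sample_count = 0;

// Would run on the worker thread when video send streams are created/destroyed.
void SetHasVideoSendStreams(bool has_streams) {
  video_send_streams_empty.store(!has_streams, std::memory_order_relaxed);
}

// Would run on the send transport sequence.
void OnTargetTransferRate(uint32_t target_bitrate_bps) {
  last_bandwidth_bps.store(target_bitrate_bps, std::memory_order_relaxed);
  if (target_bitrate_bps == 0 ||
      video_send_streams_empty.load(std::memory_order_relaxed)) {
    ++paused_count;  // Stands in for PauseSendAndPacerBitrateCounters().
  } else {
    ++sample_count;  // Stands in for AddTargetBitrateSample().
  }
}

int main() {
  OnTargetTransferRate(300000);  // No streams yet -> counters stay paused.
  SetHasVideoSendStreams(true);
  OnTargetTransferRate(500000);  // Now a sample is recorded.
  std::printf("paused=%d sampled=%d last=%u\n", paused_count, sample_count,
              static_cast<unsigned>(
                  last_bandwidth_bps.load(std::memory_order_relaxed)));
  return 0;
}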
- received_bytes_per_second_counter_.Add(static_cast(length)); - received_rtcp_bytes_per_second_counter_.Add(static_cast(length)); - } - bool rtcp_delivered = false; - if (media_type == MediaType::ANY || media_type == MediaType::VIDEO) { - for (VideoReceiveStream2* stream : video_receive_streams_) { - if (stream->DeliverRtcp(packet, length)) - rtcp_delivered = true; - } - } - if (media_type == MediaType::ANY || media_type == MediaType::AUDIO) { - for (AudioReceiveStream* stream : audio_receive_streams_) { - stream->DeliverRtcp(packet, length); - rtcp_delivered = true; - } - } - if (media_type == MediaType::ANY || media_type == MediaType::VIDEO) { - for (VideoSendStream* stream : video_send_streams_) { - stream->DeliverRtcp(packet, length); - rtcp_delivered = true; - } - } - if (media_type == MediaType::ANY || media_type == MediaType::AUDIO) { - for (auto& kv : audio_send_ssrcs_) { - kv.second->DeliverRtcp(packet, length); - rtcp_delivered = true; - } - } + // TODO(bugs.webrtc.org/11993): This should execute directly on the network + // thread. + worker_thread_->PostTask( + ToQueuedTask(task_safety_, [this, packet = std::move(packet)]() { + RTC_DCHECK_RUN_ON(worker_thread_); - if (rtcp_delivered) { - event_log_->Log(std::make_unique( - rtc::MakeArrayView(packet, length))); - } + receive_stats_.AddReceivedRtcpBytes(static_cast(packet.size())); + bool rtcp_delivered = false; + for (VideoReceiveStream2* stream : video_receive_streams_) { + if (stream->DeliverRtcp(packet.cdata(), packet.size())) + rtcp_delivered = true; + } + + for (AudioReceiveStream* stream : audio_receive_streams_) { + stream->DeliverRtcp(packet.cdata(), packet.size()); + rtcp_delivered = true; + } + + for (VideoSendStream* stream : video_send_streams_) { + stream->DeliverRtcp(packet.cdata(), packet.size()); + rtcp_delivered = true; + } + + for (auto& kv : audio_send_ssrcs_) { + kv.second->DeliverRtcp(packet.cdata(), packet.size()); + rtcp_delivered = true; + } - return rtcp_delivered ? 
DELIVERY_OK : DELIVERY_PACKET_ERROR; + if (rtcp_delivered) { + event_log_->Log(std::make_unique( + rtc::MakeArrayView(packet.cdata(), packet.size()))); + } + })); } PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, @@ -1466,7 +1568,8 @@ PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, return DELIVERY_UNKNOWN_SSRC; } - parsed_packet.IdentifyExtensions(it->second.extensions); + parsed_packet.IdentifyExtensions( + RtpHeaderExtensionMap(it->second->rtp_config().extensions)); NotifyBweOfReceivedPacket(parsed_packet, media_type); @@ -1475,29 +1578,19 @@ PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, int length = static_cast(parsed_packet.size()); if (media_type == MediaType::AUDIO) { if (audio_receiver_controller_.OnRtpPacket(parsed_packet)) { - received_bytes_per_second_counter_.Add(length); - received_audio_bytes_per_second_counter_.Add(length); + receive_stats_.AddReceivedAudioBytes(length, + parsed_packet.arrival_time()); event_log_->Log( std::make_unique(parsed_packet)); - const int64_t arrival_time_ms = parsed_packet.arrival_time().ms(); - if (!first_received_rtp_audio_ms_) { - first_received_rtp_audio_ms_.emplace(arrival_time_ms); - } - last_received_rtp_audio_ms_.emplace(arrival_time_ms); return DELIVERY_OK; } } else if (media_type == MediaType::VIDEO) { parsed_packet.set_payload_type_frequency(kVideoPayloadTypeFrequency); if (video_receiver_controller_.OnRtpPacket(parsed_packet)) { - received_bytes_per_second_counter_.Add(length); - received_video_bytes_per_second_counter_.Add(length); + receive_stats_.AddReceivedVideoBytes(length, + parsed_packet.arrival_time()); event_log_->Log( std::make_unique(parsed_packet)); - const int64_t arrival_time_ms = parsed_packet.arrival_time().ms(); - if (!first_received_rtp_video_ms_) { - first_received_rtp_video_ms_.emplace(arrival_time_ms); - } - last_received_rtp_video_ms_.emplace(arrival_time_ms); return DELIVERY_OK; } } @@ -1508,38 +1601,16 @@ PacketReceiver::DeliveryStatus Call::DeliverPacket( MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { - RTC_DCHECK_RUN_ON(worker_thread_); - - if (IsRtcp(packet.cdata(), packet.size())) - return DeliverRtcp(media_type, packet.cdata(), packet.size()); + if (IsRtcpPacket(packet)) { + RTC_DCHECK_RUN_ON(network_thread_); + DeliverRtcp(media_type, std::move(packet)); + return DELIVERY_OK; + } + RTC_DCHECK_RUN_ON(worker_thread_); return DeliverRtp(media_type, std::move(packet), packet_time_us); } -void Call::DeliverPacketAsync(MediaType media_type, - rtc::CopyOnWriteBuffer packet, - int64_t packet_time_us, - PacketCallback callback) { - RTC_DCHECK_RUN_ON(network_thread_); - - TaskQueueBase* network_thread = rtc::Thread::Current(); - RTC_DCHECK(network_thread); - - worker_thread_->PostTask(ToQueuedTask( - task_safety_, [this, network_thread, media_type, p = std::move(packet), - packet_time_us, cb = std::move(callback)] { - RTC_DCHECK_RUN_ON(worker_thread_); - DeliveryStatus status = DeliverPacket(media_type, p, packet_time_us); - if (cb) { - network_thread->PostTask( - ToQueuedTask([cb = std::move(cb), status, media_type, - p = std::move(p), packet_time_us]() { - cb(status, media_type, std::move(p), packet_time_us); - })); - } - })); -} - void Call::OnRecoveredPacket(const uint8_t* packet, size_t length) { // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. 
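// Illustrative sketch of the hand-off pattern in the rewritten Call::DeliverRtcp()
// above: the rtc::CopyOnWriteBuffer is moved into a closure queued for the worker
// thread, so the network thread returns immediately and no copy is made. A
// std::vector<uint8_t> and a plain task vector stand in for the buffer and the
// worker thread's task queue; they are assumptions for this standalone example.
#include <cstdint>
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

std::vector<std::function<void()>> worker_tasks;

void PostToWorker(std::function<void()> task) {
  worker_tasks.push_back(std::move(task));
}

void DeliverRtcp(std::vector<uint8_t> packet) {  // Called on the "network thread".
  PostToWorker([packet = std::move(packet)]() {
    // Runs later on the "worker thread"; the packet was moved, not copied.
    std::printf("delivering %zu RTCP bytes on worker\n", packet.size());
  });
}

int main() {
  DeliverRtcp({0x80, 0xc8, 0x00, 0x06});  // A few fake RTCP header bytes.
  for (auto& task : worker_tasks)          // Drain the "worker thread".
    task();
  return 0;
}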
// This method is called synchronously via |OnRtpPacket()| (see DeliverRtp) @@ -1563,18 +1634,20 @@ void Call::OnRecoveredPacket(const uint8_t* packet, size_t length) { // which is being torn down. return; } - parsed_packet.IdentifyExtensions(it->second.extensions); + parsed_packet.IdentifyExtensions( + RtpHeaderExtensionMap(it->second->rtp_config().extensions)); // TODO(brandtr): Update here when we support protecting audio packets too. parsed_packet.set_payload_type_frequency(kVideoPayloadTypeFrequency); video_receiver_controller_.OnRtpPacket(parsed_packet); } +// RTC_RUN_ON(worker_thread_) void Call::NotifyBweOfReceivedPacket(const RtpPacketReceived& packet, MediaType media_type) { auto it = receive_rtp_config_.find(packet.Ssrc()); - bool use_send_side_bwe = - (it != receive_rtp_config_.end()) && it->second.use_send_side_bwe; + bool use_send_side_bwe = (it != receive_rtp_config_.end()) && + UseSendSideBwe(it->second->rtp_config()); RTPHeader header; packet.GetHeader(&header); @@ -1585,7 +1658,7 @@ void Call::NotifyBweOfReceivedPacket(const RtpPacketReceived& packet, if (header.extension.hasAbsoluteSendTime) { packet_msg.send_time = header.extension.GetAbsoluteSendTimestamp(); } - transport_send_ptr_->OnReceivedPacket(packet_msg); + transport_send_->OnReceivedPacket(packet_msg); if (!use_send_side_bwe && header.extension.hasTransportSequenceNumber) { // Inconsistent configuration of send side BWE. Do nothing. diff --git a/call/call.h b/call/call.h index 37d784f726..f6388c3c78 100644 --- a/call/call.h +++ b/call/call.h @@ -83,12 +83,15 @@ class Call { }; static Call* Create(const Call::Config& config); - static Call* Create(const Call::Config& config, - rtc::scoped_refptr call_thread); static Call* Create(const Call::Config& config, Clock* clock, rtc::scoped_refptr call_thread, std::unique_ptr pacer_thread); + static Call* Create(const Call::Config& config, + Clock* clock, + rtc::scoped_refptr call_thread, + std::unique_ptr + transportControllerSend); virtual AudioSendStream* CreateAudioSendStream( const AudioSendStream::Config& config) = 0; @@ -152,6 +155,14 @@ class Call { virtual void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) = 0; + // Called when a receive stream's local ssrc has changed and association with + // send streams needs to be updated. 
+ virtual void OnLocalSsrcUpdated(AudioReceiveStream& stream, + uint32_t local_ssrc) = 0; + + virtual void OnUpdateSyncGroup(AudioReceiveStream& stream, + const std::string& sync_group) = 0; + virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0; virtual void SetClientBitratePreferences( diff --git a/call/call_config.cc b/call/call_config.cc index 8b3c91222e..23b60ce436 100644 --- a/call/call_config.cc +++ b/call/call_config.cc @@ -22,6 +22,19 @@ CallConfig::CallConfig(RtcEventLog* event_log, CallConfig::CallConfig(const CallConfig& config) = default; +RtpTransportConfig CallConfig::ExtractTransportConfig() const { + RtpTransportConfig transportConfig; + transportConfig.bitrate_config = bitrate_config; + transportConfig.event_log = event_log; + transportConfig.network_controller_factory = network_controller_factory; + transportConfig.network_state_predictor_factory = + network_state_predictor_factory; + transportConfig.task_queue_factory = task_queue_factory; + transportConfig.trials = trials; + + return transportConfig; +} + CallConfig::~CallConfig() = default; } // namespace webrtc diff --git a/call/call_config.h b/call/call_config.h index 95dad36002..ba6dec3ad6 100644 --- a/call/call_config.h +++ b/call/call_config.h @@ -19,6 +19,8 @@ #include "api/transport/network_control.h" #include "api/transport/webrtc_key_value_config.h" #include "call/audio_state.h" +#include "call/rtp_transport_config.h" +#include "call/rtp_transport_controller_send_factory_interface.h" namespace webrtc { @@ -32,6 +34,7 @@ struct CallConfig { explicit CallConfig(RtcEventLog* event_log, TaskQueueBase* network_task_queue = nullptr); CallConfig(const CallConfig&); + RtpTransportConfig ExtractTransportConfig() const; ~CallConfig(); // Bitrate config used until valid bitrate estimates are calculated. Also @@ -69,6 +72,9 @@ struct CallConfig { const WebRtcKeyValueConfig* trials = nullptr; TaskQueueBase* const network_task_queue_ = nullptr; + // RtpTransportControllerSend to use for this call. 
+ RtpTransportControllerSendFactoryInterface* + rtp_transport_controller_send_factory = nullptr; }; } // namespace webrtc diff --git a/call/call_factory.cc b/call/call_factory.cc index cc02c02835..aeb3cbdaa7 100644 --- a/call/call_factory.cc +++ b/call/call_factory.cc @@ -14,11 +14,13 @@ #include #include +#include #include "absl/types/optional.h" #include "api/test/simulated_network.h" #include "call/call.h" #include "call/degraded_call.h" +#include "call/rtp_transport_config.h" #include "rtc_base/checks.h" #include "system_wrappers/include/field_trial.h" @@ -81,10 +83,19 @@ Call* CallFactory::CreateCall(const Call::Config& config) { absl::optional receive_degradation_config = ParseDegradationConfig(false); + RtpTransportConfig transportConfig = config.ExtractTransportConfig(); + if (send_degradation_config || receive_degradation_config) { - return new DegradedCall(std::unique_ptr(Call::Create(config)), - send_degradation_config, receive_degradation_config, - config.task_queue_factory); + return new DegradedCall( + std::unique_ptr(Call::Create( + config, Clock::GetRealTimeClock(), + SharedModuleThread::Create( + ProcessThread::Create("ModuleProcessThread"), nullptr), + config.rtp_transport_controller_send_factory->Create( + transportConfig, Clock::GetRealTimeClock(), + ProcessThread::Create("PacerThread")))), + send_degradation_config, receive_degradation_config, + config.task_queue_factory); } if (!module_thread_) { @@ -95,7 +106,10 @@ Call* CallFactory::CreateCall(const Call::Config& config) { }); } - return Call::Create(config, module_thread_); + return Call::Create(config, Clock::GetRealTimeClock(), module_thread_, + config.rtp_transport_controller_send_factory->Create( + transportConfig, Clock::GetRealTimeClock(), + ProcessThread::Create("PacerThread"))); } std::unique_ptr CreateCallFactory() { diff --git a/call/call_unittest.cc b/call/call_unittest.cc index b06af1eecd..92a037f157 100644 --- a/call/call_unittest.cc +++ b/call/call_unittest.cc @@ -248,7 +248,7 @@ TEST(CallTest, CreateDestroy_FlexfecReceiveStream) { MockTransport rtcp_send_transport; FlexfecReceiveStream::Config config(&rtcp_send_transport); config.payload_type = 118; - config.remote_ssrc = 38837212; + config.rtp.remote_ssrc = 38837212; config.protected_media_ssrcs = {27273}; FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config); @@ -267,7 +267,7 @@ TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) { for (int i = 0; i < 2; ++i) { for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) { - config.remote_ssrc = ssrc; + config.rtp.remote_ssrc = ssrc; config.protected_media_ssrcs = {ssrc + 1}; FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); @@ -295,22 +295,22 @@ TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) { FlexfecReceiveStream* stream; std::list streams; - config.remote_ssrc = 838383; + config.rtp.remote_ssrc = 838383; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); - config.remote_ssrc = 424993; + config.rtp.remote_ssrc = 424993; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); - config.remote_ssrc = 99383; + config.rtp.remote_ssrc = 99383; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); - config.remote_ssrc = 5548; + config.rtp.remote_ssrc = 5548; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); 
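// Illustrative sketch of the dependency-injection path added above: CallConfig now
// carries an RtpTransportControllerSendFactoryInterface*, and CallFactory extracts
// an RtpTransportConfig and asks that factory to build the transport controller.
// The Fake* types below are simplified stand-ins that only mirror the shape of the
// plumbing; they are not the WebRTC interfaces.
#include <cassert>
#include <memory>
#include <string>
#include <utility>

struct FakeTransportConfig {
  std::string field_trials;
};

class FakeTransportController {
 public:
  explicit FakeTransportController(FakeTransportConfig config)
      : config_(std::move(config)) {}
  const FakeTransportConfig& config() const { return config_; }

 private:
  FakeTransportConfig config_;
};

class FakeTransportControllerFactory {
 public:
  virtual ~FakeTransportControllerFactory() = default;
  virtual std::unique_ptr<FakeTransportController> Create(
      const FakeTransportConfig& config) = 0;
};

class DefaultFakeTransportControllerFactory
    : public FakeTransportControllerFactory {
 public:
  std::unique_ptr<FakeTransportController> Create(
      const FakeTransportConfig& config) override {
    return std::make_unique<FakeTransportController>(config);
  }
};

struct FakeCallConfig {
  std::string field_trials;
  FakeTransportControllerFactory* transport_controller_factory = nullptr;

  FakeTransportConfig ExtractTransportConfig() const {
    return FakeTransportConfig{field_trials};
  }
};

int main() {
  DefaultFakeTransportControllerFactory factory;
  FakeCallConfig call_config;
  call_config.field_trials = "WebRTC-SomeTrial/Enabled/";
  call_config.transport_controller_factory = &factory;

  // Mirrors CallFactory::CreateCall(): extract the transport config, then let the
  // injected factory build the controller that the call will own.
  auto controller = call_config.transport_controller_factory->Create(
      call_config.ExtractTransportConfig());
  assert(controller->config().field_trials == "WebRTC-SomeTrial/Enabled/");
  return 0;
}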
diff --git a/call/degraded_call.cc b/call/degraded_call.cc index 73c236bc0c..5462085490 100644 --- a/call/degraded_call.cc +++ b/call/degraded_call.cc @@ -288,6 +288,16 @@ void DegradedCall::OnAudioTransportOverheadChanged( call_->OnAudioTransportOverheadChanged(transport_overhead_per_packet); } +void DegradedCall::OnLocalSsrcUpdated(AudioReceiveStream& stream, + uint32_t local_ssrc) { + call_->OnLocalSsrcUpdated(stream, local_ssrc); +} + +void DegradedCall::OnUpdateSyncGroup(AudioReceiveStream& stream, + const std::string& sync_group) { + call_->OnUpdateSyncGroup(stream, sync_group); +} + void DegradedCall::OnSentPacket(const rtc::SentPacket& sent_packet) { if (send_config_) { // If we have a degraded send-transport, we have already notified call diff --git a/call/degraded_call.h b/call/degraded_call.h index 03fc14f284..70dc126807 100644 --- a/call/degraded_call.h +++ b/call/degraded_call.h @@ -16,6 +16,7 @@ #include #include +#include #include "absl/types/optional.h" #include "api/call/transport.h" @@ -93,6 +94,10 @@ class DegradedCall : public Call, private PacketReceiver { void SignalChannelNetworkState(MediaType media, NetworkState state) override; void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) override; + void OnLocalSsrcUpdated(AudioReceiveStream& stream, + uint32_t local_ssrc) override; + void OnUpdateSyncGroup(AudioReceiveStream& stream, + const std::string& sync_group) override; void OnSentPacket(const rtc::SentPacket& sent_packet) override; protected: diff --git a/call/flexfec_receive_stream.h b/call/flexfec_receive_stream.h index 2f7438f9a4..72e544e7ec 100644 --- a/call/flexfec_receive_stream.h +++ b/call/flexfec_receive_stream.h @@ -19,11 +19,13 @@ #include "api/call/transport.h" #include "api/rtp_headers.h" #include "api/rtp_parameters.h" +#include "call/receive_stream.h" #include "call/rtp_packet_sink_interface.h" namespace webrtc { -class FlexfecReceiveStream : public RtpPacketSinkInterface { +class FlexfecReceiveStream : public RtpPacketSinkInterface, + public ReceiveStream { public: ~FlexfecReceiveStream() override = default; @@ -48,8 +50,7 @@ class FlexfecReceiveStream : public RtpPacketSinkInterface { // Payload type for FlexFEC. int payload_type = -1; - // SSRC for FlexFEC stream to be received. - uint32_t remote_ssrc = 0; + RtpConfig rtp; // Vector containing a single element, corresponding to the SSRC of the // media stream being protected by this FlexFEC stream. The vector MUST have @@ -59,26 +60,14 @@ class FlexfecReceiveStream : public RtpPacketSinkInterface { // protection. std::vector protected_media_ssrcs; - // SSRC for RTCP reports to be sent. - uint32_t local_ssrc = 0; - // What RTCP mode to use in the reports. RtcpMode rtcp_mode = RtcpMode::kCompound; // Transport for outgoing RTCP packets. Transport* rtcp_send_transport = nullptr; - - // |transport_cc| is true whenever the send-side BWE RTCP feedback message - // has been negotiated. This is a prerequisite for enabling send-side BWE. - bool transport_cc = false; - - // RTP header extensions that have been negotiated for this track. 
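// Illustrative sketch of how a FlexFEC receive config is populated after the move
// of the per-stream RTP settings into the nested `rtp` member shown in this hunk
// and in the updated tests. The struct below is a simplified stand-in mirroring
// only the fields that appear in this patch; the numeric values are taken from the
// updated call_unittest.cc/flexfec tests.
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct FakeFlexfecConfig {
  struct Rtp {
    uint32_t remote_ssrc = 0;
    uint32_t local_ssrc = 0;
    bool transport_cc = false;
    std::vector<std::string> extensions;
  };

  int payload_type = -1;
  Rtp rtp;
  std::vector<uint32_t> protected_media_ssrcs;

  // Rough mirror of Config::IsCompleteAndEnabled() as changed above.
  bool IsCompleteAndEnabled() const {
    return payload_type >= 0 && rtp.remote_ssrc != 0 &&
           protected_media_ssrcs.size() == 1u;
  }
};

int main() {
  FakeFlexfecConfig config;
  config.payload_type = 118;
  config.rtp.remote_ssrc = 38837212;  // Was config.remote_ssrc before this patch.
  config.rtp.local_ssrc = 18374743;   // Was config.local_ssrc before this patch.
  config.rtp.transport_cc = true;     // Was config.transport_cc before this patch.
  config.protected_media_ssrcs = {27273};
  assert(config.IsCompleteAndEnabled());
  return 0;
}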
- std::vector rtp_header_extensions; }; virtual Stats GetStats() const = 0; - - virtual const Config& GetConfig() const = 0; }; } // namespace webrtc diff --git a/call/flexfec_receive_stream_impl.cc b/call/flexfec_receive_stream_impl.cc index e629bca347..688efb7b5e 100644 --- a/call/flexfec_receive_stream_impl.cc +++ b/call/flexfec_receive_stream_impl.cc @@ -44,21 +44,21 @@ std::string FlexfecReceiveStream::Config::ToString() const { char buf[1024]; rtc::SimpleStringBuilder ss(buf); ss << "{payload_type: " << payload_type; - ss << ", remote_ssrc: " << remote_ssrc; - ss << ", local_ssrc: " << local_ssrc; + ss << ", remote_ssrc: " << rtp.remote_ssrc; + ss << ", local_ssrc: " << rtp.local_ssrc; ss << ", protected_media_ssrcs: ["; size_t i = 0; for (; i + 1 < protected_media_ssrcs.size(); ++i) ss << protected_media_ssrcs[i] << ", "; if (!protected_media_ssrcs.empty()) ss << protected_media_ssrcs[i]; - ss << "], transport_cc: " << (transport_cc ? "on" : "off"); - ss << ", rtp_header_extensions: ["; + ss << "], transport_cc: " << (rtp.transport_cc ? "on" : "off"); + ss << ", rtp.extensions: ["; i = 0; - for (; i + 1 < rtp_header_extensions.size(); ++i) - ss << rtp_header_extensions[i].ToString() << ", "; - if (!rtp_header_extensions.empty()) - ss << rtp_header_extensions[i].ToString(); + for (; i + 1 < rtp.extensions.size(); ++i) + ss << rtp.extensions[i].ToString() << ", "; + if (!rtp.extensions.empty()) + ss << rtp.extensions[i].ToString(); ss << "]}"; return ss.str(); } @@ -68,7 +68,7 @@ bool FlexfecReceiveStream::Config::IsCompleteAndEnabled() const { if (payload_type < 0) return false; // Do we have the necessary SSRC information? - if (remote_ssrc == 0) + if (rtp.remote_ssrc == 0) return false; // TODO(brandtr): Update this check when we support multistream protection. if (protected_media_ssrcs.size() != 1u) @@ -91,7 +91,7 @@ std::unique_ptr MaybeCreateFlexfecReceiver( } RTC_DCHECK_GE(config.payload_type, 0); RTC_DCHECK_LE(config.payload_type, 127); - if (config.remote_ssrc == 0) { + if (config.rtp.remote_ssrc == 0) { RTC_LOG(LS_WARNING) << "Invalid FlexFEC SSRC given. 
" "This FlexfecReceiveStream will therefore be useless."; @@ -114,7 +114,7 @@ std::unique_ptr MaybeCreateFlexfecReceiver( } RTC_DCHECK_EQ(1U, config.protected_media_ssrcs.size()); return std::unique_ptr(new FlexfecReceiver( - clock, config.remote_ssrc, config.protected_media_ssrcs[0], + clock, config.rtp.remote_ssrc, config.protected_media_ssrcs[0], recovered_packet_receiver)); } @@ -130,7 +130,7 @@ std::unique_ptr CreateRtpRtcpModule( configuration.receive_statistics = receive_statistics; configuration.outgoing_transport = config.rtcp_send_transport; configuration.rtt_stats = rtt_stats; - configuration.local_media_ssrc = config.local_ssrc; + configuration.local_media_ssrc = config.rtp.local_ssrc; return ModuleRtpRtcpImpl2::Create(configuration); } @@ -138,11 +138,9 @@ std::unique_ptr CreateRtpRtcpModule( FlexfecReceiveStreamImpl::FlexfecReceiveStreamImpl( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, const Config& config, RecoveredPacketReceiver* recovered_packet_receiver, - RtcpRttStats* rtt_stats, - ProcessThread* process_thread) + RtcpRttStats* rtt_stats) : config_(config), receiver_(MaybeCreateFlexfecReceiver(clock, config_, @@ -151,32 +149,38 @@ FlexfecReceiveStreamImpl::FlexfecReceiveStreamImpl( rtp_rtcp_(CreateRtpRtcpModule(clock, rtp_receive_statistics_.get(), config_, - rtt_stats)), - process_thread_(process_thread) { + rtt_stats)) { RTC_LOG(LS_INFO) << "FlexfecReceiveStreamImpl: " << config_.ToString(); + packet_sequence_checker_.Detach(); + // RTCP reporting. rtp_rtcp_->SetRTCPStatus(config_.rtcp_mode); - process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); +} + +FlexfecReceiveStreamImpl::~FlexfecReceiveStreamImpl() { + RTC_LOG(LS_INFO) << "~FlexfecReceiveStreamImpl: " << config_.ToString(); +} + +void FlexfecReceiveStreamImpl::RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK(!rtp_stream_receiver_); + + if (!receiver_) + return; - // Register with transport. // TODO(nisse): OnRtpPacket in this class delegates all real work to - // |receiver_|. So maybe we don't need to implement RtpPacketSinkInterface + // `receiver_`. So maybe we don't need to implement RtpPacketSinkInterface // here at all, we'd then delete the OnRtpPacket method and instead register - // |receiver_| as the RtpPacketSinkInterface for this stream. - // TODO(nisse): Passing |this| from the constructor to the RtpDemuxer, before - // the object is fully initialized, is risky. But it works in this case - // because locking in our caller, Call::CreateFlexfecReceiveStream, ensures - // that the demuxer doesn't call OnRtpPacket before this object is fully - // constructed. Registering |receiver_| instead of |this| would solve this - // problem too. + // `receiver_` as the RtpPacketSinkInterface for this stream. 
rtp_stream_receiver_ = - receiver_controller->CreateReceiver(config_.remote_ssrc, this); + receiver_controller->CreateReceiver(config_.rtp.remote_ssrc, this); } -FlexfecReceiveStreamImpl::~FlexfecReceiveStreamImpl() { - RTC_LOG(LS_INFO) << "~FlexfecReceiveStreamImpl: " << config_.ToString(); - process_thread_->DeRegisterModule(rtp_rtcp_.get()); +void FlexfecReceiveStreamImpl::UnregisterFromTransport() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_stream_receiver_.reset(); } void FlexfecReceiveStreamImpl::OnRtpPacket(const RtpPacketReceived& packet) { @@ -186,7 +190,7 @@ void FlexfecReceiveStreamImpl::OnRtpPacket(const RtpPacketReceived& packet) { receiver_->OnRtpPacket(packet); // Do not report media packets in the RTCP RRs generated by |rtp_rtcp_|. - if (packet.Ssrc() == config_.remote_ssrc) { + if (packet.Ssrc() == config_.rtp.remote_ssrc) { rtp_receive_statistics_->OnRtpPacket(packet); } } @@ -197,9 +201,4 @@ FlexfecReceiveStreamImpl::Stats FlexfecReceiveStreamImpl::GetStats() const { return FlexfecReceiveStream::Stats(); } -const FlexfecReceiveStream::Config& FlexfecReceiveStreamImpl::GetConfig() - const { - return config_; -} - } // namespace webrtc diff --git a/call/flexfec_receive_stream_impl.h b/call/flexfec_receive_stream_impl.h index 888dae9ebd..285a33f7bb 100644 --- a/call/flexfec_receive_stream_impl.h +++ b/call/flexfec_receive_stream_impl.h @@ -16,12 +16,12 @@ #include "call/flexfec_receive_stream.h" #include "call/rtp_packet_sink_interface.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "rtc_base/system/no_unique_address.h" #include "system_wrappers/include/clock.h" namespace webrtc { class FlexfecReceiver; -class ProcessThread; class ReceiveStatistics; class RecoveredPacketReceiver; class RtcpRttStats; @@ -32,22 +32,37 @@ class RtpStreamReceiverInterface; class FlexfecReceiveStreamImpl : public FlexfecReceiveStream { public: - FlexfecReceiveStreamImpl( - Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, - const Config& config, - RecoveredPacketReceiver* recovered_packet_receiver, - RtcpRttStats* rtt_stats, - ProcessThread* process_thread); + FlexfecReceiveStreamImpl(Clock* clock, + const Config& config, + RecoveredPacketReceiver* recovered_packet_receiver, + RtcpRttStats* rtt_stats); + // Destruction happens on the worker thread. Prior to destruction the caller + // must ensure that a registration with the transport has been cleared. See + // `RegisterWithTransport` for details. + // TODO(tommi): As a further improvement to this, performing the full + // destruction on the network thread could be made the default. ~FlexfecReceiveStreamImpl() override; + // Called on the network thread to register/unregister with the network + // transport. + void RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller); + // If registration has previously been done (via `RegisterWithTransport`) then + // `UnregisterFromTransport` must be called prior to destruction, on the + // network thread. + void UnregisterFromTransport(); + // RtpPacketSinkInterface. void OnRtpPacket(const RtpPacketReceived& packet) override; Stats GetStats() const override; - const Config& GetConfig() const override; + + // ReceiveStream impl. + const RtpConfig& rtp_config() const override { return config_.rtp; } private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; + // Config. const Config config_; @@ -57,9 +72,9 @@ class FlexfecReceiveStreamImpl : public FlexfecReceiveStream { // RTCP reporting. 
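// Illustrative sketch of the detach-then-check pattern used with the new
// packet_sequence_checker_: the checker is detached in the constructor, binds to
// whichever thread first calls a checked method, and all later calls must come
// from that same thread. MiniSequenceChecker is a hypothetical, single-consumer
// stand-in; the real webrtc::SequenceChecker is internally synchronized.
#include <cassert>
#include <thread>

class MiniSequenceChecker {
 public:
  void Detach() { attached_ = false; }
  bool IsCurrent() {
    if (!attached_) {
      attached_ = true;
      id_ = std::this_thread::get_id();
    }
    return id_ == std::this_thread::get_id();
  }

 private:
  bool attached_ = false;
  std::thread::id id_;
};

class FakeStream {
 public:
  FakeStream() { packet_sequence_checker_.Detach(); }  // Not bound at construction.

  void RegisterWithTransport() {
    // Binds the checker to the calling ("network") thread on first use.
    assert(packet_sequence_checker_.IsCurrent());
  }
  void UnregisterFromTransport() {
    // Must run on the same thread that called RegisterWithTransport().
    assert(packet_sequence_checker_.IsCurrent());
  }

 private:
  MiniSequenceChecker packet_sequence_checker_;
};

int main() {
  FakeStream stream;  // Constructed on the worker/main thread.
  std::thread network_thread([&] {
    stream.RegisterWithTransport();
    stream.UnregisterFromTransport();
  });
  network_thread.join();
  return 0;
}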
const std::unique_ptr rtp_receive_statistics_; const std::unique_ptr rtp_rtcp_; - ProcessThread* process_thread_; - std::unique_ptr rtp_stream_receiver_; + std::unique_ptr rtp_stream_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); }; } // namespace webrtc diff --git a/call/flexfec_receive_stream_unittest.cc b/call/flexfec_receive_stream_unittest.cc index 5e8ee47433..312fe0c907 100644 --- a/call/flexfec_receive_stream_unittest.cc +++ b/call/flexfec_receive_stream_unittest.cc @@ -26,7 +26,6 @@ #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/utility/include/mock/mock_process_thread.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -45,7 +44,7 @@ FlexfecReceiveStream::Config CreateDefaultConfig( Transport* rtcp_send_transport) { FlexfecReceiveStream::Config config(rtcp_send_transport); config.payload_type = kFlexfecPlType; - config.remote_ssrc = ByteReader::ReadBigEndian(kFlexfecSsrc); + config.rtp.remote_ssrc = ByteReader::ReadBigEndian(kFlexfecSsrc); config.protected_media_ssrcs = { ByteReader::ReadBigEndian(kMediaSsrc)}; EXPECT_TRUE(config.IsCompleteAndEnabled()); @@ -64,16 +63,16 @@ TEST(FlexfecReceiveStreamConfigTest, IsCompleteAndEnabled) { MockTransport rtcp_send_transport; FlexfecReceiveStream::Config config(&rtcp_send_transport); - config.local_ssrc = 18374743; + config.rtp.local_ssrc = 18374743; config.rtcp_mode = RtcpMode::kCompound; - config.transport_cc = true; - config.rtp_header_extensions.emplace_back(TransportSequenceNumber::kUri, 7); + config.rtp.transport_cc = true; + config.rtp.extensions.emplace_back(TransportSequenceNumber::kUri, 7); EXPECT_FALSE(config.IsCompleteAndEnabled()); config.payload_type = 123; EXPECT_FALSE(config.IsCompleteAndEnabled()); - config.remote_ssrc = 238423838; + config.rtp.remote_ssrc = 238423838; EXPECT_FALSE(config.IsCompleteAndEnabled()); config.protected_media_ssrcs.push_back(138989393); @@ -87,21 +86,20 @@ class FlexfecReceiveStreamTest : public ::testing::Test { protected: FlexfecReceiveStreamTest() : config_(CreateDefaultConfig(&rtcp_send_transport_)) { - EXPECT_CALL(process_thread_, RegisterModule(_, _)).Times(1); receive_stream_ = std::make_unique( - Clock::GetRealTimeClock(), &rtp_stream_receiver_controller_, config_, - &recovered_packet_receiver_, &rtt_stats_, &process_thread_); + Clock::GetRealTimeClock(), config_, &recovered_packet_receiver_, + &rtt_stats_); + receive_stream_->RegisterWithTransport(&rtp_stream_receiver_controller_); } ~FlexfecReceiveStreamTest() { - EXPECT_CALL(process_thread_, DeRegisterModule(_)).Times(1); + receive_stream_->UnregisterFromTransport(); } MockTransport rtcp_send_transport_; FlexfecReceiveStream::Config config_; MockRecoveredPacketReceiver recovered_packet_receiver_; MockRtcpRttStats rtt_stats_; - MockProcessThread process_thread_; RtpStreamReceiverController rtp_stream_receiver_controller_; std::unique_ptr receive_stream_; }; @@ -144,10 +142,10 @@ TEST_F(FlexfecReceiveStreamTest, RecoversPacket) { // clang-format on ::testing::StrictMock recovered_packet_receiver; - EXPECT_CALL(process_thread_, RegisterModule(_, _)).Times(1); - FlexfecReceiveStreamImpl receive_stream( - Clock::GetRealTimeClock(), &rtp_stream_receiver_controller_, config_, - &recovered_packet_receiver, &rtt_stats_, &process_thread_); + FlexfecReceiveStreamImpl receive_stream(Clock::GetRealTimeClock(), config_, + &recovered_packet_receiver, + &rtt_stats_); + 
receive_stream.RegisterWithTransport(&rtp_stream_receiver_controller_); EXPECT_CALL(recovered_packet_receiver, OnRecoveredPacket(_, kRtpHeaderSize + kPayloadLength[1])); @@ -155,7 +153,7 @@ TEST_F(FlexfecReceiveStreamTest, RecoversPacket) { receive_stream.OnRtpPacket(ParsePacket(kFlexfecPacket)); // Tear-down - EXPECT_CALL(process_thread_, DeRegisterModule(_)).Times(1); + receive_stream.UnregisterFromTransport(); } } // namespace webrtc diff --git a/call/packet_receiver.h b/call/packet_receiver.h index f18ee65c70..13d3b84c90 100644 --- a/call/packet_receiver.h +++ b/call/packet_receiver.h @@ -10,13 +10,6 @@ #ifndef CALL_PACKET_RECEIVER_H_ #define CALL_PACKET_RECEIVER_H_ -#include -#include -#include -#include -#include -#include - #include "api/media_types.h" #include "rtc_base/copy_on_write_buffer.h" @@ -30,32 +23,6 @@ class PacketReceiver { DELIVERY_PACKET_ERROR, }; - // Definition of the callback to execute when packet delivery is complete. - // The callback will be issued on the same thread as called DeliverPacket. - typedef std::function< - void(DeliveryStatus, MediaType, rtc::CopyOnWriteBuffer, int64_t)> - PacketCallback; - - // Asynchronously handle packet delivery and report back to the caller when - // delivery of the packet has completed. - // Note that if the packet is invalid or can be processed without the need of - // asynchronous operations that the |callback| may have been called before - // the function returns. - // TODO(bugs.webrtc.org/11993): This function is meant to be called on the - // network thread exclusively but while the code is being updated to align - // with those goals, it may be called either on the worker or network threads. - // Update docs etc when the work has been completed. Once we're done with the - // updates, we might be able to go back to returning the status from this - // function instead of having to report it via a callback. 
- virtual void DeliverPacketAsync(MediaType media_type, - rtc::CopyOnWriteBuffer packet, - int64_t packet_time_us, - PacketCallback callback) { - DeliveryStatus status = DeliverPacket(media_type, packet, packet_time_us); - if (callback) - callback(status, media_type, std::move(packet), packet_time_us); - } - virtual DeliveryStatus DeliverPacket(MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) = 0; diff --git a/call/rampup_tests.cc b/call/rampup_tests.cc index 37e3e6c7f6..bf136a5df9 100644 --- a/call/rampup_tests.cc +++ b/call/rampup_tests.cc @@ -295,16 +295,16 @@ void RampUpTester::ModifyFlexfecConfigs( return; RTC_DCHECK_EQ(1, num_flexfec_streams_); (*receive_configs)[0].payload_type = test::CallTest::kFlexfecPayloadType; - (*receive_configs)[0].remote_ssrc = test::CallTest::kFlexfecSendSsrc; + (*receive_configs)[0].rtp.remote_ssrc = test::CallTest::kFlexfecSendSsrc; (*receive_configs)[0].protected_media_ssrcs = {video_ssrcs_[0]}; - (*receive_configs)[0].local_ssrc = video_ssrcs_[0]; + (*receive_configs)[0].rtp.local_ssrc = video_ssrcs_[0]; if (extension_type_ == RtpExtension::kAbsSendTimeUri) { - (*receive_configs)[0].transport_cc = false; - (*receive_configs)[0].rtp_header_extensions.push_back( + (*receive_configs)[0].rtp.transport_cc = false; + (*receive_configs)[0].rtp.extensions.push_back( RtpExtension(extension_type_.c_str(), kAbsSendTimeExtensionId)); } else if (extension_type_ == RtpExtension::kTransportSequenceNumberUri) { - (*receive_configs)[0].transport_cc = true; - (*receive_configs)[0].rtp_header_extensions.push_back(RtpExtension( + (*receive_configs)[0].rtp.transport_cc = true; + (*receive_configs)[0].rtp.extensions.push_back(RtpExtension( extension_type_.c_str(), kTransportSequenceNumberExtensionId)); } } diff --git a/call/receive_stream.h b/call/receive_stream.h new file mode 100644 index 0000000000..0f59b37ae3 --- /dev/null +++ b/call/receive_stream.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_RECEIVE_STREAM_H_ +#define CALL_RECEIVE_STREAM_H_ + +#include + +#include "api/crypto/frame_decryptor_interface.h" +#include "api/frame_transformer_interface.h" +#include "api/media_types.h" +#include "api/scoped_refptr.h" +#include "api/transport/rtp/rtp_source.h" + +namespace webrtc { + +// Common base interface for MediaReceiveStream based classes and +// FlexfecReceiveStream. +class ReceiveStream { + public: + // Receive-stream specific RTP settings. + struct RtpConfig { + // Synchronization source (stream identifier) to be received. + // This member will not change mid-stream and can be assumed to be const + // post initialization. + uint32_t remote_ssrc = 0; + + // Sender SSRC used for sending RTCP (such as receiver reports). + // This value may change mid-stream and must be done on the same thread + // that the value is read on (i.e. packet delivery). + uint32_t local_ssrc = 0; + + // Enable feedback for send side bandwidth estimation. + // See + // https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions + // for details. + // This value may change mid-stream and must be done on the same thread + // that the value is read on (i.e. 
packet delivery). + bool transport_cc = false; + + // RTP header extensions used for the received stream. + // This value may change mid-stream and must be done on the same thread + // that the value is read on (i.e. packet delivery). + std::vector extensions; + }; + + // Called on the packet delivery thread since some members of the config may + // change mid-stream (e.g. the local ssrc). All mutation must also happen on + // the packet delivery thread. Return value can be assumed to + // only be used in the calling context (on the stack basically). + virtual const RtpConfig& rtp_config() const = 0; + + protected: + virtual ~ReceiveStream() {} +}; + +// Either an audio or video receive stream. +class MediaReceiveStream : public ReceiveStream { + public: + // Starts stream activity. + // When a stream is active, it can receive, process and deliver packets. + virtual void Start() = 0; + + // Stops stream activity. Must be called to match with a previous call to + // `Start()`. When a stream has been stopped, it won't receive, decode, + // process or deliver packets to downstream objects such as callback pointers + // set in the config struct. + virtual void Stop() = 0; + + virtual void SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr + frame_transformer) = 0; + + virtual void SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) = 0; + + virtual std::vector GetSources() const = 0; +}; + +} // namespace webrtc + +#endif // CALL_RECEIVE_STREAM_H_ diff --git a/call/rtp_demuxer.cc b/call/rtp_demuxer.cc index ee96196236..28962fd2eb 100644 --- a/call/rtp_demuxer.cc +++ b/call/rtp_demuxer.cc @@ -36,16 +36,7 @@ size_t RemoveFromMultimapByValue(Container* multimap, const Value& value) { template size_t RemoveFromMapByValue(Map* map, const Value& value) { - size_t count = 0; - for (auto it = map->begin(); it != map->end();) { - if (it->second == value) { - it = map->erase(it); - ++count; - } else { - ++it; - } - } - return count; + return EraseIf(*map, [&](const auto& elem) { return elem.second == value; }); } } // namespace diff --git a/call/rtp_demuxer.h b/call/rtp_demuxer.h index 00c0508584..fb65fce368 100644 --- a/call/rtp_demuxer.h +++ b/call/rtp_demuxer.h @@ -12,13 +12,12 @@ #define CALL_RTP_DEMUXER_H_ #include -#include #include -#include #include #include -#include "rtc_base/hash.h" +#include "rtc_base/containers/flat_map.h" +#include "rtc_base/containers/flat_set.h" namespace webrtc { @@ -45,10 +44,10 @@ struct RtpDemuxerCriteria { std::string rsid; // Will match packets with any of these SSRCs. - std::set ssrcs; + flat_set ssrcs; // Will match packets with any of these payload types. - std::set payload_types; + flat_set payload_types; // Return string representation of demux criteria to facilitate logging std::string ToString() const; @@ -171,26 +170,24 @@ class RtpDemuxer { // Note: Mappings are only modified by AddSink/RemoveSink (except for // SSRC mapping which receives all MID, payload type, or RSID to SSRC bindings // discovered when demuxing packets). - std::unordered_map sink_by_mid_; - std::unordered_map sink_by_ssrc_; - std::unordered_multimap sinks_by_pt_; - std::unordered_map, - RtpPacketSinkInterface*, - webrtc::PairHash> + flat_map sink_by_mid_; + flat_map sink_by_ssrc_; + std::multimap sinks_by_pt_; + flat_map, RtpPacketSinkInterface*> sink_by_mid_and_rsid_; - std::unordered_map sink_by_rsid_; + flat_map sink_by_rsid_; // Tracks all the MIDs that have been identified in added criteria. 
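// Illustrative sketch of the RemoveFromMapByValue() simplification above: the
// hand-written erase loop is replaced by an erase-if helper. The patch uses
// WebRTC's flat_map and EraseIf; this standalone version uses std::map and C++20's
// std::erase_if (an assumption about the toolchain) to show the same
// erase-by-value idea.
#include <cassert>
#include <cstddef>
#include <map>
#include <string>

int main() {
  int sink_a = 0;
  int sink_b = 0;
  std::map<std::string, int*> sink_by_mid = {
      {"mid-1", &sink_a}, {"mid-2", &sink_b}, {"mid-3", &sink_a}};

  // Remove every mapping whose value is `sink_a` and report how many were erased,
  // just like RemoveFromMapByValue() does when a sink is removed.
  const std::size_t removed = std::erase_if(
      sink_by_mid, [&](const auto& elem) { return elem.second == &sink_a; });

  assert(removed == 2u);
  assert(sink_by_mid.size() == 1u);
  assert(sink_by_mid.count("mid-2") == 1u);
  return 0;
}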
Used to // determine if a packet should be dropped right away because the MID is // unknown. - std::set known_mids_; + flat_set known_mids_; // Records learned mappings of MID --> SSRC and RSID --> SSRC as packets are // received. // This is stored separately from the sink mappings because if a sink is // removed we want to still remember these associations. - std::unordered_map mid_by_ssrc_; - std::unordered_map rsid_by_ssrc_; + flat_map mid_by_ssrc_; + flat_map rsid_by_ssrc_; // Adds a binding from the SSRC to the given sink. void AddSsrcSinkBinding(uint32_t ssrc, RtpPacketSinkInterface* sink); diff --git a/call/rtp_transport_config.h b/call/rtp_transport_config.h new file mode 100644 index 0000000000..9aa9f14c16 --- /dev/null +++ b/call/rtp_transport_config.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_RTP_TRANSPORT_CONFIG_H_ +#define CALL_RTP_TRANSPORT_CONFIG_H_ + +#include + +#include "api/network_state_predictor.h" +#include "api/rtc_event_log/rtc_event_log.h" +#include "api/transport/bitrate_settings.h" +#include "api/transport/network_control.h" +#include "api/transport/webrtc_key_value_config.h" +#include "modules/utility/include/process_thread.h" +#include "rtc_base/task_queue.h" + +namespace webrtc { + +struct RtpTransportConfig { + // Bitrate config used until valid bitrate estimates are calculated. Also + // used to cap total bitrate used. This comes from the remote connection. + BitrateConstraints bitrate_config; + + // RtcEventLog to use for this call. Required. + // Use webrtc::RtcEventLog::CreateNull() for a null implementation. + RtcEventLog* event_log = nullptr; + + // Task Queue Factory to be used in this call. Required. + TaskQueueFactory* task_queue_factory = nullptr; + + // NetworkStatePredictor to use for this call. + NetworkStatePredictorFactoryInterface* network_state_predictor_factory = + nullptr; + + // Network controller factory to use for this call. + NetworkControllerFactoryInterface* network_controller_factory = nullptr; + + // Key-value mapping of internal configurations to apply, + // e.g. field trials. 
+ const WebRtcKeyValueConfig* trials = nullptr; +}; +} // namespace webrtc + +#endif // CALL_RTP_TRANSPORT_CONFIG_H_ diff --git a/call/rtp_transport_controller_send.cc b/call/rtp_transport_controller_send.cc index d743a0bf43..f7b6b11fd7 100644 --- a/call/rtp_transport_controller_send.cc +++ b/call/rtp_transport_controller_send.cc @@ -142,6 +142,7 @@ RtpTransportControllerSend::RtpTransportControllerSend( } RtpTransportControllerSend::~RtpTransportControllerSend() { + RTC_DCHECK(video_rtp_senders_.empty()); process_thread_->Stop(); } @@ -156,6 +157,7 @@ RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender( std::unique_ptr fec_controller, const RtpSenderFrameEncryptionConfig& frame_encryption_config, rtc::scoped_refptr frame_transformer) { + RTC_DCHECK_RUN_ON(&main_thread_); video_rtp_senders_.push_back(std::make_unique( clock_, suspended_ssrcs, states, rtp_config, rtcp_report_interval_ms, send_transport, observers, @@ -169,6 +171,7 @@ RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender( void RtpTransportControllerSend::DestroyRtpVideoSender( RtpVideoSenderInterface* rtp_video_sender) { + RTC_DCHECK_RUN_ON(&main_thread_); std::vector>::iterator it = video_rtp_senders_.end(); for (it = video_rtp_senders_.begin(); it != video_rtp_senders_.end(); ++it) { @@ -354,6 +357,7 @@ void RtpTransportControllerSend::OnNetworkRouteChanged( } } void RtpTransportControllerSend::OnNetworkAvailability(bool network_available) { + RTC_DCHECK_RUN_ON(&main_thread_); RTC_LOG(LS_VERBOSE) << "SignalNetworkState " << (network_available ? "Up" : "Down"); NetworkAvailability msg; @@ -470,6 +474,7 @@ RtpTransportControllerSend::ApplyOrLiftRelayCap(bool is_relayed) { void RtpTransportControllerSend::OnTransportOverheadChanged( size_t transport_overhead_bytes_per_packet) { + RTC_DCHECK_RUN_ON(&main_thread_); if (transport_overhead_bytes_per_packet >= kMaxOverheadBytes) { RTC_LOG(LS_ERROR) << "Transport overhead exceeds " << kMaxOverheadBytes; return; diff --git a/call/rtp_transport_controller_send.h b/call/rtp_transport_controller_send.h index f0f74c9f2a..7455060945 100644 --- a/call/rtp_transport_controller_send.h +++ b/call/rtp_transport_controller_send.h @@ -18,6 +18,7 @@ #include #include "api/network_state_predictor.h" +#include "api/sequence_checker.h" #include "api/transport/network_control.h" #include "api/units/data_rate.h" #include "call/rtp_bitrate_configurator.h" @@ -62,6 +63,7 @@ class RtpTransportControllerSend final const WebRtcKeyValueConfig* trials); ~RtpTransportControllerSend() override; + // TODO(tommi): Change to std::unique_ptr<>. RtpVideoSenderInterface* CreateRtpVideoSender( std::map suspended_ssrcs, const std::map& @@ -148,8 +150,10 @@ class RtpTransportControllerSend final Clock* const clock_; RtcEventLog* const event_log_; + SequenceChecker main_thread_; PacketRouter packet_router_; - std::vector> video_rtp_senders_; + std::vector> video_rtp_senders_ + RTC_GUARDED_BY(&main_thread_); RtpBitrateConfigurator bitrate_configurator_; std::map network_routes_; bool pacer_started_; diff --git a/call/rtp_transport_controller_send_factory.h b/call/rtp_transport_controller_send_factory.h new file mode 100644 index 0000000000..a857ca7e6f --- /dev/null +++ b/call/rtp_transport_controller_send_factory.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_ +#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_ + +#include +#include + +#include "call/rtp_transport_controller_send.h" +#include "call/rtp_transport_controller_send_factory_interface.h" + +namespace webrtc { +class RtpTransportControllerSendFactory + : public RtpTransportControllerSendFactoryInterface { + public: + std::unique_ptr Create( + const RtpTransportConfig& config, + Clock* clock, + std::unique_ptr process_thread) override { + return std::make_unique( + clock, config.event_log, config.network_state_predictor_factory, + config.network_controller_factory, config.bitrate_config, + std::move(process_thread), config.task_queue_factory, config.trials); + } + + virtual ~RtpTransportControllerSendFactory() {} +}; +} // namespace webrtc +#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_ diff --git a/call/rtp_transport_controller_send_factory_interface.h b/call/rtp_transport_controller_send_factory_interface.h new file mode 100644 index 0000000000..a0218532a1 --- /dev/null +++ b/call/rtp_transport_controller_send_factory_interface.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_ +#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_ + +#include + +#include "call/rtp_transport_config.h" +#include "call/rtp_transport_controller_send_interface.h" +#include "modules/utility/include/process_thread.h" + +namespace webrtc { +// A factory used for dependency injection on the send side of the transport +// controller. +class RtpTransportControllerSendFactoryInterface { + public: + virtual std::unique_ptr Create( + const RtpTransportConfig& config, + Clock* clock, + std::unique_ptr process_thread) = 0; + + virtual ~RtpTransportControllerSendFactoryInterface() {} +}; +} // namespace webrtc +#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_ diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc index c2a6a564f4..7fad89b20b 100644 --- a/call/rtp_video_sender.cc +++ b/call/rtp_video_sender.cc @@ -31,6 +31,7 @@ #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/task_queue.h" +#include "rtc_base/trace_event.h" namespace webrtc { @@ -370,7 +371,6 @@ RtpVideoSender::RtpVideoSender( field_trials_.Lookup("WebRTC-Vp9DependencyDescriptor"), "Enabled")), active_(false), - module_process_thread_(nullptr), suspended_ssrcs_(std::move(suspended_ssrcs)), fec_controller_(std::move(fec_controller)), fec_allowed_(true), @@ -398,7 +398,6 @@ RtpVideoSender::RtpVideoSender( RTC_DCHECK_EQ(rtp_config_.ssrcs.size(), rtp_streams_.size()); if (send_side_bwe_with_overhead_ && has_packet_feedback_) transport_->IncludeOverheadInPacedSender(); - module_process_thread_checker_.Detach(); // SSRCs are assumed to be sorted in the same order as |rtp_modules|. for (uint32_t ssrc : rtp_config_.ssrcs) { // Restore state if it previously existed. 
@@ -459,24 +458,6 @@ RtpVideoSender::~RtpVideoSender() { this); } -void RtpVideoSender::RegisterProcessThread( - ProcessThread* module_process_thread) { - RTC_DCHECK_RUN_ON(&module_process_thread_checker_); - RTC_DCHECK(!module_process_thread_); - module_process_thread_ = module_process_thread; - - for (const RtpStreamSender& stream : rtp_streams_) { - module_process_thread_->RegisterModule(stream.rtp_rtcp.get(), - RTC_FROM_HERE); - } -} - -void RtpVideoSender::DeRegisterProcessThread() { - RTC_DCHECK_RUN_ON(&module_process_thread_checker_); - for (const RtpStreamSender& stream : rtp_streams_) - module_process_thread_->DeRegisterModule(stream.rtp_rtcp.get()); -} - void RtpVideoSender::SetActive(bool active) { MutexLock lock(&mutex_); if (active_ == active) @@ -930,43 +911,45 @@ void RtpVideoSender::OnPacketFeedbackVector( // Map from SSRC to all acked packets for that RTP module. std::map> acked_packets_per_ssrc; for (const StreamPacketInfo& packet : packet_feedback_vector) { - if (packet.received) { - acked_packets_per_ssrc[packet.ssrc].push_back(packet.rtp_sequence_number); + if (packet.received && packet.ssrc) { + acked_packets_per_ssrc[*packet.ssrc].push_back( + packet.rtp_sequence_number); } } - // Map from SSRC to vector of RTP sequence numbers that are indicated as - // lost by feedback, without being trailed by any received packets. - std::map> early_loss_detected_per_ssrc; + // Map from SSRC to vector of RTP sequence numbers that are indicated as + // lost by feedback, without being trailed by any received packets. + std::map> early_loss_detected_per_ssrc; - for (const StreamPacketInfo& packet : packet_feedback_vector) { - if (!packet.received) { - // Last known lost packet, might not be detectable as lost by remote - // jitter buffer. - early_loss_detected_per_ssrc[packet.ssrc].push_back( - packet.rtp_sequence_number); - } else { - // Packet received, so any loss prior to this is already detectable. - early_loss_detected_per_ssrc.erase(packet.ssrc); - } + for (const StreamPacketInfo& packet : packet_feedback_vector) { + // Only include new media packets, not retransmissions/padding/fec. + if (!packet.received && packet.ssrc && !packet.is_retransmission) { + // Last known lost packet, might not be detectable as lost by remote + // jitter buffer. + early_loss_detected_per_ssrc[*packet.ssrc].push_back( + packet.rtp_sequence_number); + } else { + // Packet received, so any loss prior to this is already detectable. + early_loss_detected_per_ssrc.erase(*packet.ssrc); } + } - for (const auto& kv : early_loss_detected_per_ssrc) { - const uint32_t ssrc = kv.first; - auto it = ssrc_to_rtp_module_.find(ssrc); - RTC_DCHECK(it != ssrc_to_rtp_module_.end()); - RTPSender* rtp_sender = it->second->RtpSender(); - for (uint16_t sequence_number : kv.second) { - rtp_sender->ReSendPacket(sequence_number); - } + for (const auto& kv : early_loss_detected_per_ssrc) { + const uint32_t ssrc = kv.first; + auto it = ssrc_to_rtp_module_.find(ssrc); + RTC_CHECK(it != ssrc_to_rtp_module_.end()); + RTPSender* rtp_sender = it->second->RtpSender(); + for (uint16_t sequence_number : kv.second) { + rtp_sender->ReSendPacket(sequence_number); } + } for (const auto& kv : acked_packets_per_ssrc) { const uint32_t ssrc = kv.first; auto it = ssrc_to_rtp_module_.find(ssrc); if (it == ssrc_to_rtp_module_.end()) { - // Packets not for a media SSRC, so likely RTX or FEC. If so, ignore - // since there's no RTP history to clean up anyway. + // No media, likely FEC or padding. 
Ignore since there's no RTP history to + // clean up anyway. continue; } rtc::ArrayView rtp_sequence_numbers(kv.second); diff --git a/call/rtp_video_sender.h b/call/rtp_video_sender.h index 611edc6b27..991276fe79 100644 --- a/call/rtp_video_sender.h +++ b/call/rtp_video_sender.h @@ -35,7 +35,6 @@ #include "modules/rtp_rtcp/source/rtp_sender_video.h" #include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" -#include "modules/utility/include/process_thread.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/rate_limiter.h" #include "rtc_base/synchronization/mutex.h" @@ -90,15 +89,6 @@ class RtpVideoSender : public RtpVideoSenderInterface, rtc::scoped_refptr frame_transformer); ~RtpVideoSender() override; - // RegisterProcessThread register |module_process_thread| with those objects - // that use it. Registration has to happen on the thread were - // |module_process_thread| was created (libjingle's worker thread). - // TODO(perkj): Replace the use of |module_process_thread| with a TaskQueue, - // maybe |worker_queue|. - void RegisterProcessThread(ProcessThread* module_process_thread) - RTC_LOCKS_EXCLUDED(mutex_) override; - void DeRegisterProcessThread() RTC_LOCKS_EXCLUDED(mutex_) override; - // RtpVideoSender will only route packets if being active, all packets will be // dropped otherwise. void SetActive(bool active) RTC_LOCKS_EXCLUDED(mutex_) override; @@ -185,8 +175,6 @@ class RtpVideoSender : public RtpVideoSenderInterface, mutable Mutex mutex_; bool active_ RTC_GUARDED_BY(mutex_); - ProcessThread* module_process_thread_; - SequenceChecker module_process_thread_checker_; std::map suspended_ssrcs_; const std::unique_ptr fec_controller_; diff --git a/call/rtp_video_sender_interface.h b/call/rtp_video_sender_interface.h index 632c9e835a..a0b4baccb4 100644 --- a/call/rtp_video_sender_interface.h +++ b/call/rtp_video_sender_interface.h @@ -22,7 +22,6 @@ #include "call/rtp_config.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" -#include "modules/utility/include/process_thread.h" #include "modules/video_coding/include/video_codec_interface.h" namespace webrtc { @@ -32,9 +31,6 @@ struct FecProtectionParams; class RtpVideoSenderInterface : public EncodedImageCallback, public FecControllerOverride { public: - virtual void RegisterProcessThread(ProcessThread* module_process_thread) = 0; - virtual void DeRegisterProcessThread() = 0; - // RtpVideoSender will only route packets if being active, all // packets will be dropped otherwise. 
virtual void SetActive(bool active) = 0; diff --git a/call/rtp_video_sender_unittest.cc b/call/rtp_video_sender_unittest.cc index 85934cc7ed..334d97ccfa 100644 --- a/call/rtp_video_sender_unittest.cc +++ b/call/rtp_video_sender_unittest.cc @@ -462,11 +462,13 @@ TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) { lost_packet_feedback.rtp_sequence_number = rtp_sequence_numbers[0]; lost_packet_feedback.ssrc = kSsrc1; lost_packet_feedback.received = false; + lost_packet_feedback.is_retransmission = false; StreamFeedbackObserver::StreamPacketInfo received_packet_feedback; received_packet_feedback.rtp_sequence_number = rtp_sequence_numbers[1]; received_packet_feedback.ssrc = kSsrc1; received_packet_feedback.received = true; + received_packet_feedback.is_retransmission = false; test.router()->OnPacketFeedbackVector( {lost_packet_feedback, received_packet_feedback}); @@ -638,11 +640,13 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) { first_packet_feedback.rtp_sequence_number = frame1_rtp_sequence_number; first_packet_feedback.ssrc = kSsrc1; first_packet_feedback.received = false; + first_packet_feedback.is_retransmission = false; StreamFeedbackObserver::StreamPacketInfo second_packet_feedback; second_packet_feedback.rtp_sequence_number = frame2_rtp_sequence_number; second_packet_feedback.ssrc = kSsrc2; second_packet_feedback.received = true; + second_packet_feedback.is_retransmission = false; test.router()->OnPacketFeedbackVector( {first_packet_feedback, second_packet_feedback}); diff --git a/call/version.cc b/call/version.cc index 807ae466a5..a76af47b41 100644 --- a/call/version.cc +++ b/call/version.cc @@ -13,7 +13,7 @@ namespace webrtc { // The timestamp is always in UTC. -const char* const kSourceTimestamp = "WebRTC source stamp 2021-05-19T04:03:01"; +const char* const kSourceTimestamp = "WebRTC source stamp 2021-07-13T04:01:55"; void LoadWebRTCVersionInRegister() { // Using volatile to instruct the compiler to not optimize `p` away even diff --git a/call/video_receive_stream.cc b/call/video_receive_stream.cc index e0f3de366b..d0518b6e0d 100644 --- a/call/video_receive_stream.cc +++ b/call/video_receive_stream.cc @@ -14,10 +14,18 @@ namespace webrtc { +VideoReceiveStream::Decoder::Decoder(SdpVideoFormat video_format, + int payload_type) + : video_format(std::move(video_format)), payload_type(payload_type) {} VideoReceiveStream::Decoder::Decoder() : video_format("Unset") {} VideoReceiveStream::Decoder::Decoder(const Decoder&) = default; VideoReceiveStream::Decoder::~Decoder() = default; +bool VideoReceiveStream::Decoder::operator==(const Decoder& other) const { + return payload_type == other.payload_type && + video_format == other.video_format; +} + std::string VideoReceiveStream::Decoder::ToString() const { char buf[1024]; rtc::SimpleStringBuilder ss(buf); @@ -74,8 +82,10 @@ std::string VideoReceiveStream::Stats::ToString(int64_t time_ms) const { VideoReceiveStream::Config::Config(const Config&) = default; VideoReceiveStream::Config::Config(Config&&) = default; -VideoReceiveStream::Config::Config(Transport* rtcp_send_transport) - : rtcp_send_transport(rtcp_send_transport) {} +VideoReceiveStream::Config::Config(Transport* rtcp_send_transport, + VideoDecoderFactory* decoder_factory) + : decoder_factory(decoder_factory), + rtcp_send_transport(rtcp_send_transport) {} VideoReceiveStream::Config& VideoReceiveStream::Config::operator=(Config&&) = default; diff --git a/call/video_receive_stream.h b/call/video_receive_stream.h index 4a0a0dcae7..86e5052151 100644 --- a/call/video_receive_stream.h +++
b/call/video_receive_stream.h @@ -20,17 +20,15 @@ #include "api/call/transport.h" #include "api/crypto/crypto_options.h" -#include "api/crypto/frame_decryptor_interface.h" -#include "api/frame_transformer_interface.h" #include "api/rtp_headers.h" #include "api/rtp_parameters.h" -#include "api/transport/rtp/rtp_source.h" #include "api/video/recordable_encoded_frame.h" #include "api/video/video_content_type.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_timing.h" #include "api/video_codecs/sdp_video_format.h" +#include "call/receive_stream.h" #include "call/rtp_config.h" #include "common_video/frame_counts.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" @@ -41,7 +39,7 @@ namespace webrtc { class RtpPacketSinkInterface; class VideoDecoderFactory; -class VideoReceiveStream { +class VideoReceiveStream : public MediaReceiveStream { public: // Class for handling moving in/out recording state. struct RecordingState { @@ -53,11 +51,6 @@ class VideoReceiveStream { // Callback stored from the VideoReceiveStream. The VideoReceiveStream // client should not interpret the attribute. std::function callback; - // Memento of internal state in VideoReceiveStream, recording wether - // we're currently causing generation of a keyframe from the sender. Needed - // to avoid sending double keyframe requests. The VideoReceiveStream client - // should not interpret the attribute. - bool keyframe_needed = false; // Memento of when a keyframe request was last sent. The VideoReceiveStream // client should not interpret the attribute. absl::optional last_keyframe_request_ms; @@ -66,9 +59,13 @@ class VideoReceiveStream { // TODO(mflodman) Move all these settings to VideoDecoder and move the // declaration to common_types.h. struct Decoder { + Decoder(SdpVideoFormat video_format, int payload_type); Decoder(); Decoder(const Decoder&); ~Decoder(); + + bool operator==(const Decoder& other) const; + std::string ToString() const; SdpVideoFormat video_format; @@ -157,7 +154,8 @@ class VideoReceiveStream { public: Config() = delete; Config(Config&&); - explicit Config(Transport* rtcp_send_transport); + Config(Transport* rtcp_send_transport, + VideoDecoderFactory* decoder_factory = nullptr); Config& operator=(Config&&); Config& operator=(const Config&) = delete; ~Config(); @@ -174,17 +172,14 @@ class VideoReceiveStream { VideoDecoderFactory* decoder_factory = nullptr; // Receive-stream specific RTP settings. - struct Rtp { + struct Rtp : public RtpConfig { Rtp(); Rtp(const Rtp&); ~Rtp(); std::string ToString() const; - // Synchronization source (stream identifier) to be received. - uint32_t remote_ssrc = 0; - - // Sender SSRC used for sending RTCP (such as receiver reports). - uint32_t local_ssrc = 0; + // See NackConfig for description. + NackConfig nack; // See RtcpMode for description. RtcpMode rtcp_mode = RtcpMode::kCompound; @@ -196,15 +191,9 @@ class VideoReceiveStream { bool receiver_reference_time_report = false; } rtcp_xr; - // See draft-holmer-rmcat-transport-wide-cc-extensions for details. - bool transport_cc = false; - // See LntfConfig for description. LntfConfig lntf; - // See NackConfig for description. - NackConfig nack; - // Payload types for ULPFEC and RED, respectively. int ulpfec_payload_type = -1; int red_payload_type = -1; @@ -228,9 +217,6 @@ class VideoReceiveStream { // meta data is expected to be present in generic frame descriptor // RTP header extension). 
std::set raw_payload_types; - - // RTP header extensions used for the received stream. - std::vector extensions; } rtp; // Transport for outgoing packets (RTCP). @@ -267,18 +253,9 @@ class VideoReceiveStream { rtc::scoped_refptr frame_transformer; }; - // Starts stream activity. - // When a stream is active, it can receive, process and deliver packets. - virtual void Start() = 0; - // Stops stream activity. - // When a stream is stopped, it can't receive, process or deliver packets. - virtual void Stop() = 0; - // TODO(pbos): Add info on currently-received codec to Stats. virtual Stats GetStats() const = 0; - virtual std::vector GetSources() const = 0; - // Sets a base minimum for the playout delay. Base minimum delay sets lower // bound on minimum delay value determining lower bound on playout delay. // @@ -288,16 +265,6 @@ class VideoReceiveStream { // Returns current value of base minimum delay in milliseconds. virtual int GetBaseMinimumPlayoutDelayMs() const = 0; - // Allows a FrameDecryptor to be attached to a VideoReceiveStream after - // creation without resetting the decoder state. - virtual void SetFrameDecryptor( - rtc::scoped_refptr frame_decryptor) = 0; - - // Allows a frame transformer to be attached to a VideoReceiveStream after - // creation without resetting the decoder state. - virtual void SetDepacketizerToDecoderFrameTransformer( - rtc::scoped_refptr frame_transformer) = 0; - // Sets and returns recording state. The old state is moved out // of the video receive stream and returned to the caller, and |state| // is moved in. If the state's callback is set, it will be called with diff --git a/call/video_send_stream.h b/call/video_send_stream.h index fd7a101b0a..42e6249fcd 100644 --- a/call/video_send_stream.h +++ b/call/video_send_stream.h @@ -218,6 +218,15 @@ class VideoSendStream { // When a stream is stopped, it can't receive, process or deliver packets. virtual void Stop() = 0; + // Accessor for determining if the stream is active. This is an inexpensive + // call that must be made on the same thread as `Start()` and `Stop()` methods + // are called on and will return `true` iff activity has been started either + // via `Start()` or `UpdateActiveSimulcastLayers()`. If activity is either + // stopped or is in the process of being stopped as a result of a call to + // either `Stop()` or `UpdateActiveSimulcastLayers()` where all layers were + // deactivated, the return value will be `false`. + virtual bool started() = 0; + // If the resource is overusing, the VideoSendStream will try to reduce // resolution or frame rate until no resource is overusing. 
// TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor diff --git a/common_audio/resampler/resampler.cc b/common_audio/resampler/resampler.cc index ccfed5a014..0fdb249052 100644 --- a/common_audio/resampler/resampler.cc +++ b/common_audio/resampler/resampler.cc @@ -916,7 +916,6 @@ int Resampler::Push(const int16_t* samplesIn, outLen = (lengthIn * 8) / 11; free(tmp_mem); return 0; - break; } return 0; } diff --git a/examples/BUILD.gn b/examples/BUILD.gn index 704afc5467..b109d903e5 100644 --- a/examples/BUILD.gn +++ b/examples/BUILD.gn @@ -253,8 +253,6 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "objc/AppRTCMobile/ARDAppClient.m", "objc/AppRTCMobile/ARDAppEngineClient.h", "objc/AppRTCMobile/ARDAppEngineClient.m", - "objc/AppRTCMobile/ARDBitrateTracker.h", - "objc/AppRTCMobile/ARDBitrateTracker.m", "objc/AppRTCMobile/ARDCaptureController.h", "objc/AppRTCMobile/ARDCaptureController.m", "objc/AppRTCMobile/ARDExternalSampleCapturer.h", @@ -344,14 +342,14 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "../sdk:base_objc", "../sdk:helpers_objc", "../sdk:mediaconstraints_objc", + "../sdk:metal_objc", "../sdk:peerconnectionfactory_base_objc", "../sdk:peerconnectionfactory_base_objc", - "../sdk:ui_objc", "../sdk:videocapture_objc", "../sdk:videocodec_objc", ] - if (rtc_use_metal_rendering) { - deps += [ "../sdk:metal_objc" ] + if (rtc_ios_macos_use_opengl_rendering) { + deps += [ "../sdk:opengl_ui_objc" ] } frameworks = [ "AVFoundation.framework" ] @@ -501,14 +499,14 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "../sdk:base_objc", "../sdk:default_codec_factory_objc", "../sdk:helpers_objc", + "../sdk:metal_objc", "../sdk:native_api", - "../sdk:ui_objc", "../sdk:videocapture_objc", "../sdk:videotoolbox_objc", ] - if (current_cpu == "arm64") { - deps += [ "../sdk:metal_objc" ] + if (rtc_ios_macos_use_opengl_rendering) { + deps += [ "../sdk:opengl_ui_objc" ] } } @@ -546,9 +544,9 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "../sdk:helpers_objc", "../sdk:mediaconstraints_objc", "../sdk:metal_objc", + "../sdk:opengl_ui_objc", "../sdk:peerconnectionfactory_base_objc", "../sdk:peerconnectionfactory_base_objc", - "../sdk:ui_objc", "../sdk:videocapture_objc", "../sdk:videocodec_objc", ] @@ -762,6 +760,7 @@ if (is_linux || is_chromeos || is_win) { "peerconnection/server/utils.h", ] deps = [ + "../rtc_base:checks", "../rtc_base:rtc_base_approved", "../system_wrappers:field_trial", "../test:field_trial", diff --git a/examples/objc/AppRTCMobile/ARDAppClient.h b/examples/objc/AppRTCMobile/ARDAppClient.h index 1fed247060..8e124ed925 100644 --- a/examples/objc/AppRTCMobile/ARDAppClient.h +++ b/examples/objc/AppRTCMobile/ARDAppClient.h @@ -48,7 +48,7 @@ typedef NS_ENUM(NSInteger, ARDAppClientState) { - (void)appClient:(ARDAppClient *)client didError:(NSError *)error; -- (void)appClient:(ARDAppClient *)client didGetStats:(NSArray *)stats; +- (void)appClient:(ARDAppClient *)client didGetStats:(RTC_OBJC_TYPE(RTCStatisticsReport) *)stats; @optional - (void)appClient:(ARDAppClient *)client diff --git a/examples/objc/AppRTCMobile/ARDAppClient.m b/examples/objc/AppRTCMobile/ARDAppClient.m index ccd5bb0662..fa6a960a54 100644 --- a/examples/objc/AppRTCMobile/ARDAppClient.m +++ b/examples/objc/AppRTCMobile/ARDAppClient.m @@ -191,9 +191,8 @@ - (void)setShouldGetStats:(BOOL)shouldGetStats { repeats:YES timerHandler:^{ ARDAppClient *strongSelf = weakSelf; - [strongSelf.peerConnection statsForTrack:nil - statsOutputLevel:RTCStatsOutputLevelDebug - completionHandler:^(NSArray *stats) 
{ + [strongSelf.peerConnection statisticsWithCompletionHandler:^( + RTC_OBJC_TYPE(RTCStatisticsReport) * stats) { dispatch_async(dispatch_get_main_queue(), ^{ ARDAppClient *strongSelf = weakSelf; [strongSelf.delegate appClient:strongSelf didGetStats:stats]; diff --git a/examples/objc/AppRTCMobile/ARDBitrateTracker.h b/examples/objc/AppRTCMobile/ARDBitrateTracker.h deleted file mode 100644 index 81ac4b4bd5..0000000000 --- a/examples/objc/AppRTCMobile/ARDBitrateTracker.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2015 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import - -/** Class used to estimate bitrate based on byte count. It is expected that - * byte count is monotonocially increasing. This class tracks the times that - * byte count is updated, and measures the bitrate based on the byte difference - * over the interval between updates. - */ -@interface ARDBitrateTracker : NSObject - -/** The bitrate in bits per second. */ -@property(nonatomic, readonly) double bitrate; -/** The bitrate as a formatted string in bps, Kbps or Mbps. */ -@property(nonatomic, readonly) NSString *bitrateString; - -/** Converts the bitrate to a readable format in bps, Kbps or Mbps. */ -+ (NSString *)bitrateStringForBitrate:(double)bitrate; -/** Updates the tracked bitrate with the new byte count. */ -- (void)updateBitrateWithCurrentByteCount:(NSInteger)byteCount; - -@end diff --git a/examples/objc/AppRTCMobile/ARDBitrateTracker.m b/examples/objc/AppRTCMobile/ARDBitrateTracker.m deleted file mode 100644 index 8158229187..0000000000 --- a/examples/objc/AppRTCMobile/ARDBitrateTracker.m +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2015 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "ARDBitrateTracker.h" - -#import - -@implementation ARDBitrateTracker { - CFTimeInterval _prevTime; - NSInteger _prevByteCount; -} - -@synthesize bitrate = _bitrate; - -+ (NSString *)bitrateStringForBitrate:(double)bitrate { - if (bitrate > 1e6) { - return [NSString stringWithFormat:@"%.2fMbps", bitrate * 1e-6]; - } else if (bitrate > 1e3) { - return [NSString stringWithFormat:@"%.0fKbps", bitrate * 1e-3]; - } else { - return [NSString stringWithFormat:@"%.0fbps", bitrate]; - } -} - -- (NSString *)bitrateString { - return [[self class] bitrateStringForBitrate:_bitrate]; -} - -- (void)updateBitrateWithCurrentByteCount:(NSInteger)byteCount { - CFTimeInterval currentTime = CACurrentMediaTime(); - if (_prevTime && (byteCount > _prevByteCount)) { - _bitrate = (byteCount - _prevByteCount) * 8 / (currentTime - _prevTime); - } - _prevByteCount = byteCount; - _prevTime = currentTime; -} - -@end diff --git a/examples/objc/AppRTCMobile/ARDSettingsModel.m b/examples/objc/AppRTCMobile/ARDSettingsModel.m index 8b04c12f47..c628f0fde5 100644 --- a/examples/objc/AppRTCMobile/ARDSettingsModel.m +++ b/examples/objc/AppRTCMobile/ARDSettingsModel.m @@ -77,16 +77,30 @@ - (BOOL)storeVideoResolutionSetting:(NSString *)resolution { - (RTC_OBJC_TYPE(RTCVideoCodecInfo) *)currentVideoCodecSettingFromStore { [self registerStoreDefaults]; NSData *codecData = [[self settingsStore] videoCodec]; - return [NSKeyedUnarchiver unarchiveObjectWithData:codecData]; + Class expectedClass = [RTC_OBJC_TYPE(RTCVideoCodecInfo) class]; + NSError *error; + RTC_OBJC_TYPE(RTCVideoCodecInfo) *videoCodecSetting = + [NSKeyedUnarchiver unarchivedObjectOfClass:expectedClass fromData:codecData error:&error]; + if (!error) { + return videoCodecSetting; + } + return nil; } - (BOOL)storeVideoCodecSetting:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)videoCodec { if (![[self availableVideoCodecs] containsObject:videoCodec]) { return NO; } - NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:videoCodec]; - [[self settingsStore] setVideoCodec:codecData]; - return YES; + + NSError *error; + NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:videoCodec + requiringSecureCoding:NO + error:&error]; + if (!error) { + [[self settingsStore] setVideoCodec:codecData]; + return YES; + } + return NO; } - (nullable NSNumber *)currentMaxBitrateSettingFromStore { @@ -165,14 +179,18 @@ - (int)videoResolutionComponentAtIndex:(int)index inString:(NSString *)resolutio } - (void)registerStoreDefaults { - NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:[self defaultVideoCodecSetting]]; - [ARDSettingsStore setDefaultsForVideoResolution:[self defaultVideoResolutionSetting] - videoCodec:codecData - bitrate:nil - audioOnly:NO - createAecDump:NO - useManualAudioConfig:YES]; + NSError *error; + NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:[self defaultVideoCodecSetting] + requiringSecureCoding:NO + error:&error]; + if (!error) { + [ARDSettingsStore setDefaultsForVideoResolution:[self defaultVideoResolutionSetting] + videoCodec:codecData + bitrate:nil + audioOnly:NO + createAecDump:NO + useManualAudioConfig:YES]; + } } - @end NS_ASSUME_NONNULL_END diff --git a/examples/objc/AppRTCMobile/ARDStatsBuilder.h b/examples/objc/AppRTCMobile/ARDStatsBuilder.h index e8224dd707..eaffa67049 100644 --- a/examples/objc/AppRTCMobile/ARDStatsBuilder.h +++ b/examples/objc/AppRTCMobile/ARDStatsBuilder.h @@ -10,10 +10,9 @@ #import +#import "sdk/objc/api/peerconnection/RTCStatisticsReport.h" #import "sdk/objc/base/RTCMacros.h" 
-@class RTC_OBJC_TYPE(RTCLegacyStatsReport); - /** Class used to accumulate stats information into a single displayable string. */ @interface ARDStatsBuilder : NSObject @@ -22,10 +21,6 @@ * class. */ @property(nonatomic, readonly) NSString *statsString; - -/** Parses the information in the stats report into an appropriate internal - * format used to generate the stats string. - */ -- (void)parseStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport; +@property(nonatomic) RTC_OBJC_TYPE(RTCStatisticsReport) * stats; @end diff --git a/examples/objc/AppRTCMobile/ARDStatsBuilder.m b/examples/objc/AppRTCMobile/ARDStatsBuilder.m index a74e351d51..7ebf9fb1c7 100644 --- a/examples/objc/AppRTCMobile/ARDStatsBuilder.m +++ b/examples/objc/AppRTCMobile/ARDStatsBuilder.m @@ -13,333 +13,23 @@ #import "sdk/objc/api/peerconnection/RTCLegacyStatsReport.h" #import "sdk/objc/base/RTCMacros.h" -#import "ARDBitrateTracker.h" #import "ARDUtilities.h" -@implementation ARDStatsBuilder { - // Connection stats. - NSString *_connRecvBitrate; - NSString *_connRtt; - NSString *_connSendBitrate; - NSString *_localCandType; - NSString *_remoteCandType; - NSString *_transportType; +@implementation ARDStatsBuilder - // BWE stats. - NSString *_actualEncBitrate; - NSString *_availableRecvBw; - NSString *_availableSendBw; - NSString *_targetEncBitrate; - - // Video send stats. - NSString *_videoEncodeMs; - NSString *_videoInputFps; - NSString *_videoInputHeight; - NSString *_videoInputWidth; - NSString *_videoSendCodec; - NSString *_videoSendBitrate; - NSString *_videoSendFps; - NSString *_videoSendHeight; - NSString *_videoSendWidth; - - // QP stats. - int _videoQPSum; - int _framesEncoded; - int _oldVideoQPSum; - int _oldFramesEncoded; - - // Video receive stats. - NSString *_videoDecodeMs; - NSString *_videoDecodedFps; - NSString *_videoOutputFps; - NSString *_videoRecvBitrate; - NSString *_videoRecvFps; - NSString *_videoRecvHeight; - NSString *_videoRecvWidth; - - // Audio send stats. - NSString *_audioSendBitrate; - NSString *_audioSendCodec; - - // Audio receive stats. - NSString *_audioCurrentDelay; - NSString *_audioExpandRate; - NSString *_audioRecvBitrate; - NSString *_audioRecvCodec; - - // Bitrate trackers. - ARDBitrateTracker *_audioRecvBitrateTracker; - ARDBitrateTracker *_audioSendBitrateTracker; - ARDBitrateTracker *_connRecvBitrateTracker; - ARDBitrateTracker *_connSendBitrateTracker; - ARDBitrateTracker *_videoRecvBitrateTracker; - ARDBitrateTracker *_videoSendBitrateTracker; -} - -- (instancetype)init { - if (self = [super init]) { - _audioSendBitrateTracker = [[ARDBitrateTracker alloc] init]; - _audioRecvBitrateTracker = [[ARDBitrateTracker alloc] init]; - _connSendBitrateTracker = [[ARDBitrateTracker alloc] init]; - _connRecvBitrateTracker = [[ARDBitrateTracker alloc] init]; - _videoSendBitrateTracker = [[ARDBitrateTracker alloc] init]; - _videoRecvBitrateTracker = [[ARDBitrateTracker alloc] init]; - _videoQPSum = 0; - _framesEncoded = 0; - } - return self; -} +@synthesize stats = _stats; - (NSString *)statsString { NSMutableString *result = [NSMutableString string]; - NSString *systemStatsFormat = @"(cpu)%ld%%\n"; - [result appendString:[NSString stringWithFormat:systemStatsFormat, - (long)ARDGetCpuUsagePercentage()]]; - - // Connection stats. 
- NSString *connStatsFormat = @"CN %@ms | %@->%@/%@ | (s)%@ | (r)%@\n"; - [result appendString:[NSString stringWithFormat:connStatsFormat, - _connRtt, - _localCandType, _remoteCandType, _transportType, - _connSendBitrate, _connRecvBitrate]]; - - // Video send stats. - NSString *videoSendFormat = @"VS (input) %@x%@@%@fps | (sent) %@x%@@%@fps\n" - "VS (enc) %@/%@ | (sent) %@/%@ | %@ms | %@\n" - "AvgQP (past %d encoded frames) = %d\n "; - int avgqp = [self calculateAvgQP]; - - [result appendString:[NSString stringWithFormat:videoSendFormat, - _videoInputWidth, _videoInputHeight, _videoInputFps, - _videoSendWidth, _videoSendHeight, _videoSendFps, - _actualEncBitrate, _targetEncBitrate, - _videoSendBitrate, _availableSendBw, - _videoEncodeMs, - _videoSendCodec, - _framesEncoded - _oldFramesEncoded, avgqp]]; - - // Video receive stats. - NSString *videoReceiveFormat = - @"VR (recv) %@x%@@%@fps | (decoded)%@ | (output)%@fps | %@/%@ | %@ms\n"; - [result appendString:[NSString stringWithFormat:videoReceiveFormat, - _videoRecvWidth, _videoRecvHeight, _videoRecvFps, - _videoDecodedFps, - _videoOutputFps, - _videoRecvBitrate, _availableRecvBw, - _videoDecodeMs]]; - - // Audio send stats. - NSString *audioSendFormat = @"AS %@ | %@\n"; - [result appendString:[NSString stringWithFormat:audioSendFormat, - _audioSendBitrate, _audioSendCodec]]; - // Audio receive stats. - NSString *audioReceiveFormat = @"AR %@ | %@ | %@ms | (expandrate)%@"; - [result appendString:[NSString stringWithFormat:audioReceiveFormat, - _audioRecvBitrate, _audioRecvCodec, _audioCurrentDelay, - _audioExpandRate]]; + [result appendFormat:@"(cpu)%ld%%\n", (long)ARDGetCpuUsagePercentage()]; - return result; -} - -- (void)parseStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - NSString *reportType = statsReport.type; - if ([reportType isEqualToString:@"ssrc"] && - [statsReport.reportId rangeOfString:@"ssrc"].location != NSNotFound) { - if ([statsReport.reportId rangeOfString:@"send"].location != NSNotFound) { - [self parseSendSsrcStatsReport:statsReport]; - } - if ([statsReport.reportId rangeOfString:@"recv"].location != NSNotFound) { - [self parseRecvSsrcStatsReport:statsReport]; - } - } else if ([reportType isEqualToString:@"VideoBwe"]) { - [self parseBweStatsReport:statsReport]; - } else if ([reportType isEqualToString:@"googCandidatePair"]) { - [self parseConnectionStatsReport:statsReport]; + for (NSString *key in _stats.statistics) { + RTC_OBJC_TYPE(RTCStatistics) *stat = _stats.statistics[key]; + [result appendFormat:@"%@\n", stat.description]; } -} - -#pragma mark - Private - -- (int)calculateAvgQP { - int deltaFramesEncoded = _framesEncoded - _oldFramesEncoded; - int deltaQPSum = _videoQPSum - _oldVideoQPSum; - - return deltaFramesEncoded != 0 ? 
deltaQPSum / deltaFramesEncoded : 0; -} -- (void)updateBweStatOfKey:(NSString *)key value:(NSString *)value { - if ([key isEqualToString:@"googAvailableSendBandwidth"]) { - _availableSendBw = [ARDBitrateTracker bitrateStringForBitrate:value.doubleValue]; - } else if ([key isEqualToString:@"googAvailableReceiveBandwidth"]) { - _availableRecvBw = [ARDBitrateTracker bitrateStringForBitrate:value.doubleValue]; - } else if ([key isEqualToString:@"googActualEncBitrate"]) { - _actualEncBitrate = [ARDBitrateTracker bitrateStringForBitrate:value.doubleValue]; - } else if ([key isEqualToString:@"googTargetEncBitrate"]) { - _targetEncBitrate = [ARDBitrateTracker bitrateStringForBitrate:value.doubleValue]; - } -} - -- (void)parseBweStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - [statsReport.values - enumerateKeysAndObjectsUsingBlock:^(NSString *key, NSString *value, BOOL *stop) { - [self updateBweStatOfKey:key value:value]; - }]; -} - -- (void)updateConnectionStatOfKey:(NSString *)key value:(NSString *)value { - if ([key isEqualToString:@"googRtt"]) { - _connRtt = value; - } else if ([key isEqualToString:@"googLocalCandidateType"]) { - _localCandType = value; - } else if ([key isEqualToString:@"googRemoteCandidateType"]) { - _remoteCandType = value; - } else if ([key isEqualToString:@"googTransportType"]) { - _transportType = value; - } else if ([key isEqualToString:@"bytesReceived"]) { - NSInteger byteCount = value.integerValue; - [_connRecvBitrateTracker updateBitrateWithCurrentByteCount:byteCount]; - _connRecvBitrate = _connRecvBitrateTracker.bitrateString; - } else if ([key isEqualToString:@"bytesSent"]) { - NSInteger byteCount = value.integerValue; - [_connSendBitrateTracker updateBitrateWithCurrentByteCount:byteCount]; - _connSendBitrate = _connSendBitrateTracker.bitrateString; - } -} - -- (void)parseConnectionStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - NSString *activeConnection = statsReport.values[@"googActiveConnection"]; - if (![activeConnection isEqualToString:@"true"]) { - return; - } - [statsReport.values - enumerateKeysAndObjectsUsingBlock:^(NSString *key, NSString *value, BOOL *stop) { - [self updateConnectionStatOfKey:key value:value]; - }]; -} - -- (void)parseSendSsrcStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - NSDictionary *values = statsReport.values; - if ([values objectForKey:@"googFrameRateSent"]) { - // Video track. - [self parseVideoSendStatsReport:statsReport]; - } else if ([values objectForKey:@"audioInputLevel"]) { - // Audio track. 
- [self parseAudioSendStatsReport:statsReport]; - } -} - -- (void)updateAudioSendStatOfKey:(NSString *)key value:(NSString *)value { - if ([key isEqualToString:@"googCodecName"]) { - _audioSendCodec = value; - } else if ([key isEqualToString:@"bytesSent"]) { - NSInteger byteCount = value.integerValue; - [_audioSendBitrateTracker updateBitrateWithCurrentByteCount:byteCount]; - _audioSendBitrate = _audioSendBitrateTracker.bitrateString; - } -} - -- (void)parseAudioSendStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - [statsReport.values - enumerateKeysAndObjectsUsingBlock:^(NSString *key, NSString *value, BOOL *stop) { - [self updateAudioSendStatOfKey:key value:value]; - }]; -} - -- (void)updateVideoSendStatOfKey:(NSString *)key value:(NSString *)value { - if ([key isEqualToString:@"googCodecName"]) { - _videoSendCodec = value; - } else if ([key isEqualToString:@"googFrameHeightInput"]) { - _videoInputHeight = value; - } else if ([key isEqualToString:@"googFrameWidthInput"]) { - _videoInputWidth = value; - } else if ([key isEqualToString:@"googFrameRateInput"]) { - _videoInputFps = value; - } else if ([key isEqualToString:@"googFrameHeightSent"]) { - _videoSendHeight = value; - } else if ([key isEqualToString:@"googFrameWidthSent"]) { - _videoSendWidth = value; - } else if ([key isEqualToString:@"googFrameRateSent"]) { - _videoSendFps = value; - } else if ([key isEqualToString:@"googAvgEncodeMs"]) { - _videoEncodeMs = value; - } else if ([key isEqualToString:@"bytesSent"]) { - NSInteger byteCount = value.integerValue; - [_videoSendBitrateTracker updateBitrateWithCurrentByteCount:byteCount]; - _videoSendBitrate = _videoSendBitrateTracker.bitrateString; - } else if ([key isEqualToString:@"qpSum"]) { - _oldVideoQPSum = _videoQPSum; - _videoQPSum = value.integerValue; - } else if ([key isEqualToString:@"framesEncoded"]) { - _oldFramesEncoded = _framesEncoded; - _framesEncoded = value.integerValue; - } -} - -- (void)parseVideoSendStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - [statsReport.values - enumerateKeysAndObjectsUsingBlock:^(NSString *key, NSString *value, BOOL *stop) { - [self updateVideoSendStatOfKey:key value:value]; - }]; -} - -- (void)parseRecvSsrcStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - NSDictionary *values = statsReport.values; - if ([values objectForKey:@"googFrameWidthReceived"]) { - // Video track. - [self parseVideoRecvStatsReport:statsReport]; - } else if ([values objectForKey:@"audioOutputLevel"]) { - // Audio track. 
- [self parseAudioRecvStatsReport:statsReport]; - } -} - -- (void)updateAudioRecvStatOfKey:(NSString *)key value:(NSString *)value { - if ([key isEqualToString:@"googCodecName"]) { - _audioRecvCodec = value; - } else if ([key isEqualToString:@"bytesReceived"]) { - NSInteger byteCount = value.integerValue; - [_audioRecvBitrateTracker updateBitrateWithCurrentByteCount:byteCount]; - _audioRecvBitrate = _audioRecvBitrateTracker.bitrateString; - } else if ([key isEqualToString:@"googSpeechExpandRate"]) { - _audioExpandRate = value; - } else if ([key isEqualToString:@"googCurrentDelayMs"]) { - _audioCurrentDelay = value; - } -} - -- (void)parseAudioRecvStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - [statsReport.values - enumerateKeysAndObjectsUsingBlock:^(NSString *key, NSString *value, BOOL *stop) { - [self updateAudioRecvStatOfKey:key value:value]; - }]; -} - -- (void)updateVideoRecvStatOfKey:(NSString *)key value:(NSString *)value { - if ([key isEqualToString:@"googFrameHeightReceived"]) { - _videoRecvHeight = value; - } else if ([key isEqualToString:@"googFrameWidthReceived"]) { - _videoRecvWidth = value; - } else if ([key isEqualToString:@"googFrameRateReceived"]) { - _videoRecvFps = value; - } else if ([key isEqualToString:@"googFrameRateDecoded"]) { - _videoDecodedFps = value; - } else if ([key isEqualToString:@"googFrameRateOutput"]) { - _videoOutputFps = value; - } else if ([key isEqualToString:@"googDecodeMs"]) { - _videoDecodeMs = value; - } else if ([key isEqualToString:@"bytesReceived"]) { - NSInteger byteCount = value.integerValue; - [_videoRecvBitrateTracker updateBitrateWithCurrentByteCount:byteCount]; - _videoRecvBitrate = _videoRecvBitrateTracker.bitrateString; - } -} - -- (void)parseVideoRecvStatsReport:(RTC_OBJC_TYPE(RTCLegacyStatsReport) *)statsReport { - [statsReport.values - enumerateKeysAndObjectsUsingBlock:^(NSString *key, NSString *value, BOOL *stop) { - [self updateVideoRecvStatOfKey:key value:value]; - }]; + return result; } @end diff --git a/examples/objc/AppRTCMobile/ios/ARDStatsView.h b/examples/objc/AppRTCMobile/ios/ARDStatsView.h index 9c8636476c..72207de64e 100644 --- a/examples/objc/AppRTCMobile/ios/ARDStatsView.h +++ b/examples/objc/AppRTCMobile/ios/ARDStatsView.h @@ -10,8 +10,12 @@ #import +#import "sdk/objc/base/RTCMacros.h" + +@class RTC_OBJC_TYPE(RTCStatisticsReport); + @interface ARDStatsView : UIView -- (void)setStats:(NSArray *)stats; +- (void)setStats:(RTC_OBJC_TYPE(RTCStatisticsReport) *)stats; @end diff --git a/examples/objc/AppRTCMobile/ios/ARDStatsView.m b/examples/objc/AppRTCMobile/ios/ARDStatsView.m index bd97d30fbe..867ba5b09e 100644 --- a/examples/objc/AppRTCMobile/ios/ARDStatsView.m +++ b/examples/objc/AppRTCMobile/ios/ARDStatsView.m @@ -34,10 +34,8 @@ - (instancetype)initWithFrame:(CGRect)frame { return self; } -- (void)setStats:(NSArray *)stats { - for (RTC_OBJC_TYPE(RTCLegacyStatsReport) * report in stats) { - [_statsBuilder parseStatsReport:report]; - } +- (void)setStats:(RTC_OBJC_TYPE(RTCStatisticsReport) *)stats { + _statsBuilder.stats = stats; _statsLabel.text = _statsBuilder.statsString; } diff --git a/examples/objc/AppRTCMobile/ios/ARDVideoCallView.m b/examples/objc/AppRTCMobile/ios/ARDVideoCallView.m index 4301b7ede9..437aea8d56 100644 --- a/examples/objc/AppRTCMobile/ios/ARDVideoCallView.m +++ b/examples/objc/AppRTCMobile/ios/ARDVideoCallView.m @@ -12,10 +12,7 @@ #import -#import "sdk/objc/components/renderer/opengl/RTCEAGLVideoView.h" -#if defined(RTC_SUPPORTS_METAL) -#import 
"sdk/objc/components/renderer/metal/RTCMTLVideoView.h" // nogncheck -#endif +#import "sdk/objc/components/renderer/metal/RTCMTLVideoView.h" #import "UIImage+ARDUtilities.h" @@ -44,14 +41,7 @@ @implementation ARDVideoCallView { - (instancetype)initWithFrame:(CGRect)frame { if (self = [super initWithFrame:frame]) { -#if defined(RTC_SUPPORTS_METAL) _remoteVideoView = [[RTC_OBJC_TYPE(RTCMTLVideoView) alloc] initWithFrame:CGRectZero]; -#else - RTC_OBJC_TYPE(RTCEAGLVideoView) *remoteView = - [[RTC_OBJC_TYPE(RTCEAGLVideoView) alloc] initWithFrame:CGRectZero]; - remoteView.delegate = self; - _remoteVideoView = remoteView; -#endif [self addSubview:_remoteVideoView]; diff --git a/examples/objc/AppRTCMobile/ios/ARDVideoCallViewController.m b/examples/objc/AppRTCMobile/ios/ARDVideoCallViewController.m index cd26829713..a82d90b290 100644 --- a/examples/objc/AppRTCMobile/ios/ARDVideoCallViewController.m +++ b/examples/objc/AppRTCMobile/ios/ARDVideoCallViewController.m @@ -132,8 +132,7 @@ - (void)appClient:(ARDAppClient *)client }); } -- (void)appClient:(ARDAppClient *)client - didGetStats:(NSArray *)stats { +- (void)appClient:(ARDAppClient *)client didGetStats:(RTC_OBJC_TYPE(RTCStatisticsReport) *)stats { _videoCallView.statsView.stats = stats; [_videoCallView setNeedsLayout]; } diff --git a/examples/objc/AppRTCMobile/ios/broadcast_extension/ARDBroadcastSampleHandler.m b/examples/objc/AppRTCMobile/ios/broadcast_extension/ARDBroadcastSampleHandler.m index d9c816d573..1c276d965f 100644 --- a/examples/objc/AppRTCMobile/ios/broadcast_extension/ARDBroadcastSampleHandler.m +++ b/examples/objc/AppRTCMobile/ios/broadcast_extension/ARDBroadcastSampleHandler.m @@ -120,7 +120,7 @@ - (void)appClient:(ARDAppClient *)client didReceiveRemoteVideoTrack:(RTC_OBJC_TYPE(RTCVideoTrack) *)remoteVideoTrack { } -- (void)appClient:(ARDAppClient *)client didGetStats:(NSArray *)stats { +- (void)appClient:(ARDAppClient *)client didGetStats:(RTC_OBJC_TYPE(RTCStatisticsReport) *)stats { } - (void)appClient:(ARDAppClient *)client didError:(NSError *)error { diff --git a/examples/objcnativeapi/objc/NADViewController.mm b/examples/objcnativeapi/objc/NADViewController.mm index 7f6ffbb7e5..fd244799f8 100644 --- a/examples/objcnativeapi/objc/NADViewController.mm +++ b/examples/objcnativeapi/objc/NADViewController.mm @@ -12,10 +12,7 @@ #import "sdk/objc/base/RTCVideoRenderer.h" #import "sdk/objc/components/capturer/RTCCameraVideoCapturer.h" -#if defined(RTC_SUPPORTS_METAL) -#import "sdk/objc/components/renderer/metal/RTCMTLVideoView.h" // nogncheck -#endif -#import "sdk/objc/components/renderer/opengl/RTCEAGLVideoView.h" +#import "sdk/objc/components/renderer/metal/RTCMTLVideoView.h" #import "sdk/objc/helpers/RTCCameraPreviewView.h" #include @@ -49,11 +46,7 @@ @implementation NADViewController { - (void)loadView { _view = [[UIView alloc] initWithFrame:CGRectZero]; -#if defined(RTC_SUPPORTS_METAL) _remoteVideoView = [[RTC_OBJC_TYPE(RTCMTLVideoView) alloc] initWithFrame:CGRectZero]; -#else - _remoteVideoView = [[RTC_OBJC_TYPE(RTCEAGLVideoView) alloc] initWithFrame:CGRectZero]; -#endif _remoteVideoView.translatesAutoresizingMaskIntoConstraints = NO; [_view addSubview:_remoteVideoView]; diff --git a/examples/peerconnection/client/main.cc b/examples/peerconnection/client/main.cc index cc8bdfbd76..e209171116 100644 --- a/examples/peerconnection/client/main.cc +++ b/examples/peerconnection/client/main.cc @@ -27,7 +27,6 @@ #include "rtc_base/ssl_adapter.h" #include "rtc_base/string_utils.h" // For ToUtf8 #include "rtc_base/win32_socket_init.h" 
-#include "rtc_base/win32_socket_server.h" #include "system_wrappers/include/field_trial.h" #include "test/field_trial.h" @@ -76,9 +75,8 @@ int PASCAL wWinMain(HINSTANCE instance, wchar_t* cmd_line, int cmd_show) { rtc::WinsockInitializer winsock_init; - rtc::Win32SocketServer w32_ss; - rtc::Win32Thread w32_thread(&w32_ss); - rtc::ThreadManager::Instance()->SetCurrentThread(&w32_thread); + rtc::PhysicalSocketServer ss; + rtc::AutoSocketServerThread main_thread(&ss); WindowsCommandLineArguments win_args; int argc = win_args.argc(); diff --git a/examples/peerconnection/server/data_socket.cc b/examples/peerconnection/server/data_socket.cc index ced0fd1bae..2d595a0e86 100644 --- a/examples/peerconnection/server/data_socket.cc +++ b/examples/peerconnection/server/data_socket.cc @@ -10,7 +10,6 @@ #include "examples/peerconnection/server/data_socket.h" -#include #include #include #include @@ -20,6 +19,7 @@ #endif #include "examples/peerconnection/server/utils.h" +#include "rtc_base/checks.h" static const char kHeaderTerminator[] = "\r\n\r\n"; static const int kHeaderTerminatorLength = sizeof(kHeaderTerminator) - 1; @@ -53,7 +53,7 @@ WinsockInitializer WinsockInitializer::singleton; // bool SocketBase::Create() { - assert(!valid()); + RTC_DCHECK(!valid()); socket_ = ::socket(AF_INET, SOCK_STREAM, 0); return valid(); } @@ -77,7 +77,7 @@ std::string DataSocket::request_arguments() const { } bool DataSocket::PathEquals(const char* path) const { - assert(path); + RTC_DCHECK(path); size_t args = request_path_.find('?'); if (args != std::string::npos) return request_path_.substr(0, args).compare(path) == 0; @@ -85,7 +85,7 @@ bool DataSocket::PathEquals(const char* path) const { } bool DataSocket::OnDataAvailable(bool* close_socket) { - assert(valid()); + RTC_DCHECK(valid()); char buffer[0xfff] = {0}; int bytes = recv(socket_, buffer, sizeof(buffer), 0); if (bytes == SOCKET_ERROR || bytes == 0) { @@ -125,8 +125,8 @@ bool DataSocket::Send(const std::string& status, const std::string& content_type, const std::string& extra_headers, const std::string& data) const { - assert(valid()); - assert(!status.empty()); + RTC_DCHECK(valid()); + RTC_DCHECK(!status.empty()); std::string buffer("HTTP/1.1 " + status + "\r\n"); buffer += @@ -165,8 +165,8 @@ void DataSocket::Clear() { } bool DataSocket::ParseHeaders() { - assert(!request_headers_.empty()); - assert(method_ == INVALID); + RTC_DCHECK(!request_headers_.empty()); + RTC_DCHECK_EQ(method_, INVALID); size_t i = request_headers_.find("\r\n"); if (i == std::string::npos) return false; @@ -174,8 +174,8 @@ bool DataSocket::ParseHeaders() { if (!ParseMethodAndPath(request_headers_.data(), i)) return false; - assert(method_ != INVALID); - assert(!request_path_.empty()); + RTC_DCHECK_NE(method_, INVALID); + RTC_DCHECK(!request_path_.empty()); if (method_ == POST) { const char* headers = request_headers_.data() + i + 2; @@ -225,8 +225,8 @@ bool DataSocket::ParseMethodAndPath(const char* begin, size_t len) { } bool DataSocket::ParseContentLengthAndType(const char* headers, size_t length) { - assert(content_length_ == 0); - assert(content_type_.empty()); + RTC_DCHECK_EQ(content_length_, 0); + RTC_DCHECK(content_type_.empty()); const char* end = headers + length; while (headers && headers < end) { @@ -267,7 +267,7 @@ bool DataSocket::ParseContentLengthAndType(const char* headers, size_t length) { // bool ListeningSocket::Listen(unsigned short port) { - assert(valid()); + RTC_DCHECK(valid()); int enabled = 1; setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, 
reinterpret_cast(&enabled), sizeof(enabled)); @@ -284,7 +284,7 @@ bool ListeningSocket::Listen(unsigned short port) { } DataSocket* ListeningSocket::Accept() const { - assert(valid()); + RTC_DCHECK(valid()); struct sockaddr_in addr = {0}; socklen_t size = sizeof(addr); NativeSocket client = diff --git a/examples/peerconnection/server/main.cc b/examples/peerconnection/server/main.cc index b80e4d8247..50b8c23401 100644 --- a/examples/peerconnection/server/main.cc +++ b/examples/peerconnection/server/main.cc @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include #include #include #if defined(WEBRTC_POSIX) @@ -24,6 +23,7 @@ #include "absl/flags/usage.h" #include "examples/peerconnection/server/data_socket.h" #include "examples/peerconnection/server/peer_channel.h" +#include "rtc_base/checks.h" #include "system_wrappers/include/field_trial.h" #include "test/field_trial.h" @@ -41,8 +41,8 @@ ABSL_FLAG(int, port, 8888, "default: 8888"); static const size_t kMaxConnections = (FD_SETSIZE - 2); void HandleBrowserRequest(DataSocket* ds, bool* quit) { - assert(ds && ds->valid()); - assert(quit); + RTC_DCHECK(ds && ds->valid()); + RTC_DCHECK(quit); const std::string& path = ds->request_path(); @@ -162,7 +162,7 @@ int main(int argc, char* argv[]) { if (socket_done) { printf("Disconnecting socket\n"); clients.OnClosing(s); - assert(s->valid()); // Close must not have been called yet. + RTC_DCHECK(s->valid()); // Close must not have been called yet. FD_CLR(s->socket(), &socket_set); delete (*i); i = sockets.erase(i); diff --git a/examples/peerconnection/server/peer_channel.cc b/examples/peerconnection/server/peer_channel.cc index be0f282abd..f53820cc60 100644 --- a/examples/peerconnection/server/peer_channel.cc +++ b/examples/peerconnection/server/peer_channel.cc @@ -10,7 +10,6 @@ #include "examples/peerconnection/server/peer_channel.h" -#include #include #include @@ -18,6 +17,7 @@ #include "examples/peerconnection/server/data_socket.h" #include "examples/peerconnection/server/utils.h" +#include "rtc_base/checks.h" // Set to the peer id of the originator when messages are being // exchanged between peers, but set to the id of the receiving peer @@ -57,9 +57,9 @@ ChannelMember::ChannelMember(DataSocket* socket) id_(++s_member_id_), connected_(true), timestamp_(time(NULL)) { - assert(socket); - assert(socket->method() == DataSocket::GET); - assert(socket->PathEquals("/sign_in")); + RTC_DCHECK(socket); + RTC_DCHECK_EQ(socket->method(), DataSocket::GET); + RTC_DCHECK(socket->PathEquals("/sign_in")); name_ = socket->request_arguments(); if (name_.empty()) name_ = "peer_" + int2str(id_); @@ -85,14 +85,14 @@ std::string ChannelMember::GetPeerIdHeader() const { } bool ChannelMember::NotifyOfOtherMember(const ChannelMember& other) { - assert(&other != this); + RTC_DCHECK_NE(&other, this); QueueResponse("200 OK", "text/plain", GetPeerIdHeader(), other.GetEntry()); return true; } // Returns a string in the form "name,id,connected\n". 
std::string ChannelMember::GetEntry() const { - assert(name_.length() <= kMaxNameLength); + RTC_DCHECK(name_.length() <= kMaxNameLength); // name, 11-digit int, 1-digit bool, newline, null char entry[kMaxNameLength + 15]; @@ -102,8 +102,8 @@ std::string ChannelMember::GetEntry() const { } void ChannelMember::ForwardRequestToPeer(DataSocket* ds, ChannelMember* peer) { - assert(peer); - assert(ds); + RTC_DCHECK(peer); + RTC_DCHECK(ds); std::string extra_headers(GetPeerIdHeader()); @@ -129,8 +129,8 @@ void ChannelMember::QueueResponse(const std::string& status, const std::string& extra_headers, const std::string& data) { if (waiting_socket_) { - assert(queue_.empty()); - assert(waiting_socket_->method() == DataSocket::GET); + RTC_DCHECK(queue_.empty()); + RTC_DCHECK_EQ(waiting_socket_->method(), DataSocket::GET); bool ok = waiting_socket_->Send(status, true, content_type, extra_headers, data); if (!ok) { @@ -149,9 +149,9 @@ void ChannelMember::QueueResponse(const std::string& status, } void ChannelMember::SetWaitingSocket(DataSocket* ds) { - assert(ds->method() == DataSocket::GET); + RTC_DCHECK_EQ(ds->method(), DataSocket::GET); if (ds && !queue_.empty()) { - assert(waiting_socket_ == NULL); + RTC_DCHECK(!waiting_socket_); const QueuedResponse& response = queue_.front(); ds->Send(response.status, true, response.content_type, response.extra_headers, response.data); @@ -167,13 +167,13 @@ void ChannelMember::SetWaitingSocket(DataSocket* ds) { // static bool PeerChannel::IsPeerConnection(const DataSocket* ds) { - assert(ds); + RTC_DCHECK(ds); return (ds->method() == DataSocket::POST && ds->content_length() > 0) || (ds->method() == DataSocket::GET && ds->PathEquals("/sign_in")); } ChannelMember* PeerChannel::Lookup(DataSocket* ds) const { - assert(ds); + RTC_DCHECK(ds); if (ds->method() != DataSocket::GET && ds->method() != DataSocket::POST) return NULL; @@ -209,7 +209,7 @@ ChannelMember* PeerChannel::Lookup(DataSocket* ds) const { } ChannelMember* PeerChannel::IsTargetedRequest(const DataSocket* ds) const { - assert(ds); + RTC_DCHECK(ds); // Regardless of GET or POST, we look for the peer_id parameter // only in the request_path. const std::string& path = ds->request_path(); @@ -239,7 +239,7 @@ ChannelMember* PeerChannel::IsTargetedRequest(const DataSocket* ds) const { } bool PeerChannel::AddMember(DataSocket* ds) { - assert(IsPeerConnection(ds)); + RTC_DCHECK(IsPeerConnection(ds)); ChannelMember* new_guy = new ChannelMember(ds); Members failures; BroadcastChangedState(*new_guy, &failures); @@ -308,7 +308,7 @@ void PeerChannel::DeleteAll() { void PeerChannel::BroadcastChangedState(const ChannelMember& member, Members* delivery_failures) { // This function should be called prior to DataSocket::Close(). - assert(delivery_failures); + RTC_DCHECK(delivery_failures); if (!member.connected()) { printf("Member disconnected: %s\n", member.name().c_str()); @@ -329,12 +329,12 @@ void PeerChannel::BroadcastChangedState(const ChannelMember& member, } void PeerChannel::HandleDeliveryFailures(Members* failures) { - assert(failures); + RTC_DCHECK(failures); while (!failures->empty()) { Members::iterator i = failures->begin(); ChannelMember* member = *i; - assert(!member->connected()); + RTC_DCHECK(!member->connected()); failures->erase(i); BroadcastChangedState(*member, failures); delete member; @@ -344,14 +344,14 @@ void PeerChannel::HandleDeliveryFailures(Members* failures) { // Builds a simple list of "name,id\n" entries for each member. 
std::string PeerChannel::BuildResponseForNewMember(const ChannelMember& member, std::string* content_type) { - assert(content_type); + RTC_DCHECK(content_type); *content_type = "text/plain"; // The peer itself will always be the first entry. std::string response(member.GetEntry()); for (Members::iterator i = members_.begin(); i != members_.end(); ++i) { if (member.id() != (*i)->id()) { - assert((*i)->connected()); + RTC_DCHECK((*i)->connected()); response += (*i)->GetEntry(); } } diff --git a/examples/unityplugin/simple_peer_connection.cc b/examples/unityplugin/simple_peer_connection.cc index 128ca76881..c7e5185bdc 100644 --- a/examples/unityplugin/simple_peer_connection.cc +++ b/examples/unityplugin/simple_peer_connection.cc @@ -497,8 +497,9 @@ bool SimplePeerConnection::CreateDataChannel() { struct webrtc::DataChannelInit init; init.ordered = true; init.reliable = true; - data_channel_ = peer_connection_->CreateDataChannel("Hello", &init); - if (data_channel_.get()) { + auto result = peer_connection_->CreateDataChannelOrError("Hello", &init); + if (result.ok()) { + data_channel_ = result.MoveValue(); data_channel_->RegisterObserver(this); RTC_LOG(LS_INFO) << "Succeeds to create data channel"; return true; diff --git a/g3doc/implementation_basics.md b/g3doc/implementation_basics.md new file mode 100644 index 0000000000..933941a0d1 --- /dev/null +++ b/g3doc/implementation_basics.md @@ -0,0 +1,92 @@
+
+
+
+# Basic concepts and primitives
+
+## Time
+
+Internally, time is represented using the [webrtc::Timestamp][1] class. This
+represents time with a resolution of one microsecond, using a 64-bit integer,
+and provides converters to milliseconds or seconds as needed.
+
+All timestamps need to be measured from the system monotonic time.
+
+The epoch is not specified (because we can't always know if the system clock is
+correct), but whenever an absolute epoch is needed, the Unix time
+epoch (Jan 1, 1970 at 0:00 GMT) is used.
+
+Conversion from/to other formats (for example milliseconds, NTP times,
+timestamp strings) should happen as close to the interface requiring that
+format as possible.
+
+NOTE: There are parts of the codebase that don't use Timestamp, parts of the
+codebase that use the NTP epoch, and parts of the codebase that don't use the
+monotonic clock. They need to be updated.
+
+## Threads
+
+All execution happens on a TaskQueue instance. How a TaskQueue is implemented
+varies by platform, but they all have the [webrtc::TaskQueueBase][3] API.
+
+This API offers primitives for posting tasks, with or without delay.
+
+Some core parts use the [rtc::Thread][2], which is a subclass of TaskQueueBase.
+This may contain a SocketServer for processing I/O, and is used for policing
+certain calling patterns between a few core threads (the NetworkThread cannot
+do Invoke on the Worker thread, for instance).
+
+## Synchronization primitives
+
+### PostTask and thread-guarded variables
+
+The preferred method for synchronization is to post tasks between threads,
+and to let each thread take care of its own variables (lock-free programming).
+All variables in classes intended to be used with multiple threads should
+therefore be annotated with RTC_GUARDED_BY(thread).
+
+For classes used with only one thread, the recommended pattern is to let
+them own a webrtc::SequenceChecker (conventionally named sequence_checker_)
+and let all variables be RTC_GUARDED_BY(sequence_checker_).
+
+Member variables marked const do not need to be guarded, since they never
+change. (But note that they may point to objects that can change!)
+
+When posting tasks with callbacks, it is the duty of the caller to check
+that the object one is calling back into still exists when the callback
+is made. A helper for this task is the [webrtc::ScopedTaskSafety][5]
+flag, which can automatically drop callbacks in this situation, and
+associated classes.
+
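+As a rough illustration of this pattern, a single-sequence class might look
+like the sketch below. The `Counter` class is invented for this example, and
+the include paths may differ between WebRTC versions.
+
+```
+#include "api/sequence_checker.h"         // webrtc::SequenceChecker
+#include "rtc_base/thread_annotations.h"  // RTC_GUARDED_BY
+
+// Hypothetical class, used only to illustrate the annotations.
+class Counter {
+ public:
+  void Increment() {
+    RTC_DCHECK_RUN_ON(&sequence_checker_);  // All calls on the same sequence.
+    ++count_;
+  }
+
+ private:
+  webrtc::SequenceChecker sequence_checker_;
+  int count_ RTC_GUARDED_BY(sequence_checker_) = 0;
+};
+```
+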
+### Synchronization primitives to be used when needed
+
+When it is absolutely necessary to let one thread wait for another thread
+to do something, Thread::Invoke can be used. This function is DISCOURAGED,
+since it leads to performance issues, but is currently still widespread.
+
+When it is absolutely necessary to access one variable from multiple threads,
+the webrtc::Mutex can be used. Such variables MUST be marked up with
+RTC_GUARDED_BY(mutex), to allow static analysis that lessens the chance of
+deadlocks or unintended consequences.
+
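+For such cases, a minimal sketch of the Mutex pattern might look like this.
+`SharedCounter` is again an invented name, and include paths may vary.
+
+```
+#include "rtc_base/synchronization/mutex.h"  // webrtc::Mutex, webrtc::MutexLock
+#include "rtc_base/thread_annotations.h"     // RTC_GUARDED_BY
+
+// Hypothetical class, used only to illustrate the annotations.
+class SharedCounter {
+ public:
+  void Increment() {
+    webrtc::MutexLock lock(&mutex_);  // Scoped lock, released on return.
+    ++count_;
+  }
+
+ private:
+  webrtc::Mutex mutex_;
+  int count_ RTC_GUARDED_BY(mutex_) = 0;
+};
+```
+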
+### Synchronization primitives that are being removed
+The following non-exhaustive list of synchronization primitives is in the
+(slow) process of being removed from the codebase.
+
+* sigslot. Use [webrtc::CallbackList][4] instead, or, when there's only one
+  signal consumer, a single std::function.
+
+* AsyncInvoker.
+
+* RecursiveCriticalSection. Try to use [webrtc::Mutex][6] instead, and don't recurse.
+
+
+
+[1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/units/timestamp.h;drc=b95d90b78a3491ef8e8aa0640dd521515ec881ca;l=29
+[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/rtc_base/thread.h;drc=1107751b6f11c35259a1c5c8a0f716e227b7e3b4;l=194
+[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/task_queue/task_queue_base.h;drc=1107751b6f11c35259a1c5c8a0f716e227b7e3b4;l=25
+[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/rtc_base/callback_list.h;drc=54b91412de3f579a2d5ccdead6e04cc2cc5ca3a1;l=162
+[5]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/rtc_base/task_utils/pending_task_safety_flag.h;drc=86ee89f73e4f4799b3ebcc0b5c65837c9601fe6d;l=117
+[6]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/rtc_base/synchronization/mutex.h;drc=0d3c09a8fe5f12dfbc9f1bcd5790fda8830624ec;l=40
diff --git a/g3doc/sitemap.md b/g3doc/sitemap.md index 378333d5d8..c893d6ce3a 100644 --- a/g3doc/sitemap.md +++ b/g3doc/sitemap.md @@ -6,11 +6,15 @@ * [Public C++ API](/api/g3doc/index.md) * [Threading](/api/g3doc/threading_design.md) * Implementation + * [Basic concepts](/g3doc/implementation_basics.md) + * [Supported Platforms and Compilers](/g3doc/supported-platforms-and-compilers.md) * Network * [ICE](/p2p/g3doc/ice.md) * STUN * TURN * [DTLS](/pc/g3doc/dtls_transport.md) + * [RTP](/pc/g3doc/rtp.md) + * [SRTP](/pc/g3doc/srtp.md) * [SCTP](/pc/g3doc/sctp_transport.md) * [Pacing buffer](/modules/pacing/g3doc/index.md) * Congestion control and bandwidth estimation @@ -30,6 +34,7 @@ * [PeerConnection](/pc/g3doc/peer_connection.md) * Desktop capture * Stats + * [Logging](/logging/g3doc/rtc_event_log.md) * Testing * Media Quality and performance * [PeerConnection Framework](/test/pc/e2e/g3doc/index.md) diff --git a/g3doc/supported-platforms-and-compilers.md b/g3doc/supported-platforms-and-compilers.md new file mode 100644 index 0000000000..9e51a29ab7 --- /dev/null +++ b/g3doc/supported-platforms-and-compilers.md @@ -0,0 +1,36 @@
+# WebRTC supported platforms and compilers
+
+
+
+
+## Operating systems and CPUs
+
+The list of officially supported operating systems and CPUs is:
+* Android: armeabi-v7a, arm64-v8a, x86, x86_64.
+* iOS: arm64, x86_64.
+* Linux: armeabi-v7a, arm64-v8a, x86, x86_64.
+* macOS: x86_64, arm64 (M1).
+* Windows: x86_64.
+
+Other platforms are not officially supported (which means there is no CI
+coverage for them) but patches to keep WebRTC working with them are welcomed by
+the WebRTC Team.
+
+## Compilers
+
+WebRTC officially supports clang on all the supported platforms. The clang
+version officially supported is the one used by Chromium (hence the version is
+really close to Tip of Tree and can be checked
+[here](https://source.chromium.org/chromium/chromium/src/+/main:tools/clang/scripts/update.py)
+by looking at the value of `CLANG_REVISION`).
+
+See also
+[here](https://source.chromium.org/chromium/chromium/src/+/main:docs/clang.md)
+for some clang-related documentation from Chromium.
+
+MSVC is also supported at version VS 2019 16.61.
+
+Other compilers are not officially supported (which means there is no CI
+coverage for them) but patches to keep WebRTC working with them are welcomed by
+the WebRTC Team.
diff --git a/logging/BUILD.gn b/logging/BUILD.gn index 519f357345..90a05f7c49 100644 --- a/logging/BUILD.gn +++ b/logging/BUILD.gn @@ -53,6 +53,7 @@ rtc_library("rtc_event_pacing") { deps = [ "../api:scoped_refptr", "../api/rtc_event_log", + "../api/units:timestamp", ] absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] } @@ -73,6 +74,7 @@ rtc_library("rtc_event_audio") { deps = [ ":rtc_stream_config", "../api:scoped_refptr", "../api/rtc_event_log", + "../api/units:timestamp", "../modules/audio_coding:audio_network_adaptor_config", "../rtc_base:checks", ] @@ -101,6 +103,7 @@ rtc_library("rtc_event_bwe") { "../api:scoped_refptr", "../api/rtc_event_log", "../api/units:data_rate", + "../api/units:timestamp", ] absl_deps = [ "//third_party/abseil-cpp/absl/memory", @@ -115,6 +118,7 @@ rtc_library("rtc_event_frame_events") { ] deps = [ "../api/rtc_event_log", + "../api/units:timestamp", "../api/video:video_frame", "../rtc_base:timeutils", ] @@ -136,6 +140,7 @@ rtc_library("rtc_event_generic_packet_events") { ] deps = [ "../api/rtc_event_log", + "../api/units:timestamp", "../rtc_base:timeutils", ] absl_deps = [ @@ -179,6 +184,7 @@ rtc_library("rtc_event_video") { ":rtc_stream_config", "../api:scoped_refptr", "../api/rtc_event_log", + "../api/units:timestamp", "../rtc_base:checks", ] absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] @@ -452,6 +458,7 @@ rtc_library("ice_log") { "../api:libjingle_logging_api", "../api:libjingle_peerconnection_api", # For api/dtls_transport_interface.h "../api/rtc_event_log", + "../api/units:timestamp", "../rtc_base:rtc_base_approved", ] absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] diff --git a/logging/g3doc/rtc_event_log.md b/logging/g3doc/rtc_event_log.md new file mode 100644 index 0000000000..c7996e0b42 --- /dev/null +++ b/logging/g3doc/rtc_event_log.md @@ -0,0 +1,85 @@
+# RTC event log
+
+
+
+
+## Overview
+
+RTC event logs can be enabled to capture in-depth information about sent and
+received packets and the internal state of some WebRTC components. The logs are
+useful to understand network behavior and to debug issues around connectivity,
+bandwidth estimation and audio jitter buffers.
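+
+A log is typically enabled programmatically through the PeerConnection API,
+roughly as sketched below (the file path and output period are placeholders;
+see api/peer_connection_interface.h for the authoritative interface):
+
+```
+#include <memory>
+
+#include "api/peer_connection_interface.h"
+#include "api/rtc_event_log_output_file.h"
+
+// `pc` is assumed to be an already-created PeerConnection.
+void EnableEventLog(webrtc::PeerConnectionInterface* pc) {
+  pc->StartRtcEventLog(
+      std::make_unique<webrtc::RtcEventLogOutputFile>("/tmp/rtc_event.log"),
+      /*output_period_ms=*/5000);
+}
+
+// Call pc->StopRtcEventLog() when done.
+```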
+
+The contents include:
+
+* Sent and received RTP headers
+* Full RTCP feedback
+* ICE candidates, pings and responses
+* Bandwidth estimator events, including loss-based estimate, delay-based
+  estimate, probe results and ALR state
+* Audio network adaptation settings
+* Audio playout events
+
+## Binary wire format
+
+No guarantees are made on the wire format, and the format may change without
+prior notice. To maintain compatibility with past and future formats, analysis
+tools should be built on top of the provided
+[rtc_event_log_parser.h](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/logging/rtc_event_log/rtc_event_log_parser.h).
+
+In particular, an analysis tool should *not* read the log as a protobuf.
+
+## Visualization
+
+Since the logs contain a substantial amount of data, it is usually convenient to
+get an overview by visualizing them as a set of plots. Use the command:
+
+```
+out/Default/event_log_visualizer /path/to/log_file | python
+```
+
+This visualization requires matplotlib to be installed. The tool is capable of
+producing a substantial number of plots, of which only a handful are generated
+by default. You can select which plots are generated through the `--plot=`
+command line argument. For example, the command
+
+```
+out/Default/event_log_visualizer \
+  --plot=incoming_packet_sizes,incoming_stream_bitrate \
+  /path/to/log_file | python
+```
+
+plots the sizes of incoming packets and the bitrate per incoming stream.
+
+You can get a full list of options for the `--plot` argument through
+
+```
+out/Default/event_log_visualizer --list_plots /path/to/log_file
+```
+
+You can also synchronize the x-axis between all plots (so zooming or
+panning in one plot affects all of them), by adding the command line
+argument `--shared_xaxis`.
+
+
+## Viewing the raw log contents as text
+
+If you know which format version the log file uses, you can view the raw
+contents as text. For version 1, you can use the command
+
+```
+out/Default/protoc --decode webrtc.rtclog.EventStream \
+  ./logging/rtc_event_log/rtc_event_log.proto < /path/to/log_file
+```
+
+Similarly, you can use
+
+```
+out/Default/protoc --decode webrtc.rtclog2.EventStream \
+  ./logging/rtc_event_log/rtc_event_log2.proto < /path/to/log_file
+```
+
+for logs that use version 2. However, note that not all of the contents will be
+human readable. Some fields are based on the raw RTP format or may be encoded as
+deltas relative to previous fields. Such fields will be printed as a list of
+bytes.
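+
+## Parsing the log programmatically
+
+As a starting point for a custom analysis tool built on rtc_event_log_parser.h,
+a minimal sketch might look like the following. It only prints one counter; a
+real tool would inspect more of the parsed data, and the exact accessor names
+should be checked against the parser header.
+
+```
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "logging/rtc_event_log/rtc_event_log_parser.h"
+
+int main(int argc, char* argv[]) {
+  if (argc != 2) {
+    std::cerr << "Usage: " << argv[0] << " /path/to/log_file\n";
+    return 1;
+  }
+  // Read the whole log file into memory.
+  std::ifstream file(argv[1], std::ios::binary);
+  std::stringstream contents;
+  contents << file.rdbuf();
+
+  webrtc::ParsedRtcEventLog parsed_log;
+  auto status = parsed_log.ParseString(contents.str());
+  if (!status.ok()) {
+    std::cerr << "Failed to parse log: " << status.message() << "\n";
+    return 1;
+  }
+  std::cout << "Incoming RTCP packets: "
+            << parsed_log.incoming_rtcp_packets().size() << "\n";
+  return 0;
+}
+```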
diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc index 458b5af894..063d425af5 100644 --- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc +++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc @@ -49,12 +49,12 @@ class RtcEventLogEncoderTest RtcEventLogEncoderTest() : seed_(std::get<0>(GetParam())), prng_(seed_), - encoding_(std::get<1>(GetParam())), + encoding_type_(std::get<1>(GetParam())), event_count_(std::get<2>(GetParam())), force_repeated_fields_(std::get<3>(GetParam())), gen_(seed_ * 880001UL), - verifier_(encoding_) { - switch (encoding_) { + verifier_(encoding_type_) { + switch (encoding_type_) { case RtcEventLog::EncodingType::Legacy: encoder_ = std::make_unique(); break; @@ -62,6 +62,8 @@ class RtcEventLogEncoderTest encoder_ = std::make_unique(); break; } + encoded_ = + encoder_->EncodeLogStart(rtc::TimeMillis(), rtc::TimeUTCMillis()); } ~RtcEventLogEncoderTest() override = default; @@ -89,11 +91,12 @@ class RtcEventLogEncoderTest ParsedRtcEventLog parsed_log_; const uint64_t seed_; Random prng_; - const RtcEventLog::EncodingType encoding_; + const RtcEventLog::EncodingType encoding_type_; const size_t event_count_; const bool force_repeated_fields_; test::EventGenerator gen_; test::EventVerifier verifier_; + std::string encoded_; }; void RtcEventLogEncoderTest::TestRtcEventAudioNetworkAdaptation( @@ -105,8 +108,8 @@ void RtcEventLogEncoderTest::TestRtcEventAudioNetworkAdaptation( history_.push_back(event->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& ana_configs = parsed_log_.audio_network_adaptation_events(); ASSERT_EQ(ana_configs.size(), events.size()); @@ -167,7 +170,7 @@ void RtcEventLogEncoderTest::TestRtpPackets() { // TODO(terelius): Test extensions for legacy encoding, too. RtpHeaderExtensionMap extension_map; - if (encoding_ != RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ != RtcEventLog::EncodingType::Legacy) { extension_map = gen_.NewRtpHeaderExtensionMap(true); } @@ -185,8 +188,8 @@ void RtcEventLogEncoderTest::TestRtpPackets() { } // Encode and parse. - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); // For each SSRC, make sure the RTP packets associated with it to have been // correctly encoded and parsed. 
@@ -212,8 +215,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAlrState) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& alr_state_events = parsed_log_.alr_state_events(); ASSERT_EQ(alr_state_events.size(), event_count_); @@ -223,7 +226,7 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAlrState) { } TEST_P(RtcEventLogEncoderTest, RtcEventRouteChange) { - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { return; } std::vector> events(event_count_); @@ -233,8 +236,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRouteChange) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& route_change_events = parsed_log_.route_change_events(); ASSERT_EQ(route_change_events.size(), event_count_); @@ -244,7 +247,7 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRouteChange) { } TEST_P(RtcEventLogEncoderTest, RtcEventRemoteEstimate) { - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { return; } std::vector> events(event_count_); @@ -255,8 +258,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRemoteEstimate) { history_.push_back(std::make_unique(*events[i])); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& parsed_events = parsed_log_.remote_estimate_events(); ASSERT_EQ(parsed_events.size(), event_count_); @@ -409,8 +412,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAudioPlayout) { original_events_by_ssrc[ssrc].push_back(std::move(event)); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& parsed_playout_events_by_ssrc = parsed_log_.audio_playout_events(); @@ -445,8 +448,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAudioReceiveStreamConfig) { gen_.NewAudioReceiveStreamConfig(ssrc, extensions); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& audio_recv_configs = parsed_log_.audio_recv_configs(); ASSERT_EQ(audio_recv_configs.size(), 1u); @@ -461,8 +464,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAudioSendStreamConfig) { gen_.NewAudioSendStreamConfig(ssrc, extensions); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& audio_send_configs = 
parsed_log_.audio_send_configs(); ASSERT_EQ(audio_send_configs.size(), 1u); @@ -479,8 +482,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventBweUpdateDelayBased) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& bwe_delay_updates = parsed_log_.bwe_delay_updates(); ASSERT_EQ(bwe_delay_updates.size(), event_count_); @@ -499,8 +502,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventBweUpdateLossBased) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& bwe_loss_updates = parsed_log_.bwe_loss_updates(); ASSERT_EQ(bwe_loss_updates.size(), event_count_); @@ -511,7 +514,7 @@ TEST_P(RtcEventLogEncoderTest, RtcEventBweUpdateLossBased) { } TEST_P(RtcEventLogEncoderTest, RtcEventGenericPacketReceived) { - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { return; } std::vector> events( @@ -523,8 +526,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventGenericPacketReceived) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& packets_received = parsed_log_.generic_packets_received(); ASSERT_EQ(packets_received.size(), event_count_); @@ -536,7 +539,7 @@ TEST_P(RtcEventLogEncoderTest, RtcEventGenericPacketReceived) { } TEST_P(RtcEventLogEncoderTest, RtcEventGenericPacketSent) { - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { return; } std::vector> events(event_count_); @@ -547,8 +550,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventGenericPacketSent) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& packets_sent = parsed_log_.generic_packets_sent(); ASSERT_EQ(packets_sent.size(), event_count_); @@ -559,7 +562,7 @@ TEST_P(RtcEventLogEncoderTest, RtcEventGenericPacketSent) { } TEST_P(RtcEventLogEncoderTest, RtcEventGenericAcksReceived) { - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { return; } std::vector> events(event_count_); @@ -570,8 +573,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventGenericAcksReceived) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& decoded_events = parsed_log_.generic_acks_received(); ASSERT_EQ(decoded_events.size(), event_count_); @@ -590,12 +593,11 @@ TEST_P(RtcEventLogEncoderTest, 
RtcEventDtlsTransportState) { history_.push_back(events[i]->Copy()); } - const std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& dtls_transport_states = parsed_log_.dtls_transport_states(); - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { ASSERT_EQ(dtls_transport_states.size(), 0u); return; } @@ -616,12 +618,11 @@ TEST_P(RtcEventLogEncoderTest, RtcEventDtlsWritableState) { history_.push_back(events[i]->Copy()); } - const std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& dtls_writable_states = parsed_log_.dtls_writable_states(); - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { ASSERT_EQ(dtls_writable_states.size(), 0u); return; } @@ -654,15 +655,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventFrameDecoded) { original_events_by_ssrc[ssrc].push_back(std::move(event)); } - const std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - auto status = parsed_log_.ParseString(encoded); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + auto status = parsed_log_.ParseString(encoded_); if (!status.ok()) RTC_LOG(LS_ERROR) << status.message(); ASSERT_TRUE(status.ok()); const auto& decoded_frames_by_ssrc = parsed_log_.decoded_frames(); - if (encoding_ == RtcEventLog::EncodingType::Legacy) { + if (encoding_type_ == RtcEventLog::EncodingType::Legacy) { ASSERT_EQ(decoded_frames_by_ssrc.size(), 0u); return; } @@ -695,8 +695,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventIceCandidatePairConfig) { gen_.NewIceCandidatePairConfig(); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& ice_candidate_pair_configs = parsed_log_.ice_candidate_pair_configs(); @@ -710,8 +710,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventIceCandidatePair) { std::unique_ptr event = gen_.NewIceCandidatePair(); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& ice_candidate_pair_events = parsed_log_.ice_candidate_pair_events(); @@ -721,31 +721,35 @@ TEST_P(RtcEventLogEncoderTest, RtcEventIceCandidatePair) { } TEST_P(RtcEventLogEncoderTest, RtcEventLoggingStarted) { - const int64_t timestamp_us = rtc::TimeMicros(); - const int64_t utc_time_us = rtc::TimeUTCMicros(); + const int64_t timestamp_ms = prng_.Rand(1'000'000'000); + const int64_t utc_time_ms = prng_.Rand(1'000'000'000); - std::string encoded = encoder_->EncodeLogStart(timestamp_us, utc_time_us); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + // Overwrite the previously encoded LogStart event. 
+ encoded_ = encoder_->EncodeLogStart(timestamp_ms * 1000, utc_time_ms * 1000); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& start_log_events = parsed_log_.start_log_events(); ASSERT_EQ(start_log_events.size(), 1u); - verifier_.VerifyLoggedStartEvent(timestamp_us, utc_time_us, + verifier_.VerifyLoggedStartEvent(timestamp_ms * 1000, utc_time_ms * 1000, start_log_events[0]); } TEST_P(RtcEventLogEncoderTest, RtcEventLoggingStopped) { - const int64_t start_timestamp_us = rtc::TimeMicros(); - const int64_t start_utc_time_us = rtc::TimeUTCMicros(); - std::string encoded = - encoder_->EncodeLogStart(start_timestamp_us, start_utc_time_us); - - const int64_t stop_timestamp_us = rtc::TimeMicros(); - encoded += encoder_->EncodeLogEnd(stop_timestamp_us); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + const int64_t start_timestamp_ms = prng_.Rand(1'000'000'000); + const int64_t start_utc_time_ms = prng_.Rand(1'000'000'000); + + // Overwrite the previously encoded LogStart event. + encoded_ = encoder_->EncodeLogStart(start_timestamp_ms * 1000, + start_utc_time_ms * 1000); + + const int64_t stop_timestamp_ms = + prng_.Rand(start_timestamp_ms, 2'000'000'000); + encoded_ += encoder_->EncodeLogEnd(stop_timestamp_ms * 1000); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& stop_log_events = parsed_log_.stop_log_events(); ASSERT_EQ(stop_log_events.size(), 1u); - verifier_.VerifyLoggedStopEvent(stop_timestamp_us, stop_log_events[0]); + verifier_.VerifyLoggedStopEvent(stop_timestamp_ms * 1000, stop_log_events[0]); } // TODO(eladalon/terelius): Test with multiple events in the batch. @@ -754,8 +758,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventProbeClusterCreated) { gen_.NewProbeClusterCreated(); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& bwe_probe_cluster_created_events = parsed_log_.bwe_probe_cluster_created_events(); @@ -770,8 +774,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventProbeResultFailure) { gen_.NewProbeResultFailure(); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& bwe_probe_failure_events = parsed_log_.bwe_probe_failure_events(); ASSERT_EQ(bwe_probe_failure_events.size(), 1u); @@ -785,8 +789,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventProbeResultSuccess) { gen_.NewProbeResultSuccess(); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& bwe_probe_success_events = parsed_log_.bwe_probe_success_events(); ASSERT_EQ(bwe_probe_success_events.size(), 1u); @@ -809,8 +813,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpPacketIncoming) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + 
ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& incoming_rtcp_packets = parsed_log_.incoming_rtcp_packets(); ASSERT_EQ(incoming_rtcp_packets.size(), event_count_); @@ -830,8 +834,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpPacketOutgoing) { history_.push_back(events[i]->Copy()); } - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& outgoing_rtcp_packets = parsed_log_.outgoing_rtcp_packets(); ASSERT_EQ(outgoing_rtcp_packets.size(), event_count_); @@ -852,9 +856,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpReceiverReport) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewReceiverReport(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -867,15 +871,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpReceiverReport) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& receiver_reports = parsed_log_.receiver_reports(direction); ASSERT_EQ(receiver_reports.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedReceiverReport(timestamps_us[i], events[i], + verifier_.VerifyLoggedReceiverReport(timestamps_ms[i], events[i], receiver_reports[i]); } } @@ -891,9 +894,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpSenderReport) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewSenderReport(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -906,15 +909,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpSenderReport) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& sender_reports = parsed_log_.sender_reports(direction); ASSERT_EQ(sender_reports.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedSenderReport(timestamps_us[i], events[i], + verifier_.VerifyLoggedSenderReport(timestamps_ms[i], events[i], sender_reports[i]); } } @@ -930,9 +932,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpExtendedReports) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = 
gen_.NewExtendedReports(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -945,15 +947,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpExtendedReports) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& extended_reports = parsed_log_.extended_reports(direction); ASSERT_EQ(extended_reports.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedExtendedReports(timestamps_us[i], events[i], + verifier_.VerifyLoggedExtendedReports(timestamps_ms[i], events[i], extended_reports[i]); } } @@ -969,9 +970,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpFir) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewFir(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -984,15 +985,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpFir) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& firs = parsed_log_.firs(direction); ASSERT_EQ(firs.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedFir(timestamps_us[i], events[i], firs[i]); + verifier_.VerifyLoggedFir(timestamps_ms[i], events[i], firs[i]); } } } @@ -1007,9 +1007,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpPli) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewPli(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -1022,15 +1022,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpPli) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& plis = parsed_log_.plis(direction); ASSERT_EQ(plis.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedPli(timestamps_us[i], events[i], plis[i]); + verifier_.VerifyLoggedPli(timestamps_ms[i], events[i], plis[i]); } } } @@ -1045,9 +1044,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpBye) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewBye(); 
rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -1060,15 +1059,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpBye) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& byes = parsed_log_.byes(direction); ASSERT_EQ(byes.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedBye(timestamps_us[i], events[i], byes[i]); + verifier_.VerifyLoggedBye(timestamps_ms[i], events[i], byes[i]); } } } @@ -1083,9 +1081,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpNack) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewNack(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -1098,15 +1096,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpNack) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& nacks = parsed_log_.nacks(direction); ASSERT_EQ(nacks.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedNack(timestamps_us[i], events[i], nacks[i]); + verifier_.VerifyLoggedNack(timestamps_ms[i], events[i], nacks[i]); } } } @@ -1121,9 +1118,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpRemb) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events[i] = gen_.NewRemb(); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -1136,15 +1133,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpRemb) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& rembs = parsed_log_.rembs(direction); ASSERT_EQ(rembs.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedRemb(timestamps_us[i], events[i], rembs[i]); + verifier_.VerifyLoggedRemb(timestamps_ms[i], events[i], rembs[i]); } } } @@ -1160,9 +1156,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpTransportFeedback) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events; events.reserve(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events.emplace_back(gen_.NewTransportFeedback()); rtc::Buffer buffer = 
events[i].Build(); if (direction == kIncomingPacket) { @@ -1175,16 +1171,15 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpTransportFeedback) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& transport_feedbacks = parsed_log_.transport_feedbacks(direction); ASSERT_EQ(transport_feedbacks.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedTransportFeedback(timestamps_us[i], events[i], + verifier_.VerifyLoggedTransportFeedback(timestamps_ms[i], events[i], transport_feedbacks[i]); } } @@ -1201,9 +1196,9 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpLossNotification) { for (auto direction : {kIncomingPacket, kOutgoingPacket}) { std::vector events; events.reserve(event_count_); - std::vector timestamps_us(event_count_); + std::vector timestamps_ms(event_count_); for (size_t i = 0; i < event_count_; ++i) { - timestamps_us[i] = rtc::TimeMicros(); + timestamps_ms[i] = rtc::TimeMillis(); events.emplace_back(gen_.NewLossNotification()); rtc::Buffer buffer = events[i].Build(); if (direction == kIncomingPacket) { @@ -1216,15 +1211,14 @@ TEST_P(RtcEventLogEncoderTest, RtcEventRtcpLossNotification) { fake_clock.AdvanceTime(TimeDelta::Millis(prng_.Rand(0, 1000))); } - std::string encoded = - encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& loss_notifications = parsed_log_.loss_notifications(direction); ASSERT_EQ(loss_notifications.size(), event_count_); for (size_t i = 0; i < event_count_; ++i) { - verifier_.VerifyLoggedLossNotification(timestamps_us[i], events[i], + verifier_.VerifyLoggedLossNotification(timestamps_ms[i], events[i], loss_notifications[i]); } } @@ -1246,8 +1240,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventVideoReceiveStreamConfig) { gen_.NewVideoReceiveStreamConfig(ssrc, extensions); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& video_recv_configs = parsed_log_.video_recv_configs(); ASSERT_EQ(video_recv_configs.size(), 1u); @@ -1262,8 +1256,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventVideoSendStreamConfig) { gen_.NewVideoSendStreamConfig(ssrc, extensions); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); - ASSERT_TRUE(parsed_log_.ParseString(encoded).ok()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); + ASSERT_TRUE(parsed_log_.ParseString(encoded_).ok()); const auto& video_send_configs = parsed_log_.video_send_configs(); ASSERT_EQ(video_send_configs.size(), 1u); @@ -1283,8 +1277,8 @@ INSTANTIATE_TEST_SUITE_P( class RtcEventLogEncoderSimpleTest : public ::testing::TestWithParam { protected: - RtcEventLogEncoderSimpleTest() : encoding_(GetParam()) { - switch (encoding_) { + RtcEventLogEncoderSimpleTest() : encoding_type_(GetParam()) { + switch (encoding_type_) { case RtcEventLog::EncodingType::Legacy: encoder_ = 
std::make_unique(); break; @@ -1292,13 +1286,16 @@ class RtcEventLogEncoderSimpleTest encoder_ = std::make_unique(); break; } + encoded_ = + encoder_->EncodeLogStart(rtc::TimeMillis(), rtc::TimeUTCMillis()); } ~RtcEventLogEncoderSimpleTest() override = default; std::deque> history_; std::unique_ptr encoder_; ParsedRtcEventLog parsed_log_; - const RtcEventLog::EncodingType encoding_; + const RtcEventLog::EncodingType encoding_type_; + std::string encoded_; }; TEST_P(RtcEventLogEncoderSimpleTest, RtcEventLargeCompoundRtcpPacketIncoming) { @@ -1320,9 +1317,9 @@ TEST_P(RtcEventLogEncoderSimpleTest, RtcEventLargeCompoundRtcpPacketIncoming) { EXPECT_GT(packet.size(), static_cast(IP_PACKET_SIZE)); auto event = std::make_unique(packet); history_.push_back(event->Copy()); - std::string encoded = encoder_->EncodeBatch(history_.begin(), history_.end()); + encoded_ += encoder_->EncodeBatch(history_.begin(), history_.end()); - ParsedRtcEventLog::ParseStatus status = parsed_log_.ParseString(encoded); + ParsedRtcEventLog::ParseStatus status = parsed_log_.ParseString(encoded_); ASSERT_TRUE(status.ok()) << status.message(); const auto& incoming_rtcp_packets = parsed_log_.incoming_rtcp_packets(); diff --git a/logging/rtc_event_log/events/rtc_event_alr_state.h b/logging/rtc_event_log/events/rtc_event_alr_state.h index 3ad0f005fb..74d66015ef 100644 --- a/logging/rtc_event_log/events/rtc_event_alr_state.h +++ b/logging/rtc_event_log/events/rtc_event_alr_state.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -39,13 +40,13 @@ class RtcEventAlrState final : public RtcEvent { struct LoggedAlrStateEvent { LoggedAlrStateEvent() = default; - LoggedAlrStateEvent(int64_t timestamp_us, bool in_alr) - : timestamp_us(timestamp_us), in_alr(in_alr) {} + LoggedAlrStateEvent(Timestamp timestamp, bool in_alr) + : timestamp(timestamp), in_alr(in_alr) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); bool in_alr; }; diff --git a/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h b/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h index 2b183bb307..aeeb28e218 100644 --- a/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h +++ b/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" namespace webrtc { @@ -43,14 +44,14 @@ class RtcEventAudioNetworkAdaptation final : public RtcEvent { struct LoggedAudioNetworkAdaptationEvent { LoggedAudioNetworkAdaptationEvent() = default; - LoggedAudioNetworkAdaptationEvent(int64_t timestamp_us, + LoggedAudioNetworkAdaptationEvent(Timestamp timestamp, const AudioEncoderRuntimeConfig& config) - : timestamp_us(timestamp_us), config(config) {} + : timestamp(timestamp), config(config) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); AudioEncoderRuntimeConfig config; }; 
diff --git a/logging/rtc_event_log/events/rtc_event_audio_playout.h b/logging/rtc_event_log/events/rtc_event_audio_playout.h index 83825217a1..00d07a65bf 100644 --- a/logging/rtc_event_log/events/rtc_event_audio_playout.h +++ b/logging/rtc_event_log/events/rtc_event_audio_playout.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -41,13 +42,13 @@ class RtcEventAudioPlayout final : public RtcEvent { struct LoggedAudioPlayoutEvent { LoggedAudioPlayoutEvent() = default; - LoggedAudioPlayoutEvent(int64_t timestamp_us, uint32_t ssrc) - : timestamp_us(timestamp_us), ssrc(ssrc) {} + LoggedAudioPlayoutEvent(Timestamp timestamp, uint32_t ssrc) + : timestamp(timestamp), ssrc(ssrc) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); uint32_t ssrc; }; diff --git a/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h b/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h index 1edd8e1e46..ccf76025e6 100644 --- a/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h +++ b/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" #include "logging/rtc_event_log/rtc_stream_config.h" namespace webrtc { @@ -42,13 +43,13 @@ class RtcEventAudioReceiveStreamConfig final : public RtcEvent { struct LoggedAudioRecvConfig { LoggedAudioRecvConfig() = default; - LoggedAudioRecvConfig(int64_t timestamp_us, const rtclog::StreamConfig config) - : timestamp_us(timestamp_us), config(config) {} + LoggedAudioRecvConfig(Timestamp timestamp, const rtclog::StreamConfig config) + : timestamp(timestamp), config(config) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtclog::StreamConfig config; }; diff --git a/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h b/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h index d3c60683b4..4e93871ae8 100644 --- a/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h +++ b/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h @@ -41,13 +41,13 @@ class RtcEventAudioSendStreamConfig final : public RtcEvent { struct LoggedAudioSendConfig { LoggedAudioSendConfig() = default; - LoggedAudioSendConfig(int64_t timestamp_us, const rtclog::StreamConfig config) - : timestamp_us(timestamp_us), config(config) {} + LoggedAudioSendConfig(Timestamp timestamp, const rtclog::StreamConfig config) + : timestamp(timestamp), config(config) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtclog::StreamConfig config; }; } // namespace webrtc diff --git a/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h 
b/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h index a83ea8b693..522f98fd8d 100644 --- a/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h +++ b/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h @@ -17,6 +17,7 @@ #include "api/network_state_predictor.h" #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -45,17 +46,17 @@ class RtcEventBweUpdateDelayBased final : public RtcEvent { struct LoggedBweDelayBasedUpdate { LoggedBweDelayBasedUpdate() = default; - LoggedBweDelayBasedUpdate(int64_t timestamp_us, + LoggedBweDelayBasedUpdate(Timestamp timestamp, int32_t bitrate_bps, BandwidthUsage detector_state) - : timestamp_us(timestamp_us), + : timestamp(timestamp), bitrate_bps(bitrate_bps), detector_state(detector_state) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int32_t bitrate_bps; BandwidthUsage detector_state; }; diff --git a/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h b/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h index b638f1ac16..b031658ea2 100644 --- a/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h +++ b/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -47,19 +48,19 @@ class RtcEventBweUpdateLossBased final : public RtcEvent { struct LoggedBweLossBasedUpdate { LoggedBweLossBasedUpdate() = default; - LoggedBweLossBasedUpdate(int64_t timestamp_us, + LoggedBweLossBasedUpdate(Timestamp timestamp, int32_t bitrate_bps, uint8_t fraction_lost, int32_t expected_packets) - : timestamp_us(timestamp_us), + : timestamp(timestamp), bitrate_bps(bitrate_bps), fraction_lost(fraction_lost), expected_packets(expected_packets) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int32_t bitrate_bps; uint8_t fraction_lost; int32_t expected_packets; diff --git a/logging/rtc_event_log/events/rtc_event_dtls_transport_state.h b/logging/rtc_event_log/events/rtc_event_dtls_transport_state.h index af35a3f3bc..9a3eecb3d3 100644 --- a/logging/rtc_event_log/events/rtc_event_dtls_transport_state.h +++ b/logging/rtc_event_log/events/rtc_event_dtls_transport_state.h @@ -15,6 +15,7 @@ #include "api/dtls_transport_interface.h" #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -41,10 +42,10 @@ class RtcEventDtlsTransportState : public RtcEvent { }; struct LoggedDtlsTransportState { - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); DtlsTransportState dtls_transport_state; }; diff --git a/logging/rtc_event_log/events/rtc_event_dtls_writable_state.h b/logging/rtc_event_log/events/rtc_event_dtls_writable_state.h index c3ecce00ef..c0cc5b87ef 
100644 --- a/logging/rtc_event_log/events/rtc_event_dtls_writable_state.h +++ b/logging/rtc_event_log/events/rtc_event_dtls_writable_state.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -41,10 +42,10 @@ struct LoggedDtlsWritableState { LoggedDtlsWritableState() = default; explicit LoggedDtlsWritableState(bool writable) : writable(writable) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); bool writable; }; diff --git a/logging/rtc_event_log/events/rtc_event_frame_decoded.h b/logging/rtc_event_log/events/rtc_event_frame_decoded.h index c549aa8831..4a6bb90d02 100644 --- a/logging/rtc_event_log/events/rtc_event_frame_decoded.h +++ b/logging/rtc_event_log/events/rtc_event_frame_decoded.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" #include "api/video/video_codec_type.h" namespace webrtc { @@ -56,10 +57,10 @@ class RtcEventFrameDecoded final : public RtcEvent { }; struct LoggedFrameDecoded { - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int64_t render_time_ms; uint32_t ssrc; int width; diff --git a/logging/rtc_event_log/events/rtc_event_generic_ack_received.h b/logging/rtc_event_log/events/rtc_event_generic_ack_received.h index 76e3cc24c4..75fc83c8b8 100644 --- a/logging/rtc_event_log/events/rtc_event_generic_ack_received.h +++ b/logging/rtc_event_log/events/rtc_event_generic_ack_received.h @@ -16,6 +16,7 @@ #include "absl/types/optional.h" #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -77,19 +78,19 @@ class RtcEventGenericAckReceived final : public RtcEvent { struct LoggedGenericAckReceived { LoggedGenericAckReceived() = default; - LoggedGenericAckReceived(int64_t timestamp_us, + LoggedGenericAckReceived(Timestamp timestamp, int64_t packet_number, int64_t acked_packet_number, absl::optional receive_acked_packet_time_ms) - : timestamp_us(timestamp_us), + : timestamp(timestamp), packet_number(packet_number), acked_packet_number(acked_packet_number), receive_acked_packet_time_ms(receive_acked_packet_time_ms) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int64_t packet_number; int64_t acked_packet_number; absl::optional receive_acked_packet_time_ms; diff --git a/logging/rtc_event_log/events/rtc_event_generic_packet_received.h b/logging/rtc_event_log/events/rtc_event_generic_packet_received.h index 45e5e4cc44..428e7b3806 100644 --- a/logging/rtc_event_log/events/rtc_event_generic_packet_received.h +++ b/logging/rtc_event_log/events/rtc_event_generic_packet_received.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -45,17 +46,17 @@ class RtcEventGenericPacketReceived 
final : public RtcEvent { struct LoggedGenericPacketReceived { LoggedGenericPacketReceived() = default; - LoggedGenericPacketReceived(int64_t timestamp_us, + LoggedGenericPacketReceived(Timestamp timestamp, int64_t packet_number, int packet_length) - : timestamp_us(timestamp_us), + : timestamp(timestamp), packet_number(packet_number), packet_length(packet_length) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int64_t packet_number; int packet_length; }; diff --git a/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h b/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h index 9ebafbe2ec..6e626e63a1 100644 --- a/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h +++ b/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -62,24 +63,24 @@ class RtcEventGenericPacketSent final : public RtcEvent { struct LoggedGenericPacketSent { LoggedGenericPacketSent() = default; - LoggedGenericPacketSent(int64_t timestamp_us, + LoggedGenericPacketSent(Timestamp timestamp, int64_t packet_number, size_t overhead_length, size_t payload_length, size_t padding_length) - : timestamp_us(timestamp_us), + : timestamp(timestamp), packet_number(packet_number), overhead_length(overhead_length), payload_length(payload_length), padding_length(padding_length) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } size_t packet_length() const { return payload_length + padding_length + overhead_length; } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int64_t packet_number; size_t overhead_length; size_t payload_length; diff --git a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h index 717ddf360d..1f4d825a99 100644 --- a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h +++ b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -56,19 +57,19 @@ class RtcEventIceCandidatePair final : public RtcEvent { struct LoggedIceCandidatePairEvent { LoggedIceCandidatePairEvent() = default; - LoggedIceCandidatePairEvent(int64_t timestamp_us, + LoggedIceCandidatePairEvent(Timestamp timestamp, IceCandidatePairEventType type, uint32_t candidate_pair_id, uint32_t transaction_id) - : timestamp_us(timestamp_us), + : timestamp(timestamp), type(type), candidate_pair_id(candidate_pair_id), transaction_id(transaction_id) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); IceCandidatePairEventType type; uint32_t candidate_pair_id; uint32_t transaction_id; diff --git a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h 
b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h index ab2eaf2422..465a799780 100644 --- a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h +++ b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -113,10 +114,10 @@ class RtcEventIceCandidatePairConfig final : public RtcEvent { }; struct LoggedIceCandidatePairConfig { - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); IceCandidatePairConfigType type; uint32_t candidate_pair_id; IceCandidateType local_candidate_type; diff --git a/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h b/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h index f3221b91fd..974a0c9a5c 100644 --- a/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h +++ b/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -50,21 +51,21 @@ class RtcEventProbeClusterCreated final : public RtcEvent { struct LoggedBweProbeClusterCreatedEvent { LoggedBweProbeClusterCreatedEvent() = default; - LoggedBweProbeClusterCreatedEvent(int64_t timestamp_us, + LoggedBweProbeClusterCreatedEvent(Timestamp timestamp, int32_t id, int32_t bitrate_bps, uint32_t min_packets, uint32_t min_bytes) - : timestamp_us(timestamp_us), + : timestamp(timestamp), id(id), bitrate_bps(bitrate_bps), min_packets(min_packets), min_bytes(min_bytes) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int32_t id; int32_t bitrate_bps; uint32_t min_packets; diff --git a/logging/rtc_event_log/events/rtc_event_probe_result_failure.h b/logging/rtc_event_log/events/rtc_event_probe_result_failure.h index 868c30b61c..fa61b314b4 100644 --- a/logging/rtc_event_log/events/rtc_event_probe_result_failure.h +++ b/logging/rtc_event_log/events/rtc_event_probe_result_failure.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -50,15 +51,15 @@ class RtcEventProbeResultFailure final : public RtcEvent { struct LoggedBweProbeFailureEvent { LoggedBweProbeFailureEvent() = default; - LoggedBweProbeFailureEvent(int64_t timestamp_us, + LoggedBweProbeFailureEvent(Timestamp timestamp, int32_t id, ProbeFailureReason failure_reason) - : timestamp_us(timestamp_us), id(id), failure_reason(failure_reason) {} + : timestamp(timestamp), id(id), failure_reason(failure_reason) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int32_t id; ProbeFailureReason failure_reason; }; diff --git a/logging/rtc_event_log/events/rtc_event_probe_result_success.h 
b/logging/rtc_event_log/events/rtc_event_probe_result_success.h index e3746681f6..d00cfa81d6 100644 --- a/logging/rtc_event_log/events/rtc_event_probe_result_success.h +++ b/logging/rtc_event_log/events/rtc_event_probe_result_success.h @@ -16,6 +16,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -43,15 +44,15 @@ class RtcEventProbeResultSuccess final : public RtcEvent { struct LoggedBweProbeSuccessEvent { LoggedBweProbeSuccessEvent() = default; - LoggedBweProbeSuccessEvent(int64_t timestamp_us, + LoggedBweProbeSuccessEvent(Timestamp timestamp, int32_t id, int32_t bitrate_bps) - : timestamp_us(timestamp_us), id(id), bitrate_bps(bitrate_bps) {} + : timestamp(timestamp), id(id), bitrate_bps(bitrate_bps) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); int32_t id; int32_t bitrate_bps; }; diff --git a/logging/rtc_event_log/events/rtc_event_remote_estimate.h b/logging/rtc_event_log/events/rtc_event_remote_estimate.h index 29b0c47195..956e05f682 100644 --- a/logging/rtc_event_log/events/rtc_event_remote_estimate.h +++ b/logging/rtc_event_log/events/rtc_event_remote_estimate.h @@ -15,6 +15,7 @@ #include "absl/types/optional.h" #include "api/rtc_event_log/rtc_event.h" #include "api/units/data_rate.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -37,10 +38,10 @@ class RtcEventRemoteEstimate final : public RtcEvent { struct LoggedRemoteEstimateEvent { LoggedRemoteEstimateEvent() = default; - int64_t log_time_us() const { return timestamp_ms * 1000; } - int64_t log_time_ms() const { return timestamp_ms; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_ms; + Timestamp timestamp = Timestamp::MinusInfinity(); absl::optional link_capacity_lower; absl::optional link_capacity_upper; }; diff --git a/logging/rtc_event_log/events/rtc_event_route_change.h b/logging/rtc_event_log/events/rtc_event_route_change.h index 455a832141..4a4e9aef80 100644 --- a/logging/rtc_event_log/events/rtc_event_route_change.h +++ b/logging/rtc_event_log/events/rtc_event_route_change.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" namespace webrtc { @@ -41,15 +42,13 @@ class RtcEventRouteChange final : public RtcEvent { struct LoggedRouteChangeEvent { LoggedRouteChangeEvent() = default; - LoggedRouteChangeEvent(int64_t timestamp_ms, - bool connected, - uint32_t overhead) - : timestamp_ms(timestamp_ms), connected(connected), overhead(overhead) {} + LoggedRouteChangeEvent(Timestamp timestamp, bool connected, uint32_t overhead) + : timestamp(timestamp), connected(connected), overhead(overhead) {} - int64_t log_time_us() const { return timestamp_ms * 1000; } - int64_t log_time_ms() const { return timestamp_ms; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_ms; + Timestamp timestamp = Timestamp::MinusInfinity(); bool connected; uint32_t overhead; }; diff --git a/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h b/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h index 2bf52476a1..e7b9061872 100644 --- 
a/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h +++ b/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" #include "logging/rtc_event_log/rtc_stream_config.h" namespace webrtc { @@ -42,13 +43,13 @@ class RtcEventVideoReceiveStreamConfig final : public RtcEvent { struct LoggedVideoRecvConfig { LoggedVideoRecvConfig() = default; - LoggedVideoRecvConfig(int64_t timestamp_us, const rtclog::StreamConfig config) - : timestamp_us(timestamp_us), config(config) {} + LoggedVideoRecvConfig(Timestamp timestamp, const rtclog::StreamConfig config) + : timestamp(timestamp), config(config) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtclog::StreamConfig config; }; diff --git a/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h b/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h index cf95afc4d8..e72e75e49d 100644 --- a/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h +++ b/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h @@ -14,6 +14,7 @@ #include #include "api/rtc_event_log/rtc_event.h" +#include "api/units/timestamp.h" #include "logging/rtc_event_log/rtc_stream_config.h" namespace webrtc { @@ -41,13 +42,13 @@ class RtcEventVideoSendStreamConfig final : public RtcEvent { struct LoggedVideoSendConfig { LoggedVideoSendConfig() = default; - LoggedVideoSendConfig(int64_t timestamp_us, const rtclog::StreamConfig config) - : timestamp_us(timestamp_us), config(config) {} + LoggedVideoSendConfig(Timestamp timestamp, const rtclog::StreamConfig config) + : timestamp(timestamp), config(config) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtclog::StreamConfig config; }; } // namespace webrtc diff --git a/logging/rtc_event_log/logged_events.cc b/logging/rtc_event_log/logged_events.cc index dd0a8aae2a..5ef3de11c0 100644 --- a/logging/rtc_event_log/logged_events.cc +++ b/logging/rtc_event_log/logged_events.cc @@ -40,13 +40,13 @@ LoggedPacketInfo::LoggedPacketInfo(const LoggedPacketInfo&) = default; LoggedPacketInfo::~LoggedPacketInfo() {} -LoggedRtcpPacket::LoggedRtcpPacket(int64_t timestamp_us, +LoggedRtcpPacket::LoggedRtcpPacket(Timestamp timestamp, const std::vector& packet) - : timestamp_us(timestamp_us), raw_data(packet) {} + : timestamp(timestamp), raw_data(packet) {} -LoggedRtcpPacket::LoggedRtcpPacket(int64_t timestamp_us, +LoggedRtcpPacket::LoggedRtcpPacket(Timestamp timestamp, const std::string& packet) - : timestamp_us(timestamp_us), raw_data(packet.size()) { + : timestamp(timestamp), raw_data(packet.size()) { memcpy(raw_data.data(), packet.data(), packet.size()); } diff --git a/logging/rtc_event_log/logged_events.h b/logging/rtc_event_log/logged_events.h index da7653d392..5bce658c30 100644 --- a/logging/rtc_event_log/logged_events.h +++ b/logging/rtc_event_log/logged_events.h @@ -37,19 +37,19 @@ namespace webrtc { // adding a vptr. 
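The header changes above all follow one pattern: the raw int64_t timestamp_us member becomes a webrtc::Timestamp, default-constructible structs initialize it to Timestamp::MinusInfinity(), and log_time_us()/log_time_ms() become thin wrappers around timestamp.us() and timestamp.ms(). A minimal standalone sketch of that pattern, using a hypothetical LoggedExampleEvent rather than any struct from this patch:

#include <cstdint>

#include "api/units/timestamp.h"

namespace webrtc {

// Hypothetical struct, shown only to illustrate the migration pattern.
struct LoggedExampleEvent {
  LoggedExampleEvent() = default;
  explicit LoggedExampleEvent(Timestamp timestamp) : timestamp(timestamp) {}

  // Accessors keep their integer return types, so callers that expect
  // microseconds or milliseconds keep compiling unchanged.
  int64_t log_time_us() const { return timestamp.us(); }
  int64_t log_time_ms() const { return timestamp.ms(); }

  // Default-constructible structs get an explicit sentinel instead of an
  // uninitialized integer.
  Timestamp timestamp = Timestamp::MinusInfinity();
};

}  // namespace webrtc

Structs that are always constructed with an explicit time (LoggedRtpPacket, LoggedRtcpPacket, LoggedStartEvent, LoggedStopEvent) leave the member without an initializer, as in the hunks below.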
struct LoggedRtpPacket { - LoggedRtpPacket(int64_t timestamp_us, + LoggedRtpPacket(Timestamp timestamp, RTPHeader header, size_t header_length, size_t total_length) - : timestamp_us(timestamp_us), + : timestamp(timestamp), header(header), header_length(header_length), total_length(total_length) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp; // TODO(terelius): This allocates space for 15 CSRCs even if none are used. RTPHeader header; size_t header_length; @@ -57,145 +57,145 @@ struct LoggedRtpPacket { }; struct LoggedRtpPacketIncoming { - LoggedRtpPacketIncoming(int64_t timestamp_us, + LoggedRtpPacketIncoming(Timestamp timestamp, RTPHeader header, size_t header_length, size_t total_length) - : rtp(timestamp_us, header, header_length, total_length) {} - int64_t log_time_us() const { return rtp.timestamp_us; } - int64_t log_time_ms() const { return rtp.timestamp_us / 1000; } + : rtp(timestamp, header, header_length, total_length) {} + int64_t log_time_us() const { return rtp.timestamp.us(); } + int64_t log_time_ms() const { return rtp.timestamp.ms(); } LoggedRtpPacket rtp; }; struct LoggedRtpPacketOutgoing { - LoggedRtpPacketOutgoing(int64_t timestamp_us, + LoggedRtpPacketOutgoing(Timestamp timestamp, RTPHeader header, size_t header_length, size_t total_length) - : rtp(timestamp_us, header, header_length, total_length) {} - int64_t log_time_us() const { return rtp.timestamp_us; } - int64_t log_time_ms() const { return rtp.timestamp_us / 1000; } + : rtp(timestamp, header, header_length, total_length) {} + int64_t log_time_us() const { return rtp.timestamp.us(); } + int64_t log_time_ms() const { return rtp.timestamp.ms(); } LoggedRtpPacket rtp; }; struct LoggedRtcpPacket { - LoggedRtcpPacket(int64_t timestamp_us, const std::vector& packet); - LoggedRtcpPacket(int64_t timestamp_us, const std::string& packet); + LoggedRtcpPacket(Timestamp timestamp, const std::vector& packet); + LoggedRtcpPacket(Timestamp timestamp, const std::string& packet); LoggedRtcpPacket(const LoggedRtcpPacket&); ~LoggedRtcpPacket(); - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp; std::vector raw_data; }; struct LoggedRtcpPacketIncoming { - LoggedRtcpPacketIncoming(int64_t timestamp_us, + LoggedRtcpPacketIncoming(Timestamp timestamp, const std::vector& packet) - : rtcp(timestamp_us, packet) {} - LoggedRtcpPacketIncoming(uint64_t timestamp_us, const std::string& packet) - : rtcp(timestamp_us, packet) {} + : rtcp(timestamp, packet) {} + LoggedRtcpPacketIncoming(Timestamp timestamp, const std::string& packet) + : rtcp(timestamp, packet) {} - int64_t log_time_us() const { return rtcp.timestamp_us; } - int64_t log_time_ms() const { return rtcp.timestamp_us / 1000; } + int64_t log_time_us() const { return rtcp.timestamp.us(); } + int64_t log_time_ms() const { return rtcp.timestamp.ms(); } LoggedRtcpPacket rtcp; }; struct LoggedRtcpPacketOutgoing { - LoggedRtcpPacketOutgoing(int64_t timestamp_us, + LoggedRtcpPacketOutgoing(Timestamp timestamp, const std::vector& packet) - : rtcp(timestamp_us, packet) {} - LoggedRtcpPacketOutgoing(uint64_t timestamp_us, const std::string& 
packet) - : rtcp(timestamp_us, packet) {} + : rtcp(timestamp, packet) {} + LoggedRtcpPacketOutgoing(Timestamp timestamp, const std::string& packet) + : rtcp(timestamp, packet) {} - int64_t log_time_us() const { return rtcp.timestamp_us; } - int64_t log_time_ms() const { return rtcp.timestamp_us / 1000; } + int64_t log_time_us() const { return rtcp.timestamp.us(); } + int64_t log_time_ms() const { return rtcp.timestamp.ms(); } LoggedRtcpPacket rtcp; }; struct LoggedRtcpPacketReceiverReport { LoggedRtcpPacketReceiverReport() = default; - LoggedRtcpPacketReceiverReport(int64_t timestamp_us, + LoggedRtcpPacketReceiverReport(Timestamp timestamp, const rtcp::ReceiverReport& rr) - : timestamp_us(timestamp_us), rr(rr) {} + : timestamp(timestamp), rr(rr) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::ReceiverReport rr; }; struct LoggedRtcpPacketSenderReport { LoggedRtcpPacketSenderReport() = default; - LoggedRtcpPacketSenderReport(int64_t timestamp_us, + LoggedRtcpPacketSenderReport(Timestamp timestamp, const rtcp::SenderReport& sr) - : timestamp_us(timestamp_us), sr(sr) {} + : timestamp(timestamp), sr(sr) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::SenderReport sr; }; struct LoggedRtcpPacketExtendedReports { LoggedRtcpPacketExtendedReports() = default; - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::ExtendedReports xr; }; struct LoggedRtcpPacketRemb { LoggedRtcpPacketRemb() = default; - LoggedRtcpPacketRemb(int64_t timestamp_us, const rtcp::Remb& remb) - : timestamp_us(timestamp_us), remb(remb) {} + LoggedRtcpPacketRemb(Timestamp timestamp, const rtcp::Remb& remb) + : timestamp(timestamp), remb(remb) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::Remb remb; }; struct LoggedRtcpPacketNack { LoggedRtcpPacketNack() = default; - LoggedRtcpPacketNack(int64_t timestamp_us, const rtcp::Nack& nack) - : timestamp_us(timestamp_us), nack(nack) {} + LoggedRtcpPacketNack(Timestamp timestamp, const rtcp::Nack& nack) + : timestamp(timestamp), nack(nack) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::Nack nack; }; struct LoggedRtcpPacketFir { LoggedRtcpPacketFir() = default; - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + 
int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::Fir fir; }; struct LoggedRtcpPacketPli { LoggedRtcpPacketPli() = default; - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::Pli pli; }; @@ -204,62 +204,64 @@ struct LoggedRtcpPacketTransportFeedback { : transport_feedback(/*include_timestamps=*/true, /*include_lost*/ true) { } LoggedRtcpPacketTransportFeedback( - int64_t timestamp_us, + Timestamp timestamp, const rtcp::TransportFeedback& transport_feedback) - : timestamp_us(timestamp_us), transport_feedback(transport_feedback) {} + : timestamp(timestamp), transport_feedback(transport_feedback) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::TransportFeedback transport_feedback; }; struct LoggedRtcpPacketLossNotification { LoggedRtcpPacketLossNotification() = default; LoggedRtcpPacketLossNotification( - int64_t timestamp_us, + Timestamp timestamp, const rtcp::LossNotification& loss_notification) - : timestamp_us(timestamp_us), loss_notification(loss_notification) {} + : timestamp(timestamp), loss_notification(loss_notification) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::LossNotification loss_notification; }; struct LoggedRtcpPacketBye { LoggedRtcpPacketBye() = default; - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp = Timestamp::MinusInfinity(); rtcp::Bye bye; }; struct LoggedStartEvent { - explicit LoggedStartEvent(int64_t timestamp_us) - : LoggedStartEvent(timestamp_us, timestamp_us / 1000) {} + explicit LoggedStartEvent(Timestamp timestamp) + : LoggedStartEvent(timestamp, timestamp) {} - LoggedStartEvent(int64_t timestamp_us, int64_t utc_start_time_ms) - : timestamp_us(timestamp_us), utc_start_time_ms(utc_start_time_ms) {} + LoggedStartEvent(Timestamp timestamp, Timestamp utc_start_time) + : timestamp(timestamp), utc_start_time(utc_start_time) {} - int64_t log_time_us() const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; - int64_t utc_start_time_ms; + Timestamp utc_time() const { return utc_start_time; } + + Timestamp timestamp; + Timestamp utc_start_time; }; struct LoggedStopEvent { - explicit LoggedStopEvent(int64_t timestamp_us) : timestamp_us(timestamp_us) {} + explicit LoggedStopEvent(Timestamp timestamp) : timestamp(timestamp) {} - int64_t log_time_us() 
const { return timestamp_us; } - int64_t log_time_ms() const { return timestamp_us / 1000; } + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } - int64_t timestamp_us; + Timestamp timestamp; }; struct InferredRouteChangeEvent { @@ -337,8 +339,5 @@ struct LoggedIceEvent { }; - - - } // namespace webrtc #endif // LOGGING_RTC_EVENT_LOG_LOGGED_EVENTS_H_ diff --git a/logging/rtc_event_log/rtc_event_log_impl.cc b/logging/rtc_event_log/rtc_event_log_impl.cc index 4a272f08cf..700f639311 100644 --- a/logging/rtc_event_log/rtc_event_log_impl.cc +++ b/logging/rtc_event_log/rtc_event_log_impl.cc @@ -90,8 +90,8 @@ bool RtcEventLogImpl::StartLogging(std::unique_ptr output, return false; } - const int64_t timestamp_us = rtc::TimeMicros(); - const int64_t utc_time_us = rtc::TimeUTCMicros(); + const int64_t timestamp_us = rtc::TimeMillis() * 1000; + const int64_t utc_time_us = rtc::TimeUTCMillis() * 1000; RTC_LOG(LS_INFO) << "Starting WebRTC event log. (Timestamp, UTC) = " "(" << timestamp_us << ", " << utc_time_us << ")."; @@ -253,7 +253,7 @@ void RtcEventLogImpl::StopOutput() { void RtcEventLogImpl::StopLoggingInternal() { if (event_output_) { RTC_DCHECK(event_output_->IsActive()); - const int64_t timestamp_us = rtc::TimeMicros(); + const int64_t timestamp_us = rtc::TimeMillis() * 1000; event_output_->Write(event_encoder_->EncodeLogEnd(timestamp_us)); } StopOutput(); diff --git a/logging/rtc_event_log/rtc_event_log_parser.cc b/logging/rtc_event_log/rtc_event_log_parser.cc index 6cdaa75bb7..08fb9408c1 100644 --- a/logging/rtc_event_log/rtc_event_log_parser.cc +++ b/logging/rtc_event_log/rtc_event_log_parser.cc @@ -390,7 +390,7 @@ ParsedRtcEventLog::ParseStatus StoreRtpPackets( RTC_PARSE_CHECK_OR_RETURN(!proto.has_voice_activity()); } (*rtp_packets_map)[header.ssrc].emplace_back( - proto.timestamp_ms() * 1000, header, proto.header_size(), + Timestamp::Millis(proto.timestamp_ms()), header, proto.header_size(), proto.payload_size() + header.headerLength + header.paddingLength); } @@ -592,7 +592,7 @@ ParsedRtcEventLog::ParseStatus StoreRtpPackets( !voice_activity_values[i].has_value()); } (*rtp_packets_map)[header.ssrc].emplace_back( - 1000 * timestamp_ms, header, header.headerLength, + Timestamp::Millis(timestamp_ms), header, header.headerLength, payload_size_values[i].value() + header.headerLength + header.paddingLength); } @@ -615,7 +615,8 @@ ParsedRtcEventLog::ParseStatus StoreRtcpPackets( !IdenticalRtcpContents(rtcp_packets->back().rtcp.raw_data, proto.raw_packet())) { // Base event - rtcp_packets->emplace_back(proto.timestamp_ms() * 1000, proto.raw_packet()); + rtcp_packets->emplace_back(Timestamp::Millis(proto.timestamp_ms()), + proto.raw_packet()); } const size_t number_of_deltas = @@ -653,7 +654,7 @@ ParsedRtcEventLog::ParseStatus StoreRtcpPackets( continue; } std::string data(raw_packet_values[i]); - rtcp_packets->emplace_back(1000 * timestamp_ms, data); + rtcp_packets->emplace_back(Timestamp::Millis(timestamp_ms), data); } return ParsedRtcEventLog::ParseStatus::Success(); } @@ -672,6 +673,7 @@ ParsedRtcEventLog::ParseStatus StoreRtcpBlocks( std::vector* bye_list, std::vector* transport_feedback_list, std::vector* loss_notification_list) { + Timestamp timestamp = Timestamp::Micros(timestamp_us); rtcp::CommonHeader header; for (const uint8_t* block = packet_begin; block < packet_end; block = header.NextPacket()) { @@ -679,44 +681,44 @@ ParsedRtcEventLog::ParseStatus StoreRtcpBlocks( if (header.type() == 
rtcp::TransportFeedback::kPacketType && header.fmt() == rtcp::TransportFeedback::kFeedbackMessageType) { LoggedRtcpPacketTransportFeedback parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.transport_feedback.Parse(header)) transport_feedback_list->push_back(std::move(parsed_block)); } else if (header.type() == rtcp::SenderReport::kPacketType) { LoggedRtcpPacketSenderReport parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.sr.Parse(header)) { sr_list->push_back(std::move(parsed_block)); } } else if (header.type() == rtcp::ReceiverReport::kPacketType) { LoggedRtcpPacketReceiverReport parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.rr.Parse(header)) { rr_list->push_back(std::move(parsed_block)); } } else if (header.type() == rtcp::ExtendedReports::kPacketType) { LoggedRtcpPacketExtendedReports parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.xr.Parse(header)) { xr_list->push_back(std::move(parsed_block)); } } else if (header.type() == rtcp::Fir::kPacketType && header.fmt() == rtcp::Fir::kFeedbackMessageType) { LoggedRtcpPacketFir parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.fir.Parse(header)) { fir_list->push_back(std::move(parsed_block)); } } else if (header.type() == rtcp::Pli::kPacketType && header.fmt() == rtcp::Pli::kFeedbackMessageType) { LoggedRtcpPacketPli parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.pli.Parse(header)) { pli_list->push_back(std::move(parsed_block)); } } else if (header.type() == rtcp::Bye::kPacketType) { LoggedRtcpPacketBye parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.bye.Parse(header)) { bye_list->push_back(std::move(parsed_block)); } @@ -725,7 +727,7 @@ ParsedRtcEventLog::ParseStatus StoreRtcpBlocks( bool type_found = false; if (!type_found) { LoggedRtcpPacketRemb parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.remb.Parse(header)) { remb_list->push_back(std::move(parsed_block)); type_found = true; @@ -733,7 +735,7 @@ ParsedRtcEventLog::ParseStatus StoreRtcpBlocks( } if (!type_found) { LoggedRtcpPacketLossNotification parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.loss_notification.Parse(header)) { loss_notification_list->push_back(std::move(parsed_block)); type_found = true; @@ -742,7 +744,7 @@ ParsedRtcEventLog::ParseStatus StoreRtcpBlocks( } else if (header.type() == rtcp::Nack::kPacketType && header.fmt() == rtcp::Nack::kFeedbackMessageType) { LoggedRtcpPacketNack parsed_block; - parsed_block.timestamp_us = timestamp_us; + parsed_block.timestamp = timestamp; if (parsed_block.nack.Parse(header)) { nack_list->push_back(std::move(parsed_block)); } @@ -959,23 +961,21 @@ ParsedRtcEventLog::LoggedRtpStreamOutgoing::~LoggedRtpStreamOutgoing() = ParsedRtcEventLog::LoggedRtpStreamView::LoggedRtpStreamView( uint32_t ssrc, - const LoggedRtpPacketIncoming* ptr, - size_t num_elements) - : ssrc(ssrc), - packet_view(PacketView::Create( - ptr, - num_elements, - offsetof(LoggedRtpPacketIncoming, rtp))) {} + const std::vector& packets) + : ssrc(ssrc), packet_view() { + for (const 
LoggedRtpPacketIncoming& packet : packets) { + packet_view.push_back(&(packet.rtp)); + } +} ParsedRtcEventLog::LoggedRtpStreamView::LoggedRtpStreamView( uint32_t ssrc, - const LoggedRtpPacketOutgoing* ptr, - size_t num_elements) - : ssrc(ssrc), - packet_view(PacketView::Create( - ptr, - num_elements, - offsetof(LoggedRtpPacketOutgoing, rtp))) {} + const std::vector& packets) + : ssrc(ssrc), packet_view() { + for (const LoggedRtpPacketOutgoing& packet : packets) { + packet_view.push_back(&(packet.rtp)); + } +} ParsedRtcEventLog::LoggedRtpStreamView::LoggedRtpStreamView( const LoggedRtpStreamView&) = default; @@ -1159,18 +1159,16 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStream( // Build PacketViews for easier iteration over RTP packets. for (const auto& stream : incoming_rtp_packets_by_ssrc_) { incoming_rtp_packet_views_by_ssrc_.emplace_back( - LoggedRtpStreamView(stream.ssrc, stream.incoming_packets.data(), - stream.incoming_packets.size())); + LoggedRtpStreamView(stream.ssrc, stream.incoming_packets)); } for (const auto& stream : outgoing_rtp_packets_by_ssrc_) { outgoing_rtp_packet_views_by_ssrc_.emplace_back( - LoggedRtpStreamView(stream.ssrc, stream.outgoing_packets.data(), - stream.outgoing_packets.size())); + LoggedRtpStreamView(stream.ssrc, stream.outgoing_packets)); } // Set up convenience wrappers around the most commonly used RTCP types. for (const auto& incoming : incoming_rtcp_packets_) { - const int64_t timestamp_us = incoming.rtcp.timestamp_us; + const int64_t timestamp_us = incoming.rtcp.timestamp.us(); const uint8_t* packet_begin = incoming.rtcp.raw_data.data(); const uint8_t* packet_end = packet_begin + incoming.rtcp.raw_data.size(); auto status = StoreRtcpBlocks( @@ -1182,7 +1180,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStream( } for (const auto& outgoing : outgoing_rtcp_packets_) { - const int64_t timestamp_us = outgoing.rtcp.timestamp_us; + const int64_t timestamp_us = outgoing.rtcp.timestamp.us(); const uint8_t* packet_begin = outgoing.rtcp.raw_data.data(); const uint8_t* packet_end = packet_begin + outgoing.rtcp.raw_data.size(); auto status = StoreRtcpBlocks( @@ -1374,7 +1372,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - video_recv_configs_.emplace_back(timestamp_us, config.value()); + video_recv_configs_.emplace_back(Timestamp::Micros(timestamp_us), + config.value()); incoming_rtp_extensions_maps_[config.value().remote_ssrc] = RtpHeaderExtensionMap(config.value().rtp_extensions); incoming_rtp_extensions_maps_[config.value().rtx_ssrc] = @@ -1388,7 +1387,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - video_send_configs_.emplace_back(timestamp_us, config.value()); + video_send_configs_.emplace_back(Timestamp::Micros(timestamp_us), + config.value()); outgoing_rtp_extensions_maps_[config.value().local_ssrc] = RtpHeaderExtensionMap(config.value().rtp_extensions); outgoing_rtp_extensions_maps_[config.value().rtx_ssrc] = @@ -1402,7 +1402,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - audio_recv_configs_.emplace_back(timestamp_us, config.value()); + audio_recv_configs_.emplace_back(Timestamp::Micros(timestamp_us), + config.value()); 
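The parser hunks above and below establish one conversion convention: legacy rtclog events, which carry microseconds, are wrapped with Timestamp::Micros(event.timestamp_us()), while rtclog2 events, which carry milliseconds, are wrapped with Timestamp::Millis(proto.timestamp_ms()), removing the hand-written "* 1000" scaling. A small self-contained sketch of the two factories; the literal values are made up for illustration and not taken from this patch:

#include <cstdint>

#include "api/units/timestamp.h"

// Illustrative values only; the real parser reads them from protobuf events.
void SketchTimestampConversions() {
  const int64_t legacy_timestamp_us = 1234567;  // legacy rtclog: microseconds
  const int64_t rtclog2_timestamp_ms = 1235;    // rtclog2: milliseconds

  const webrtc::Timestamp from_legacy =
      webrtc::Timestamp::Micros(legacy_timestamp_us);
  const webrtc::Timestamp from_rtclog2 =
      webrtc::Timestamp::Millis(rtclog2_timestamp_ms);

  // Both factories produce the same unit-safe type; downstream code reads
  // either granularity through .us() or .ms() without manual scaling.
  static_cast<void>(from_legacy.us());   // 1234567
  static_cast<void>(from_rtclog2.ms());  // 1235
}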
incoming_rtp_extensions_maps_[config.value().remote_ssrc] = RtpHeaderExtensionMap(config.value().rtp_extensions); break; @@ -1413,7 +1414,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( return config.status(); RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - audio_send_configs_.emplace_back(timestamp_us, config.value()); + audio_send_configs_.emplace_back(Timestamp::Micros(timestamp_us), + config.value()); outgoing_rtp_extensions_maps_[config.value().local_ssrc] = RtpHeaderExtensionMap(config.value().rtp_extensions); break; @@ -1446,11 +1448,13 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( int64_t timestamp_us = event.timestamp_us(); if (direction == kIncomingPacket) { incoming_rtp_packets_map_[parsed_header.ssrc].push_back( - LoggedRtpPacketIncoming(timestamp_us, parsed_header, header_length, + LoggedRtpPacketIncoming(Timestamp::Micros(timestamp_us), + parsed_header, header_length, total_length)); } else { outgoing_rtp_packets_map_[parsed_header.ssrc].push_back( - LoggedRtpPacketOutgoing(timestamp_us, parsed_header, header_length, + LoggedRtpPacketOutgoing(Timestamp::Micros(timestamp_us), + parsed_header, header_length, total_length)); } break; @@ -1469,24 +1473,26 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( if (packet == last_incoming_rtcp_packet_) break; incoming_rtcp_packets_.push_back( - LoggedRtcpPacketIncoming(timestamp_us, packet)); + LoggedRtcpPacketIncoming(Timestamp::Micros(timestamp_us), packet)); last_incoming_rtcp_packet_ = packet; } else { outgoing_rtcp_packets_.push_back( - LoggedRtcpPacketOutgoing(timestamp_us, packet)); + LoggedRtcpPacketOutgoing(Timestamp::Micros(timestamp_us), packet)); } break; } case rtclog::Event::LOG_START: { RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - start_log_events_.push_back(LoggedStartEvent(timestamp_us)); + start_log_events_.push_back( + LoggedStartEvent(Timestamp::Micros(timestamp_us))); break; } case rtclog::Event::LOG_END: { RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - stop_log_events_.push_back(LoggedStopEvent(timestamp_us)); + stop_log_events_.push_back( + LoggedStopEvent(Timestamp::Micros(timestamp_us))); break; } case rtclog::Event::AUDIO_PLAYOUT_EVENT: { @@ -1805,7 +1811,7 @@ ParsedRtcEventLog::GetAudioPlayout(const rtclog::Event& event) const { const rtclog::AudioPlayoutEvent& playout_event = event.audio_playout_event(); LoggedAudioPlayoutEvent res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + res.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(playout_event.has_local_ssrc()); res.ssrc = playout_event.local_ssrc(); return res; @@ -1821,7 +1827,7 @@ ParsedRtcEventLog::GetLossBasedBweUpdate(const rtclog::Event& event) const { LoggedBweLossBasedUpdate bwe_update; RTC_CHECK(event.has_timestamp_us()); - bwe_update.timestamp_us = event.timestamp_us(); + bwe_update.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(loss_event.has_bitrate_bps()); bwe_update.bitrate_bps = loss_event.bitrate_bps(); RTC_PARSE_CHECK_OR_RETURN(loss_event.has_fraction_loss()); @@ -1842,7 +1848,7 @@ ParsedRtcEventLog::GetDelayBasedBweUpdate(const rtclog::Event& event) const { LoggedBweDelayBasedUpdate res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + 
res.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(delay_event.has_bitrate_bps()); res.bitrate_bps = delay_event.bitrate_bps(); RTC_PARSE_CHECK_OR_RETURN(delay_event.has_detector_state()); @@ -1861,7 +1867,7 @@ ParsedRtcEventLog::GetAudioNetworkAdaptation(const rtclog::Event& event) const { LoggedAudioNetworkAdaptationEvent res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + res.timestamp = Timestamp::Micros(event.timestamp_us()); if (ana_event.has_bitrate_bps()) res.config.bitrate_bps = ana_event.bitrate_bps(); if (ana_event.has_enable_fec()) @@ -1887,7 +1893,7 @@ ParsedRtcEventLog::GetBweProbeClusterCreated(const rtclog::Event& event) const { const rtclog::BweProbeCluster& pcc_event = event.probe_cluster(); LoggedBweProbeClusterCreatedEvent res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + res.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(pcc_event.has_id()); res.id = pcc_event.id(); RTC_PARSE_CHECK_OR_RETURN(pcc_event.has_bitrate_bps()); @@ -1912,7 +1918,7 @@ ParsedRtcEventLog::GetBweProbeFailure(const rtclog::Event& event) const { LoggedBweProbeFailureEvent res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + res.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(pr_event.has_id()); res.id = pr_event.id(); RTC_PARSE_CHECK_OR_RETURN(pr_event.has_result()); @@ -1945,7 +1951,7 @@ ParsedRtcEventLog::GetBweProbeSuccess(const rtclog::Event& event) const { LoggedBweProbeSuccessEvent res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + res.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(pr_event.has_id()); res.id = pr_event.id(); RTC_PARSE_CHECK_OR_RETURN(pr_event.has_bitrate_bps()); @@ -1962,7 +1968,7 @@ ParsedRtcEventLog::GetAlrState(const rtclog::Event& event) const { const rtclog::AlrState& alr_event = event.alr_state(); LoggedAlrStateEvent res; RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); - res.timestamp_us = event.timestamp_us(); + res.timestamp = Timestamp::Micros(event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(alr_event.has_in_alr()); res.in_alr = alr_event.in_alr(); @@ -1979,7 +1985,7 @@ ParsedRtcEventLog::GetIceCandidatePairConfig( const rtclog::IceCandidatePairConfig& config = rtc_event.ice_candidate_pair_config(); RTC_CHECK(rtc_event.has_timestamp_us()); - res.timestamp_us = rtc_event.timestamp_us(); + res.timestamp = Timestamp::Micros(rtc_event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(config.has_config_type()); res.type = GetRuntimeIceCandidatePairConfigType(config.config_type()); RTC_PARSE_CHECK_OR_RETURN(config.has_candidate_pair_id()); @@ -2018,7 +2024,7 @@ ParsedRtcEventLog::GetIceCandidatePairEvent( const rtclog::IceCandidatePairEvent& event = rtc_event.ice_candidate_pair_event(); RTC_CHECK(rtc_event.has_timestamp_us()); - res.timestamp_us = rtc_event.timestamp_us(); + res.timestamp = Timestamp::Micros(rtc_event.timestamp_us()); RTC_PARSE_CHECK_OR_RETURN(event.has_event_type()); res.type = GetRuntimeIceCandidatePairEventType(event.event_type()); RTC_PARSE_CHECK_OR_RETURN(event.has_candidate_pair_id()); @@ -2404,7 +2410,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreAlrStateEvent( RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_in_alr()); LoggedAlrStateEvent alr_event; - 
alr_event.timestamp_us = proto.timestamp_ms() * 1000; + alr_event.timestamp = Timestamp::Millis(proto.timestamp_ms()); alr_event.in_alr = proto.in_alr(); alr_state_events_.push_back(alr_event); @@ -2418,7 +2424,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreRouteChangeEvent( RTC_PARSE_CHECK_OR_RETURN(proto.has_connected()); RTC_PARSE_CHECK_OR_RETURN(proto.has_overhead()); LoggedRouteChangeEvent route_event; - route_event.timestamp_ms = proto.timestamp_ms(); + route_event.timestamp = Timestamp::Millis(proto.timestamp_ms()); route_event.connected = proto.connected(); route_event.overhead = proto.overhead(); @@ -2432,7 +2438,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreRemoteEstimateEvent( RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); // Base event LoggedRemoteEstimateEvent base_event; - base_event.timestamp_ms = proto.timestamp_ms(); + base_event.timestamp = Timestamp::Millis(proto.timestamp_ms()); absl::optional base_link_capacity_lower_kbps; if (proto.has_link_capacity_lower_kbps()) { @@ -2480,7 +2486,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreRemoteEstimateEvent( for (size_t i = 0; i < number_of_deltas; ++i) { LoggedRemoteEstimateEvent event; RTC_PARSE_CHECK_OR_RETURN(timestamp_ms_values[i].has_value()); - event.timestamp_ms = *timestamp_ms_values[i]; + event.timestamp = Timestamp::Millis(*timestamp_ms_values[i]); if (link_capacity_lower_kbps_values[i]) event.link_capacity_lower = DataRate::KilobitsPerSec(*link_capacity_lower_kbps_values[i]); @@ -2499,7 +2505,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreAudioPlayoutEvent( // Base event audio_playout_events_[proto.local_ssrc()].emplace_back( - 1000 * proto.timestamp_ms(), proto.local_ssrc()); + Timestamp::Millis(proto.timestamp_ms()), proto.local_ssrc()); const size_t number_of_deltas = proto.has_number_of_deltas() ? 
proto.number_of_deltas() : 0u; @@ -2531,8 +2537,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreAudioPlayoutEvent( const uint32_t local_ssrc = static_cast(local_ssrc_values[i].value()); - audio_playout_events_[local_ssrc].emplace_back(1000 * timestamp_ms, - local_ssrc); + audio_playout_events_[local_ssrc].emplace_back( + Timestamp::Millis(timestamp_ms), local_ssrc); } return ParseStatus::Success(); } @@ -2565,8 +2571,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreStartEvent( RTC_PARSE_CHECK_OR_RETURN(proto.has_version()); RTC_PARSE_CHECK_OR_RETURN(proto.has_utc_time_ms()); RTC_PARSE_CHECK_OR_RETURN_EQ(proto.version(), 2); - LoggedStartEvent start_event(proto.timestamp_ms() * 1000, - proto.utc_time_ms()); + LoggedStartEvent start_event(Timestamp::Millis(proto.timestamp_ms()), + Timestamp::Millis(proto.utc_time_ms())); start_log_events_.push_back(start_event); return ParseStatus::Success(); @@ -2575,7 +2581,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreStartEvent( ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreStopEvent( const rtclog2::EndLogEvent& proto) { RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - LoggedStopEvent stop_event(proto.timestamp_ms() * 1000); + LoggedStopEvent stop_event(Timestamp::Millis(proto.timestamp_ms())); stop_log_events_.push_back(stop_event); return ParseStatus::Success(); @@ -2589,7 +2595,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweLossBasedUpdate( RTC_PARSE_CHECK_OR_RETURN(proto.has_total_packets()); // Base event - bwe_loss_updates_.emplace_back(1000 * proto.timestamp_ms(), + bwe_loss_updates_.emplace_back(Timestamp::Millis(proto.timestamp_ms()), proto.bitrate_bps(), proto.fraction_loss(), proto.total_packets()); @@ -2645,7 +2651,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweLossBasedUpdate( const uint32_t total_packets = static_cast(total_packets_values[i].value()); - bwe_loss_updates_.emplace_back(1000 * timestamp_ms, bitrate_bps, + bwe_loss_updates_.emplace_back(Timestamp::Millis(timestamp_ms), bitrate_bps, fraction_loss, total_packets); } return ParseStatus::Success(); @@ -2660,7 +2666,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweDelayBasedUpdate( // Base event const BandwidthUsage base_detector_state = GetRuntimeDetectorState(proto.detector_state()); - bwe_delay_updates_.emplace_back(1000 * proto.timestamp_ms(), + bwe_delay_updates_.emplace_back(Timestamp::Millis(proto.timestamp_ms()), proto.bitrate_bps(), base_detector_state); const size_t number_of_deltas = @@ -2704,7 +2710,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweDelayBasedUpdate( static_cast( detector_state_values[i].value()); - bwe_delay_updates_.emplace_back(1000 * timestamp_ms, bitrate_bps, + bwe_delay_updates_.emplace_back(Timestamp::Millis(timestamp_ms), + bitrate_bps, GetRuntimeDetectorState(detector_state)); } return ParseStatus::Success(); @@ -2714,7 +2721,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweProbeClusterCreated( const rtclog2::BweProbeCluster& proto) { LoggedBweProbeClusterCreatedEvent probe_cluster; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - probe_cluster.timestamp_us = proto.timestamp_ms() * 1000; + probe_cluster.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_id()); probe_cluster.id = proto.id(); RTC_PARSE_CHECK_OR_RETURN(proto.has_bitrate_bps()); @@ -2734,7 +2741,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweProbeSuccessEvent( const rtclog2::BweProbeResultSuccess& 
proto) { LoggedBweProbeSuccessEvent probe_result; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - probe_result.timestamp_us = proto.timestamp_ms() * 1000; + probe_result.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_id()); probe_result.id = proto.id(); RTC_PARSE_CHECK_OR_RETURN(proto.has_bitrate_bps()); @@ -2750,7 +2757,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreBweProbeFailureEvent( const rtclog2::BweProbeResultFailure& proto) { LoggedBweProbeFailureEvent probe_result; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - probe_result.timestamp_us = proto.timestamp_ms() * 1000; + probe_result.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_id()); probe_result.id = proto.id(); RTC_PARSE_CHECK_OR_RETURN(proto.has_failure()); @@ -2773,7 +2780,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreFrameDecodedEvents( RTC_PARSE_CHECK_OR_RETURN(proto.has_qp()); LoggedFrameDecoded base_frame; - base_frame.timestamp_us = 1000 * proto.timestamp_ms(); + base_frame.timestamp = Timestamp::Millis(proto.timestamp_ms()); base_frame.ssrc = proto.ssrc(); base_frame.render_time_ms = proto.render_time_ms(); base_frame.width = proto.width(); @@ -2836,7 +2843,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreFrameDecodedEvents( RTC_PARSE_CHECK_OR_RETURN(timestamp_ms_values[i].has_value()); RTC_PARSE_CHECK_OR_RETURN( ToSigned(timestamp_ms_values[i].value(), ×tamp_ms)); - frame.timestamp_us = 1000 * timestamp_ms; + frame.timestamp = Timestamp::Millis(timestamp_ms); RTC_PARSE_CHECK_OR_RETURN(ssrc_values[i].has_value()); RTC_PARSE_CHECK_OR_RETURN_LE(ssrc_values[i].value(), @@ -2881,7 +2888,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreGenericAckReceivedEvent( base_receive_acked_packet_time_ms = proto.receive_acked_packet_time_ms(); } generic_acks_received_.push_back( - {proto.timestamp_ms() * 1000, proto.packet_number(), + {Timestamp::Millis(proto.timestamp_ms()), proto.packet_number(), proto.acked_packet_number(), base_receive_acked_packet_time_ms}); const size_t number_of_deltas = @@ -2940,8 +2947,8 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreGenericAckReceivedEvent( ToSigned(receive_acked_packet_time_ms_values[i].value(), &value)); receive_acked_packet_time_ms = value; } - generic_acks_received_.push_back({timestamp_ms * 1000, packet_number, - acked_packet_number, + generic_acks_received_.push_back({Timestamp::Millis(timestamp_ms), + packet_number, acked_packet_number, receive_acked_packet_time_ms}); } return ParseStatus::Success(); @@ -2958,7 +2965,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreGenericPacketSentEvent( RTC_PARSE_CHECK_OR_RETURN(proto.has_padding_length()); generic_packets_sent_.push_back( - {proto.timestamp_ms() * 1000, proto.packet_number(), + {Timestamp::Millis(proto.timestamp_ms()), proto.packet_number(), static_cast(proto.overhead_length()), static_cast(proto.payload_length()), static_cast(proto.padding_length())}); @@ -3005,7 +3012,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreGenericPacketSentEvent( RTC_PARSE_CHECK_OR_RETURN(payload_length_values[i].has_value()); RTC_PARSE_CHECK_OR_RETURN(padding_length_values[i].has_value()); generic_packets_sent_.push_back( - {timestamp_ms * 1000, packet_number, + {Timestamp::Millis(timestamp_ms), packet_number, static_cast(overhead_length_values[i].value()), static_cast(payload_length_values[i].value()), static_cast(padding_length_values[i].value())}); @@ -3022,7 +3029,7 @@ 
ParsedRtcEventLog::StoreGenericPacketReceivedEvent( RTC_PARSE_CHECK_OR_RETURN(proto.has_packet_number()); RTC_PARSE_CHECK_OR_RETURN(proto.has_packet_length()); - generic_packets_received_.push_back({proto.timestamp_ms() * 1000, + generic_packets_received_.push_back({Timestamp::Millis(proto.timestamp_ms()), proto.packet_number(), proto.packet_length()}); @@ -3060,7 +3067,7 @@ ParsedRtcEventLog::StoreGenericPacketReceivedEvent( int32_t packet_length = static_cast(packet_length_values[i].value()); generic_packets_received_.push_back( - {timestamp_ms * 1000, packet_number, packet_length}); + {Timestamp::Millis(timestamp_ms), packet_number, packet_length}); } return ParseStatus::Success(); } @@ -3095,8 +3102,8 @@ ParsedRtcEventLog::StoreAudioNetworkAdaptationEvent( // Note: Encoding N as N-1 only done for |num_channels_deltas|. runtime_config.num_channels = proto.num_channels(); } - audio_network_adaptation_events_.emplace_back(1000 * proto.timestamp_ms(), - runtime_config); + audio_network_adaptation_events_.emplace_back( + Timestamp::Millis(proto.timestamp_ms()), runtime_config); } const size_t number_of_deltas = @@ -3217,8 +3224,8 @@ ParsedRtcEventLog::StoreAudioNetworkAdaptationEvent( runtime_config.num_channels = rtc::checked_cast(num_channels_values[i].value()); } - audio_network_adaptation_events_.emplace_back(1000 * timestamp_ms, - runtime_config); + audio_network_adaptation_events_.emplace_back( + Timestamp::Millis(timestamp_ms), runtime_config); } return ParseStatus::Success(); } @@ -3227,7 +3234,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreDtlsTransportState( const rtclog2::DtlsTransportStateEvent& proto) { LoggedDtlsTransportState dtls_state; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - dtls_state.timestamp_us = proto.timestamp_ms() * 1000; + dtls_state.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_dtls_transport_state()); dtls_state.dtls_transport_state = @@ -3241,7 +3248,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreDtlsWritableState( const rtclog2::DtlsWritableState& proto) { LoggedDtlsWritableState dtls_writable_state; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - dtls_writable_state.timestamp_us = proto.timestamp_ms() * 1000; + dtls_writable_state.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_writable()); dtls_writable_state.writable = proto.writable(); @@ -3253,7 +3260,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreIceCandidatePairConfig( const rtclog2::IceCandidatePairConfig& proto) { LoggedIceCandidatePairConfig ice_config; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - ice_config.timestamp_us = proto.timestamp_ms() * 1000; + ice_config.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_config_type()); ice_config.type = GetRuntimeIceCandidatePairConfigType(proto.config_type()); @@ -3291,7 +3298,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreIceCandidateEvent( const rtclog2::IceCandidatePairEvent& proto) { LoggedIceCandidatePairEvent ice_event; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - ice_event.timestamp_us = proto.timestamp_ms() * 1000; + ice_event.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_event_type()); ice_event.type = GetRuntimeIceCandidatePairEventType(proto.event_type()); RTC_PARSE_CHECK_OR_RETURN(proto.has_candidate_pair_id()); @@ -3311,7 +3318,7 @@ ParsedRtcEventLog::ParseStatus 
ParsedRtcEventLog::StoreVideoRecvConfig( const rtclog2::VideoRecvStreamConfig& proto) { LoggedVideoRecvConfig stream; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - stream.timestamp_us = proto.timestamp_ms() * 1000; + stream.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_remote_ssrc()); stream.config.remote_ssrc = proto.remote_ssrc(); RTC_PARSE_CHECK_OR_RETURN(proto.has_local_ssrc()); @@ -3331,7 +3338,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreVideoSendConfig( const rtclog2::VideoSendStreamConfig& proto) { LoggedVideoSendConfig stream; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - stream.timestamp_us = proto.timestamp_ms() * 1000; + stream.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_ssrc()); stream.config.local_ssrc = proto.ssrc(); if (proto.has_rtx_ssrc()) { @@ -3349,7 +3356,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreAudioRecvConfig( const rtclog2::AudioRecvStreamConfig& proto) { LoggedAudioRecvConfig stream; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - stream.timestamp_us = proto.timestamp_ms() * 1000; + stream.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_remote_ssrc()); stream.config.remote_ssrc = proto.remote_ssrc(); RTC_PARSE_CHECK_OR_RETURN(proto.has_local_ssrc()); @@ -3366,7 +3373,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreAudioSendConfig( const rtclog2::AudioSendStreamConfig& proto) { LoggedAudioSendConfig stream; RTC_PARSE_CHECK_OR_RETURN(proto.has_timestamp_ms()); - stream.timestamp_us = proto.timestamp_ms() * 1000; + stream.timestamp = Timestamp::Millis(proto.timestamp_ms()); RTC_PARSE_CHECK_OR_RETURN(proto.has_ssrc()); stream.config.local_ssrc = proto.ssrc(); if (proto.has_header_extensions()) { diff --git a/logging/rtc_event_log/rtc_event_log_parser.h b/logging/rtc_event_log/rtc_event_log_parser.h index 67e1a09fff..4898022fae 100644 --- a/logging/rtc_event_log/rtc_event_log_parser.h +++ b/logging/rtc_event_log/rtc_event_log_parser.h @@ -64,144 +64,108 @@ namespace webrtc { enum PacketDirection { kIncomingPacket = 0, kOutgoingPacket }; +// This class is used to process lists of LoggedRtpPacketIncoming +// and LoggedRtpPacketOutgoing without duplicating the code. +// TODO(terelius): Remove this class. Instead use e.g. a vector of pointers +// to LoggedRtpPacket or templatize the surrounding code. template -class PacketView; - -template -class PacketIterator { - friend class PacketView; - +class DereferencingVector { public: - // Standard iterator traits. - using difference_type = std::ptrdiff_t; - using value_type = T; - using pointer = T*; - using reference = T&; - using iterator_category = std::bidirectional_iterator_tag; - - // The default-contructed iterator is meaningless, but is required by the - // ForwardIterator concept. - PacketIterator() : ptr_(nullptr), element_size_(0) {} - PacketIterator(const PacketIterator& other) - : ptr_(other.ptr_), element_size_(other.element_size_) {} - PacketIterator(const PacketIterator&& other) - : ptr_(other.ptr_), element_size_(other.element_size_) {} - ~PacketIterator() = default; + template + class DereferencingIterator { + public: + // Standard iterator traits. 
+ using difference_type = std::ptrdiff_t; + using value_type = T; + using pointer = typename std::conditional_t; + using reference = typename std::conditional_t; + using iterator_category = std::bidirectional_iterator_tag; + + using representation = + typename std::conditional_t; + + explicit DereferencingIterator(representation ptr) : ptr_(ptr) {} + + DereferencingIterator(const DereferencingIterator& other) + : ptr_(other.ptr_) {} + DereferencingIterator(const DereferencingIterator&& other) + : ptr_(other.ptr_) {} + ~DereferencingIterator() = default; + + DereferencingIterator& operator=(const DereferencingIterator& other) { + ptr_ = other.ptr_; + return *this; + } + DereferencingIterator& operator=(const DereferencingIterator&& other) { + ptr_ = other.ptr_; + return *this; + } - PacketIterator& operator=(const PacketIterator& other) { - ptr_ = other.ptr_; - element_size_ = other.element_size_; - return *this; - } - PacketIterator& operator=(const PacketIterator&& other) { - ptr_ = other.ptr_; - element_size_ = other.element_size_; - return *this; - } + bool operator==(const DereferencingIterator& other) const { + return ptr_ == other.ptr_; + } + bool operator!=(const DereferencingIterator& other) const { + return ptr_ != other.ptr_; + } - bool operator==(const PacketIterator& other) const { - RTC_DCHECK_EQ(element_size_, other.element_size_); - return ptr_ == other.ptr_; - } - bool operator!=(const PacketIterator& other) const { - RTC_DCHECK_EQ(element_size_, other.element_size_); - return ptr_ != other.ptr_; - } + DereferencingIterator& operator++() { + ++ptr_; + return *this; + } + DereferencingIterator& operator--() { + --ptr_; + return *this; + } + DereferencingIterator operator++(int) { + DereferencingIterator iter_copy(ptr_); + ++ptr_; + return iter_copy; + } + DereferencingIterator operator--(int) { + DereferencingIterator iter_copy(ptr_); + --ptr_; + return iter_copy; + } - PacketIterator& operator++() { - ptr_ += element_size_; - return *this; - } - PacketIterator& operator--() { - ptr_ -= element_size_; - return *this; - } - PacketIterator operator++(int) { - PacketIterator iter_copy(ptr_, element_size_); - ptr_ += element_size_; - return iter_copy; - } - PacketIterator operator--(int) { - PacketIterator iter_copy(ptr_, element_size_); - ptr_ -= element_size_; - return iter_copy; - } + template + std::enable_if_t operator*() { + return **ptr_; + } - T& operator*() { return *reinterpret_cast(ptr_); } - const T& operator*() const { return *reinterpret_cast(ptr_); } + template + std::enable_if_t<_IsConst, reference> operator*() const { + return **ptr_; + } - T* operator->() { return reinterpret_cast(ptr_); } - const T* operator->() const { return reinterpret_cast(ptr_); } + template + std::enable_if_t operator->() { + return *ptr_; + } - private: - PacketIterator(typename std::conditional::value, - const void*, - void*>::type p, - size_t s) - : ptr_(reinterpret_cast(p)), element_size_(s) {} - - typename std::conditional::value, const char*, char*>::type - ptr_; - size_t element_size_; -}; + template + std::enable_if_t<_IsConst, pointer> operator->() const { + return *ptr_; + } -// Suppose that we have a struct S where we are only interested in a specific -// member M. Given an array of S, PacketView can be used to treat the array -// as an array of M, without exposing the type S to surrounding code and without -// accessing the member through a virtual function. In this case, we want to -// have a common view for incoming and outgoing RtpPackets, hence the PacketView -// name. 
-// Note that constructing a PacketView bypasses the typesystem, so the caller -// has to take extra care when constructing these objects. The implementation -// also requires that the containing struct is standard-layout (e.g. POD). -// -// Usage example: -// struct A {...}; -// struct B { A a; ...}; -// struct C { A a; ...}; -// size_t len = 10; -// B* array1 = new B[len]; -// C* array2 = new C[len]; -// -// PacketView view1 = PacketView::Create(array1, len, offsetof(B, a)); -// PacketView view2 = PacketView::Create(array2, len, offsetof(C, a)); -// -// The following code works with either view1 or view2. -// void f(PacketView view) -// for (A& a : view) { -// DoSomething(a); -// } -template -class PacketView { - public: - template - static PacketView Create(U* ptr, size_t num_elements, size_t offset) { - static_assert(std::is_standard_layout::value, - "PacketView can only be created for standard layout types."); - static_assert(std::is_standard_layout::value, - "PacketView can only be created for standard layout types."); - return PacketView(ptr, num_elements, offset, sizeof(U)); - } + private: + representation ptr_; + }; using value_type = T; using reference = value_type&; using const_reference = const value_type&; - using iterator = PacketIterator; - using const_iterator = PacketIterator; + using iterator = DereferencingIterator; + using const_iterator = DereferencingIterator; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; - iterator begin() { return iterator(data_, element_size_); } - iterator end() { - auto end_ptr = data_ + num_elements_ * element_size_; - return iterator(end_ptr, element_size_); - } + iterator begin() { return iterator(elems_.data()); } + iterator end() { return iterator(elems_.data() + elems_.size()); } - const_iterator begin() const { return const_iterator(data_, element_size_); } + const_iterator begin() const { return const_iterator(elems_.data()); } const_iterator end() const { - auto end_ptr = data_ + num_elements_ * element_size_; - return const_iterator(end_ptr, element_size_); + return const_iterator(elems_.data() + elems_.size()); } reverse_iterator rbegin() { return reverse_iterator(end()); } @@ -214,35 +178,27 @@ class PacketView { return const_reverse_iterator(begin()); } - size_t size() const { return num_elements_; } + size_t size() const { return elems_.size(); } - bool empty() const { return num_elements_ == 0; } + bool empty() const { return elems_.empty(); } T& operator[](size_t i) { - auto elem_ptr = data_ + i * element_size_; - return *reinterpret_cast(elem_ptr); + RTC_DCHECK_LT(i, elems_.size()); + return *elems_[i]; } const T& operator[](size_t i) const { - auto elem_ptr = data_ + i * element_size_; - return *reinterpret_cast(elem_ptr); + RTC_DCHECK_LT(i, elems_.size()); + return *elems_[i]; + } + + void push_back(T* elem) { + RTC_DCHECK(elem != nullptr); + elems_.push_back(elem); } private: - PacketView(typename std::conditional::value, - const void*, - void*>::type data, - size_t num_elements, - size_t offset, - size_t element_size) - : data_(reinterpret_cast(data) + offset), - num_elements_(num_elements), - element_size_(element_size) {} - - typename std::conditional::value, const char*, char*>::type - data_; - size_t num_elements_; - size_t element_size_; + std::vector elems_; }; // Conversion functions for version 2 of the wire format. 
@@ -345,14 +301,12 @@ class ParsedRtcEventLog { struct LoggedRtpStreamView { LoggedRtpStreamView(uint32_t ssrc, - const LoggedRtpPacketIncoming* ptr, - size_t num_elements); + const std::vector& packets); LoggedRtpStreamView(uint32_t ssrc, - const LoggedRtpPacketOutgoing* ptr, - size_t num_elements); + const std::vector& packets); LoggedRtpStreamView(const LoggedRtpStreamView&); uint32_t ssrc; - PacketView packet_view; + DereferencingVector packet_view; }; class LogSegment { diff --git a/logging/rtc_event_log/rtc_event_log_unittest.cc b/logging/rtc_event_log/rtc_event_log_unittest.cc index dca7fb7774..323e4fe009 100644 --- a/logging/rtc_event_log/rtc_event_log_unittest.cc +++ b/logging/rtc_event_log/rtc_event_log_unittest.cc @@ -944,7 +944,7 @@ TEST_P(RtcEventLogCircularBufferTest, KeepsMostRecentEvents) { EXPECT_LT(probe_success_events.size(), kNumEvents); ASSERT_GT(probe_success_events.size(), 1u); - int64_t first_timestamp_us = probe_success_events[0].timestamp_us; + int64_t first_timestamp_ms = probe_success_events[0].timestamp.ms(); uint32_t first_id = probe_success_events[0].id; int32_t first_bitrate_bps = probe_success_events[0].bitrate_bps; // We want to reset the time to what we used when generating the events, but @@ -953,7 +953,7 @@ TEST_P(RtcEventLogCircularBufferTest, KeepsMostRecentEvents) { // destroyed before the new one is created, so we have to reset() first. fake_clock.reset(); fake_clock = std::make_unique(); - fake_clock->SetTime(Timestamp::Micros(first_timestamp_us)); + fake_clock->SetTime(Timestamp::Millis(first_timestamp_ms)); for (size_t i = 1; i < probe_success_events.size(); i++) { fake_clock->AdvanceTime(TimeDelta::Millis(10)); verifier_.VerifyLoggedBweProbeSuccessEvent( @@ -974,4 +974,64 @@ INSTANTIATE_TEST_SUITE_P( // TODO(terelius): Verify parser behavior if the timestamps are not // monotonically increasing in the log. +TEST(DereferencingVectorTest, NonConstVector) { + std::vector v{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + DereferencingVector even; + EXPECT_TRUE(even.empty()); + EXPECT_EQ(even.size(), 0u); + EXPECT_EQ(even.begin(), even.end()); + for (size_t i = 0; i < v.size(); i += 2) { + even.push_back(&v[i]); + } + EXPECT_FALSE(even.empty()); + EXPECT_EQ(even.size(), 5u); + EXPECT_NE(even.begin(), even.end()); + + // Test direct access. + for (size_t i = 0; i < even.size(); i++) { + EXPECT_EQ(even[i], 2 * static_cast(i)); + } + + // Test iterator. + for (int val : even) { + EXPECT_EQ(val % 2, 0); + } + + // Test modification through iterator. + for (int& val : even) { + val = val * 2; + EXPECT_EQ(val % 2, 0); + } + + // Backing vector should have been modified. + std::vector expected{0, 1, 4, 3, 8, 5, 12, 7, 16, 9}; + for (size_t i = 0; i < v.size(); i++) { + EXPECT_EQ(v[i], expected[i]); + } +} + +TEST(DereferencingVectorTest, ConstVector) { + std::vector v{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + DereferencingVector odd; + EXPECT_TRUE(odd.empty()); + EXPECT_EQ(odd.size(), 0u); + EXPECT_EQ(odd.begin(), odd.end()); + for (size_t i = 1; i < v.size(); i += 2) { + odd.push_back(&v[i]); + } + EXPECT_FALSE(odd.empty()); + EXPECT_EQ(odd.size(), 5u); + EXPECT_NE(odd.begin(), odd.end()); + + // Test direct access. + for (size_t i = 0; i < odd.size(); i++) { + EXPECT_EQ(odd[i], 2 * static_cast(i) + 1); + } + + // Test iterator. 
+ for (int val : odd) { + EXPECT_EQ(val % 2, 1); + } +} + } // namespace webrtc diff --git a/logging/rtc_event_log/rtc_event_log_unittest_helper.cc b/logging/rtc_event_log/rtc_event_log_unittest_helper.cc index cefd4a6967..0960c98502 100644 --- a/logging/rtc_event_log/rtc_event_log_unittest_helper.cc +++ b/logging/rtc_event_log/rtc_event_log_unittest_helper.cc @@ -1124,10 +1124,10 @@ void EventVerifier::VerifyReportBlock( } void EventVerifier::VerifyLoggedSenderReport( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::SenderReport& original_sr, const LoggedRtcpPacketSenderReport& logged_sr) { - EXPECT_EQ(log_time_us, logged_sr.log_time_us()); + EXPECT_EQ(log_time_ms, logged_sr.log_time_ms()); EXPECT_EQ(original_sr.sender_ssrc(), logged_sr.sr.sender_ssrc()); EXPECT_EQ(original_sr.ntp(), logged_sr.sr.ntp()); EXPECT_EQ(original_sr.rtp_timestamp(), logged_sr.sr.rtp_timestamp()); @@ -1144,10 +1144,10 @@ void EventVerifier::VerifyLoggedSenderReport( } void EventVerifier::VerifyLoggedReceiverReport( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::ReceiverReport& original_rr, const LoggedRtcpPacketReceiverReport& logged_rr) { - EXPECT_EQ(log_time_us, logged_rr.log_time_us()); + EXPECT_EQ(log_time_ms, logged_rr.log_time_ms()); EXPECT_EQ(original_rr.sender_ssrc(), logged_rr.rr.sender_ssrc()); ASSERT_EQ(original_rr.report_blocks().size(), logged_rr.rr.report_blocks().size()); @@ -1158,10 +1158,10 @@ void EventVerifier::VerifyLoggedReceiverReport( } void EventVerifier::VerifyLoggedExtendedReports( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::ExtendedReports& original_xr, const LoggedRtcpPacketExtendedReports& logged_xr) { - EXPECT_EQ(log_time_us, logged_xr.log_time_us()); + EXPECT_EQ(log_time_ms, logged_xr.log_time_ms()); EXPECT_EQ(original_xr.sender_ssrc(), logged_xr.xr.sender_ssrc()); EXPECT_EQ(original_xr.rrtr().has_value(), logged_xr.xr.rrtr().has_value()); @@ -1199,10 +1199,10 @@ void EventVerifier::VerifyLoggedExtendedReports( } } -void EventVerifier::VerifyLoggedFir(int64_t log_time_us, +void EventVerifier::VerifyLoggedFir(int64_t log_time_ms, const rtcp::Fir& original_fir, const LoggedRtcpPacketFir& logged_fir) { - EXPECT_EQ(log_time_us, logged_fir.log_time_us()); + EXPECT_EQ(log_time_ms, logged_fir.log_time_ms()); EXPECT_EQ(original_fir.sender_ssrc(), logged_fir.fir.sender_ssrc()); const auto& original_requests = original_fir.requests(); const auto& logged_requests = logged_fir.fir.requests(); @@ -1213,35 +1213,35 @@ void EventVerifier::VerifyLoggedFir(int64_t log_time_us, } } -void EventVerifier::VerifyLoggedPli(int64_t log_time_us, +void EventVerifier::VerifyLoggedPli(int64_t log_time_ms, const rtcp::Pli& original_pli, const LoggedRtcpPacketPli& logged_pli) { - EXPECT_EQ(log_time_us, logged_pli.log_time_us()); + EXPECT_EQ(log_time_ms, logged_pli.log_time_ms()); EXPECT_EQ(original_pli.sender_ssrc(), logged_pli.pli.sender_ssrc()); EXPECT_EQ(original_pli.media_ssrc(), logged_pli.pli.media_ssrc()); } -void EventVerifier::VerifyLoggedBye(int64_t log_time_us, +void EventVerifier::VerifyLoggedBye(int64_t log_time_ms, const rtcp::Bye& original_bye, const LoggedRtcpPacketBye& logged_bye) { - EXPECT_EQ(log_time_us, logged_bye.log_time_us()); + EXPECT_EQ(log_time_ms, logged_bye.log_time_ms()); EXPECT_EQ(original_bye.sender_ssrc(), logged_bye.bye.sender_ssrc()); EXPECT_EQ(original_bye.csrcs(), logged_bye.bye.csrcs()); EXPECT_EQ(original_bye.reason(), logged_bye.bye.reason()); } -void EventVerifier::VerifyLoggedNack(int64_t log_time_us, +void 
EventVerifier::VerifyLoggedNack(int64_t log_time_ms, const rtcp::Nack& original_nack, const LoggedRtcpPacketNack& logged_nack) { - EXPECT_EQ(log_time_us, logged_nack.log_time_us()); + EXPECT_EQ(log_time_ms, logged_nack.log_time_ms()); EXPECT_EQ(original_nack.packet_ids(), logged_nack.nack.packet_ids()); } void EventVerifier::VerifyLoggedTransportFeedback( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::TransportFeedback& original_transport_feedback, const LoggedRtcpPacketTransportFeedback& logged_transport_feedback) { - EXPECT_EQ(log_time_us, logged_transport_feedback.log_time_us()); + EXPECT_EQ(log_time_ms, logged_transport_feedback.log_time_ms()); ASSERT_EQ( original_transport_feedback.GetReceivedPackets().size(), logged_transport_feedback.transport_feedback.GetReceivedPackets().size()); @@ -1258,19 +1258,19 @@ void EventVerifier::VerifyLoggedTransportFeedback( } } -void EventVerifier::VerifyLoggedRemb(int64_t log_time_us, +void EventVerifier::VerifyLoggedRemb(int64_t log_time_ms, const rtcp::Remb& original_remb, const LoggedRtcpPacketRemb& logged_remb) { - EXPECT_EQ(log_time_us, logged_remb.log_time_us()); + EXPECT_EQ(log_time_ms, logged_remb.log_time_ms()); EXPECT_EQ(original_remb.ssrcs(), logged_remb.remb.ssrcs()); EXPECT_EQ(original_remb.bitrate_bps(), logged_remb.remb.bitrate_bps()); } void EventVerifier::VerifyLoggedLossNotification( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::LossNotification& original_loss_notification, const LoggedRtcpPacketLossNotification& logged_loss_notification) { - EXPECT_EQ(log_time_us, logged_loss_notification.log_time_us()); + EXPECT_EQ(log_time_ms, logged_loss_notification.log_time_ms()); EXPECT_EQ(original_loss_notification.last_decoded(), logged_loss_notification.loss_notification.last_decoded()); EXPECT_EQ(original_loss_notification.last_received(), @@ -1285,7 +1285,7 @@ void EventVerifier::VerifyLoggedStartEvent( const LoggedStartEvent& logged_event) const { EXPECT_EQ(start_time_us / 1000, logged_event.log_time_ms()); if (encoding_type_ == RtcEventLog::EncodingType::NewFormat) { - EXPECT_EQ(utc_start_time_us / 1000, logged_event.utc_start_time_ms); + EXPECT_EQ(utc_start_time_us / 1000, logged_event.utc_start_time.ms()); } } diff --git a/logging/rtc_event_log/rtc_event_log_unittest_helper.h b/logging/rtc_event_log/rtc_event_log_unittest_helper.h index 94cf3d5ae7..eb16592271 100644 --- a/logging/rtc_event_log/rtc_event_log_unittest_helper.h +++ b/logging/rtc_event_log/rtc_event_log_unittest_helper.h @@ -260,38 +260,38 @@ class EventVerifier { const RtcEventRtcpPacketOutgoing& original_event, const LoggedRtcpPacketOutgoing& logged_event) const; - void VerifyLoggedSenderReport(int64_t log_time_us, + void VerifyLoggedSenderReport(int64_t log_time_ms, const rtcp::SenderReport& original_sr, const LoggedRtcpPacketSenderReport& logged_sr); void VerifyLoggedReceiverReport( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::ReceiverReport& original_rr, const LoggedRtcpPacketReceiverReport& logged_rr); void VerifyLoggedExtendedReports( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::ExtendedReports& original_xr, const LoggedRtcpPacketExtendedReports& logged_xr); - void VerifyLoggedFir(int64_t log_time_us, + void VerifyLoggedFir(int64_t log_time_ms, const rtcp::Fir& original_fir, const LoggedRtcpPacketFir& logged_fir); - void VerifyLoggedPli(int64_t log_time_us, + void VerifyLoggedPli(int64_t log_time_ms, const rtcp::Pli& original_pli, const LoggedRtcpPacketPli& logged_pli); - void VerifyLoggedBye(int64_t log_time_us, + 
void VerifyLoggedBye(int64_t log_time_ms, const rtcp::Bye& original_bye, const LoggedRtcpPacketBye& logged_bye); - void VerifyLoggedNack(int64_t log_time_us, + void VerifyLoggedNack(int64_t log_time_ms, const rtcp::Nack& original_nack, const LoggedRtcpPacketNack& logged_nack); void VerifyLoggedTransportFeedback( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::TransportFeedback& original_transport_feedback, const LoggedRtcpPacketTransportFeedback& logged_transport_feedback); - void VerifyLoggedRemb(int64_t log_time_us, + void VerifyLoggedRemb(int64_t log_time_ms, const rtcp::Remb& original_remb, const LoggedRtcpPacketRemb& logged_remb); void VerifyLoggedLossNotification( - int64_t log_time_us, + int64_t log_time_ms, const rtcp::LossNotification& original_loss_notification, const LoggedRtcpPacketLossNotification& logged_loss_notification); diff --git a/logging/rtc_event_log/rtc_event_processor_unittest.cc b/logging/rtc_event_log/rtc_event_processor_unittest.cc index 4ec5abee5e..b0cec25f1f 100644 --- a/logging/rtc_event_log/rtc_event_processor_unittest.cc +++ b/logging/rtc_event_log/rtc_event_processor_unittest.cc @@ -29,7 +29,7 @@ std::vector CreateEventList( std::initializer_list timestamp_list) { std::vector v; for (int64_t timestamp_ms : timestamp_list) { - v.emplace_back(timestamp_ms * 1000); // Convert ms to us. + v.emplace_back(Timestamp::Millis(timestamp_ms)); } return v; } @@ -41,7 +41,7 @@ CreateRandomEventLists(size_t num_lists, size_t num_elements, uint64_t seed) { for (size_t elem = 0; elem < num_elements; elem++) { uint32_t i = prng.Rand(0u, num_lists - 1); int64_t timestamp_ms = elem; - lists[i].emplace_back(timestamp_ms * 1000); + lists[i].emplace_back(Timestamp::Millis(timestamp_ms)); } return lists; } @@ -146,8 +146,8 @@ TEST(RtcEventProcessor, DifferentTypes) { result.push_back(elem.log_time_ms()); }; - std::vector events1{LoggedStartEvent(2000)}; - std::vector events2{LoggedStopEvent(1000)}; + std::vector events1{LoggedStartEvent(Timestamp::Millis(2))}; + std::vector events2{LoggedStopEvent(Timestamp::Millis(1))}; RtcEventProcessor processor; processor.AddEvents(events1, f1); processor.AddEvents(events2, f2); diff --git a/media/BUILD.gn b/media/BUILD.gn index c85a037e67..5f0f527b8f 100644 --- a/media/BUILD.gn +++ b/media/BUILD.gn @@ -106,6 +106,7 @@ rtc_library("rtc_media_base") { "../rtc_base:stringutils", "../rtc_base/synchronization:mutex", "../rtc_base/system:file_wrapper", + "../rtc_base/system:no_unique_address", "../rtc_base/system:rtc_export", "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:to_queued_task", @@ -401,9 +402,10 @@ if (rtc_build_dcsctp) { ":rtc_data_sctp_transport_internal", "../api:array_view", "../media:rtc_media_base", + "../net/dcsctp/public:factory", "../net/dcsctp/public:socket", "../net/dcsctp/public:types", - "../net/dcsctp/socket:dcsctp_socket", + "../net/dcsctp/public:utils", "../net/dcsctp/timer:task_queue_timeout", "../p2p:rtc_p2p", "../rtc_base:checks", @@ -621,6 +623,7 @@ if (rtc_include_tests) { "../modules/audio_processing:api", "../modules/audio_processing:mocks", "../modules/rtp_rtcp", + "../modules/rtp_rtcp:rtp_rtcp_format", "../modules/video_coding:simulcast_test_fixture_impl", "../modules/video_coding:video_codec_interface", "../modules/video_coding:webrtc_h264", @@ -638,6 +641,7 @@ if (rtc_include_tests) { "../rtc_base/experiments:min_video_bitrate_experiment", "../rtc_base/synchronization:mutex", "../rtc_base/third_party/sigslot", + "../system_wrappers:field_trial", 
"../test:audio_codec_mocks", "../test:fake_video_codecs", "../test:field_trial", diff --git a/media/base/fake_media_engine.h b/media/base/fake_media_engine.h index 6ee37369f9..e4f7b6659f 100644 --- a/media/base/fake_media_engine.h +++ b/media/base/fake_media_engine.h @@ -11,6 +11,7 @@ #ifndef MEDIA_BASE_FAKE_MEDIA_ENGINE_H_ #define MEDIA_BASE_FAKE_MEDIA_ENGINE_H_ +#include #include #include #include @@ -284,7 +285,10 @@ class RtpHelper : public Base { bool fail_set_recv_codecs() const { return fail_set_recv_codecs_; } private: - bool sending_; + // TODO(bugs.webrtc.org/12783): This flag is used from more than one thread. + // As a workaround for tsan, it's currently std::atomic but that might not + // be the appropriate fix. + std::atomic sending_; bool playout_; std::vector recv_extensions_; std::vector send_extensions_; diff --git a/media/base/fake_network_interface.h b/media/base/fake_network_interface.h index 02d53f6781..45b7aa0fc0 100644 --- a/media/base/fake_network_interface.h +++ b/media/base/fake_network_interface.h @@ -18,6 +18,7 @@ #include "media/base/media_channel.h" #include "media/base/rtp_utils.h" #include "rtc_base/byte_order.h" +#include "rtc_base/checks.h" #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/dscp.h" #include "rtc_base/message_handler.h" @@ -83,14 +84,12 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface, return static_cast(sent_ssrcs_.size()); } - // Note: callers are responsible for deleting the returned buffer. - const rtc::CopyOnWriteBuffer* GetRtpPacket(int index) - RTC_LOCKS_EXCLUDED(mutex_) { + rtc::CopyOnWriteBuffer GetRtpPacket(int index) RTC_LOCKS_EXCLUDED(mutex_) { webrtc::MutexLock lock(&mutex_); if (index >= static_cast(rtp_packets_.size())) { - return NULL; + return {}; } - return new rtc::CopyOnWriteBuffer(rtp_packets_[index]); + return rtp_packets_[index]; } int NumRtcpPackets() RTC_LOCKS_EXCLUDED(mutex_) { @@ -129,10 +128,7 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface, rtp_packets_.push_back(*packet); if (conf_) { for (size_t i = 0; i < conf_sent_ssrcs_.size(); ++i) { - if (!SetRtpSsrc(packet->MutableData(), packet->size(), - conf_sent_ssrcs_[i])) { - return false; - } + SetRtpSsrc(conf_sent_ssrcs_[i], *packet); PostMessage(ST_RTP, *packet); } } else { @@ -184,6 +180,11 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface, } private: + void SetRtpSsrc(uint32_t ssrc, rtc::CopyOnWriteBuffer& buffer) { + RTC_CHECK_GE(buffer.size(), 12); + rtc::SetBE32(buffer.MutableData() + 8, ssrc); + } + void GetNumRtpBytesAndPackets(uint32_t ssrc, int* bytes, int* packets) { if (bytes) { *bytes = 0; diff --git a/media/base/media_channel.h b/media/base/media_channel.h index a4a925e912..7b9a6f138c 100644 --- a/media/base/media_channel.h +++ b/media/base/media_channel.h @@ -372,6 +372,8 @@ struct MediaSenderInfo { int packets_sent = 0; // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedpacketssent uint64_t retransmitted_packets_sent = 0; + // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-nackcount + uint32_t nacks_rcvd = 0; int packets_lost = 0; float fraction_lost = 0.0f; int64_t rtt_ms = 0; @@ -426,6 +428,13 @@ struct MediaReceiverInfo { int64_t header_and_padding_bytes_rcvd = 0; int packets_rcvd = 0; int packets_lost = 0; + absl::optional nacks_sent; + // Jitter (network-related) latency (cumulative). 
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay + double jitter_buffer_delay_seconds = 0.0; + // Number of observations for cumulative jitter latency. + // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferemittedcount + uint64_t jitter_buffer_emitted_count = 0; // The timestamp at which the last packet was received, i.e. the time of the // local clock when it was received - not the RTP timestamp of that packet. // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-lastpacketreceivedtimestamp @@ -469,8 +478,6 @@ struct VoiceReceiverInfo : public MediaReceiverInfo { uint64_t concealed_samples = 0; uint64_t silent_concealed_samples = 0; uint64_t concealment_events = 0; - double jitter_buffer_delay_seconds = 0.0; - uint64_t jitter_buffer_emitted_count = 0; double jitter_buffer_target_delay_seconds = 0.0; uint64_t inserted_samples_for_deceleration = 0; uint64_t removed_samples_for_acceleration = 0; @@ -530,7 +537,6 @@ struct VideoSenderInfo : public MediaSenderInfo { std::string encoder_implementation_name; int firs_rcvd = 0; int plis_rcvd = 0; - int nacks_rcvd = 0; int send_frame_width = 0; int send_frame_height = 0; int frames = 0; @@ -575,7 +581,6 @@ struct VideoReceiverInfo : public MediaReceiverInfo { int packets_concealed = 0; int firs_sent = 0; int plis_sent = 0; - int nacks_sent = 0; int frame_width = 0; int frame_height = 0; int framerate_rcvd = 0; @@ -616,12 +621,6 @@ struct VideoReceiverInfo : public MediaReceiverInfo { int max_decode_ms = 0; // Jitter (network-related) latency. int jitter_buffer_ms = 0; - // Jitter (network-related) latency (cumulative). - // https://w3c.github.io/webrtc-stats/#dom-rtcvideoreceiverstats-jitterbufferdelay - double jitter_buffer_delay_seconds = 0; - // Number of observations for cumulative jitter latency. - // https://w3c.github.io/webrtc-stats/#dom-rtcvideoreceiverstats-jitterbufferemittedcount - uint64_t jitter_buffer_emitted_count = 0; // Requested minimum playout latency. int min_playout_delay_ms = 0; // Requested latency to account for rendering delay. diff --git a/media/base/rtp_utils.cc b/media/base/rtp_utils.cc index 4714175226..9f90c468f7 100644 --- a/media/base/rtp_utils.cc +++ b/media/base/rtp_utils.cc @@ -17,6 +17,7 @@ // PacketTimeUpdateParams is defined in asyncpacketsocket.h. // TODO(sergeyu): Find more appropriate place for PacketTimeUpdateParams. 
#include "media/base/turn_utils.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/async_packet_socket.h" #include "rtc_base/byte_order.h" #include "rtc_base/checks.h" @@ -24,8 +25,6 @@ namespace cricket { -static const uint8_t kRtpVersion = 2; -static const size_t kRtpFlagsOffset = 0; static const size_t kRtpPayloadTypeOffset = 1; static const size_t kRtpSeqNumOffset = 2; static const size_t kRtpTimestampOffset = 4; @@ -119,8 +118,6 @@ void UpdateRtpAuthTag(uint8_t* rtp, memcpy(auth_tag, output, tag_length); } -} // namespace - bool GetUint8(const void* data, size_t offset, int* value) { if (!data || !value) { return false; @@ -146,36 +143,7 @@ bool GetUint32(const void* data, size_t offset, uint32_t* value) { return true; } -bool SetUint8(void* data, size_t offset, uint8_t value) { - if (!data) { - return false; - } - rtc::Set8(data, offset, value); - return true; -} - -bool SetUint16(void* data, size_t offset, uint16_t value) { - if (!data) { - return false; - } - rtc::SetBE16(static_cast(data) + offset, value); - return true; -} - -bool SetUint32(void* data, size_t offset, uint32_t value) { - if (!data) { - return false; - } - rtc::SetBE32(static_cast(data) + offset, value); - return true; -} - -bool GetRtpFlags(const void* data, size_t len, int* value) { - if (len < kMinRtpPacketLen) { - return false; - } - return GetUint8(data, kRtpFlagsOffset, value); -} +} // namespace bool GetRtpPayloadType(const void* data, size_t len, int* value) { if (len < kMinRtpPacketLen) { @@ -209,34 +177,6 @@ bool GetRtpSsrc(const void* data, size_t len, uint32_t* value) { return GetUint32(data, kRtpSsrcOffset, value); } -bool GetRtpHeaderLen(const void* data, size_t len, size_t* value) { - if (!data || len < kMinRtpPacketLen || !value) - return false; - const uint8_t* header = static_cast(data); - // Get base header size + length of CSRCs (not counting extension yet). - size_t header_size = kMinRtpPacketLen + (header[0] & 0xF) * sizeof(uint32_t); - if (len < header_size) - return false; - // If there's an extension, read and add in the extension size. - if (header[0] & 0x10) { - if (len < header_size + sizeof(uint32_t)) - return false; - header_size += - ((rtc::GetBE16(header + header_size + 2) + 1) * sizeof(uint32_t)); - if (len < header_size) - return false; - } - *value = header_size; - return true; -} - -bool GetRtpHeader(const void* data, size_t len, RtpHeader* header) { - return (GetRtpPayloadType(data, len, &(header->payload_type)) && - GetRtpSeqNum(data, len, &(header->seq_num)) && - GetRtpTimestamp(data, len, &(header->timestamp)) && - GetRtpSsrc(data, len, &(header->ssrc))); -} - bool GetRtcpType(const void* data, size_t len, int* value) { if (len < kMinRtcpPacketLen) { return false; @@ -261,47 +201,6 @@ bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value) { return true; } -bool SetRtpSsrc(void* data, size_t len, uint32_t value) { - return SetUint32(data, kRtpSsrcOffset, value); -} - -// Assumes version 2, no padding, no extensions, no csrcs. 
-bool SetRtpHeader(void* data, size_t len, const RtpHeader& header) { - if (!IsValidRtpPayloadType(header.payload_type) || header.seq_num < 0 || - header.seq_num > static_cast(UINT16_MAX)) { - return false; - } - return (SetUint8(data, kRtpFlagsOffset, kRtpVersion << 6) && - SetUint8(data, kRtpPayloadTypeOffset, header.payload_type & 0x7F) && - SetUint16(data, kRtpSeqNumOffset, - static_cast(header.seq_num)) && - SetUint32(data, kRtpTimestampOffset, header.timestamp) && - SetRtpSsrc(data, len, header.ssrc)); -} - -static bool HasCorrectRtpVersion(rtc::ArrayView packet) { - return packet.data()[0] >> 6 == kRtpVersion; -} - -bool IsRtpPacket(rtc::ArrayView packet) { - return packet.size() >= kMinRtpPacketLen && - HasCorrectRtpVersion( - rtc::reinterpret_array_view(packet)); -} - -// Check the RTP payload type. If 63 < payload type < 96, it's RTCP. -// For additional details, see http://tools.ietf.org/html/rfc5761. -bool IsRtcpPacket(rtc::ArrayView packet) { - if (packet.size() < kMinRtcpPacketLen || - !HasCorrectRtpVersion( - rtc::reinterpret_array_view(packet))) { - return false; - } - - char pt = packet[1] & 0x7F; - return (63 < pt) && (pt < 96); -} - bool IsValidRtpPayloadType(int payload_type) { return payload_type >= 0 && payload_type <= 127; } @@ -327,11 +226,11 @@ absl::string_view RtpPacketTypeToString(RtpPacketType packet_type) { } RtpPacketType InferRtpPacketType(rtc::ArrayView packet) { - // RTCP packets are RTP packets so must check that first. - if (IsRtcpPacket(packet)) { + if (webrtc::IsRtcpPacket( + rtc::reinterpret_array_view(packet))) { return RtpPacketType::kRtcp; } - if (IsRtpPacket(packet)) { + if (webrtc::IsRtpPacket(rtc::reinterpret_array_view(packet))) { return RtpPacketType::kRtp; } return RtpPacketType::kUnknown; @@ -532,7 +431,7 @@ bool ApplyPacketOptions(uint8_t* data, // Making sure we have a valid RTP packet at the end. auto packet = rtc::MakeArrayView(data + rtp_start_pos, rtp_length); - if (!IsRtpPacket(rtc::reinterpret_array_view(packet)) || + if (!webrtc::IsRtpPacket(packet) || !ValidateRtpHeader(data + rtp_start_pos, rtp_length, nullptr)) { RTC_NOTREACHED(); return false; diff --git a/media/base/rtp_utils.h b/media/base/rtp_utils.h index 9ef9f9c7ba..f6b5dbc9f0 100644 --- a/media/base/rtp_utils.h +++ b/media/base/rtp_utils.h @@ -26,13 +26,6 @@ const size_t kMinRtpPacketLen = 12; const size_t kMaxRtpPacketLen = 2048; const size_t kMinRtcpPacketLen = 4; -struct RtpHeader { - int payload_type; - int seq_num; - uint32_t timestamp; - uint32_t ssrc; -}; - enum RtcpTypes { kRtcpTypeSR = 200, // Sender report payload type. kRtcpTypeRR = 201, // Receiver report payload type. @@ -53,18 +46,10 @@ bool GetRtpPayloadType(const void* data, size_t len, int* value); bool GetRtpSeqNum(const void* data, size_t len, int* value); bool GetRtpTimestamp(const void* data, size_t len, uint32_t* value); bool GetRtpSsrc(const void* data, size_t len, uint32_t* value); -bool GetRtpHeaderLen(const void* data, size_t len, size_t* value); + bool GetRtcpType(const void* data, size_t len, int* value); bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value); -bool GetRtpHeader(const void* data, size_t len, RtpHeader* header); - -bool SetRtpSsrc(void* data, size_t len, uint32_t value); -// Assumes version 2, no padding, no extensions, no csrcs. -bool SetRtpHeader(void* data, size_t len, const RtpHeader& header); - -bool IsRtpPacket(rtc::ArrayView packet); -bool IsRtcpPacket(rtc::ArrayView packet); // Checks the packet header to determine if it can be an RTP or RTCP packet. 
RtpPacketType InferRtpPacketType(rtc::ArrayView packet); // True if |payload type| is 0-127. diff --git a/media/base/rtp_utils_unittest.cc b/media/base/rtp_utils_unittest.cc index a5e8a810f4..14599abca2 100644 --- a/media/base/rtp_utils_unittest.cc +++ b/media/base/rtp_utils_unittest.cc @@ -23,24 +23,7 @@ namespace cricket { static const uint8_t kRtpPacketWithMarker[] = { 0x80, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; -// 3 CSRCs (0x01020304, 0x12345678, 0xAABBCCDD) -// Extension (0xBEDE, 0x1122334455667788) -static const uint8_t kRtpPacketWithMarkerAndCsrcAndExtension[] = { - 0x93, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x02, 0x03, 0x04, 0x12, 0x34, 0x56, 0x78, 0xAA, 0xBB, 0xCC, 0xDD, - 0xBE, 0xDE, 0x00, 0x02, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}; static const uint8_t kInvalidPacket[] = {0x80, 0x00}; -static const uint8_t kInvalidPacketWithCsrc[] = { - 0x83, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x02, 0x03, 0x04, 0x12, 0x34, 0x56, 0x78, 0xAA, 0xBB, 0xCC}; -static const uint8_t kInvalidPacketWithCsrcAndExtension1[] = { - 0x93, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x01, 0x01, 0x02, 0x03, 0x04, 0x12, 0x34, - 0x56, 0x78, 0xAA, 0xBB, 0xCC, 0xDD, 0xBE, 0xDE, 0x00}; -static const uint8_t kInvalidPacketWithCsrcAndExtension2[] = { - 0x93, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x02, 0x03, 0x04, 0x12, 0x34, 0x56, 0x78, 0xAA, 0xBB, 0xCC, 0xDD, - 0xBE, 0xDE, 0x00, 0x02, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77}; // PT = 206, FMT = 1, Sender SSRC = 0x1111, Media SSRC = 0x1111 // No FCI information is needed for PLI. @@ -102,8 +85,6 @@ static const rtc::ArrayView kInvalidPacketArrayView = sizeof(kInvalidPacket)); TEST(RtpUtilsTest, GetRtp) { - EXPECT_TRUE(IsRtpPacket(kPcmuFrameArrayView)); - int pt; EXPECT_TRUE(GetRtpPayloadType(kPcmuFrame, sizeof(kPcmuFrame), &pt)); EXPECT_EQ(0, pt); @@ -123,59 +104,12 @@ TEST(RtpUtilsTest, GetRtp) { EXPECT_TRUE(GetRtpSsrc(kPcmuFrame, sizeof(kPcmuFrame), &ssrc)); EXPECT_EQ(1u, ssrc); - RtpHeader header; - EXPECT_TRUE(GetRtpHeader(kPcmuFrame, sizeof(kPcmuFrame), &header)); - EXPECT_EQ(0, header.payload_type); - EXPECT_EQ(1, header.seq_num); - EXPECT_EQ(0u, header.timestamp); - EXPECT_EQ(1u, header.ssrc); - EXPECT_FALSE(GetRtpPayloadType(kInvalidPacket, sizeof(kInvalidPacket), &pt)); EXPECT_FALSE(GetRtpSeqNum(kInvalidPacket, sizeof(kInvalidPacket), &seq_num)); EXPECT_FALSE(GetRtpTimestamp(kInvalidPacket, sizeof(kInvalidPacket), &ts)); EXPECT_FALSE(GetRtpSsrc(kInvalidPacket, sizeof(kInvalidPacket), &ssrc)); } -TEST(RtpUtilsTest, SetRtpHeader) { - uint8_t packet[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - - RtpHeader header = {9, 1111, 2222u, 3333u}; - EXPECT_TRUE(SetRtpHeader(packet, sizeof(packet), header)); - - // Bits: 10 0 0 0000 - EXPECT_EQ(128u, packet[0]); - size_t len; - EXPECT_TRUE(GetRtpHeaderLen(packet, sizeof(packet), &len)); - EXPECT_EQ(12U, len); - EXPECT_TRUE(GetRtpHeader(packet, sizeof(packet), &header)); - EXPECT_EQ(9, header.payload_type); - EXPECT_EQ(1111, header.seq_num); - EXPECT_EQ(2222u, header.timestamp); - EXPECT_EQ(3333u, header.ssrc); -} - -TEST(RtpUtilsTest, GetRtpHeaderLen) { - size_t len; - EXPECT_TRUE(GetRtpHeaderLen(kPcmuFrame, sizeof(kPcmuFrame), &len)); - EXPECT_EQ(12U, len); - - EXPECT_TRUE(GetRtpHeaderLen(kRtpPacketWithMarkerAndCsrcAndExtension, - sizeof(kRtpPacketWithMarkerAndCsrcAndExtension), - &len)); - 
EXPECT_EQ(sizeof(kRtpPacketWithMarkerAndCsrcAndExtension), len); - - EXPECT_FALSE(GetRtpHeaderLen(kInvalidPacket, sizeof(kInvalidPacket), &len)); - EXPECT_FALSE(GetRtpHeaderLen(kInvalidPacketWithCsrc, - sizeof(kInvalidPacketWithCsrc), &len)); - EXPECT_FALSE(GetRtpHeaderLen(kInvalidPacketWithCsrcAndExtension1, - sizeof(kInvalidPacketWithCsrcAndExtension1), - &len)); - EXPECT_FALSE(GetRtpHeaderLen(kInvalidPacketWithCsrcAndExtension2, - sizeof(kInvalidPacketWithCsrcAndExtension2), - &len)); -} - TEST(RtpUtilsTest, GetRtcp) { int pt; EXPECT_TRUE(GetRtcpType(kRtcpReport, sizeof(kRtcpReport), &pt)); diff --git a/media/base/video_source_base.cc b/media/base/video_source_base.cc index d057a24ad8..2454902069 100644 --- a/media/base/video_source_base.cc +++ b/media/base/video_source_base.cc @@ -10,6 +10,8 @@ #include "media/base/video_source_base.h" +#include + #include "absl/algorithm/container.h" #include "rtc_base/checks.h" @@ -52,4 +54,51 @@ VideoSourceBase::SinkPair* VideoSourceBase::FindSinkPair( return nullptr; } +VideoSourceBaseGuarded::VideoSourceBaseGuarded() = default; +VideoSourceBaseGuarded::~VideoSourceBaseGuarded() = default; + +void VideoSourceBaseGuarded::AddOrUpdateSink( + VideoSinkInterface* sink, + const VideoSinkWants& wants) { + RTC_DCHECK_RUN_ON(&source_sequence_); + RTC_DCHECK(sink != nullptr); + + SinkPair* sink_pair = FindSinkPair(sink); + if (!sink_pair) { + sinks_.push_back(SinkPair(sink, wants)); + } else { + sink_pair->wants = wants; + } +} + +void VideoSourceBaseGuarded::RemoveSink( + VideoSinkInterface* sink) { + RTC_DCHECK_RUN_ON(&source_sequence_); + RTC_DCHECK(sink != nullptr); + RTC_DCHECK(FindSinkPair(sink)); + sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(), + [sink](const SinkPair& sink_pair) { + return sink_pair.sink == sink; + }), + sinks_.end()); +} + +VideoSourceBaseGuarded::SinkPair* VideoSourceBaseGuarded::FindSinkPair( + const VideoSinkInterface* sink) { + RTC_DCHECK_RUN_ON(&source_sequence_); + auto sink_pair_it = absl::c_find_if( + sinks_, + [sink](const SinkPair& sink_pair) { return sink_pair.sink == sink; }); + if (sink_pair_it != sinks_.end()) { + return &*sink_pair_it; + } + return nullptr; +} + +const std::vector& +VideoSourceBaseGuarded::sink_pairs() const { + RTC_DCHECK_RUN_ON(&source_sequence_); + return sinks_; +} + } // namespace rtc diff --git a/media/base/video_source_base.h b/media/base/video_source_base.h index 59b7dab164..2644723aa7 100644 --- a/media/base/video_source_base.h +++ b/media/base/video_source_base.h @@ -17,10 +17,14 @@ #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" +#include "rtc_base/system/no_unique_address.h" namespace rtc { -// VideoSourceBase is not thread safe. +// VideoSourceBase is not thread safe. Before using this class, consider using +// VideoSourceBaseGuarded below instead, which is an identical implementation +// but applies a sequence checker to help protect internal state. +// TODO(bugs.webrtc.org/12780): Delete this class. class VideoSourceBase : public VideoSourceInterface { public: VideoSourceBase(); @@ -44,6 +48,36 @@ class VideoSourceBase : public VideoSourceInterface { std::vector sinks_; }; +// VideoSourceBaseGuarded assumes that operations related to sinks, occur on the +// same TQ/thread that the object was constructed on. 
+class VideoSourceBaseGuarded : public VideoSourceInterface { + public: + VideoSourceBaseGuarded(); + ~VideoSourceBaseGuarded() override; + + void AddOrUpdateSink(VideoSinkInterface* sink, + const VideoSinkWants& wants) override; + void RemoveSink(VideoSinkInterface* sink) override; + + protected: + struct SinkPair { + SinkPair(VideoSinkInterface* sink, VideoSinkWants wants) + : sink(sink), wants(wants) {} + VideoSinkInterface* sink; + VideoSinkWants wants; + }; + + SinkPair* FindSinkPair(const VideoSinkInterface* sink); + const std::vector& sink_pairs() const; + + // Keep the `source_sequence_` checker protected to allow sub classes the + // ability to call Detach() if/when appropriate. + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker source_sequence_; + + private: + std::vector sinks_ RTC_GUARDED_BY(&source_sequence_); +}; + } // namespace rtc #endif // MEDIA_BASE_VIDEO_SOURCE_BASE_H_ diff --git a/media/engine/fake_webrtc_call.cc b/media/engine/fake_webrtc_call.cc index 76a70aaa57..e8c7f6e0c9 100644 --- a/media/engine/fake_webrtc_call.cc +++ b/media/engine/fake_webrtc_call.cc @@ -96,9 +96,31 @@ bool FakeAudioReceiveStream::DeliverRtp(const uint8_t* packet, return true; } -void FakeAudioReceiveStream::Reconfigure( - const webrtc::AudioReceiveStream::Config& config) { - config_ = config; +void FakeAudioReceiveStream::SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr frame_transformer) { + config_.frame_transformer = std::move(frame_transformer); +} + +void FakeAudioReceiveStream::SetDecoderMap( + std::map decoder_map) { + config_.decoder_map = std::move(decoder_map); +} + +void FakeAudioReceiveStream::SetUseTransportCcAndNackHistory( + bool use_transport_cc, + int history_ms) { + config_.rtp.transport_cc = use_transport_cc; + config_.rtp.nack.rtp_history_ms = history_ms; +} + +void FakeAudioReceiveStream::SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) { + config_.frame_decryptor = std::move(frame_decryptor); +} + +void FakeAudioReceiveStream::SetRtpExtensions( + std::vector extensions) { + config_.rtp.extensions = std::move(extensions); } webrtc::AudioReceiveStream::Stats FakeAudioReceiveStream::GetStats( @@ -646,6 +668,18 @@ void FakeCall::SignalChannelNetworkState(webrtc::MediaType media, void FakeCall::OnAudioTransportOverheadChanged( int transport_overhead_per_packet) {} +void FakeCall::OnLocalSsrcUpdated(webrtc::AudioReceiveStream& stream, + uint32_t local_ssrc) { + auto& fake_stream = static_cast(stream); + fake_stream.SetLocalSsrc(local_ssrc); +} + +void FakeCall::OnUpdateSyncGroup(webrtc::AudioReceiveStream& stream, + const std::string& sync_group) { + auto& fake_stream = static_cast(stream); + fake_stream.SetSyncGroup(sync_group); +} + void FakeCall::OnSentPacket(const rtc::SentPacket& sent_packet) { last_sent_packet_ = sent_packet; if (sent_packet.packet_id >= 0) { diff --git a/media/engine/fake_webrtc_call.h b/media/engine/fake_webrtc_call.h index fd383dadd1..aeef95477e 100644 --- a/media/engine/fake_webrtc_call.h +++ b/media/engine/fake_webrtc_call.h @@ -100,12 +100,31 @@ class FakeAudioReceiveStream final : public webrtc::AudioReceiveStream { return base_mininum_playout_delay_ms_; } + void SetLocalSsrc(uint32_t local_ssrc) { + config_.rtp.local_ssrc = local_ssrc; + } + + void SetSyncGroup(const std::string& sync_group) { + config_.sync_group = sync_group; + } + private: - // webrtc::AudioReceiveStream implementation. 
- void Reconfigure(const webrtc::AudioReceiveStream::Config& config) override; + const webrtc::ReceiveStream::RtpConfig& rtp_config() const override { + return config_.rtp; + } void Start() override { started_ = true; } void Stop() override { started_ = false; } bool IsRunning() const override { return started_; } + void SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr frame_transformer) + override; + void SetDecoderMap( + std::map decoder_map) override; + void SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) override; + void SetFrameDecryptor(rtc::scoped_refptr + frame_decryptor) override; + void SetRtpExtensions(std::vector extensions) override; webrtc::AudioReceiveStream::Stats GetStats( bool get_and_clear_legacy_stats) const override; @@ -177,6 +196,7 @@ class FakeVideoSendStream final const std::vector active_layers) override; void Start() override; void Stop() override; + bool started() override { return IsSending(); } void AddAdaptationResource( rtc::scoped_refptr resource) override; std::vector> GetAdaptationResources() @@ -243,6 +263,9 @@ class FakeVideoReceiveStream final : public webrtc::VideoReceiveStream { private: // webrtc::VideoReceiveStream implementation. + const webrtc::ReceiveStream::RtpConfig& rtp_config() const override { + return config_.rtp; + } void Start() override; void Stop() override; @@ -269,7 +292,11 @@ class FakeFlexfecReceiveStream final : public webrtc::FlexfecReceiveStream { explicit FakeFlexfecReceiveStream( const webrtc::FlexfecReceiveStream::Config& config); - const webrtc::FlexfecReceiveStream::Config& GetConfig() const override; + const webrtc::ReceiveStream::RtpConfig& rtp_config() const override { + return config_.rtp; + } + + const webrtc::FlexfecReceiveStream::Config& GetConfig() const; private: webrtc::FlexfecReceiveStream::Stats GetStats() const override; @@ -373,6 +400,10 @@ class FakeCall final : public webrtc::Call, public webrtc::PacketReceiver { webrtc::NetworkState state) override; void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) override; + void OnLocalSsrcUpdated(webrtc::AudioReceiveStream& stream, + uint32_t local_ssrc) override; + void OnUpdateSyncGroup(webrtc::AudioReceiveStream& stream, + const std::string& sync_group) override; void OnSentPacket(const rtc::SentPacket& sent_packet) override; webrtc::TaskQueueBase* const network_thread_; diff --git a/media/engine/payload_type_mapper.cc b/media/engine/payload_type_mapper.cc index 4c46975997..cbc0a5340d 100644 --- a/media/engine/payload_type_mapper.cc +++ b/media/engine/payload_type_mapper.cc @@ -72,6 +72,8 @@ PayloadTypeMapper::PayloadTypeMapper() {{kCodecParamMinPTime, "10"}, {kCodecParamUseInbandFec, kParamValueTrue}}}, 111}, + // RED for opus is assigned in the lower range, starting at the top. + {{kRedCodecName, 48000, 2}, 63}, // TODO(solenberg): Remove the hard coded 16k,32k,48k DTMF once we // assign payload types dynamically for send side as well. 
{{kDtmfCodecName, 48000, 1}, 110}, diff --git a/media/engine/payload_type_mapper_unittest.cc b/media/engine/payload_type_mapper_unittest.cc index 9361f76116..9c29827fa9 100644 --- a/media/engine/payload_type_mapper_unittest.cc +++ b/media/engine/payload_type_mapper_unittest.cc @@ -58,6 +58,7 @@ TEST_F(PayloadTypeMapperTest, WebRTCPayloadTypes) { 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}})); + EXPECT_EQ(63, mapper_.FindMappingFor({kRedCodecName, 48000, 2})); // TODO(solenberg): Remove 16k, 32k, 48k DTMF checks once these payload types // are dynamically assigned. EXPECT_EQ(110, mapper_.FindMappingFor({kDtmfCodecName, 48000, 1})); diff --git a/media/engine/simulcast_encoder_adapter.cc b/media/engine/simulcast_encoder_adapter.cc index 832c0bde9f..a955c5f144 100644 --- a/media/engine/simulcast_encoder_adapter.cc +++ b/media/engine/simulcast_encoder_adapter.cc @@ -149,9 +149,13 @@ namespace webrtc { SimulcastEncoderAdapter::EncoderContext::EncoderContext( std::unique_ptr encoder, - bool prefer_temporal_support) + bool prefer_temporal_support, + VideoEncoder::EncoderInfo primary_info, + VideoEncoder::EncoderInfo fallback_info) : encoder_(std::move(encoder)), - prefer_temporal_support_(prefer_temporal_support) {} + prefer_temporal_support_(prefer_temporal_support), + primary_info_(std::move(primary_info)), + fallback_info_(std::move(fallback_info)) {} void SimulcastEncoderAdapter::EncoderContext::Release() { if (encoder_) { @@ -344,20 +348,24 @@ int SimulcastEncoderAdapter::InitEncode( // Two distinct scenarios: // * Singlecast (total_streams_count == 1) or simulcast with simulcast-capable - // underlaying encoder implementation. SEA operates in bypass mode: original - // settings are passed to the underlaying encoder, frame encode complete - // callback is not intercepted. + // underlaying encoder implementation if active_streams_count > 1. SEA + // operates in bypass mode: original settings are passed to the underlaying + // encoder, frame encode complete callback is not intercepted. // * Multi-encoder simulcast or singlecast if layers are deactivated - // (total_streams_count > 1 and active_streams_count >= 1). SEA creates - // N=active_streams_count encoders and configures each to produce a single - // stream. - + // (active_streams_count >= 1). SEA creates N=active_streams_count encoders + // and configures each to produce a single stream. + + int active_streams_count = CountActiveStreams(*inst); + // If we only have a single active layer it is better to create an encoder + // with only one configured layer than creating it with all-but-one disabled + // layers because that way we control scaling. + bool separate_encoders_needed = + !encoder_context->encoder().GetEncoderInfo().supports_simulcast || + active_streams_count == 1; // Singlecast or simulcast with simulcast-capable underlaying encoder. 
- if (total_streams_count_ == 1 || - encoder_context->encoder().GetEncoderInfo().supports_simulcast) { + if (total_streams_count_ == 1 || !separate_encoders_needed) { int ret = encoder_context->encoder().InitEncode(&codec_, settings); if (ret >= 0) { - int active_streams_count = CountActiveStreams(*inst); stream_contexts_.emplace_back( /*parent=*/nullptr, std::move(encoder_context), /*framerate_controller=*/nullptr, /*stream_idx=*/0, codec_.width, @@ -687,7 +695,7 @@ void SimulcastEncoderAdapter::DestroyStoredEncoders() { std::unique_ptr SimulcastEncoderAdapter::FetchOrCreateEncoderContext( - bool is_lowest_quality_stream) { + bool is_lowest_quality_stream) const { bool prefer_temporal_support = fallback_encoder_factory_ != nullptr && is_lowest_quality_stream && prefer_temporal_support_on_base_layer_; @@ -737,7 +745,8 @@ SimulcastEncoderAdapter::FetchOrCreateEncoderContext( return nullptr; } encoder_context = std::make_unique( - std::move(encoder), prefer_temporal_support); + std::move(encoder), prefer_temporal_support, primary_info, + fallback_info); } encoder_context->encoder().RegisterEncodeCompleteCallback( encoded_complete_callback_); @@ -806,9 +815,11 @@ webrtc::VideoCodec SimulcastEncoderAdapter::MakeStreamCodec( void SimulcastEncoderAdapter::OverrideFromFieldTrial( VideoEncoder::EncoderInfo* info) const { if (encoder_info_override_.requested_resolution_alignment()) { - info->requested_resolution_alignment = - *encoder_info_override_.requested_resolution_alignment(); + info->requested_resolution_alignment = cricket::LeastCommonMultiple( + info->requested_resolution_alignment, + *encoder_info_override_.requested_resolution_alignment()); info->apply_alignment_to_all_simulcast_layers = + info->apply_alignment_to_all_simulcast_layers || encoder_info_override_.apply_alignment_to_all_simulcast_layers(); } if (!encoder_info_override_.resolution_bitrate_limits().empty()) { @@ -832,7 +843,34 @@ VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const { encoder_info.apply_alignment_to_all_simulcast_layers = false; encoder_info.supports_native_handle = true; encoder_info.scaling_settings.thresholds = absl::nullopt; + if (stream_contexts_.empty()) { + // GetEncoderInfo queried before InitEncode. Only alignment info is needed + // to be filled. + // Create one encoder and query it. + + std::unique_ptr encoder_context = + FetchOrCreateEncoderContext(true); + + const VideoEncoder::EncoderInfo& primary_info = + encoder_context->PrimaryInfo(); + const VideoEncoder::EncoderInfo& fallback_info = + encoder_context->FallbackInfo(); + + encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple( + primary_info.requested_resolution_alignment, + fallback_info.requested_resolution_alignment); + + encoder_info.apply_alignment_to_all_simulcast_layers = + primary_info.apply_alignment_to_all_simulcast_layers || + fallback_info.apply_alignment_to_all_simulcast_layers; + + if (!primary_info.supports_simulcast || !fallback_info.supports_simulcast) { + encoder_info.apply_alignment_to_all_simulcast_layers = true; + } + + cached_encoder_contexts_.emplace_back(std::move(encoder_context)); + OverrideFromFieldTrial(&encoder_info); return encoder_info; } @@ -842,7 +880,6 @@ VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const { for (size_t i = 0; i < stream_contexts_.size(); ++i) { VideoEncoder::EncoderInfo encoder_impl_info = stream_contexts_[i].encoder().GetEncoderInfo(); - if (i == 0) { // Encoder name indicates names of all sub-encoders. 
encoder_info.implementation_name += " ("; @@ -881,7 +918,12 @@ VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const { encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple( encoder_info.requested_resolution_alignment, encoder_impl_info.requested_resolution_alignment); - if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) { + // Request alignment on all layers if any of the encoders may need it, or + // if any non-top layer encoder requests a non-trivial alignment. + if (encoder_impl_info.apply_alignment_to_all_simulcast_layers || + (encoder_impl_info.requested_resolution_alignment > 1 && + (codec_.simulcastStream[i].height < codec_.height || + codec_.simulcastStream[i].width < codec_.width))) { encoder_info.apply_alignment_to_all_simulcast_layers = true; } } diff --git a/media/engine/simulcast_encoder_adapter.h b/media/engine/simulcast_encoder_adapter.h index 2cb29edfd6..07e3ccd024 100644 --- a/media/engine/simulcast_encoder_adapter.h +++ b/media/engine/simulcast_encoder_adapter.h @@ -71,16 +71,24 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { class EncoderContext { public: EncoderContext(std::unique_ptr encoder, - bool prefer_temporal_support); + bool prefer_temporal_support, + VideoEncoder::EncoderInfo primary_info, + VideoEncoder::EncoderInfo fallback_info); EncoderContext& operator=(EncoderContext&&) = delete; VideoEncoder& encoder() { return *encoder_; } bool prefer_temporal_support() { return prefer_temporal_support_; } void Release(); + const VideoEncoder::EncoderInfo& PrimaryInfo() { return primary_info_; } + + const VideoEncoder::EncoderInfo& FallbackInfo() { return fallback_info_; } + private: std::unique_ptr encoder_; bool prefer_temporal_support_; + const VideoEncoder::EncoderInfo primary_info_; + const VideoEncoder::EncoderInfo fallback_info_; }; class StreamContext : public EncodedImageCallback { @@ -138,8 +146,11 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { void DestroyStoredEncoders(); + // This method creates an encoder. It may reuse previously created encoders + // from |cached_encoder_contexts_|. It's const because it's used from + // const GetEncoderInfo(). std::unique_ptr FetchOrCreateEncoderContext( - bool is_lowest_quality_stream); + bool is_lowest_quality_stream) const; webrtc::VideoCodec MakeStreamCodec(const webrtc::VideoCodec& codec, int stream_idx, @@ -169,9 +180,11 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { // Used for checking the single-threaded access of the encoder interface. RTC_NO_UNIQUE_ADDRESS SequenceChecker encoder_queue_; - // Store encoders in between calls to Release and InitEncode, so they don't - // have to be recreated. Remaining encoders are destroyed by the destructor. - std::list> cached_encoder_contexts_; + // Store previously created and released encoders, so they don't have to be + // recreated. Remaining encoders are destroyed by the destructor. + // Marked as |mutable| because we may need to temporarily create an encoder + // in GetEncoderInfo(), which is const.
+ mutable std::list> cached_encoder_contexts_; const absl::optional experimental_boosted_screenshare_qp_; const bool boost_base_layer_quality_; diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc index 65c2b5dbcb..e13cbea8a4 100644 --- a/media/engine/simulcast_encoder_adapter_unittest.cc +++ b/media/engine/simulcast_encoder_adapter_unittest.cc @@ -908,8 +908,6 @@ TEST_F(TestSimulcastEncoderAdapterFake, SetRatesUnderMinBitrate) { } TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) { - EXPECT_EQ("SimulcastEncoderAdapter", - adapter_->GetEncoderInfo().implementation_name); SimulcastTestFixtureImpl::DefaultSettings( &codec_, static_cast(kTestTemporalLayerProfile), kVideoCodecVP8); @@ -918,6 +916,8 @@ TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) { encoder_names.push_back("codec2"); encoder_names.push_back("codec3"); helper_->factory()->SetEncoderNames(encoder_names); + EXPECT_EQ("SimulcastEncoderAdapter", + adapter_->GetEncoderInfo().implementation_name); EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings)); EXPECT_EQ("SimulcastEncoderAdapter (codec1, codec2, codec3)", adapter_->GetEncoderInfo().implementation_name); diff --git a/media/engine/webrtc_video_engine.cc b/media/engine/webrtc_video_engine.cc index 0bf4f20639..38a210ee7d 100644 --- a/media/engine/webrtc_video_engine.cc +++ b/media/engine/webrtc_video_engine.cc @@ -47,6 +47,7 @@ namespace cricket { namespace { const int kMinLayerSize = 16; +constexpr int64_t kUnsignaledSsrcCooldownMs = rtc::kNumMillisecsPerSec / 2; const char* StreamTypeToString( webrtc::VideoSendStream::StreamStats::StreamType type) { @@ -105,10 +106,10 @@ void AddDefaultFeedbackParams(VideoCodec* codec, } } -// This function will assign dynamic payload types (in the range [96, 127]) to -// the input codecs, and also add ULPFEC, RED, FlexFEC, and associated RTX -// codecs for recognized codecs (VP8, VP9, H264, and RED). It will also add -// default feedback params to the codecs. +// This function will assign dynamic payload types (in the range [96, 127] +// and then [35, 63]) to the input codecs, and also add ULPFEC, RED, FlexFEC, +// and associated RTX codecs for recognized codecs (VP8, VP9, H264, and RED). +// It will also add default feedback params to the codecs. // is_decoder_factory is needed to keep track of the implict assumption that any // H264 decoder also supports constrained base line profile. // Also, is_decoder_factory is used to decide whether FlexFEC video format @@ -133,16 +134,6 @@ std::vector GetPayloadTypesAndDefaultCodecs( if (supported_formats.empty()) return std::vector(); - // Due to interoperability issues with old Chrome/WebRTC versions only use - // the lower range for new codecs. - static const int kFirstDynamicPayloadTypeLowerRange = 35; - static const int kLastDynamicPayloadTypeLowerRange = 65; - - static const int kFirstDynamicPayloadTypeUpperRange = 96; - static const int kLastDynamicPayloadTypeUpperRange = 127; - int payload_type_upper = kFirstDynamicPayloadTypeUpperRange; - int payload_type_lower = kFirstDynamicPayloadTypeLowerRange; - supported_formats.push_back(webrtc::SdpVideoFormat(kRedCodecName)); supported_formats.push_back(webrtc::SdpVideoFormat(kUlpfecCodecName)); @@ -162,60 +153,65 @@ std::vector GetPayloadTypesAndDefaultCodecs( supported_formats.push_back(flexfec_format); } + // Due to interoperability issues with old Chrome/WebRTC versions that + // ignore the [35, 63] range prefer the lower range for new codecs. 
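The comment that closes the hunk above describes the payload-type policy implemented by the constants and loop that follow: most codecs draw from [96, 127], while codecs old endpoints may not accept (and any codec once the upper range is exhausted) draw from [35, 63]. Below is a hedged, self-contained sketch of that policy; the PayloadTypePicker class is invented for illustration and its boundary handling is simplified relative to the actual hunk.

```cpp
#include <cassert>
#include <optional>

// Illustrative sketch of the dynamic payload-type policy: assign from the
// upper range first, fall back to the lower range for "new" codecs or when
// the upper range runs out. Not byte-for-byte the CL's boundary handling.
class PayloadTypePicker {
 public:
  std::optional<int> Pick(bool prefers_lower_range) {
    if (prefers_lower_range || upper_ > kLastUpper) {
      if (lower_ > kLastLower)
        return std::nullopt;  // Both ranges exhausted.
      return lower_++;
    }
    return upper_++;
  }

 private:
  static constexpr int kFirstLower = 35, kLastLower = 63;
  static constexpr int kFirstUpper = 96, kLastUpper = 127;
  int lower_ = kFirstLower;
  int upper_ = kFirstUpper;
};

int main() {
  PayloadTypePicker picker;
  assert(picker.Pick(/*prefers_lower_range=*/false).value() == 96);  // e.g. VP8
  assert(picker.Pick(/*prefers_lower_range=*/true).value() == 35);   // e.g. AV1
}
```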
+ static const int kFirstDynamicPayloadTypeLowerRange = 35; + static const int kLastDynamicPayloadTypeLowerRange = 63; + + static const int kFirstDynamicPayloadTypeUpperRange = 96; + static const int kLastDynamicPayloadTypeUpperRange = 127; + int payload_type_upper = kFirstDynamicPayloadTypeUpperRange; + int payload_type_lower = kFirstDynamicPayloadTypeLowerRange; + std::vector output_codecs; for (const webrtc::SdpVideoFormat& format : supported_formats) { VideoCodec codec(format); bool isCodecValidForLowerRange = absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName) || absl::EqualsIgnoreCase(codec.name, kAv1CodecName); - if (!isCodecValidForLowerRange) { - codec.id = payload_type_upper++; - } else { - codec.id = payload_type_lower++; - } - AddDefaultFeedbackParams(&codec, trials); - output_codecs.push_back(codec); + bool isFecCodec = absl::EqualsIgnoreCase(codec.name, kUlpfecCodecName) || + absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName); - if (payload_type_upper > kLastDynamicPayloadTypeUpperRange) { - RTC_LOG(LS_ERROR) - << "Out of dynamic payload types [96,127], skipping the rest."; - // TODO(https://bugs.chromium.org/p/webrtc/issues/detail?id=12194): - // continue in lower range. - break; - } + // Check if we ran out of payload types. if (payload_type_lower > kLastDynamicPayloadTypeLowerRange) { // TODO(https://bugs.chromium.org/p/webrtc/issues/detail?id=12248): // return an error. - RTC_LOG(LS_ERROR) - << "Out of dynamic payload types [35,65], skipping the rest."; + RTC_LOG(LS_ERROR) << "Out of dynamic payload types [35,63] after " + "fallback from [96, 127], skipping the rest."; + RTC_DCHECK_EQ(payload_type_upper, kLastDynamicPayloadTypeUpperRange); break; } - // Add associated RTX codec for non-FEC codecs. - if (!absl::EqualsIgnoreCase(codec.name, kUlpfecCodecName) && - !absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName)) { - if (!isCodecValidForLowerRange) { - output_codecs.push_back( - VideoCodec::CreateRtxCodec(payload_type_upper++, codec.id)); - } else { - output_codecs.push_back( - VideoCodec::CreateRtxCodec(payload_type_lower++, codec.id)); - } + // Lower range gets used for "new" codecs or when running out of payload + // types in the upper range. + if (isCodecValidForLowerRange || + payload_type_upper >= kLastDynamicPayloadTypeUpperRange) { + codec.id = payload_type_lower++; + } else { + codec.id = payload_type_upper++; + } + AddDefaultFeedbackParams(&codec, trials); + output_codecs.push_back(codec); - if (payload_type_upper > kLastDynamicPayloadTypeUpperRange) { - RTC_LOG(LS_ERROR) - << "Out of dynamic payload types [96,127], skipping rtx."; - // TODO(https://bugs.chromium.org/p/webrtc/issues/detail?id=12194): - // continue in lower range. - break; - } + // Add associated RTX codec for non-FEC codecs. + if (!isFecCodec) { + // Check if we ran out of payload types. if (payload_type_lower > kLastDynamicPayloadTypeLowerRange) { // TODO(https://bugs.chromium.org/p/webrtc/issues/detail?id=12248): // return an error. 
- RTC_LOG(LS_ERROR) - << "Out of dynamic payload types [35,65], skipping rtx."; + RTC_LOG(LS_ERROR) << "Out of dynamic payload types [35,63] after " + "fallback from [96, 127], skipping the rest."; + RTC_DCHECK_EQ(payload_type_upper, kLastDynamicPayloadTypeUpperRange); break; } + if (isCodecValidForLowerRange || + payload_type_upper >= kLastDynamicPayloadTypeUpperRange) { + output_codecs.push_back( + VideoCodec::CreateRtxCodec(payload_type_lower++, codec.id)); + } else { + output_codecs.push_back( + VideoCodec::CreateRtxCodec(payload_type_upper++, codec.id)); + } } } return output_codecs; @@ -1472,7 +1468,7 @@ bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp, for (uint32_t used_ssrc : sp.ssrcs) receive_ssrcs_.insert(used_ssrc); - webrtc::VideoReceiveStream::Config config(this); + webrtc::VideoReceiveStream::Config config(this, decoder_factory_); webrtc::FlexfecReceiveStream::Config flexfec_config(this); ConfigureReceiverRtp(&config, &flexfec_config, sp); @@ -1487,8 +1483,8 @@ bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp, config.frame_transformer = unsignaled_frame_transformer_; receive_streams_[ssrc] = new WebRtcVideoReceiveStream( - this, call_, sp, std::move(config), decoder_factory_, default_stream, - recv_codecs_, flexfec_config); + this, call_, sp, std::move(config), default_stream, recv_codecs_, + flexfec_config); return true; } @@ -1539,14 +1535,14 @@ void WebRtcVideoChannel::ConfigureReceiverRtp( // TODO(brandtr): Generalize when we add support for multistream protection. flexfec_config->payload_type = recv_flexfec_payload_type_; if (!IsDisabled(call_->trials(), "WebRTC-FlexFEC-03-Advertised") && - sp.GetFecFrSsrc(ssrc, &flexfec_config->remote_ssrc)) { + sp.GetFecFrSsrc(ssrc, &flexfec_config->rtp.remote_ssrc)) { flexfec_config->protected_media_ssrcs = {ssrc}; - flexfec_config->local_ssrc = config->rtp.local_ssrc; + flexfec_config->rtp.local_ssrc = config->rtp.local_ssrc; flexfec_config->rtcp_mode = config->rtp.rtcp_mode; // TODO(brandtr): We should be spec-compliant and set |transport_cc| here // based on the rtcp-fb for the FlexFEC codec, not the media codec. - flexfec_config->transport_cc = config->rtp.transport_cc; - flexfec_config->rtp_header_extensions = config->rtp.extensions; + flexfec_config->rtp.transport_cc = config->rtp.transport_cc; + flexfec_config->rtp.extensions = config->rtp.extensions; } } @@ -1570,6 +1566,7 @@ void WebRtcVideoChannel::ResetUnsignaledRecvStream() { RTC_DCHECK_RUN_ON(&thread_checker_); RTC_LOG(LS_INFO) << "ResetUnsignaledRecvStream."; unsignaled_stream_params_ = StreamParams(); + last_unsignalled_ssrc_creation_time_ms_ = absl::nullopt; // Delete any created default streams. This is needed to avoid SSRC collisions // in Call's RtpDemuxer, in the case that |this| has created a default video @@ -1772,7 +1769,23 @@ void WebRtcVideoChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet, if (demuxer_criteria_id_ != demuxer_criteria_completed_id_) { return; } - + // Ignore unknown ssrcs if we recently created an unsignalled receive + // stream since this shouldn't happen frequently. Getting into a state + // of creating decoders on every packet eats up processing time (e.g. + // https://crbug.com/1069603) and this cooldown prevents that. 
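The cooldown described in the comment above boils down to remembering when the last unsignalled stream was created and dropping further unknown SSRCs for a short window. A distilled sketch, assuming the caller supplies the current time in milliseconds (the real code uses rtc::TimeMillis() and a half-second constant); the class and method names here are illustrative:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

// Distilled form of the unsignalled-SSRC cooldown: once a default receive
// stream has been created, further unknown SSRCs are dropped for a short
// window instead of recreating decoders on every packet.
class UnsignaledSsrcCooldown {
 public:
  explicit UnsignaledSsrcCooldown(int64_t cooldown_ms)
      : cooldown_ms_(cooldown_ms) {}

  // Returns true if a new unsignalled stream may be created at |now_ms|.
  bool AllowCreation(int64_t now_ms) const {
    return !last_creation_ms_ || now_ms - *last_creation_ms_ >= cooldown_ms_;
  }

  void OnStreamCreated(int64_t now_ms) { last_creation_ms_ = now_ms; }
  void Reset() { last_creation_ms_ = std::nullopt; }

 private:
  const int64_t cooldown_ms_;
  std::optional<int64_t> last_creation_ms_;
};

int main() {
  UnsignaledSsrcCooldown cooldown(/*cooldown_ms=*/500);
  cooldown.OnStreamCreated(/*now_ms=*/1000);
  assert(!cooldown.AllowCreation(1499));  // Still within the cooldown window.
  assert(cooldown.AllowCreation(1500));   // Cooldown elapsed.
}
```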
+ if (last_unsignalled_ssrc_creation_time_ms_.has_value()) { + int64_t now_ms = rtc::TimeMillis(); + if (now_ms - last_unsignalled_ssrc_creation_time_ms_.value() < + kUnsignaledSsrcCooldownMs) { + // We've already created an unsignalled ssrc stream within the last + // 0.5 s, ignore with a warning. + RTC_LOG(LS_WARNING) + << "Another unsignalled ssrc packet arrived shortly after the " + << "creation of an unsignalled ssrc stream. Dropping packet."; + return; + } + } + // Let the unsignalled ssrc handler decide whether to drop or deliver. switch (unsignalled_ssrc_handler_->OnUnsignalledSsrc(this, ssrc)) { case UnsignalledSsrcHandler::kDropPacket: return; @@ -1785,6 +1798,7 @@ void WebRtcVideoChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet, webrtc::PacketReceiver::DELIVERY_OK) { RTC_LOG(LS_WARNING) << "Failed to deliver RTP packet on re-delivery."; } + last_unsignalled_ssrc_creation_time_ms_ = rtc::TimeMillis(); })); } @@ -2805,7 +2819,6 @@ WebRtcVideoChannel::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream( webrtc::Call* call, const StreamParams& sp, webrtc::VideoReceiveStream::Config config, - webrtc::VideoDecoderFactory* decoder_factory, bool default_stream, const std::vector& recv_codecs, const webrtc::FlexfecReceiveStream::Config& flexfec_config) @@ -2817,10 +2830,10 @@ WebRtcVideoChannel::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream( config_(std::move(config)), flexfec_config_(flexfec_config), flexfec_stream_(nullptr), - decoder_factory_(decoder_factory), sink_(NULL), first_frame_timestamp_(-1), estimated_remote_start_ntp_time_ms_(0) { + RTC_DCHECK(config_.decoder_factory); config_.renderer = this; ConfigureCodecs(recv_codecs); flexfec_config_.payload_type = flexfec_config.payload_type; @@ -2862,47 +2875,84 @@ WebRtcVideoChannel::WebRtcVideoReceiveStream::GetRtpParameters() const { return rtp_parameters; } -void WebRtcVideoChannel::WebRtcVideoReceiveStream::ConfigureCodecs( +bool WebRtcVideoChannel::WebRtcVideoReceiveStream::ConfigureCodecs( const std::vector& recv_codecs) { RTC_DCHECK(!recv_codecs.empty()); - config_.decoders.clear(); - config_.rtp.rtx_associated_payload_types.clear(); - config_.rtp.raw_payload_types.clear(); - config_.decoder_factory = decoder_factory_; + + std::map rtx_associated_payload_types; + std::set raw_payload_types; + std::vector decoders; for (const auto& recv_codec : recv_codecs) { - webrtc::SdpVideoFormat video_format(recv_codec.codec.name, - recv_codec.codec.params); - - webrtc::VideoReceiveStream::Decoder decoder; - decoder.video_format = video_format; - decoder.payload_type = recv_codec.codec.id; - decoder.video_format = - webrtc::SdpVideoFormat(recv_codec.codec.name, recv_codec.codec.params); - config_.decoders.push_back(decoder); - config_.rtp.rtx_associated_payload_types[recv_codec.rtx_payload_type] = - recv_codec.codec.id; + decoders.emplace_back( + webrtc::SdpVideoFormat(recv_codec.codec.name, recv_codec.codec.params), + recv_codec.codec.id); + rtx_associated_payload_types.insert( + {recv_codec.rtx_payload_type, recv_codec.codec.id}); if (recv_codec.codec.packetization == kPacketizationParamRaw) { - config_.rtp.raw_payload_types.insert(recv_codec.codec.id); + raw_payload_types.insert(recv_codec.codec.id); } } + bool recreate_needed = (stream_ == nullptr); + const auto& codec = recv_codecs.front(); - config_.rtp.ulpfec_payload_type = codec.ulpfec.ulpfec_payload_type; - config_.rtp.red_payload_type = codec.ulpfec.red_payload_type; + if (config_.rtp.ulpfec_payload_type != codec.ulpfec.ulpfec_payload_type) { + 
config_.rtp.ulpfec_payload_type = codec.ulpfec.ulpfec_payload_type; + recreate_needed = true; + } + + if (config_.rtp.red_payload_type != codec.ulpfec.red_payload_type) { + config_.rtp.red_payload_type = codec.ulpfec.red_payload_type; + recreate_needed = true; + } + + const bool has_lntf = HasLntf(codec.codec); + if (config_.rtp.lntf.enabled != has_lntf) { + config_.rtp.lntf.enabled = has_lntf; + recreate_needed = true; + } + + const int rtp_history_ms = HasNack(codec.codec) ? kNackHistoryMs : 0; + if (rtp_history_ms != config_.rtp.nack.rtp_history_ms) { + config_.rtp.nack.rtp_history_ms = rtp_history_ms; + recreate_needed = true; + } - config_.rtp.lntf.enabled = HasLntf(codec.codec); - config_.rtp.nack.rtp_history_ms = HasNack(codec.codec) ? kNackHistoryMs : 0; // The rtx-time parameter can be used to override the hardcoded default for // the NACK buffer length. if (codec.rtx_time != -1 && config_.rtp.nack.rtp_history_ms != 0) { config_.rtp.nack.rtp_history_ms = codec.rtx_time; + recreate_needed = true; } - config_.rtp.rtcp_xr.receiver_reference_time_report = HasRrtr(codec.codec); + + const bool has_rtr = HasRrtr(codec.codec); + if (has_rtr != config_.rtp.rtcp_xr.receiver_reference_time_report) { + config_.rtp.rtcp_xr.receiver_reference_time_report = has_rtr; + recreate_needed = true; + } + if (codec.ulpfec.red_rtx_payload_type != -1) { - config_.rtp - .rtx_associated_payload_types[codec.ulpfec.red_rtx_payload_type] = + rtx_associated_payload_types[codec.ulpfec.red_rtx_payload_type] = codec.ulpfec.red_payload_type; } + + if (config_.rtp.rtx_associated_payload_types != + rtx_associated_payload_types) { + rtx_associated_payload_types.swap(config_.rtp.rtx_associated_payload_types); + recreate_needed = true; + } + + if (raw_payload_types != config_.rtp.raw_payload_types) { + raw_payload_types.swap(config_.rtp.raw_payload_types); + recreate_needed = true; + } + + if (decoders != config_.decoders) { + decoders.swap(config_.decoders); + recreate_needed = true; + } + + return recreate_needed; } void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetLocalSsrc( @@ -2919,7 +2969,7 @@ void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetLocalSsrc( } config_.rtp.local_ssrc = local_ssrc; - flexfec_config_.local_ssrc = local_ssrc; + flexfec_config_.rtp.local_ssrc = local_ssrc; RTC_LOG(LS_INFO) << "RecreateWebRtcVideoStream (recv) because of SetLocalSsrc; local_ssrc=" << local_ssrc; @@ -2952,7 +3002,7 @@ void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetFeedbackParameters( config_.rtp.rtcp_mode = rtcp_mode; // TODO(brandtr): We should be spec-compliant and set |transport_cc| here // based on the rtcp-fb for the FlexFEC codec, not the media codec. 
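The ConfigureCodecs() rewrite above repeats one pattern many times: copy a value into the config only if it differs, and remember that the receive stream must be recreated. A small helper, not part of the CL and shown purely to illustrate the pattern, could express each comparison in one line:

```cpp
// Returns true (and assigns) only when the value actually changes, so the
// caller can OR the result into its recreate_needed flag.
template <typename T>
bool UpdateIfChanged(T& target, const T& value) {
  if (target == value)
    return false;
  target = value;
  return true;
}

// Usage sketch against hypothetical config fields:
//   recreate_needed |= UpdateIfChanged(config.rtp.red_payload_type,
//                                      codec.ulpfec.red_payload_type);
//   recreate_needed |= UpdateIfChanged(config.rtp.nack.rtp_history_ms,
//                                      has_nack ? kNackHistoryMs : 0);
```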
-  flexfec_config_.transport_cc = config_.rtp.transport_cc;
+  flexfec_config_.rtp.transport_cc = config_.rtp.transport_cc;
   flexfec_config_.rtcp_mode = config_.rtp.rtcp_mode;
   RTC_LOG(LS_INFO) << "RecreateWebRtcVideoStream (recv) because of "
                       "SetFeedbackParameters; nack="
@@ -2964,13 +3014,20 @@ void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetRecvParameters(
     const ChangedRecvParameters& params) {
   bool video_needs_recreation = false;
   if (params.codec_settings) {
-    ConfigureCodecs(*params.codec_settings);
-    video_needs_recreation = true;
+    video_needs_recreation = ConfigureCodecs(*params.codec_settings);
   }
+
   if (params.rtp_header_extensions) {
-    config_.rtp.extensions = *params.rtp_header_extensions;
-    flexfec_config_.rtp_header_extensions = *params.rtp_header_extensions;
-    video_needs_recreation = true;
+    if (config_.rtp.extensions != *params.rtp_header_extensions) {
+      config_.rtp.extensions = *params.rtp_header_extensions;
+      video_needs_recreation = true;
+    }
+
+    if (flexfec_config_.rtp.extensions != *params.rtp_header_extensions) {
+      flexfec_config_.rtp.extensions = *params.rtp_header_extensions;
+      if (flexfec_stream_ || flexfec_config_.IsCompleteAndEnabled())
+        video_needs_recreation = true;
+    }
   }
   if (params.flexfec_payload_type) {
     flexfec_config_.payload_type = *params.flexfec_payload_type;
@@ -2978,7 +3035,8 @@ void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetRecvParameters(
     // configured and instead of recreating the video stream, reconfigure the
     // flexfec object from within the rtp callback (soon to be on the network
     // thread).
-    video_needs_recreation = true;
+    if (flexfec_stream_ || flexfec_config_.IsCompleteAndEnabled())
+      video_needs_recreation = true;
   }
   if (video_needs_recreation) {
     RecreateWebRtcVideoStream();
   }
diff --git a/media/engine/webrtc_video_engine.h b/media/engine/webrtc_video_engine.h
index e8125e12a0..a67a010ed7 100644
--- a/media/engine/webrtc_video_engine.h
+++ b/media/engine/webrtc_video_engine.h
@@ -436,7 +436,6 @@ class WebRtcVideoChannel : public VideoMediaChannel,
         webrtc::Call* call,
         const StreamParams& sp,
         webrtc::VideoReceiveStream::Config config,
-        webrtc::VideoDecoderFactory* decoder_factory,
         bool default_stream,
         const std::vector<VideoCodecSettings>& recv_codecs,
         const webrtc::FlexfecReceiveStream::Config& flexfec_config);
@@ -484,7 +483,10 @@ class WebRtcVideoChannel : public VideoMediaChannel,
    private:
     void RecreateWebRtcVideoStream();
-    void ConfigureCodecs(const std::vector<VideoCodecSettings>& recv_codecs);
+    // Applies a new receive codecs configuration to `config_`. Returns true
+    // if the internal stream needs to be reconstructed, or false if no
+    // changes were applied.
+    bool ConfigureCodecs(const std::vector<VideoCodecSettings>& recv_codecs);

     std::string GetCodecNameFromPayloadType(int payload_type);

@@ -501,8 +503,6 @@ class WebRtcVideoChannel : public VideoMediaChannel,
     webrtc::FlexfecReceiveStream::Config flexfec_config_;
     webrtc::FlexfecReceiveStream* flexfec_stream_;

-    webrtc::VideoDecoderFactory* const decoder_factory_;
-
     webrtc::Mutex sink_lock_;
     rtc::VideoSinkInterface<webrtc::VideoFrame>* sink_
         RTC_GUARDED_BY(sink_lock_);
@@ -588,6 +588,8 @@ class WebRtcVideoChannel : public VideoMediaChannel,
   // is a risk of receiving ssrcs for other, recently added m= sections.
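The two counters declared just after this note implement a simple generation guard: every demuxer-criteria change bumps one counter, and unknown SSRCs are only acted upon once the demuxer confirms it has caught up. A distilled sketch of that guard, with class and method names invented for illustration:

```cpp
#include <cassert>
#include <cstdint>

// Distilled form of the demuxer-criteria guard around unsignalled SSRC
// handling: ignore unknown SSRCs while criteria updates are still in flight.
class DemuxerCriteriaGuard {
 public:
  void OnCriteriaChanged() { ++criteria_id_; }
  void OnCriteriaUpdateComplete() { ++completed_id_; }

  bool CanCreateUnsignalledStream() const {
    return criteria_id_ == completed_id_;
  }

 private:
  uint32_t criteria_id_ = 0;
  uint32_t completed_id_ = 0;
};

int main() {
  DemuxerCriteriaGuard guard;
  assert(guard.CanCreateUnsignalledStream());
  guard.OnCriteriaChanged();
  assert(!guard.CanCreateUnsignalledStream());  // Update still pending.
  guard.OnCriteriaUpdateComplete();
  assert(guard.CanCreateUnsignalledStream());
}
```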
uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0; uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0; + absl::optional last_unsignalled_ssrc_creation_time_ms_ + RTC_GUARDED_BY(thread_checker_); std::set send_ssrcs_ RTC_GUARDED_BY(thread_checker_); std::set receive_ssrcs_ RTC_GUARDED_BY(thread_checker_); diff --git a/media/engine/webrtc_video_engine_unittest.cc b/media/engine/webrtc_video_engine_unittest.cc index 0c32f8ade0..d0745e35f5 100644 --- a/media/engine/webrtc_video_engine_unittest.cc +++ b/media/engine/webrtc_video_engine_unittest.cc @@ -51,17 +51,19 @@ #include "media/engine/fake_webrtc_video_engine.h" #include "media/engine/simulcast.h" #include "media/engine/webrtc_voice_engine.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "rtc_base/arraysize.h" +#include "rtc_base/event.h" #include "rtc_base/experiments/min_video_bitrate_experiment.h" #include "rtc_base/fake_clock.h" #include "rtc_base/gunit.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/time_utils.h" +#include "system_wrappers/include/field_trial.h" #include "test/fake_decoder.h" #include "test/field_trial.h" #include "test/frame_forwarder.h" #include "test/gmock.h" -#include "test/rtp_header_parser.h" using ::testing::_; using ::testing::Contains; @@ -95,6 +97,7 @@ static const uint32_t kSsrcs3[] = {1, 2, 3}; static const uint32_t kRtxSsrcs1[] = {4}; static const uint32_t kFlexfecSsrc = 5; static const uint32_t kIncomingUnsignalledSsrc = 0xC0FFEE; +static const int64_t kUnsignalledReceiveStreamCooldownMs = 500; constexpr uint32_t kRtpHeaderSize = 12; @@ -1659,20 +1662,13 @@ class WebRtcVideoChannelBaseTest : public ::testing::Test { return network_interface_.NumRtpPackets(ssrc); } int NumSentSsrcs() { return network_interface_.NumSentSsrcs(); } - const rtc::CopyOnWriteBuffer* GetRtpPacket(int index) { + rtc::CopyOnWriteBuffer GetRtpPacket(int index) { return network_interface_.GetRtpPacket(index); } - static int GetPayloadType(const rtc::CopyOnWriteBuffer* p) { - webrtc::RTPHeader header; - EXPECT_TRUE(ParseRtpPacket(p, &header)); - return header.payloadType; - } - - static bool ParseRtpPacket(const rtc::CopyOnWriteBuffer* p, - webrtc::RTPHeader* header) { - std::unique_ptr parser( - webrtc::RtpHeaderParser::CreateForTest()); - return parser->Parse(p->cdata(), p->size(), header); + static int GetPayloadType(rtc::CopyOnWriteBuffer p) { + webrtc::RtpPacket header; + EXPECT_TRUE(header.Parse(std::move(p))); + return header.PayloadType(); } // Tests that we can send and receive frames. 
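The test helpers above switch from the removed RtpHeaderParser to webrtc::RtpPacket. For reference, reading the payload type and SSRC out of a raw buffer with that class looks roughly like this; the free function is a sketch with error handling reduced to a bool:

```cpp
#include <cstdint>
#include <utility>

#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "rtc_base/copy_on_write_buffer.h"

// Parses payload type and SSRC from a raw RTP buffer using webrtc::RtpPacket,
// mirroring the updated test helpers. Returns false if the header is invalid.
bool ReadPayloadTypeAndSsrc(rtc::CopyOnWriteBuffer buffer,
                            int* payload_type,
                            uint32_t* ssrc) {
  webrtc::RtpPacket packet;
  if (!packet.Parse(std::move(buffer)))
    return false;
  *payload_type = packet.PayloadType();
  *ssrc = packet.Ssrc();
  return true;
}
```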
@@ -1683,8 +1679,7 @@ class WebRtcVideoChannelBaseTest : public ::testing::Test { EXPECT_EQ(0, renderer_.num_rendered_frames()); SendFrame(); EXPECT_FRAME_WAIT(1, kVideoWidth, kVideoHeight, kTimeout); - std::unique_ptr p(GetRtpPacket(0)); - EXPECT_EQ(codec.id, GetPayloadType(p.get())); + EXPECT_EQ(codec.id, GetPayloadType(GetRtpPacket(0))); } void SendReceiveManyAndGetStats(const cricket::VideoCodec& codec, @@ -1700,8 +1695,7 @@ class WebRtcVideoChannelBaseTest : public ::testing::Test { EXPECT_FRAME_WAIT(frame + i * fps, kVideoWidth, kVideoHeight, kTimeout); } } - std::unique_ptr p(GetRtpPacket(0)); - EXPECT_EQ(codec.id, GetPayloadType(p.get())); + EXPECT_EQ(codec.id, GetPayloadType(GetRtpPacket(0))); } cricket::VideoSenderInfo GetSenderStats(size_t i) { @@ -1747,6 +1741,7 @@ class WebRtcVideoChannelBaseTest : public ::testing::Test { webrtc::RtcEventLogNull event_log_; webrtc::FieldTrialBasedConfig field_trials_; + std::unique_ptr override_field_trials_; std::unique_ptr task_queue_factory_; std::unique_ptr call_; std::unique_ptr @@ -1801,8 +1796,10 @@ TEST_F(WebRtcVideoChannelBaseTest, OverridesRecvBufferSize) { // Set field trial to override the default recv buffer size, and then re-run // setup where the interface is created and configured. const int kCustomRecvBufferSize = 123456; - webrtc::test::ScopedFieldTrials field_trial( + RTC_DCHECK(!override_field_trials_); + override_field_trials_ = std::make_unique( "WebRTC-IncreasedReceivebuffers/123456/"); + ResetTest(); EXPECT_TRUE(SetOneCodec(DefaultCodec())); @@ -1817,7 +1814,8 @@ TEST_F(WebRtcVideoChannelBaseTest, OverridesRecvBufferSizeWithSuffix) { // Set field trial to override the default recv buffer size, and then re-run // setup where the interface is created and configured. const int kCustomRecvBufferSize = 123456; - webrtc::test::ScopedFieldTrials field_trial( + RTC_DCHECK(!override_field_trials_); + override_field_trials_ = std::make_unique( "WebRTC-IncreasedReceivebuffers/123456_Dogfood/"); ResetTest(); @@ -1834,18 +1832,46 @@ TEST_F(WebRtcVideoChannelBaseTest, InvalidRecvBufferSize) { // then re-run setup where the interface is created and configured. The // default value should still be used. + const char* prev_field_trials = webrtc::field_trial::GetFieldTrialString(); + + std::string field_trial_string; for (std::string group : {" ", "NotANumber", "-1", "0"}) { - std::string field_trial_string = "WebRTC-IncreasedReceivebuffers/"; - field_trial_string += group; - field_trial_string += "/"; - webrtc::test::ScopedFieldTrials field_trial(field_trial_string); - ResetTest(); + std::string trial_string = "WebRTC-IncreasedReceivebuffers/"; + trial_string += group; + trial_string += "/"; + + // Dear reader. Sorry for this... it's a bit of a mess. + // TODO(bugs.webrtc.org/12854): This test needs to be rewritten to not use + // ResetTest and changing global field trials in a loop. + TearDown(); + // This is a hack to appease tsan. Because of the way the test is written + // active state within Call, including running task queues may race with + // the test changing the global field trial variable. + // This particular hack, pauses the transport controller TQ while we + // change the field trial. 
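The "pause the task queue" trick described in the comment above, shown in isolation: post a task that signals it has started and then blocks until released, so nothing else runs on that queue while the caller swaps the global field-trial string. The wrapper function below is a sketch, not part of the CL; it adds a third event so the blocked task is not outlived.

```cpp
#include "rtc_base/event.h"
#include "rtc_base/task_queue.h"

// Runs |critical_section| while |queue| is parked on a blocking task.
void RunWhileQueuePaused(rtc::TaskQueue* queue, void (*critical_section)()) {
  rtc::Event paused, resume, done;
  queue->PostTask([&paused, &resume, &done]() {
    paused.Set();                       // Tell the caller the queue is parked.
    resume.Wait(rtc::Event::kForever);  // Block the queue until released.
    done.Set();
  });
  paused.Wait(rtc::Event::kForever);
  critical_section();  // e.g. swap the global field-trial string here.
  resume.Set();
  done.Wait(rtc::Event::kForever);  // Don't outlive the blocked task.
}
```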
+ rtc::TaskQueue* tq = call_->GetTransportControllerSend()->GetWorkerQueue(); + rtc::Event waiting, resume; + tq->PostTask([&waiting, &resume]() { + waiting.Set(); + resume.Wait(rtc::Event::kForever); + }); + + waiting.Wait(rtc::Event::kForever); + field_trial_string = std::move(trial_string); + webrtc::field_trial::InitFieldTrialsFromString(field_trial_string.c_str()); + + SetUp(); + resume.Set(); + + // OK, now the test can carry on. EXPECT_TRUE(SetOneCodec(DefaultCodec())); EXPECT_TRUE(SetSend(true)); EXPECT_EQ(64 * 1024, network_interface_.sendbuf_size()); EXPECT_EQ(256 * 1024, network_interface_.recvbuf_size()); } + + webrtc::field_trial::InitFieldTrialsFromString(prev_field_trials); } // Test that stats work properly for a 1-1 call. @@ -1868,7 +1894,7 @@ TEST_F(WebRtcVideoChannelBaseTest, GetStats) { EXPECT_EQ(DefaultCodec().id, *info.senders[0].codec_payload_type); EXPECT_EQ(0, info.senders[0].firs_rcvd); EXPECT_EQ(0, info.senders[0].plis_rcvd); - EXPECT_EQ(0, info.senders[0].nacks_rcvd); + EXPECT_EQ(0u, info.senders[0].nacks_rcvd); EXPECT_EQ(kVideoWidth, info.senders[0].send_frame_width); EXPECT_EQ(kVideoHeight, info.senders[0].send_frame_height); EXPECT_GT(info.senders[0].framerate_input, 0); @@ -1892,7 +1918,7 @@ TEST_F(WebRtcVideoChannelBaseTest, GetStats) { // EXPECT_EQ(0, info.receivers[0].packets_concealed); EXPECT_EQ(0, info.receivers[0].firs_sent); EXPECT_EQ(0, info.receivers[0].plis_sent); - EXPECT_EQ(0, info.receivers[0].nacks_sent); + EXPECT_EQ(0U, info.receivers[0].nacks_sent); EXPECT_EQ(kVideoWidth, info.receivers[0].frame_width); EXPECT_EQ(kVideoHeight, info.receivers[0].frame_height); EXPECT_GT(info.receivers[0].framerate_rcvd, 0); @@ -2034,15 +2060,14 @@ TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrc) { EXPECT_TRUE(SetSend(true)); SendFrame(); EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout); - webrtc::RTPHeader header; - std::unique_ptr p(GetRtpPacket(0)); - EXPECT_TRUE(ParseRtpPacket(p.get(), &header)); - EXPECT_EQ(kSsrc, header.ssrc); + webrtc::RtpPacket header; + EXPECT_TRUE(header.Parse(GetRtpPacket(0))); + EXPECT_EQ(kSsrc, header.Ssrc()); // Packets are being paced out, so these can mismatch between the first and // second call to NumRtpPackets until pending packets are paced out. - EXPECT_EQ_WAIT(NumRtpPackets(), NumRtpPackets(header.ssrc), kTimeout); - EXPECT_EQ_WAIT(NumRtpBytes(), NumRtpBytes(header.ssrc), kTimeout); + EXPECT_EQ_WAIT(NumRtpPackets(), NumRtpPackets(header.Ssrc()), kTimeout); + EXPECT_EQ_WAIT(NumRtpBytes(), NumRtpBytes(header.Ssrc()), kTimeout); EXPECT_EQ(1, NumSentSsrcs()); EXPECT_EQ(0, NumRtpPackets(kSsrc - 1)); EXPECT_EQ(0, NumRtpBytes(kSsrc - 1)); @@ -2059,14 +2084,13 @@ TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrcAfterSetCodecs) { EXPECT_TRUE(SetSend(true)); EXPECT_TRUE(WaitAndSendFrame(0)); EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout); - webrtc::RTPHeader header; - std::unique_ptr p(GetRtpPacket(0)); - EXPECT_TRUE(ParseRtpPacket(p.get(), &header)); - EXPECT_EQ(999u, header.ssrc); + webrtc::RtpPacket header; + EXPECT_TRUE(header.Parse(GetRtpPacket(0))); + EXPECT_EQ(999u, header.Ssrc()); // Packets are being paced out, so these can mismatch between the first and // second call to NumRtpPackets until pending packets are paced out. 
-  EXPECT_EQ_WAIT(NumRtpPackets(), NumRtpPackets(header.ssrc), kTimeout);
-  EXPECT_EQ_WAIT(NumRtpBytes(), NumRtpBytes(header.ssrc), kTimeout);
+  EXPECT_EQ_WAIT(NumRtpPackets(), NumRtpPackets(header.Ssrc()), kTimeout);
+  EXPECT_EQ_WAIT(NumRtpBytes(), NumRtpBytes(header.Ssrc()), kTimeout);
   EXPECT_EQ(1, NumSentSsrcs());
   EXPECT_EQ(0, NumRtpPackets(kSsrc));
   EXPECT_EQ(0, NumRtpBytes(kSsrc));
@@ -2098,12 +2122,10 @@ TEST_F(WebRtcVideoChannelBaseTest, AddRemoveSendStreams) {
   SendFrame();
   EXPECT_FRAME_WAIT(1, kVideoWidth, kVideoHeight, kTimeout);
   EXPECT_GT(NumRtpPackets(), 0);
-  webrtc::RTPHeader header;
+  webrtc::RtpPacket header;
   size_t last_packet = NumRtpPackets() - 1;
-  std::unique_ptr<const rtc::CopyOnWriteBuffer> p(
-      GetRtpPacket(static_cast<int>(last_packet)));
-  EXPECT_TRUE(ParseRtpPacket(p.get(), &header));
-  EXPECT_EQ(kSsrc, header.ssrc);
+  EXPECT_TRUE(header.Parse(GetRtpPacket(static_cast<int>(last_packet))));
+  EXPECT_EQ(kSsrc, header.Ssrc());

   // Remove the send stream that was added during Setup.
   EXPECT_TRUE(channel_->RemoveSendStream(kSsrc));
@@ -2118,9 +2140,8 @@ TEST_F(WebRtcVideoChannelBaseTest, AddRemoveSendStreams) {
   EXPECT_TRUE_WAIT(NumRtpPackets() > rtp_packets, kTimeout);

   last_packet = NumRtpPackets() - 1;
-  p.reset(GetRtpPacket(static_cast<int>(last_packet)));
-  EXPECT_TRUE(ParseRtpPacket(p.get(), &header));
-  EXPECT_EQ(789u, header.ssrc);
+  EXPECT_TRUE(header.Parse(GetRtpPacket(static_cast<int>(last_packet))));
+  EXPECT_EQ(789u, header.Ssrc());
 }

 // Tests the behavior of incoming streams in a conference scenario.
@@ -2148,8 +2169,7 @@ TEST_F(WebRtcVideoChannelBaseTest, SimulateConference) {
   EXPECT_FRAME_ON_RENDERER_WAIT(renderer2, 1, kVideoWidth, kVideoHeight,
                                 kTimeout);
-  std::unique_ptr<const rtc::CopyOnWriteBuffer> p(GetRtpPacket(0));
-  EXPECT_EQ(DefaultCodec().id, GetPayloadType(p.get()));
+  EXPECT_EQ(DefaultCodec().id, GetPayloadType(GetRtpPacket(0)));
   EXPECT_EQ(kVideoWidth, renderer1.width());
   EXPECT_EQ(kVideoHeight, renderer1.height());
   EXPECT_EQ(kVideoWidth, renderer2.width());
@@ -2546,6 +2566,16 @@ class WebRtcVideoChannelTest : public WebRtcVideoEngineTest {

   cricket::VideoCodec DefaultCodec() { return GetEngineCodec("VP8"); }

+  // After receiving and processing the packet, enough time is advanced that
+  // the unsignalled receive stream cooldown is no longer in effect.
+  void ReceivePacketAndAdvanceTime(rtc::CopyOnWriteBuffer packet,
+                                   int64_t packet_time_us) {
+    channel_->OnPacketReceived(packet, packet_time_us);
+    rtc::Thread::Current()->ProcessMessages(0);
+    fake_clock_.AdvanceTime(
+        webrtc::TimeDelta::Millis(kUnsignalledReceiveStreamCooldownMs));
+  }
+
  protected:
   FakeVideoSendStream* AddSendStream() {
     return AddSendStream(StreamParams::CreateLegacy(++last_ssrc_));
   }
@@ -2955,7 +2985,7 @@ TEST_F(WebRtcVideoChannelTest, RecvAbsoluteSendTimeHeaderExtensions) {
 }

 TEST_F(WebRtcVideoChannelTest, FiltersExtensionsPicksTransportSeqNum) {
-  webrtc::test::ScopedFieldTrials override_field_trials_(
+  webrtc::test::ScopedFieldTrials override_field_trials(
       "WebRTC-FilterAbsSendTimeExtension/Enabled/");
   // Enable three redundant extensions.
std::vector extensions; @@ -4134,10 +4164,10 @@ TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetDefaultRecvCodecsWithSsrc) { const std::vector& streams = fake_call_->GetFlexfecReceiveStreams(); ASSERT_EQ(1U, streams.size()); - const FakeFlexfecReceiveStream* stream = streams.front(); + const auto* stream = streams.front(); const webrtc::FlexfecReceiveStream::Config& config = stream->GetConfig(); EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.payload_type); - EXPECT_EQ(kFlexfecSsrc, config.remote_ssrc); + EXPECT_EQ(kFlexfecSsrc, config.rtp.remote_ssrc); ASSERT_EQ(1U, config.protected_media_ssrcs.size()); EXPECT_EQ(kSsrcs1[0], config.protected_media_ssrcs[0]); @@ -4250,7 +4280,7 @@ TEST_F(WebRtcVideoChannelFlexfecRecvTest, DuplicateFlexfecCodecIsDropped) { const std::vector& streams = fake_call_->GetFlexfecReceiveStreams(); ASSERT_EQ(1U, streams.size()); - const FakeFlexfecReceiveStream* stream = streams.front(); + const auto* stream = streams.front(); const webrtc::FlexfecReceiveStream::Config& config = stream->GetConfig(); EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.payload_type); } @@ -4326,7 +4356,7 @@ TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetRecvCodecsWithFec) { flexfec_stream->GetConfig(); EXPECT_EQ(GetEngineCodec("flexfec-03").id, flexfec_stream_config.payload_type); - EXPECT_EQ(kFlexfecSsrc, flexfec_stream_config.remote_ssrc); + EXPECT_EQ(kFlexfecSsrc, flexfec_stream_config.rtp.remote_ssrc); ASSERT_EQ(1U, flexfec_stream_config.protected_media_ssrcs.size()); EXPECT_EQ(kSsrcs1[0], flexfec_stream_config.protected_media_ssrcs[0]); const std::vector& video_streams = @@ -4335,17 +4365,17 @@ TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetRecvCodecsWithFec) { const webrtc::VideoReceiveStream::Config& video_stream_config = video_stream->GetConfig(); EXPECT_EQ(video_stream_config.rtp.local_ssrc, - flexfec_stream_config.local_ssrc); + flexfec_stream_config.rtp.local_ssrc); EXPECT_EQ(video_stream_config.rtp.rtcp_mode, flexfec_stream_config.rtcp_mode); EXPECT_EQ(video_stream_config.rtcp_send_transport, flexfec_stream_config.rtcp_send_transport); // TODO(brandtr): Update this EXPECT when we set |transport_cc| in a // spec-compliant way. 
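Many of the test expectations in this file change because the FlexFEC receive config's RTP-related fields now sit under a nested rtp member. A hedged sketch of populating such a config after this CL; the field names are taken from the diff itself, while the helper function and its arguments are invented for illustration:

```cpp
#include <cstdint>

#include "call/flexfec_receive_stream.h"

// Illustrative only: RTP-related settings hang off config.rtp after this CL
// rather than living directly on the config struct.
void ConfigureFlexfec(webrtc::FlexfecReceiveStream::Config& config,
                      int flexfec_payload_type,
                      uint32_t flexfec_ssrc,
                      uint32_t media_ssrc,
                      uint32_t local_ssrc) {
  config.payload_type = flexfec_payload_type;
  config.rtp.remote_ssrc = flexfec_ssrc;  // Was config.remote_ssrc.
  config.rtp.local_ssrc = local_ssrc;     // Was config.local_ssrc.
  config.protected_media_ssrcs = {media_ssrc};
}
```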
EXPECT_EQ(video_stream_config.rtp.transport_cc, - flexfec_stream_config.transport_cc); + flexfec_stream_config.rtp.transport_cc); EXPECT_EQ(video_stream_config.rtp.rtcp_mode, flexfec_stream_config.rtcp_mode); EXPECT_EQ(video_stream_config.rtp.extensions, - flexfec_stream_config.rtp_header_extensions); + flexfec_stream_config.rtp.extensions); } // We should not send FlexFEC, even if we advertise it, unless the right @@ -5096,7 +5126,7 @@ TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetRecvParamsWithoutFecDisablesFec) { ASSERT_EQ(1U, streams.size()); const FakeFlexfecReceiveStream* stream = streams.front(); EXPECT_EQ(GetEngineCodec("flexfec-03").id, stream->GetConfig().payload_type); - EXPECT_EQ(kFlexfecSsrc, stream->GetConfig().remote_ssrc); + EXPECT_EQ(kFlexfecSsrc, stream->rtp_config().remote_ssrc); ASSERT_EQ(1U, stream->GetConfig().protected_media_ssrcs.size()); EXPECT_EQ(kSsrcs1[0], stream->GetConfig().protected_media_ssrcs[0]); @@ -5149,7 +5179,7 @@ TEST_F(WebRtcVideoChannelFlexfecSendRecvTest, const FakeFlexfecReceiveStream* stream_with_recv_params = streams.front(); EXPECT_EQ(GetEngineCodec("flexfec-03").id, stream_with_recv_params->GetConfig().payload_type); - EXPECT_EQ(kFlexfecSsrc, stream_with_recv_params->GetConfig().remote_ssrc); + EXPECT_EQ(kFlexfecSsrc, stream_with_recv_params->GetConfig().rtp.remote_ssrc); EXPECT_EQ(1U, stream_with_recv_params->GetConfig().protected_media_ssrcs.size()); EXPECT_EQ(kSsrcs1[0], @@ -5163,7 +5193,7 @@ TEST_F(WebRtcVideoChannelFlexfecSendRecvTest, const FakeFlexfecReceiveStream* stream_with_send_params = streams.front(); EXPECT_EQ(GetEngineCodec("flexfec-03").id, stream_with_send_params->GetConfig().payload_type); - EXPECT_EQ(kFlexfecSsrc, stream_with_send_params->GetConfig().remote_ssrc); + EXPECT_EQ(kFlexfecSsrc, stream_with_send_params->GetConfig().rtp.remote_ssrc); EXPECT_EQ(1U, stream_with_send_params->GetConfig().protected_media_ssrcs.size()); EXPECT_EQ(kSsrcs1[0], @@ -5525,7 +5555,7 @@ TEST_F(WebRtcVideoChannelTest, GetAggregatedStatsReportWithoutSubStreams) { // Comes from substream only. 
EXPECT_EQ(sender.firs_rcvd, 0); EXPECT_EQ(sender.plis_rcvd, 0); - EXPECT_EQ(sender.nacks_rcvd, 0); + EXPECT_EQ(sender.nacks_rcvd, 0u); EXPECT_EQ(sender.send_frame_width, 0); EXPECT_EQ(sender.send_frame_height, 0); @@ -5649,9 +5679,8 @@ TEST_F(WebRtcVideoChannelTest, GetAggregatedStatsReportForSubStreams) { EXPECT_EQ( sender.plis_rcvd, static_cast(2 * substream.rtcp_packet_type_counts.pli_packets)); - EXPECT_EQ( - sender.nacks_rcvd, - static_cast(2 * substream.rtcp_packet_type_counts.nack_packets)); + EXPECT_EQ(sender.nacks_rcvd, + 2 * substream.rtcp_packet_type_counts.nack_packets); EXPECT_EQ(sender.send_frame_width, substream.width); EXPECT_EQ(sender.send_frame_height, substream.height); @@ -5770,8 +5799,7 @@ TEST_F(WebRtcVideoChannelTest, GetPerLayerStatsReportForSubStreams) { static_cast(substream.rtcp_packet_type_counts.fir_packets)); EXPECT_EQ(sender.plis_rcvd, static_cast(substream.rtcp_packet_type_counts.pli_packets)); - EXPECT_EQ(sender.nacks_rcvd, - static_cast(substream.rtcp_packet_type_counts.nack_packets)); + EXPECT_EQ(sender.nacks_rcvd, substream.rtcp_packet_type_counts.nack_packets); EXPECT_EQ(sender.send_frame_width, substream.width); EXPECT_EQ(sender.send_frame_height, substream.height); @@ -6092,15 +6120,15 @@ TEST_F(WebRtcVideoChannelTest, GetStatsTranslatesSendRtcpPacketTypesCorrectly) { cricket::VideoMediaInfo info; ASSERT_TRUE(channel_->GetStats(&info)); EXPECT_EQ(2, info.senders[0].firs_rcvd); - EXPECT_EQ(3, info.senders[0].nacks_rcvd); + EXPECT_EQ(3u, info.senders[0].nacks_rcvd); EXPECT_EQ(4, info.senders[0].plis_rcvd); EXPECT_EQ(5, info.senders[1].firs_rcvd); - EXPECT_EQ(7, info.senders[1].nacks_rcvd); + EXPECT_EQ(7u, info.senders[1].nacks_rcvd); EXPECT_EQ(9, info.senders[1].plis_rcvd); EXPECT_EQ(7, info.aggregated_senders[0].firs_rcvd); - EXPECT_EQ(10, info.aggregated_senders[0].nacks_rcvd); + EXPECT_EQ(10u, info.aggregated_senders[0].nacks_rcvd); EXPECT_EQ(13, info.aggregated_senders[0].plis_rcvd); } @@ -6118,7 +6146,7 @@ TEST_F(WebRtcVideoChannelTest, EXPECT_EQ(stats.rtcp_packet_type_counts.fir_packets, rtc::checked_cast(info.receivers[0].firs_sent)); EXPECT_EQ(stats.rtcp_packet_type_counts.nack_packets, - rtc::checked_cast(info.receivers[0].nacks_sent)); + info.receivers[0].nacks_sent); EXPECT_EQ(stats.rtcp_packet_type_counts.pli_packets, rtc::checked_cast(info.receivers[0].plis_sent)); } @@ -6278,8 +6306,7 @@ TEST_F(WebRtcVideoChannelTest, DefaultReceiveStreamReconfiguresToUseRtx) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], ssrcs[0]); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()) << "No default receive stream created."; @@ -6440,8 +6467,7 @@ TEST_F(WebRtcVideoChannelTest, RecvUnsignaledSsrcWithSignaledStreamId) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); // The stream should now be created with the appropriate sync label. 
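Several of the tests around here hand-roll a minimal 12-byte RTP packet and only fill in the SSRC (and sometimes the payload type). The layout they rely on is the fixed RTP header: the payload type lives in the low 7 bits of byte 1 and the SSRC occupies bytes 8-11 in network order; the version bits in byte 0 are simply left zero. A small sketch of the same construction, with an illustrative helper name:

```cpp
#include <cstdint>
#include <cstring>

#include "rtc_base/byte_order.h"           // rtc::Set8, rtc::SetBE32
#include "rtc_base/copy_on_write_buffer.h"

// Builds the kind of bare-bones 12-byte RTP packet used by these tests:
// everything zeroed except the payload type (byte 1) and the SSRC (bytes 8-11).
rtc::CopyOnWriteBuffer BuildMinimalRtpPacket(uint8_t payload_type,
                                             uint32_t ssrc) {
  uint8_t data[12];
  memset(data, 0, sizeof(data));
  rtc::Set8(data, 1, payload_type);  // Marker bit stays 0.
  rtc::SetBE32(&data[8], ssrc);
  return rtc::CopyOnWriteBuffer(data, sizeof(data));
}
```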
EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()); @@ -6456,16 +6482,14 @@ TEST_F(WebRtcVideoChannelTest, RecvUnsignaledSsrcWithSignaledStreamId) { // Until the demuxer criteria has been updated, we ignore in-flight ssrcs of // the recently removed unsignaled receive stream. - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size()); // After the demuxer criteria has been updated, we should proceed to create // unsignalled receive streams. This time when a default video receive stream // is created it won't have a sync_group. channel_->OnDemuxerCriteriaUpdateComplete(); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()); EXPECT_TRUE( fake_call_->GetVideoReceiveStreams()[0]->GetConfig().sync_group.empty()); @@ -6482,8 +6506,7 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); // Default receive stream created. const auto& receivers1 = fake_call_->GetVideoReceiveStreams(); @@ -6533,7 +6556,7 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc1); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. @@ -6542,9 +6565,8 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc2); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); // No unsignaled ssrc for kSsrc2 should have been created, but kSsrc1 should // arrive since it already has a stream. @@ -6566,7 +6588,7 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc1); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. @@ -6575,9 +6597,8 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc2); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); // An unsignalled ssrc for kSsrc2 should be created and the packet counter // should increase for both ssrcs. @@ -6618,7 +6639,7 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc1); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. 
@@ -6627,9 +6648,8 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc2); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); // No unsignaled ssrc for kSsrc1 should have been created, but the packet // count for kSsrc2 should increase. @@ -6650,7 +6670,7 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc1); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. @@ -6659,9 +6679,8 @@ TEST_F(WebRtcVideoChannelTest, memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc2); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); // An unsignalled ssrc for kSsrc1 should be created and the packet counter // should increase for both ssrcs. @@ -6698,9 +6717,8 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 1u); // Signal that the demuxer knows about the first update: the removal. @@ -6715,9 +6733,8 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u); // Remove the kSsrc again while previous demuxer updates are still pending. 
@@ -6733,9 +6750,8 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u); @@ -6751,9 +6767,8 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); } - rtc::Thread::Current()->ProcessMessages(0); EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u); @@ -6769,11 +6784,72 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { memset(data, 0, sizeof(data)); rtc::SetBE32(&data[8], kSsrc); rtc::CopyOnWriteBuffer packet(data, kDataLength); + ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + } + EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u); + EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 3u); +} + +TEST_F(WebRtcVideoChannelTest, UnsignalledSsrcHasACooldown) { + const uint32_t kSsrc1 = 1; + const uint32_t kSsrc2 = 2; + + // Send packets for kSsrc1, creating an unsignalled receive stream. + { + // Receive a packet for kSsrc1. + const size_t kDataLength = 12; + uint8_t data[kDataLength]; + memset(data, 0, sizeof(data)); + rtc::SetBE32(&data[8], kSsrc1); + rtc::CopyOnWriteBuffer packet(data, kDataLength); channel_->OnPacketReceived(packet, /* packet_time_us */ -1); } rtc::Thread::Current()->ProcessMessages(0); + fake_clock_.AdvanceTime( + webrtc::TimeDelta::Millis(kUnsignalledReceiveStreamCooldownMs - 1)); + + // We now have an unsignalled receive stream for kSsrc1. EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u); - EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 3u); + EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u); + EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 0u); + + { + // Receive a packet for kSsrc2. + const size_t kDataLength = 12; + uint8_t data[kDataLength]; + memset(data, 0, sizeof(data)); + rtc::SetBE32(&data[8], kSsrc2); + rtc::CopyOnWriteBuffer packet(data, kDataLength); + channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + } + rtc::Thread::Current()->ProcessMessages(0); + + // Not enough time has passed to replace the unsignalled receive stream, so + // the kSsrc2 should be ignored. + EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u); + EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u); + EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 0u); + + // After 500 ms, kSsrc2 should trigger a new unsignalled receive stream that + // replaces the old one. + fake_clock_.AdvanceTime(webrtc::TimeDelta::Millis(1)); + { + // Receive a packet for kSsrc2. 
+    const size_t kDataLength = 12;
+    uint8_t data[kDataLength];
+    memset(data, 0, sizeof(data));
+    rtc::SetBE32(&data[8], kSsrc2);
+    rtc::CopyOnWriteBuffer packet(data, kDataLength);
+    channel_->OnPacketReceived(packet, /* packet_time_us */ -1);
+  }
+  rtc::Thread::Current()->ProcessMessages(0);
+
+  // The old unsignalled receive stream was destroyed and replaced, so we
+  // still only have one unsignalled receive stream. But the packet counter
+  // for kSsrc2 has now increased.
+  EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+  EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u);
+  EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 1u);
 }

 // Test BaseMinimumPlayoutDelayMs on receive streams.
@@ -6809,8 +6885,7 @@ TEST_F(WebRtcVideoChannelTest, BaseMinimumPlayoutDelayMsUnsignaledRecvStream) {
   memset(data, 0, sizeof(data));
   rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc);
   rtc::CopyOnWriteBuffer packet(data, kDataLength);
-  channel_->OnPacketReceived(packet, /* packet_time_us */ -1);
-  rtc::Thread::Current()->ProcessMessages(0);
+  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);

   recv_stream = fake_call_->GetVideoReceiveStream(kIncomingUnsignalledSsrc);
   EXPECT_EQ(recv_stream->base_mininum_playout_delay_ms(), 200);
@@ -6847,8 +6922,7 @@ void WebRtcVideoChannelTest::TestReceiveUnsignaledSsrcPacket(
   rtc::Set8(data, 1, payload_type);
   rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc);
   rtc::CopyOnWriteBuffer packet(data, kDataLength);
-  channel_->OnPacketReceived(packet, /* packet_time_us */ -1);
-  rtc::Thread::Current()->ProcessMessages(0);
+  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);

   if (expect_created_receive_stream) {
     EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
@@ -6928,19 +7002,14 @@ TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) {
   channel_->SetDefaultSink(&renderer);

   // Receive VP8 packet on first SSRC.
-  uint8_t data[kMinRtpPacketLen];
-  cricket::RtpHeader rtpHeader;
-  rtpHeader.payload_type = GetEngineCodec("VP8").id;
-  rtpHeader.seq_num = rtpHeader.timestamp = 0;
-  rtpHeader.ssrc = kIncomingUnsignalledSsrc + 1;
-  cricket::SetRtpHeader(data, sizeof(data), rtpHeader);
-  rtc::CopyOnWriteBuffer packet(data, sizeof(data));
-  channel_->OnPacketReceived(packet, /* packet_time_us */ -1);
-  rtc::Thread::Current()->ProcessMessages(0);
+  webrtc::RtpPacket rtp_packet;
+  rtp_packet.SetPayloadType(GetEngineCodec("VP8").id);
+  rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 1);
+  ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1);
   // VP8 packet should create default receive stream.
   ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
   FakeVideoReceiveStream* recv_stream = fake_call_->GetVideoReceiveStreams()[0];
-  EXPECT_EQ(rtpHeader.ssrc, recv_stream->GetConfig().rtp.remote_ssrc);
+  EXPECT_EQ(rtp_packet.Ssrc(), recv_stream->GetConfig().rtp.remote_ssrc);
   // Verify that the receive stream sinks to a renderer.
   webrtc::VideoFrame video_frame =
       webrtc::VideoFrame::Builder()
@@ -6953,16 +7022,13 @@
   EXPECT_EQ(1, renderer.num_rendered_frames());

   // Receive VP9 packet on second SSRC.
- rtpHeader.payload_type = GetEngineCodec("VP9").id; - rtpHeader.ssrc = kIncomingUnsignalledSsrc + 2; - cricket::SetRtpHeader(data, sizeof(data), rtpHeader); - rtc::CopyOnWriteBuffer packet2(data, sizeof(data)); - channel_->OnPacketReceived(packet2, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + rtp_packet.SetPayloadType(GetEngineCodec("VP9").id); + rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 2); + ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); // VP9 packet should replace the default receive SSRC. ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()); recv_stream = fake_call_->GetVideoReceiveStreams()[0]; - EXPECT_EQ(rtpHeader.ssrc, recv_stream->GetConfig().rtp.remote_ssrc); + EXPECT_EQ(rtp_packet.Ssrc(), recv_stream->GetConfig().rtp.remote_ssrc); // Verify that the receive stream sinks to a renderer. webrtc::VideoFrame video_frame2 = webrtc::VideoFrame::Builder() @@ -6976,16 +7042,13 @@ TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) { #if defined(WEBRTC_USE_H264) // Receive H264 packet on third SSRC. - rtpHeader.payload_type = 126; - rtpHeader.ssrc = kIncomingUnsignalledSsrc + 3; - cricket::SetRtpHeader(data, sizeof(data), rtpHeader); - rtc::CopyOnWriteBuffer packet3(data, sizeof(data)); - channel_->OnPacketReceived(packet3, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + rtp_packet.SetPayloadType(126); + rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 3); + ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); // H264 packet should replace the default receive SSRC. ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()); recv_stream = fake_call_->GetVideoReceiveStreams()[0]; - EXPECT_EQ(rtpHeader.ssrc, recv_stream->GetConfig().rtp.remote_ssrc); + EXPECT_EQ(rtp_packet.Ssrc(), recv_stream->GetConfig().rtp.remote_ssrc); // Verify that the receive stream sinks to a renderer. webrtc::VideoFrame video_frame3 = webrtc::VideoFrame::Builder() @@ -7013,15 +7076,10 @@ TEST_F(WebRtcVideoChannelTest, EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size()); // Receive packet on an unsignaled SSRC. - uint8_t data[kMinRtpPacketLen]; - cricket::RtpHeader rtp_header; - rtp_header.payload_type = GetEngineCodec("VP8").id; - rtp_header.seq_num = rtp_header.timestamp = 0; - rtp_header.ssrc = kSsrcs3[0]; - cricket::SetRtpHeader(data, sizeof(data), rtp_header); - rtc::CopyOnWriteBuffer packet(data, sizeof(data)); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + webrtc::RtpPacket rtp_packet; + rtp_packet.SetPayloadType(GetEngineCodec("VP8").id); + rtp_packet.SetSsrc(kSsrcs3[0]); + ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); // Default receive stream should be created. ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()); FakeVideoReceiveStream* recv_stream0 = @@ -7036,11 +7094,8 @@ TEST_F(WebRtcVideoChannelTest, EXPECT_EQ(kSsrcs3[0], recv_stream0->GetConfig().rtp.remote_ssrc); // Receive packet on a different unsignaled SSRC. - rtp_header.ssrc = kSsrcs3[1]; - cricket::SetRtpHeader(data, sizeof(data), rtp_header); - packet.SetData(data, sizeof(data)); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); - rtc::Thread::Current()->ProcessMessages(0); + rtp_packet.SetSsrc(kSsrcs3[1]); + ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); // New default receive stream should be created, but old stream should remain. 
   ASSERT_EQ(2u, fake_call_->GetVideoReceiveStreams().size());
   EXPECT_EQ(recv_stream0, fake_call_->GetVideoReceiveStreams()[0]);
@@ -8646,15 +8701,10 @@ TEST_F(WebRtcVideoChannelTest,
   EXPECT_FALSE(rtp_parameters.encodings[0].ssrc);

   // Receive VP8 packet.
-  uint8_t data[kMinRtpPacketLen];
-  cricket::RtpHeader rtpHeader;
-  rtpHeader.payload_type = GetEngineCodec("VP8").id;
-  rtpHeader.seq_num = rtpHeader.timestamp = 0;
-  rtpHeader.ssrc = kIncomingUnsignalledSsrc;
-  cricket::SetRtpHeader(data, sizeof(data), rtpHeader);
-  rtc::CopyOnWriteBuffer packet(data, sizeof(data));
-  channel_->OnPacketReceived(packet, /* packet_time_us */ -1);
-  rtc::Thread::Current()->ProcessMessages(0);
+  webrtc::RtpPacket rtp_packet;
+  rtp_packet.SetPayloadType(GetEngineCodec("VP8").id);
+  rtp_packet.SetSsrc(kIncomingUnsignalledSsrc);
+  ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1);

   // The |ssrc| member should still be unset.
   rtp_parameters = channel_->GetDefaultRtpReceiveParameters();
diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc
index a23d9ac24c..aa80c8724a 100644
--- a/media/engine/webrtc_voice_engine.cc
+++ b/media/engine/webrtc_voice_engine.cc
@@ -240,6 +240,49 @@ struct AdaptivePtimeConfig {
   }
 };

+// TODO(tommi): Constructing a receive stream could be made simpler.
+// Move some of this boilerplate code into the config structs themselves.
+webrtc::AudioReceiveStream::Config BuildReceiveStreamConfig(
+    uint32_t remote_ssrc,
+    uint32_t local_ssrc,
+    bool use_transport_cc,
+    bool use_nack,
+    const std::vector<std::string>& stream_ids,
+    const std::vector<webrtc::RtpExtension>& extensions,
+    webrtc::Transport* rtcp_send_transport,
+    const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
+    const std::map<int, webrtc::SdpAudioFormat>& decoder_map,
+    absl::optional<webrtc::AudioCodecPairId> codec_pair_id,
+    size_t jitter_buffer_max_packets,
+    bool jitter_buffer_fast_accelerate,
+    int jitter_buffer_min_delay_ms,
+    bool jitter_buffer_enable_rtx_handling,
+    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor,
+    const webrtc::CryptoOptions& crypto_options,
+    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+  webrtc::AudioReceiveStream::Config config;
+  config.rtp.remote_ssrc = remote_ssrc;
+  config.rtp.local_ssrc = local_ssrc;
+  config.rtp.transport_cc = use_transport_cc;
+  config.rtp.nack.rtp_history_ms = use_nack ?
kNackRtpHistoryMs : 0; + if (!stream_ids.empty()) { + config.sync_group = stream_ids[0]; + } + config.rtp.extensions = extensions; + config.rtcp_send_transport = rtcp_send_transport; + config.decoder_factory = decoder_factory; + config.decoder_map = decoder_map; + config.codec_pair_id = codec_pair_id; + config.jitter_buffer_max_packets = jitter_buffer_max_packets; + config.jitter_buffer_fast_accelerate = jitter_buffer_fast_accelerate; + config.jitter_buffer_min_delay_ms = jitter_buffer_min_delay_ms; + config.jitter_buffer_enable_rtx_handling = jitter_buffer_enable_rtx_handling; + config.frame_decryptor = std::move(frame_decryptor); + config.crypto_options = crypto_options; + config.frame_transformer = std::move(frame_transformer); + return config; +} + } // namespace WebRtcVoiceEngine::WebRtcVoiceEngine( @@ -1161,48 +1204,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { public: - WebRtcAudioReceiveStream( - uint32_t remote_ssrc, - uint32_t local_ssrc, - bool use_transport_cc, - bool use_nack, - const std::vector& stream_ids, - const std::vector& extensions, - webrtc::Call* call, - webrtc::Transport* rtcp_send_transport, - const rtc::scoped_refptr& decoder_factory, - const std::map& decoder_map, - absl::optional codec_pair_id, - size_t jitter_buffer_max_packets, - bool jitter_buffer_fast_accelerate, - int jitter_buffer_min_delay_ms, - bool jitter_buffer_enable_rtx_handling, - rtc::scoped_refptr frame_decryptor, - const webrtc::CryptoOptions& crypto_options, - rtc::scoped_refptr frame_transformer) - : call_(call), config_() { + WebRtcAudioReceiveStream(webrtc::AudioReceiveStream::Config config, + webrtc::Call* call) + : call_(call), stream_(call_->CreateAudioReceiveStream(config)) { RTC_DCHECK(call); - config_.rtp.remote_ssrc = remote_ssrc; - config_.rtp.local_ssrc = local_ssrc; - config_.rtp.transport_cc = use_transport_cc; - config_.rtp.nack.rtp_history_ms = use_nack ? 
kNackRtpHistoryMs : 0; - config_.rtp.extensions = extensions; - config_.rtcp_send_transport = rtcp_send_transport; - config_.jitter_buffer_max_packets = jitter_buffer_max_packets; - config_.jitter_buffer_fast_accelerate = jitter_buffer_fast_accelerate; - config_.jitter_buffer_min_delay_ms = jitter_buffer_min_delay_ms; - config_.jitter_buffer_enable_rtx_handling = - jitter_buffer_enable_rtx_handling; - if (!stream_ids.empty()) { - config_.sync_group = stream_ids[0]; - } - config_.decoder_factory = decoder_factory; - config_.decoder_map = decoder_map; - config_.codec_pair_id = codec_pair_id; - config_.frame_decryptor = frame_decryptor; - config_.crypto_options = crypto_options; - config_.frame_transformer = std::move(frame_transformer); - RecreateAudioReceiveStream(); + RTC_DCHECK(stream_); } WebRtcAudioReceiveStream() = delete; @@ -1214,63 +1220,37 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { call_->DestroyAudioReceiveStream(stream_); } - void SetFrameDecryptor( - rtc::scoped_refptr frame_decryptor) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.frame_decryptor = frame_decryptor; - RecreateAudioReceiveStream(); + webrtc::AudioReceiveStream& stream() { + RTC_DCHECK(stream_); + return *stream_; } - void SetLocalSsrc(uint32_t local_ssrc) { + void SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - if (local_ssrc != config_.rtp.local_ssrc) { - config_.rtp.local_ssrc = local_ssrc; - RecreateAudioReceiveStream(); - } + stream_->SetFrameDecryptor(std::move(frame_decryptor)); } - void SetUseTransportCcAndRecreateStream(bool use_transport_cc, - bool use_nack) { + void SetUseTransportCc(bool use_transport_cc, bool use_nack) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.rtp.transport_cc = use_transport_cc; - config_.rtp.nack.rtp_history_ms = use_nack ? kNackRtpHistoryMs : 0; - ReconfigureAudioReceiveStream(); + stream_->SetUseTransportCcAndNackHistory(use_transport_cc, + use_nack ? kNackRtpHistoryMs : 0); } - void SetRtpExtensionsAndRecreateStream( - const std::vector& extensions) { + void SetRtpExtensions(const std::vector& extensions) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.rtp.extensions = extensions; - RecreateAudioReceiveStream(); + stream_->SetRtpExtensions(extensions); } // Set a new payload type -> decoder map. 
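Note on the hunks above: WebRtcAudioReceiveStream changes from a config-caching wrapper that destroyed and recreated the underlying webrtc::AudioReceiveStream on every change into a thin forwarder that calls setters on the live stream. Below is a self-contained sketch of that delegation shape; FakeAudioReceiveStream, StreamWrapper and the 5000 ms NACK history value are illustrative stand-ins, not the WebRTC classes or constants.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct RtpExtension { std::string uri; int id = 0; };

// Stand-in for the stream object that now exposes setters directly.
class FakeAudioReceiveStream {
 public:
  void SetUseTransportCcAndNackHistory(bool transport_cc, int history_ms) {
    transport_cc_ = transport_cc;
    nack_history_ms_ = history_ms;
  }
  void SetRtpExtensions(std::vector<RtpExtension> extensions) {
    extensions_ = std::move(extensions);
  }
  bool transport_cc_ = false;
  int nack_history_ms_ = 0;
  std::vector<RtpExtension> extensions_;
};

// Thin wrapper in the spirit of the refactored WebRtcAudioReceiveStream:
// no cached Config and no stream recreation, every setter is forwarded as-is.
class StreamWrapper {
 public:
  explicit StreamWrapper(FakeAudioReceiveStream* stream) : stream_(stream) {
    assert(stream_);
  }
  void SetUseTransportCc(bool transport_cc, bool nack) {
    // 5000 ms is an illustrative NACK history value.
    stream_->SetUseTransportCcAndNackHistory(transport_cc, nack ? 5000 : 0);
  }
  void SetRtpExtensions(std::vector<RtpExtension> extensions) {
    stream_->SetRtpExtensions(std::move(extensions));
  }
 private:
  FakeAudioReceiveStream* const stream_;
};

int main() {
  FakeAudioReceiveStream fake;
  StreamWrapper wrapper(&fake);
  wrapper.SetUseTransportCc(/*transport_cc=*/true, /*nack=*/true);
  assert(fake.transport_cc_ && fake.nack_history_ms_ == 5000);
  return 0;
}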
void SetDecoderMap(const std::map& decoder_map) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.decoder_map = decoder_map; - ReconfigureAudioReceiveStream(); - } - - void MaybeRecreateAudioReceiveStream( - const std::vector& stream_ids) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - std::string sync_group; - if (!stream_ids.empty()) { - sync_group = stream_ids[0]; - } - if (config_.sync_group != sync_group) { - RTC_LOG(LS_INFO) << "Recreating AudioReceiveStream for SSRC=" - << config_.rtp.remote_ssrc - << " because of sync group change."; - config_.sync_group = sync_group; - RecreateAudioReceiveStream(); - } + stream_->SetDecoderMap(decoder_map); } webrtc::AudioReceiveStream::Stats GetStats( bool get_and_clear_legacy_stats) const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); return stream_->GetStats(get_and_clear_legacy_stats); } @@ -1284,13 +1264,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { void SetOutputVolume(double volume) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - output_volume_ = volume; stream_->SetGain(volume); } void SetPlayout(bool playout) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); if (playout) { stream_->Start(); } else { @@ -1300,79 +1278,47 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { bool SetBaseMinimumPlayoutDelayMs(int delay_ms) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - if (stream_->SetBaseMinimumPlayoutDelayMs(delay_ms)) { - // Memorize only valid delay because during stream recreation it will be - // passed to the constructor and it must be valid value. - config_.jitter_buffer_min_delay_ms = delay_ms; + if (stream_->SetBaseMinimumPlayoutDelayMs(delay_ms)) return true; - } else { - RTC_LOG(LS_ERROR) << "Failed to SetBaseMinimumPlayoutDelayMs" - " on AudioReceiveStream on SSRC=" - << config_.rtp.remote_ssrc - << " with delay_ms=" << delay_ms; - return false; - } + + RTC_LOG(LS_ERROR) << "Failed to SetBaseMinimumPlayoutDelayMs" + " on AudioReceiveStream on SSRC=" + << stream_->rtp_config().remote_ssrc + << " with delay_ms=" << delay_ms; + return false; } int GetBaseMinimumPlayoutDelayMs() const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); return stream_->GetBaseMinimumPlayoutDelayMs(); } std::vector GetSources() { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); return stream_->GetSources(); } webrtc::RtpParameters GetRtpParameters() const { webrtc::RtpParameters rtp_parameters; rtp_parameters.encodings.emplace_back(); - rtp_parameters.encodings[0].ssrc = config_.rtp.remote_ssrc; - rtp_parameters.header_extensions = config_.rtp.extensions; - + const auto& config = stream_->rtp_config(); + rtp_parameters.encodings[0].ssrc = config.remote_ssrc; + rtp_parameters.header_extensions = config.extensions; return rtp_parameters; } void SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.frame_transformer = std::move(frame_transformer); - ReconfigureAudioReceiveStream(); + stream_->SetDepacketizerToDecoderFrameTransformer(frame_transformer); } private: - void RecreateAudioReceiveStream() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - bool was_running = false; - if (stream_) { - was_running = stream_->IsRunning(); - call_->DestroyAudioReceiveStream(stream_); - } - stream_ = call_->CreateAudioReceiveStream(config_); - RTC_CHECK(stream_); - stream_->SetGain(output_volume_); - if (was_running) - SetPlayout(was_running); - 
stream_->SetSink(raw_audio_sink_.get()); - } - - void ReconfigureAudioReceiveStream() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - stream_->Reconfigure(config_); - } - webrtc::SequenceChecker worker_thread_checker_; webrtc::Call* call_ = nullptr; - webrtc::AudioReceiveStream::Config config_; - // The stream is owned by WebRtcAudioReceiveStream and may be reallocated if - // configuration changes. - webrtc::AudioReceiveStream* stream_ = nullptr; - float output_volume_ = 1.0; - std::unique_ptr raw_audio_sink_; + webrtc::AudioReceiveStream* const stream_ = nullptr; + std::unique_ptr raw_audio_sink_ + RTC_GUARDED_BY(worker_thread_checker_); }; WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel( @@ -1477,7 +1423,7 @@ bool WebRtcVoiceMediaChannel::SetRecvParameters( if (recv_rtp_extensions_ != filtered_extensions) { recv_rtp_extensions_.swap(filtered_extensions); for (auto& it : recv_streams_) { - it.second->SetRtpExtensionsAndRecreateStream(recv_rtp_extensions_); + it.second->SetRtpExtensions(recv_rtp_extensions_); } } return true; @@ -1842,8 +1788,8 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs( recv_transport_cc_enabled_ = send_codec_spec_->transport_cc_enabled; recv_nack_enabled_ = send_codec_spec_->nack_enabled; for (auto& kv : recv_streams_) { - kv.second->SetUseTransportCcAndRecreateStream(recv_transport_cc_enabled_, - recv_nack_enabled_); + kv.second->SetUseTransportCc(recv_transport_cc_enabled_, + recv_nack_enabled_); } } @@ -1939,10 +1885,8 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) { // same SSRC in order to send receiver reports. if (send_streams_.size() == 1) { receiver_reports_ssrc_ = ssrc; - for (const auto& kv : recv_streams_) { - // TODO(solenberg): Allow applications to set the RTCP SSRC of receive - // streams instead, so we can avoid reconfiguring the streams here. - kv.second->SetLocalSsrc(ssrc); + for (auto& kv : recv_streams_) { + call_->OnLocalSsrcUpdated(kv.second->stream(), ssrc); } } @@ -1995,9 +1939,12 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) { const uint32_t ssrc = sp.first_ssrc(); // If this stream was previously received unsignaled, we promote it, possibly - // recreating the AudioReceiveStream, if stream ids have changed. + // updating the sync group if stream ids have changed. if (MaybeDeregisterUnsignaledRecvStream(ssrc)) { - recv_streams_[ssrc]->MaybeRecreateAudioReceiveStream(sp.stream_ids()); + auto stream_ids = sp.stream_ids(); + std::string sync_group = stream_ids.empty() ? std::string() : stream_ids[0]; + call_->OnUpdateSyncGroup(recv_streams_[ssrc]->stream(), + std::move(sync_group)); return true; } @@ -2007,16 +1954,18 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) { } // Create a new channel for receiving audio data. 
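With the stream created once and never recreated, the hunks above have AddSendStream() push the local SSRC to existing receive streams through call_->OnLocalSsrcUpdated(), and AddRecvStream() update only the sync group through call_->OnUpdateSyncGroup(), deriving the group from the stream ids. A minimal sketch of that derivation; the helper name is made up for illustration.

#include <cassert>
#include <string>
#include <vector>

// Sync group = first stream id if any, otherwise the empty string.
std::string SyncGroupFromStreamIds(const std::vector<std::string>& stream_ids) {
  return stream_ids.empty() ? std::string() : stream_ids[0];
}

int main() {
  assert(SyncGroupFromStreamIds({}).empty());
  assert(SyncGroupFromStreamIds({"stream_id", "other"}) == "stream_id");
  return 0;
}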
+ auto config = BuildReceiveStreamConfig( + ssrc, receiver_reports_ssrc_, recv_transport_cc_enabled_, + recv_nack_enabled_, sp.stream_ids(), recv_rtp_extensions_, this, + engine()->decoder_factory_, decoder_map_, codec_pair_id_, + engine()->audio_jitter_buffer_max_packets_, + engine()->audio_jitter_buffer_fast_accelerate_, + engine()->audio_jitter_buffer_min_delay_ms_, + engine()->audio_jitter_buffer_enable_rtx_handling_, + unsignaled_frame_decryptor_, crypto_options_, nullptr); + recv_streams_.insert(std::make_pair( - ssrc, new WebRtcAudioReceiveStream( - ssrc, receiver_reports_ssrc_, recv_transport_cc_enabled_, - recv_nack_enabled_, sp.stream_ids(), recv_rtp_extensions_, - call_, this, engine()->decoder_factory_, decoder_map_, - codec_pair_id_, engine()->audio_jitter_buffer_max_packets_, - engine()->audio_jitter_buffer_fast_accelerate_, - engine()->audio_jitter_buffer_min_delay_ms_, - engine()->audio_jitter_buffer_enable_rtx_handling_, - unsignaled_frame_decryptor_, crypto_options_, nullptr))); + ssrc, new WebRtcAudioReceiveStream(std::move(config), call_))); recv_streams_[ssrc]->SetPlayout(playout_); return true; @@ -2371,6 +2320,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info, sinfo.retransmitted_packets_sent = stats.retransmitted_packets_sent; sinfo.packets_lost = stats.packets_lost; sinfo.fraction_lost = stats.fraction_lost; + sinfo.nacks_rcvd = stats.nacks_rcvd; sinfo.codec_name = stats.codec_name; sinfo.codec_payload_type = stats.codec_payload_type; sinfo.jitter_ms = stats.jitter_ms; @@ -2470,6 +2420,10 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info, rinfo.sender_reports_bytes_sent = stats.sender_reports_bytes_sent; rinfo.sender_reports_reports_count = stats.sender_reports_reports_count; + if (recv_nack_enabled_) { + rinfo.nacks_sent = stats.nacks_sent; + } + info->receivers.push_back(rinfo); } diff --git a/media/engine/webrtc_voice_engine_unittest.cc b/media/engine/webrtc_voice_engine_unittest.cc index ecf8c07e6a..c570b1a03a 100644 --- a/media/engine/webrtc_voice_engine_unittest.cc +++ b/media/engine/webrtc_voice_engine_unittest.cc @@ -2845,7 +2845,7 @@ TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_NoRecreate) { EXPECT_EQ(audio_receive_stream_id, streams.front()->id()); } -TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Recreate) { +TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Updates) { EXPECT_TRUE(SetupChannel()); // Spawn unsignaled stream with SSRC=1. @@ -2854,17 +2854,26 @@ TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Recreate) { EXPECT_TRUE( GetRecvStream(1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame))); - // Verify that the underlying stream object in Call *is* recreated when a + // Verify that the underlying stream object in Call gets updated when a // stream with SSRC=1 is added, and which has changed stream parameters. const auto& streams = call_.GetAudioReceiveStreams(); EXPECT_EQ(1u, streams.size()); + // The sync_group id should be empty. 
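The GetStats() hunks above also start reporting NACK counters: nacks_rcvd for senders unconditionally, and nacks_sent for receivers only when NACK is negotiated for the receive direction. A small sketch of that conditional copy; ReceiverStreamStats and ReceiverInfo are stand-ins for the real stats structs.

#include <cassert>
#include <cstdint>

// Stand-ins for the stats structs consumed and produced by GetStats().
struct ReceiverStreamStats { uint32_t nacks_sent = 7; };
struct ReceiverInfo { uint32_t nacks_sent = 0; };

ReceiverInfo FillReceiverInfo(const ReceiverStreamStats& stats, bool recv_nack_enabled) {
  ReceiverInfo info;
  if (recv_nack_enabled) {
    info.nacks_sent = stats.nacks_sent;  // Otherwise left at its default of 0.
  }
  return info;
}

int main() {
  assert(FillReceiverInfo(ReceiverStreamStats(), /*recv_nack_enabled=*/false).nacks_sent == 0);
  assert(FillReceiverInfo(ReceiverStreamStats(), /*recv_nack_enabled=*/true).nacks_sent == 7);
  return 0;
}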
+ EXPECT_TRUE(streams.front()->GetConfig().sync_group.empty()); + + const std::string new_stream_id("stream_id"); int audio_receive_stream_id = streams.front()->id(); cricket::StreamParams stream_params; stream_params.ssrcs.push_back(1); - stream_params.set_stream_ids({"stream_id"}); + stream_params.set_stream_ids({new_stream_id}); + EXPECT_TRUE(channel_->AddRecvStream(stream_params)); EXPECT_EQ(1u, streams.size()); - EXPECT_NE(audio_receive_stream_id, streams.front()->id()); + // The audio receive stream should not have been recreated. + EXPECT_EQ(audio_receive_stream_id, streams.front()->id()); + + // The sync_group id should now match with the new stream params. + EXPECT_EQ(new_stream_id, streams.front()->GetConfig().sync_group); } // Test that AddRecvStream creates new stream. diff --git a/media/sctp/dcsctp_transport.cc b/media/sctp/dcsctp_transport.cc index f0ec69a766..3b89af1ec2 100644 --- a/media/sctp/dcsctp_transport.cc +++ b/media/sctp/dcsctp_transport.cc @@ -19,9 +19,10 @@ #include "absl/types/optional.h" #include "api/array_view.h" #include "media/base/media_channel.h" +#include "net/dcsctp/public/dcsctp_socket_factory.h" #include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/public/text_pcap_packet_observer.h" #include "net/dcsctp/public/types.h" -#include "net/dcsctp/socket/dcsctp_socket.h" #include "p2p/base/packet_transport_internal.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -73,51 +74,33 @@ absl::optional ToDataMessageType(dcsctp::PPID ppid) { return absl::nullopt; } +absl::optional ToErrorCauseCode( + dcsctp::ErrorKind error) { + switch (error) { + case dcsctp::ErrorKind::kParseFailed: + return cricket::SctpErrorCauseCode::kUnrecognizedParameters; + case dcsctp::ErrorKind::kPeerReported: + return cricket::SctpErrorCauseCode::kUserInitiatedAbort; + case dcsctp::ErrorKind::kWrongSequence: + case dcsctp::ErrorKind::kProtocolViolation: + return cricket::SctpErrorCauseCode::kProtocolViolation; + case dcsctp::ErrorKind::kResourceExhaustion: + return cricket::SctpErrorCauseCode::kOutOfResource; + case dcsctp::ErrorKind::kTooManyRetries: + case dcsctp::ErrorKind::kUnsupportedOperation: + case dcsctp::ErrorKind::kNoError: + case dcsctp::ErrorKind::kNotConnected: + // No SCTP error cause code matches those + break; + } + return absl::nullopt; +} + bool IsEmptyPPID(dcsctp::PPID ppid) { WebrtcPPID webrtc_ppid = static_cast(ppid.value()); return webrtc_ppid == WebrtcPPID::kStringEmpty || webrtc_ppid == WebrtcPPID::kBinaryEmpty; } - -// Print outs all sent and received packets to the logs, at LS_VERBOSE severity. 
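ToErrorCauseCode() added above maps dcsctp::ErrorKind values onto the IANA-registered SCTP error cause codes and returns no value when none applies. The sketch below shows the same switch-plus-optional shape; ErrorKind and CauseCode here are local stand-ins, and std::optional stands in for absl::optional.

#include <cstdint>
#include <optional>

// Local stand-ins for dcsctp::ErrorKind and cricket::SctpErrorCauseCode.
enum class ErrorKind { kParseFailed, kPeerReported, kProtocolViolation, kNoError };
enum class CauseCode : uint16_t {
  kUnrecognizedParameters = 8,
  kUserInitiatedAbort = 12,
  kProtocolViolation = 13,
};

// Return a cause code only when one of the IANA-defined codes applies.
std::optional<CauseCode> ToCauseCode(ErrorKind error) {
  switch (error) {
    case ErrorKind::kParseFailed:
      return CauseCode::kUnrecognizedParameters;
    case ErrorKind::kPeerReported:
      return CauseCode::kUserInitiatedAbort;
    case ErrorKind::kProtocolViolation:
      return CauseCode::kProtocolViolation;
    case ErrorKind::kNoError:
      break;  // No matching cause code for this kind.
  }
  return std::nullopt;
}

int main() {
  return ToCauseCode(ErrorKind::kNoError).has_value() ? 1 : 0;
}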
-class TextPcapPacketObserver : public dcsctp::PacketObserver { - public: - explicit TextPcapPacketObserver(absl::string_view name) : name_(name) {} - - void OnSentPacket(dcsctp::TimeMs now, rtc::ArrayView payload) { - PrintPacket("O ", now, payload); - } - - void OnReceivedPacket(dcsctp::TimeMs now, - rtc::ArrayView payload) { - PrintPacket("I ", now, payload); - } - - private: - void PrintPacket(absl::string_view prefix, - dcsctp::TimeMs now, - rtc::ArrayView payload) { - rtc::StringBuilder s; - s << prefix; - int64_t remaining = *now % (24 * 60 * 60 * 1000); - int hours = remaining / (60 * 60 * 1000); - remaining = remaining % (60 * 60 * 1000); - int minutes = remaining / (60 * 1000); - remaining = remaining % (60 * 1000); - int seconds = remaining / 1000; - int ms = remaining % 1000; - s.AppendFormat("%02d:%02d:%02d.%03d", hours, minutes, seconds, ms); - s << " 0000"; - for (uint8_t byte : payload) { - s.AppendFormat(" %02x", byte); - } - s << " # SCTP_PACKET " << name_; - RTC_LOG(LS_VERBOSE) << s.str(); - } - - const std::string name_; -}; - } // namespace DcSctpTransport::DcSctpTransport(rtc::Thread* network_thread, @@ -174,11 +157,13 @@ bool DcSctpTransport::Start(int local_sctp_port, std::unique_ptr packet_observer; if (RTC_LOG_CHECK_LEVEL(LS_VERBOSE)) { - packet_observer = std::make_unique(debug_name_); + packet_observer = + std::make_unique(debug_name_); } - socket_ = std::make_unique( - debug_name_, *this, std::move(packet_observer), options); + dcsctp::DcSctpSocketFactory factory; + socket_ = + factory.Create(debug_name_, *this, std::move(packet_observer), options); } else { if (local_sctp_port != socket_->options().local_port || remote_sctp_port != socket_->options().remote_port) { @@ -237,11 +222,11 @@ bool DcSctpTransport::SendData(int sid, auto max_message_size = socket_->options().max_message_size; if (max_message_size > 0 && payload.size() > max_message_size) { - RTC_LOG(LS_ERROR) << debug_name_ - << "->SendData(...): " - "Trying to send packet bigger " - "than the max message size: " - << payload.size() << " vs max of " << max_message_size; + RTC_LOG(LS_WARNING) << debug_name_ + << "->SendData(...): " + "Trying to send packet bigger " + "than the max message size: " + << payload.size() << " vs max of " << max_message_size; *result = cricket::SDR_ERROR; return false; } @@ -348,9 +333,9 @@ void DcSctpTransport::SendPacket(rtc::ArrayView data) { data.size(), rtc::PacketOptions(), 0); if (result < 0) { - RTC_LOG(LS_ERROR) << debug_name_ << "->SendPacket(length=" << data.size() - << ") failed with error: " << transport_->GetError() - << "."; + RTC_LOG(LS_WARNING) << debug_name_ << "->SendPacket(length=" << data.size() + << ") failed with error: " << transport_->GetError() + << "."; } } @@ -366,7 +351,7 @@ uint32_t DcSctpTransport::GetRandomInt(uint32_t low, uint32_t high) { return random_.Rand(low, high); } -void DcSctpTransport::NotifyOutgoingMessageBufferEmpty() { +void DcSctpTransport::OnTotalBufferedAmountLow() { if (!ready_to_send_data_) { ready_to_send_data_ = true; SignalReadyToSendData(); @@ -375,10 +360,10 @@ void DcSctpTransport::NotifyOutgoingMessageBufferEmpty() { void DcSctpTransport::OnMessageReceived(dcsctp::DcSctpMessage message) { RTC_DCHECK_RUN_ON(network_thread_); - RTC_LOG(LS_INFO) << debug_name_ - << "->OnMessageReceived(sid=" << message.stream_id().value() - << ", ppid=" << message.ppid().value() - << ", length=" << message.payload().size() << ")."; + RTC_LOG(LS_VERBOSE) << debug_name_ << "->OnMessageReceived(sid=" + << message.stream_id().value() + << 
", ppid=" << message.ppid().value() + << ", length=" << message.payload().size() << ")."; cricket::ReceiveDataParams receive_data_params; receive_data_params.sid = message.stream_id().value(); auto type = ToDataMessageType(message.ppid()); @@ -412,6 +397,14 @@ void DcSctpTransport::OnAborted(dcsctp::ErrorKind error, << "->OnAborted(error=" << dcsctp::ToString(error) << ", message=" << message << ")."; ready_to_send_data_ = false; + RTCError rtc_error(RTCErrorType::OPERATION_ERROR_WITH_DATA, + std::string(message)); + rtc_error.set_error_detail(RTCErrorDetailType::SCTP_FAILURE); + auto code = ToErrorCauseCode(error); + if (code.has_value()) { + rtc_error.set_sctp_cause_code(static_cast(*code)); + } + SignalClosedAbruptly(rtc_error); } void DcSctpTransport::OnConnected() { @@ -435,7 +428,7 @@ void DcSctpTransport::OnStreamsResetFailed( absl::string_view reason) { // TODO(orphis): Need a test to check for correct behavior for (auto& stream_id : outgoing_streams) { - RTC_LOG(LS_ERROR) + RTC_LOG(LS_WARNING) << debug_name_ << "->OnStreamsResetFailed(...): Outgoing stream reset failed" << ", sid=" << stream_id.value() << ", reason: " << reason << "."; @@ -490,8 +483,9 @@ void DcSctpTransport::OnTransportWritableState( RTC_DCHECK_RUN_ON(network_thread_); RTC_DCHECK_EQ(transport_, transport); - RTC_LOG(LS_INFO) << debug_name_ << "->OnTransportWritableState(), writable=" - << transport->writable(); + RTC_LOG(LS_VERBOSE) << debug_name_ + << "->OnTransportWritableState(), writable=" + << transport->writable(); MaybeConnectSocket(); } @@ -518,7 +512,7 @@ void DcSctpTransport::OnTransportReadPacket( void DcSctpTransport::OnTransportClosed( rtc::PacketTransportInternal* transport) { RTC_LOG(LS_VERBOSE) << debug_name_ << "->OnTransportClosed()."; - SignalClosedAbruptly(); + SignalClosedAbruptly({}); } void DcSctpTransport::MaybeConnectSocket() { diff --git a/media/sctp/dcsctp_transport.h b/media/sctp/dcsctp_transport.h index f154c44928..15933383b5 100644 --- a/media/sctp/dcsctp_transport.h +++ b/media/sctp/dcsctp_transport.h @@ -63,7 +63,7 @@ class DcSctpTransport : public cricket::SctpTransportInternal, std::unique_ptr CreateTimeout() override; dcsctp::TimeMs TimeMillis() override; uint32_t GetRandomInt(uint32_t low, uint32_t high) override; - void NotifyOutgoingMessageBufferEmpty() override; + void OnTotalBufferedAmountLow() override; void OnMessageReceived(dcsctp::DcSctpMessage message) override; void OnError(dcsctp::ErrorKind error, absl::string_view message) override; void OnAborted(dcsctp::ErrorKind error, absl::string_view message) override; diff --git a/media/sctp/sctp_transport_internal.h b/media/sctp/sctp_transport_internal.h index 96c35ffb93..b1327165b6 100644 --- a/media/sctp/sctp_transport_internal.h +++ b/media/sctp/sctp_transport_internal.h @@ -53,6 +53,24 @@ constexpr uint16_t kMinSctpSid = 0; // usrsctp.h) const int kSctpDefaultPort = 5000; +// Error cause codes defined at +// https://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml#sctp-parameters-24 +enum class SctpErrorCauseCode : uint16_t { + kInvalidStreamIdentifier = 1, + kMissingMandatoryParameter = 2, + kStaleCookieError = 3, + kOutOfResource = 4, + kUnresolvableAddress = 5, + kUnrecognizedChunkType = 6, + kInvalidMandatoryParameter = 7, + kUnrecognizedParameters = 8, + kNoUserData = 9, + kCookieReceivedWhileShuttingDown = 10, + kRestartWithNewAddresses = 11, + kUserInitiatedAbort = 12, + kProtocolViolation = 13, +}; + // Abstract SctpTransport interface for use internally (by PeerConnection etc.). 
// Exists to allow mock/fake SctpTransports to be created. class SctpTransportInternal { @@ -137,8 +155,8 @@ class SctpTransportInternal { // and outgoing streams reset). sigslot::signal1 SignalClosingProcedureComplete; // Fired when the underlying DTLS transport has closed due to an error - // or an incoming DTLS disconnect. - sigslot::signal0<> SignalClosedAbruptly; + // or an incoming DTLS disconnect or SCTP transport errors. + sigslot::signal1 SignalClosedAbruptly; // Helper for debugging. virtual void set_debug_name_for_testing(const char* debug_name) = 0; diff --git a/media/sctp/usrsctp_transport.cc b/media/sctp/usrsctp_transport.cc index d43c017207..7824a72934 100644 --- a/media/sctp/usrsctp_transport.cc +++ b/media/sctp/usrsctp_transport.cc @@ -53,6 +53,7 @@ constexpr int kSctpErrorReturn = 0; #include "rtc_base/thread_annotations.h" #include "rtc_base/trace_event.h" +namespace cricket { namespace { // The biggest SCTP packet. Starting from a 'safe' wire MTU value of 1280, @@ -236,9 +237,39 @@ sctp_sendv_spa CreateSctpSendParams(int sid, } return spa; } -} // namespace -namespace cricket { +std::string SctpErrorCauseCodeToString(SctpErrorCauseCode code) { + switch (code) { + case SctpErrorCauseCode::kInvalidStreamIdentifier: + return "Invalid Stream Identifier"; + case SctpErrorCauseCode::kMissingMandatoryParameter: + return "Missing Mandatory Parameter"; + case SctpErrorCauseCode::kStaleCookieError: + return "Stale Cookie Error"; + case SctpErrorCauseCode::kOutOfResource: + return "Out of Resource"; + case SctpErrorCauseCode::kUnresolvableAddress: + return "Unresolvable Address"; + case SctpErrorCauseCode::kUnrecognizedChunkType: + return "Unrecognized Chunk Type"; + case SctpErrorCauseCode::kInvalidMandatoryParameter: + return "Invalid Mandatory Parameter"; + case SctpErrorCauseCode::kUnrecognizedParameters: + return "Unrecognized Parameters"; + case SctpErrorCauseCode::kNoUserData: + return "No User Data"; + case SctpErrorCauseCode::kCookieReceivedWhileShuttingDown: + return "Cookie Received Whilte Shutting Down"; + case SctpErrorCauseCode::kRestartWithNewAddresses: + return "Restart With New Addresses"; + case SctpErrorCauseCode::kUserInitiatedAbort: + return "User Initiated Abort"; + case SctpErrorCauseCode::kProtocolViolation: + return "Protocol Violation"; + } + return "Unknown error"; +} +} // namespace // Maps SCTP transport ID to UsrsctpTransport object, necessary in send // threshold callback and outgoing packet callback. It also provides a facility @@ -1211,7 +1242,11 @@ void UsrsctpTransport::OnPacketRead(rtc::PacketTransportInternal* transport, } void UsrsctpTransport::OnClosed(rtc::PacketTransportInternal* transport) { - SignalClosedAbruptly(); + webrtc::RTCError error = + webrtc::RTCError(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, + "Transport channel closed"); + error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE); + SignalClosedAbruptly(error); } void UsrsctpTransport::OnSendThresholdCallback() { @@ -1497,9 +1532,17 @@ void UsrsctpTransport::OnNotificationAssocChange( // came up, send any queued resets. 
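Both SCTP transports now report abrupt closure with a payload: SignalClosedAbruptly carries an RTCError tagged with RTCErrorDetailType::SCTP_FAILURE and, when available, the numeric cause code, and usrsctp_transport.cc adds SctpErrorCauseCodeToString() for readable logging. The sketch below assembles a comparable error object; ClosureError, MakeAbortError and CauseCodeToString are illustrative stand-ins, not the WebRTC types, and the string table is abbreviated from the cause codes defined in RFC 4960, section 3.3.10.

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <utility>

std::string CauseCodeToString(uint16_t code) {
  switch (code) {
    case 1:  return "Invalid Stream Identifier";
    case 4:  return "Out of Resource";
    case 8:  return "Unrecognized Parameters";
    case 12: return "User Initiated Abort";
    case 13: return "Protocol Violation";
    default: return "Unknown error";
  }
}

struct ClosureError {
  std::string message;
  bool sctp_failure = false;
  std::optional<uint16_t> cause_code;
};

ClosureError MakeAbortError(std::string reason, std::optional<uint16_t> cause_code) {
  ClosureError error;
  error.message = std::move(reason);
  error.sctp_failure = true;      // Mirrors set_error_detail(SCTP_FAILURE).
  error.cause_code = cause_code;  // Only present when a code was mapped.
  return error;
}

int main() {
  ClosureError error = MakeAbortError("peer aborted", 12);
  if (error.cause_code) {
    std::cout << CauseCodeToString(*error.cause_code) << "\n";  // User Initiated Abort
  }
  return 0;
}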
SendQueuedStreamResets(); break; - case SCTP_COMM_LOST: + case SCTP_COMM_LOST: { RTC_LOG(LS_INFO) << "Association change SCTP_COMM_LOST"; + webrtc::RTCError error = webrtc::RTCError( + webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, + SctpErrorCauseCodeToString( + static_cast(change.sac_error))); + error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE); + error.set_sctp_cause_code(change.sac_error); + SignalClosedAbruptly(error); break; + } case SCTP_RESTART: RTC_LOG(LS_INFO) << "Association change SCTP_RESTART"; break; diff --git a/modules/audio_coding/BUILD.gn b/modules/audio_coding/BUILD.gn index 445b314129..d1d17267e5 100644 --- a/modules/audio_coding/BUILD.gn +++ b/modules/audio_coding/BUILD.gn @@ -1058,6 +1058,7 @@ rtc_library("neteq_tools_minimal") { deps = [ ":default_neteq_factory", ":neteq", + "../../api:array_view", "../../api:neteq_simulator_api", "../../api:rtp_headers", "../../api/audio:audio_frame_api", @@ -1068,7 +1069,6 @@ rtc_library("neteq_tools_minimal") { "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../system_wrappers", - "../rtp_rtcp", "../rtp_rtcp:rtp_rtcp_format", ] absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] @@ -1606,6 +1606,7 @@ if (rtc_include_tests) { deps += [ ":isac_fix", ":webrtc_opus", + "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../test:test_main", "../../test:test_support", diff --git a/modules/audio_coding/OWNERS b/modules/audio_coding/OWNERS index f7a0e4797e..c27c2a8d2d 100644 --- a/modules/audio_coding/OWNERS +++ b/modules/audio_coding/OWNERS @@ -1,3 +1,4 @@ henrik.lundin@webrtc.org minyue@webrtc.org ivoc@webrtc.org +jakobi@webrtc.org diff --git a/modules/audio_coding/acm2/acm_receiver.cc b/modules/audio_coding/acm2/acm_receiver.cc index 0e615cae82..3214ce6f7b 100644 --- a/modules/audio_coding/acm2/acm_receiver.cc +++ b/modules/audio_coding/acm2/acm_receiver.cc @@ -146,20 +146,22 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame, bool* muted) { RTC_DCHECK(muted); - // Accessing members, take the lock. - MutexLock lock(&mutex_); - if (neteq_->GetAudio(audio_frame, muted) != NetEq::kOK) { + int current_sample_rate_hz = 0; + if (neteq_->GetAudio(audio_frame, muted, ¤t_sample_rate_hz) != + NetEq::kOK) { RTC_LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed."; return -1; } - const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz(); + RTC_DCHECK_NE(current_sample_rate_hz, 0); // Update if resampling is required. const bool need_resampling = (desired_freq_hz != -1) && (current_sample_rate_hz != desired_freq_hz); + // Accessing members, take the lock. + MutexLock lock(&mutex_); if (need_resampling && !resampled_last_output_frame_) { // Prime the resampler with the last frame. int16_t temp_output[AudioFrame::kMaxDataSizeSamples]; @@ -174,8 +176,8 @@ int AcmReceiver::GetAudio(int desired_freq_hz, } } - // TODO(henrik.lundin) Glitches in the output may appear if the output rate - // from NetEq changes. See WebRTC issue 3923. + // TODO(bugs.webrtc.org/3923) Glitches in the output may appear if the output + // rate from NetEq changes. if (need_resampling) { // TODO(yujo): handle this more efficiently for muted frames. 
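In the acm_receiver.cc hunk above, NetEq::GetAudio() now reports the output sample rate through an out parameter, so AcmReceiver::GetAudio() takes its mutex only around the resampler state rather than around the NetEq call. A sketch of that out-parameter-plus-resample-decision shape; DecoderLike and NeedsResampling are stand-ins, not NetEq or the real method.

#include <cassert>

struct DecoderLike {
  // Returns 0 on success and reports the output rate via |sample_rate_hz|.
  int GetAudio(int* sample_rate_hz) {
    *sample_rate_hz = 48000;  // Pretend the decoder produced 48 kHz this call.
    return 0;
  }
};

bool NeedsResampling(DecoderLike& decoder, int desired_freq_hz) {
  int current_sample_rate_hz = 0;
  if (decoder.GetAudio(&current_sample_rate_hz) != 0) return false;
  assert(current_sample_rate_hz != 0);
  // -1 means "no preference", matching the logic in the hunk above.
  return desired_freq_hz != -1 && current_sample_rate_hz != desired_freq_hz;
}

int main() {
  DecoderLike decoder;
  assert(NeedsResampling(decoder, 16000));
  assert(!NeedsResampling(decoder, -1));
  return 0;
}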
int samples_per_channel_int = resampler_.Resample10Msec( diff --git a/modules/audio_coding/acm2/acm_receiver_unittest.cc b/modules/audio_coding/acm2/acm_receiver_unittest.cc index a8da77e6b6..2338a53235 100644 --- a/modules/audio_coding/acm2/acm_receiver_unittest.cc +++ b/modules/audio_coding/acm2/acm_receiver_unittest.cc @@ -119,7 +119,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback, rtp_header_, rtc::ArrayView(payload_data, payload_len_bytes)); if (ret_val < 0) { - assert(false); + RTC_NOTREACHED(); return -1; } rtp_header_.sequenceNumber++; diff --git a/modules/audio_coding/acm2/acm_resampler.cc b/modules/audio_coding/acm2/acm_resampler.cc index ca3583e32c..367ec2b9cd 100644 --- a/modules/audio_coding/acm2/acm_resampler.cc +++ b/modules/audio_coding/acm2/acm_resampler.cc @@ -31,7 +31,7 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio, size_t in_length = in_freq_hz * num_audio_channels / 100; if (in_freq_hz == out_freq_hz) { if (out_capacity_samples < in_length) { - assert(false); + RTC_NOTREACHED(); return -1; } memcpy(out_audio, in_audio, in_length * sizeof(int16_t)); diff --git a/modules/audio_coding/acm2/acm_send_test.cc b/modules/audio_coding/acm2/acm_send_test.cc index b3e1e1ecb2..cda668dab8 100644 --- a/modules/audio_coding/acm2/acm_send_test.cc +++ b/modules/audio_coding/acm2/acm_send_test.cc @@ -51,8 +51,8 @@ AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source, input_frame_.sample_rate_hz_ = source_rate_hz_; input_frame_.num_channels_ = 1; input_frame_.samples_per_channel_ = input_block_size_samples_; - assert(input_block_size_samples_ * input_frame_.num_channels_ <= - AudioFrame::kMaxDataSizeSamples); + RTC_DCHECK_LE(input_block_size_samples_ * input_frame_.num_channels_, + AudioFrame::kMaxDataSizeSamples); acm_->RegisterTransportCallback(this); } @@ -81,8 +81,8 @@ bool AcmSendTestOldApi::RegisterCodec(const char* payload_name, factory->MakeAudioEncoder(payload_type, format, absl::nullopt)); codec_registered_ = true; input_frame_.num_channels_ = num_channels; - assert(input_block_size_samples_ * input_frame_.num_channels_ <= - AudioFrame::kMaxDataSizeSamples); + RTC_DCHECK_LE(input_block_size_samples_ * input_frame_.num_channels_, + AudioFrame::kMaxDataSizeSamples); return codec_registered_; } @@ -90,13 +90,13 @@ void AcmSendTestOldApi::RegisterExternalCodec( std::unique_ptr external_speech_encoder) { input_frame_.num_channels_ = external_speech_encoder->NumChannels(); acm_->SetEncoder(std::move(external_speech_encoder)); - assert(input_block_size_samples_ * input_frame_.num_channels_ <= - AudioFrame::kMaxDataSizeSamples); + RTC_DCHECK_LE(input_block_size_samples_ * input_frame_.num_channels_, + AudioFrame::kMaxDataSizeSamples); codec_registered_ = true; } std::unique_ptr AcmSendTestOldApi::NextPacket() { - assert(codec_registered_); + RTC_DCHECK(codec_registered_); if (filter_.test(static_cast(payload_type_))) { // This payload type should be filtered out. Since the payload type is the // same throughout the whole test run, no packet at all will be delivered. 
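Many hunks in this section replace bare assert() with RTC_DCHECK_* / RTC_NOTREACHED from rtc_base/checks.h (several BUILD.gn and include additions above exist only for that). Like assert, these checks are typically active only in debug or dcheck-enabled builds, but they name the failing condition and operands. The macro below is a simplified stand-in written for this sketch, not the real rtc_base/checks.h implementation.

#include <cstdio>
#include <cstdlib>

// Simplified stand-in for an RTC_DCHECK_LE-style macro: active only when
// NDEBUG is not defined, and it names both operands when the check fails.
#ifndef NDEBUG
#define MY_DCHECK_LE(a, b)                                      \
  do {                                                          \
    if (!((a) <= (b))) {                                        \
      std::fprintf(stderr, "Check failed: %s <= %s\n", #a, #b); \
      std::abort();                                             \
    }                                                           \
  } while (0)
#else
#define MY_DCHECK_LE(a, b) \
  do {                     \
  } while (0)
#endif

int main() {
  MY_DCHECK_LE(3, 5);  // Passes in debug builds; a no-op when NDEBUG is set.
  return 0;
}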
@@ -133,15 +133,16 @@ int32_t AcmSendTestOldApi::SendData(AudioFrameType frame_type, payload_type_ = payload_type; timestamp_ = timestamp; last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes); - assert(last_payload_vec_.size() == payload_len_bytes); + RTC_DCHECK_EQ(last_payload_vec_.size(), payload_len_bytes); data_to_send_ = true; return 0; } std::unique_ptr AcmSendTestOldApi::CreatePacket() { const size_t kRtpHeaderSize = 12; - size_t allocated_bytes = last_payload_vec_.size() + kRtpHeaderSize; - uint8_t* packet_memory = new uint8_t[allocated_bytes]; + rtc::CopyOnWriteBuffer packet_buffer(last_payload_vec_.size() + + kRtpHeaderSize); + uint8_t* packet_memory = packet_buffer.MutableData(); // Populate the header bytes. packet_memory[0] = 0x80; packet_memory[1] = static_cast(payload_type_); @@ -162,8 +163,8 @@ std::unique_ptr AcmSendTestOldApi::CreatePacket() { // Copy the payload data. memcpy(packet_memory + kRtpHeaderSize, &last_payload_vec_[0], last_payload_vec_.size()); - std::unique_ptr packet( - new Packet(packet_memory, allocated_bytes, clock_.TimeInMilliseconds())); + auto packet = std::make_unique(std::move(packet_buffer), + clock_.TimeInMilliseconds()); RTC_DCHECK(packet); RTC_DCHECK(packet->valid_header()); return packet; diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc index 648ae6e5ea..7d0f4d1e84 100644 --- a/modules/audio_coding/acm2/audio_coding_module.cc +++ b/modules/audio_coding/acm2/audio_coding_module.cc @@ -343,13 +343,13 @@ int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) { int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data) { if (audio_frame.samples_per_channel_ == 0) { - assert(false); + RTC_NOTREACHED(); RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, payload length is zero"; return -1; } if (audio_frame.sample_rate_hz_ > kMaxInputSampleRateHz) { - assert(false); + RTC_NOTREACHED(); RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid"; return -1; } diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc index 5b0577745c..74654565e3 100644 --- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc +++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc @@ -840,9 +840,12 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test { std::string win64, std::string android_arm32, std::string android_arm64, - std::string android_arm64_clang) { + std::string android_arm64_clang, + std::string mac_arm64) { #if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS) return win64; +#elif defined(WEBRTC_MAC) && defined(WEBRTC_ARCH_ARM64) + return mac_arm64; #elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM) return android_arm32; #elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64) @@ -918,7 +921,13 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test { TEST_F(AcmReceiverBitExactnessOldApi, 8kHzOutput) { std::string others_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "e0c966d7b8c36ff60167988fa35d33e0" +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. +#if defined(WEBRTC_LINUX) && defined(NDEBUG) + : "5af28619e3a3c606b2242c9a12f4f64e"; +#else : "7d8f6b84abd1e57ec010a53bc2130652"; +#endif std::string win64_checksum_reference = GetCPUInfo(kAVX2) != 0 ? 
"405a50f0bcb8827e20aa944299fc59f6" : "0ed5830930f5527a01bbec0ba11f8541"; @@ -926,13 +935,21 @@ TEST_F(AcmReceiverBitExactnessOldApi, 8kHzOutput) { PlatformChecksum(others_checksum_reference, win64_checksum_reference, "b892ed69c38b21b16c132ec2ce03aa7b", "4598140b5e4f7ee66c5adad609e65a3e", - "5fec8d770778ef7969ec98c56d9eb10f")); + "5fec8d770778ef7969ec98c56d9eb10f", + "636efe6d0a148f22c5383f356da3deac")); } TEST_F(AcmReceiverBitExactnessOldApi, 16kHzOutput) { std::string others_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "a63c578e1195c8420f453962c6d8519c" + +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. +#if defined(WEBRTC_LINUX) && defined(NDEBUG) + : "f788cc9200ac4a7d498d9081987808a3"; +#else : "6bac83762c1306b932cd25a560155681"; +#endif std::string win64_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "58fd62a5c49ee513f9fa6fe7dbf62c97" : "0509cf0672f543efb4b050e8cffefb1d"; @@ -940,13 +957,20 @@ TEST_F(AcmReceiverBitExactnessOldApi, 16kHzOutput) { PlatformChecksum(others_checksum_reference, win64_checksum_reference, "3cea9abbeabbdea9a79719941b241af5", "f2aad418af974a3b1694d5ae5cc2c3c7", - "9d4b92c31c00e321a4cff29ad002d6a2")); + "9d4b92c31c00e321a4cff29ad002d6a2", + "1e2d1b482fdc924f79a838503ee7ead5")); } TEST_F(AcmReceiverBitExactnessOldApi, 32kHzOutput) { std::string others_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "8775ce387f44dc5ff4a26da295d5ee7c" +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. +#if defined(WEBRTC_LINUX) && defined(NDEBUG) + : "5b84b2a179cb8533a8f9bcd19612e7f0"; +#else : "e319222ca47733709f90fdf33c8574db"; +#endif std::string win64_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "04ce6a1dac5ffdd8438d804623d0132f" : "39a4a7a1c455b35baeffb9fd193d7858"; @@ -954,13 +978,20 @@ TEST_F(AcmReceiverBitExactnessOldApi, 32kHzOutput) { PlatformChecksum(others_checksum_reference, win64_checksum_reference, "4df55b3b62bcbf4328786d474ae87f61", "100869c8dcde51346c2073e52a272d98", - "ff58d3153d2780a3df6bc2068844cb2d")); + "ff58d3153d2780a3df6bc2068844cb2d", + "51788e9784a10ae14a030f075a039205")); } TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutput) { std::string others_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "7a55700b7ca9aa60237db58b33e55606" +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. +#if defined(WEBRTC_LINUX) && defined(NDEBUG) + : "a2459749062f96297283cce4a8c7e6db"; +#else : "57d1d316c88279f4f3da3511665069a9"; +#endif std::string win64_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "f59833d9b0924f4b0704707dd3589f80" : "74cbe7345e2b6b45c1e455a5d1e921ca"; @@ -968,7 +999,8 @@ TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutput) { PlatformChecksum(others_checksum_reference, win64_checksum_reference, "f52bc7bf0f499c9da25932fdf176c4ec", "bd44bf97e7899186532f91235cef444d", - "364d403dae55d73cd69e6dbd6b723a4d")); + "364d403dae55d73cd69e6dbd6b723a4d", + "71bc5c15a151400517c2119d1602ee9f")); } TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) { @@ -1048,7 +1080,13 @@ TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) { new rtc::RefCountedObject); std::string others_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "7a55700b7ca9aa60237db58b33e55606" +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. 
+#if defined(WEBRTC_LINUX) && defined(NDEBUG) + : "a2459749062f96297283cce4a8c7e6db"; +#else : "57d1d316c88279f4f3da3511665069a9"; +#endif std::string win64_checksum_reference = GetCPUInfo(kAVX2) != 0 ? "f59833d9b0924f4b0704707dd3589f80" : "74cbe7345e2b6b45c1e455a5d1e921ca"; @@ -1056,7 +1094,8 @@ TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) { PlatformChecksum(others_checksum_reference, win64_checksum_reference, "f52bc7bf0f499c9da25932fdf176c4ec", "bd44bf97e7899186532f91235cef444d", - "364d403dae55d73cd69e6dbd6b723a4d"), + "364d403dae55d73cd69e6dbd6b723a4d", + "71bc5c15a151400517c2119d1602ee9f"), factory, [](AudioCodingModule* acm) { acm->SetReceiveCodecs({{0, {"MockPCMu", 8000, 1}}, {103, {"ISAC", 16000, 1}}, @@ -1277,12 +1316,14 @@ TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) { "9336a9b993cbd8a751f0e8958e66c89c", "5c2eb46199994506236f68b2c8e51b0d", "343f1f42be0607c61e6516aece424609", + "2c9cb15d4ed55b5a0cadd04883bc73b0", "2c9cb15d4ed55b5a0cadd04883bc73b0"), AcmReceiverBitExactnessOldApi::PlatformChecksum( "3c79f16f34218271f3dca4e2b1dfe1bb", "d42cb5195463da26c8129bbfe73a22e6", "83de248aea9c3c2bd680b6952401b4ca", "3c79f16f34218271f3dca4e2b1dfe1bb", + "3c79f16f34218271f3dca4e2b1dfe1bb", "3c79f16f34218271f3dca4e2b1dfe1bb"), 33, test::AcmReceiveTestOldApi::kMonoOutput); } @@ -1294,12 +1335,14 @@ TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) { "14d63c5f08127d280e722e3191b73bdd", "9a81e467eb1485f84aca796f8ea65011", "ef75e900e6f375e3061163c53fd09a63", + "1ad29139a04782a33daad8c2b9b35875", "1ad29139a04782a33daad8c2b9b35875"), AcmReceiverBitExactnessOldApi::PlatformChecksum( "9e0a0ab743ad987b55b8e14802769c56", "ebe04a819d3a9d83a83a17f271e1139a", "97aeef98553b5a4b5a68f8b716e8eaf0", "9e0a0ab743ad987b55b8e14802769c56", + "9e0a0ab743ad987b55b8e14802769c56", "9e0a0ab743ad987b55b8e14802769c56"), 16, test::AcmReceiveTestOldApi::kMonoOutput); } @@ -1314,13 +1357,21 @@ TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) { TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacSwb30ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960)); Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. 
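The bit-exactness tests above gain a sixth, mac_arm64 reference checksum and, for several output rates, a Linux x86 optimized-build alternative selected with the preprocessor. The sketch below shows the same compile-time selection idea using generic compiler macros (__APPLE__, __aarch64__, __linux__, NDEBUG) in place of the WEBRTC_* ones, with placeholder strings instead of real checksums.

#include <iostream>
#include <string>

// Pick a reference checksum per platform at compile time.
std::string ReferenceChecksum() {
#if defined(__APPLE__) && defined(__aarch64__)
  return "mac-arm64-reference";
#elif defined(__linux__) && defined(NDEBUG)
  return "linux-optimized-reference";
#else
  return "default-reference";
#endif
}

int main() {
  std::cout << ReferenceChecksum() << "\n";
  return 0;
}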
+#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86) + "13d4d2a4c9e8e94a4b74a176e4bf7cc4", +#else "5683b58da0fbf2063c7adc2e6bfb3fb8", +#endif "2b3c387d06f00b7b7aad4c9be56fb83d", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio"), + "android_arm64_audio", "android_arm64_clang_audio", + "5683b58da0fbf2063c7adc2e6bfb3fb8"), AcmReceiverBitExactnessOldApi::PlatformChecksum( "ce86106a93419aefb063097108ec94ab", "bcc2041e7744c7ebd9f701866856849c", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload"), + "android_arm64_payload", "android_arm64_clang_payload", + "ce86106a93419aefb063097108ec94ab"), 33, test::AcmReceiveTestOldApi::kMonoOutput); } #endif @@ -1396,11 +1447,13 @@ TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Ilbc_30ms) { Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( "7b6ec10910debd9af08011d3ed5249f7", "7b6ec10910debd9af08011d3ed5249f7", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio"), + "android_arm64_audio", "android_arm64_clang_audio", + "7b6ec10910debd9af08011d3ed5249f7"), AcmReceiverBitExactnessOldApi::PlatformChecksum( "cfae2e9f6aba96e145f2bcdd5050ce78", "cfae2e9f6aba96e145f2bcdd5050ce78", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload"), + "android_arm64_payload", "android_arm64_clang_payload", + "cfae2e9f6aba96e145f2bcdd5050ce78"), 33, test::AcmReceiveTestOldApi::kMonoOutput); } #endif @@ -1415,11 +1468,13 @@ TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_20ms) { Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( "e99c89be49a46325d03c0d990c292d68", "e99c89be49a46325d03c0d990c292d68", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio"), + "android_arm64_audio", "android_arm64_clang_audio", + "e99c89be49a46325d03c0d990c292d68"), AcmReceiverBitExactnessOldApi::PlatformChecksum( "fc68a87e1380614e658087cb35d5ca10", "fc68a87e1380614e658087cb35d5ca10", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload"), + "android_arm64_payload", "android_arm64_clang_payload", + "fc68a87e1380614e658087cb35d5ca10"), 50, test::AcmReceiveTestOldApi::kMonoOutput); } @@ -1433,11 +1488,13 @@ TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_stereo_20ms) { Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( "e280aed283e499d37091b481ca094807", "e280aed283e499d37091b481ca094807", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio"), + "android_arm64_audio", "android_arm64_clang_audio", + "e280aed283e499d37091b481ca094807"), AcmReceiverBitExactnessOldApi::PlatformChecksum( "66516152eeaa1e650ad94ff85f668dac", "66516152eeaa1e650ad94ff85f668dac", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload"), + "android_arm64_payload", "android_arm64_clang_payload", + "66516152eeaa1e650ad94ff85f668dac"), 50, test::AcmReceiveTestOldApi::kStereoOutput); } @@ -1456,14 +1513,16 @@ const std::string audio_checksum = audio_maybe_sse, "6fcceb83acf427730570bc13eeac920c", "fd96f15d547c4e155daeeef4253b174e", - "fd96f15d547c4e155daeeef4253b174e"); + "fd96f15d547c4e155daeeef4253b174e", + "Mac_arm64_checksum_placeholder"); const std::string payload_checksum = AcmReceiverBitExactnessOldApi::PlatformChecksum( payload_maybe_sse, payload_maybe_sse, "4bd846d0aa5656ecd5dfd85701a1b78c", "7efbfc9f8e3b4b2933ae2d01ab919028", - "7efbfc9f8e3b4b2933ae2d01ab919028"); + "7efbfc9f8e3b4b2933ae2d01ab919028", + "Mac_arm64_checksum_placeholder"); } // namespace // TODO(http://bugs.webrtc.org/12518): Enable the 
test after Opus has been @@ -1523,13 +1582,13 @@ TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusManyChannels) { "audio checksum check downstream|8051617907766bec5f4e4a4f7c6d5291", "8051617907766bec5f4e4a4f7c6d5291", "6183752a62dc1368f959eb3a8c93b846", "android arm64 audio checksum", - "48bf1f3ca0b72f3c9cdfbe79956122b1"), + "48bf1f3ca0b72f3c9cdfbe79956122b1", "Mac_arm64_checksum_placeholder"), // payload_checksum, AcmReceiverBitExactnessOldApi::PlatformChecksum( // payload checksum "payload checksum check downstream|b09c52e44b2bdd9a0809e3a5b1623a76", "b09c52e44b2bdd9a0809e3a5b1623a76", "2ea535ef60f7d0c9d89e3002d4c2124f", "android arm64 payload checksum", - "e87995a80f50a0a735a230ca8b04a67d"), + "e87995a80f50a0a735a230ca8b04a67d", "Mac_arm64_checksum_placeholder"), 50, test::AcmReceiveTestOldApi::kQuadOutput, decoder_factory); } @@ -1552,12 +1611,12 @@ TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusFromFormat_stereo_20ms_voip) { Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( audio_maybe_sse, audio_maybe_sse, "f1cefe107ffdced7694d7f735342adf3", "3b1bfe5dd8ed16ee5b04b93a5b5e7e48", - "3b1bfe5dd8ed16ee5b04b93a5b5e7e48"), + "3b1bfe5dd8ed16ee5b04b93a5b5e7e48", "Mac_arm64_checksum_placeholder"), AcmReceiverBitExactnessOldApi::PlatformChecksum( payload_maybe_sse, payload_maybe_sse, "5e79a2f51c633fe145b6c10ae198d1aa", "e730050cb304d54d853fd285ab0424fa", - "e730050cb304d54d853fd285ab0424fa"), + "e730050cb304d54d853fd285ab0424fa", "Mac_arm64_checksum_placeholder"), 50, test::AcmReceiveTestOldApi::kStereoOutput); } diff --git a/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc index 20752639fc..903ac64aff 100644 --- a/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc +++ b/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc @@ -11,6 +11,7 @@ #include "modules/audio_coding/codecs/isac/fix/include/isacfix.h" #include "modules/audio_coding/codecs/isac/fix/source/settings.h" #include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h" +#include "rtc_base/checks.h" using std::string; @@ -83,7 +84,7 @@ float IsacSpeedTest::EncodeABlock(int16_t* in_data, } clocks = clock() - clocks; *encoded_bytes = static_cast(value); - assert(*encoded_bytes <= max_bytes); + RTC_DCHECK_LE(*encoded_bytes, max_bytes); return 1000.0 * clocks / CLOCKS_PER_SEC; } diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc b/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc index 2ca1d4ca98..f081a5380f 100644 --- a/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc +++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc @@ -88,7 +88,7 @@ class SplitBySamplesTest : public ::testing::TestWithParam { samples_per_ms_ = 8; break; default: - assert(false); + RTC_NOTREACHED(); break; } } diff --git a/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc index 3d5ba0b7c8..f61aacc474 100644 --- a/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc +++ b/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc @@ -10,6 +10,7 @@ #include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h" +#include "rtc_base/checks.h" #include "rtc_base/format_macros.h" #include "test/gtest.h" #include "test/testsupport/file_utils.h" @@ -43,7 +44,7 @@ void AudioCodecSpeedTest::SetUp() { save_out_data_ = get<4>(GetParam()); FILE* fp = fopen(in_filename_.c_str(), "rb"); - assert(fp != NULL); + 
RTC_DCHECK(fp); // Obtain file size. fseek(fp, 0, SEEK_END); @@ -83,7 +84,7 @@ void AudioCodecSpeedTest::SetUp() { out_filename = test::OutputPath() + out_filename + ".pcm"; out_file_ = fopen(out_filename.c_str(), "wb"); - assert(out_file_ != NULL); + RTC_DCHECK(out_file_); printf("Output to be saved in %s.\n", out_filename.c_str()); } diff --git a/modules/audio_coding/neteq/accelerate.cc b/modules/audio_coding/neteq/accelerate.cc index 6161a8f91b..e97191d8d2 100644 --- a/modules/audio_coding/neteq/accelerate.cc +++ b/modules/audio_coding/neteq/accelerate.cc @@ -69,7 +69,7 @@ Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch( peak_index = (fs_mult_120 / peak_index) * peak_index; } - assert(fs_mult_120 >= peak_index); // Should be handled in Process(). + RTC_DCHECK_GE(fs_mult_120, peak_index); // Should be handled in Process(). // Copy first part; 0 to 15 ms. output->PushBackInterleaved( rtc::ArrayView(input, fs_mult_120 * num_channels_)); diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc index 662da2fdad..2277872ee4 100644 --- a/modules/audio_coding/neteq/audio_decoder_unittest.cc +++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc @@ -77,9 +77,9 @@ double MseInputOutput(const std::vector& input, size_t num_samples, size_t channels, int delay) { - assert(delay < static_cast(num_samples)); - assert(num_samples <= input.size()); - assert(num_samples * channels <= output.size()); + RTC_DCHECK_LT(delay, static_cast(num_samples)); + RTC_DCHECK_LE(num_samples, input.size()); + RTC_DCHECK_LE(num_samples * channels, output.size()); if (num_samples == 0) return 0.0; double squared_sum = 0.0; @@ -303,7 +303,7 @@ class AudioDecoderPcm16BTest : public AudioDecoderTest { frame_size_ = 20 * codec_input_rate_hz_ / 1000; data_length_ = 10 * frame_size_; decoder_ = new AudioDecoderPcm16B(codec_input_rate_hz_, 1); - assert(decoder_); + RTC_DCHECK(decoder_); AudioEncoderPcm16B::Config config; config.sample_rate_hz = codec_input_rate_hz_; config.frame_size_ms = @@ -320,7 +320,7 @@ class AudioDecoderIlbcTest : public AudioDecoderTest { frame_size_ = 240; data_length_ = 10 * frame_size_; decoder_ = new AudioDecoderIlbcImpl; - assert(decoder_); + RTC_DCHECK(decoder_); AudioEncoderIlbcConfig config; config.frame_size_ms = 30; audio_encoder_.reset(new AudioEncoderIlbcImpl(config, payload_type_)); @@ -414,7 +414,7 @@ class AudioDecoderG722Test : public AudioDecoderTest { frame_size_ = 160; data_length_ = 10 * frame_size_; decoder_ = new AudioDecoderG722Impl; - assert(decoder_); + RTC_DCHECK(decoder_); AudioEncoderG722Config config; config.frame_size_ms = 10; config.num_channels = 1; @@ -430,7 +430,7 @@ class AudioDecoderG722StereoTest : public AudioDecoderTest { frame_size_ = 160; data_length_ = 10 * frame_size_; decoder_ = new AudioDecoderG722StereoImpl; - assert(decoder_); + RTC_DCHECK(decoder_); AudioEncoderG722Config config; config.frame_size_ms = 10; config.num_channels = 2; @@ -587,7 +587,9 @@ TEST_F(AudioDecoderIsacFixTest, EncodeDecode) { int delay = 54; // Delay from input to output. 
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM) static const int kEncodedBytes = 685; -#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64) +#elif defined(WEBRTC_ARCH_ARM64) + static const int kEncodedBytes = 673; +#elif defined(WEBRTC_MAC) && defined(WEBRTC_ARCH_ARM64) // M1 Mac static const int kEncodedBytes = 673; #else static const int kEncodedBytes = 671; diff --git a/modules/audio_coding/neteq/audio_multi_vector.cc b/modules/audio_coding/neteq/audio_multi_vector.cc index 349d75dcdc..290d7eae22 100644 --- a/modules/audio_coding/neteq/audio_multi_vector.cc +++ b/modules/audio_coding/neteq/audio_multi_vector.cc @@ -19,7 +19,7 @@ namespace webrtc { AudioMultiVector::AudioMultiVector(size_t N) { - assert(N > 0); + RTC_DCHECK_GT(N, 0); if (N < 1) N = 1; for (size_t n = 0; n < N; ++n) { @@ -29,7 +29,7 @@ AudioMultiVector::AudioMultiVector(size_t N) { } AudioMultiVector::AudioMultiVector(size_t N, size_t initial_size) { - assert(N > 0); + RTC_DCHECK_GT(N, 0); if (N < 1) N = 1; for (size_t n = 0; n < N; ++n) { @@ -91,7 +91,7 @@ void AudioMultiVector::PushBackInterleaved( } void AudioMultiVector::PushBack(const AudioMultiVector& append_this) { - assert(num_channels_ == append_this.num_channels_); + RTC_DCHECK_EQ(num_channels_, append_this.num_channels_); if (num_channels_ == append_this.num_channels_) { for (size_t i = 0; i < num_channels_; ++i) { channels_[i]->PushBack(append_this[i]); @@ -101,10 +101,10 @@ void AudioMultiVector::PushBack(const AudioMultiVector& append_this) { void AudioMultiVector::PushBackFromIndex(const AudioMultiVector& append_this, size_t index) { - assert(index < append_this.Size()); + RTC_DCHECK_LT(index, append_this.Size()); index = std::min(index, append_this.Size() - 1); size_t length = append_this.Size() - index; - assert(num_channels_ == append_this.num_channels_); + RTC_DCHECK_EQ(num_channels_, append_this.num_channels_); if (num_channels_ == append_this.num_channels_) { for (size_t i = 0; i < num_channels_; ++i) { channels_[i]->PushBack(append_this[i], length, index); @@ -162,9 +162,9 @@ size_t AudioMultiVector::ReadInterleavedFromEnd(size_t length, void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this, size_t length, size_t position) { - assert(num_channels_ == insert_this.num_channels_); + RTC_DCHECK_EQ(num_channels_, insert_this.num_channels_); // Cap |length| at the length of |insert_this|. 
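The AudioMultiVector changes above keep the defensive style of the old asserts: DCHECK the precondition, then clamp with std::min so builds without DCHECKs still stay in range. A minimal sketch of that check-then-clamp pattern; ClampedIndex is a made-up helper for illustration.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Debug-assert the precondition, but still clamp so release builds without
// the check cannot index out of range.
size_t ClampedIndex(const std::vector<int>& v, size_t index) {
  assert(!v.empty());
  assert(index < v.size());
  return std::min(index, v.size() - 1);
}

int main() {
  const std::vector<int> v{1, 2, 3};
  return v[ClampedIndex(v, 2)] == 3 ? 0 : 1;
}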
- assert(length <= insert_this.Size()); + RTC_DCHECK_LE(length, insert_this.Size()); length = std::min(length, insert_this.Size()); if (num_channels_ == insert_this.num_channels_) { for (size_t i = 0; i < num_channels_; ++i) { @@ -175,7 +175,7 @@ void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this, void AudioMultiVector::CrossFade(const AudioMultiVector& append_this, size_t fade_length) { - assert(num_channels_ == append_this.num_channels_); + RTC_DCHECK_EQ(num_channels_, append_this.num_channels_); if (num_channels_ == append_this.num_channels_) { for (size_t i = 0; i < num_channels_; ++i) { channels_[i]->CrossFade(append_this[i], fade_length); @@ -188,7 +188,7 @@ size_t AudioMultiVector::Channels() const { } size_t AudioMultiVector::Size() const { - assert(channels_[0]); + RTC_DCHECK(channels_[0]); return channels_[0]->Size(); } @@ -202,13 +202,13 @@ void AudioMultiVector::AssertSize(size_t required_size) { } bool AudioMultiVector::Empty() const { - assert(channels_[0]); + RTC_DCHECK(channels_[0]); return channels_[0]->Empty(); } void AudioMultiVector::CopyChannel(size_t from_channel, size_t to_channel) { - assert(from_channel < num_channels_); - assert(to_channel < num_channels_); + RTC_DCHECK_LT(from_channel, num_channels_); + RTC_DCHECK_LT(to_channel, num_channels_); channels_[from_channel]->CopyTo(channels_[to_channel]); } diff --git a/modules/audio_coding/neteq/audio_vector.cc b/modules/audio_coding/neteq/audio_vector.cc index b3ad48fc43..5e435e944d 100644 --- a/modules/audio_coding/neteq/audio_vector.cc +++ b/modules/audio_coding/neteq/audio_vector.cc @@ -247,8 +247,8 @@ void AudioVector::OverwriteAt(const int16_t* insert_this, void AudioVector::CrossFade(const AudioVector& append_this, size_t fade_length) { // Fade length cannot be longer than the current vector or |append_this|. - assert(fade_length <= Size()); - assert(fade_length <= append_this.Size()); + RTC_DCHECK_LE(fade_length, Size()); + RTC_DCHECK_LE(fade_length, append_this.Size()); fade_length = std::min(fade_length, Size()); fade_length = std::min(fade_length, append_this.Size()); size_t position = Size() - fade_length + begin_index_; @@ -265,7 +265,7 @@ void AudioVector::CrossFade(const AudioVector& append_this, (16384 - alpha) * append_this[i] + 8192) >> 14; } - assert(alpha >= 0); // Verify that the slope was correct. + RTC_DCHECK_GE(alpha, 0); // Verify that the slope was correct. // Append what is left of |append_this|. 
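AudioVector::CrossFade() above ramps a Q14 weight (16384 represents 1.0) and rounds with +8192 before the right shift by 14. A small, self-contained sketch of that fixed-point mix; MixQ14 is an illustrative helper, not WebRTC code.

#include <cassert>
#include <cstdint>

// out = (alpha * a + (16384 - alpha) * b) / 16384, computed in Q14 with
// +8192 for rounding, matching the arithmetic in the hunk above.
int16_t MixQ14(int16_t a, int16_t b, int32_t alpha) {
  // alpha is in [0, 16384]; 16384 means "all a", 0 means "all b".
  return static_cast<int16_t>((alpha * a + (16384 - alpha) * b + 8192) >> 14);
}

int main() {
  assert(MixQ14(100, -100, 16384) == 100);  // Start of the fade: all a.
  assert(MixQ14(100, -100, 0) == -100);     // End of the fade: all b.
  assert(MixQ14(100, -100, 8192) == 0);     // Halfway: equal weight.
  return 0;
}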
size_t samples_to_push_back = append_this.Size() - fade_length; if (samples_to_push_back > 0) diff --git a/modules/audio_coding/neteq/background_noise.cc b/modules/audio_coding/neteq/background_noise.cc index c0dcc5e04d..ae4645c78e 100644 --- a/modules/audio_coding/neteq/background_noise.cc +++ b/modules/audio_coding/neteq/background_noise.cc @@ -136,7 +136,7 @@ void BackgroundNoise::GenerateBackgroundNoise( int16_t* buffer) { constexpr size_t kNoiseLpcOrder = kMaxLpcOrder; int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125]; - assert(num_noise_samples <= (kMaxSampleRate / 8000 * 125)); + RTC_DCHECK_LE(num_noise_samples, (kMaxSampleRate / 8000 * 125)); RTC_DCHECK_GE(random_vector.size(), num_noise_samples); int16_t* noise_samples = &buffer[kNoiseLpcOrder]; if (initialized()) { @@ -178,44 +178,44 @@ void BackgroundNoise::GenerateBackgroundNoise( } int32_t BackgroundNoise::Energy(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].energy; } void BackgroundNoise::SetMuteFactor(size_t channel, int16_t value) { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); channel_parameters_[channel].mute_factor = value; } int16_t BackgroundNoise::MuteFactor(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].mute_factor; } const int16_t* BackgroundNoise::Filter(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].filter; } const int16_t* BackgroundNoise::FilterState(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].filter_state; } void BackgroundNoise::SetFilterState(size_t channel, rtc::ArrayView input) { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); size_t length = std::min(input.size(), kMaxLpcOrder); memcpy(channel_parameters_[channel].filter_state, input.data(), length * sizeof(int16_t)); } int16_t BackgroundNoise::Scale(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].scale; } int16_t BackgroundNoise::ScaleShift(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].scale_shift; } @@ -240,7 +240,7 @@ void BackgroundNoise::IncrementEnergyThreshold(size_t channel, // to the limited-width operations, it is not exactly the same. The // difference should be inaudible, but bit-exactness would not be // maintained. 
- assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); ChannelParameters& parameters = channel_parameters_[channel]; int32_t temp_energy = (kThresholdIncrement * parameters.low_energy_update_threshold) >> 16; @@ -278,7 +278,7 @@ void BackgroundNoise::SaveParameters(size_t channel, const int16_t* filter_state, int32_t sample_energy, int32_t residual_energy) { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); ChannelParameters& parameters = channel_parameters_[channel]; memcpy(parameters.filter, lpc_coefficients, (kMaxLpcOrder + 1) * sizeof(int16_t)); diff --git a/modules/audio_coding/neteq/comfort_noise.cc b/modules/audio_coding/neteq/comfort_noise.cc index a21cddab4d..b02e3d747f 100644 --- a/modules/audio_coding/neteq/comfort_noise.cc +++ b/modules/audio_coding/neteq/comfort_noise.cc @@ -45,8 +45,8 @@ int ComfortNoise::UpdateParameters(const Packet& packet) { int ComfortNoise::Generate(size_t requested_length, AudioMultiVector* output) { // TODO(hlundin): Change to an enumerator and skip assert. - assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 || - fs_hz_ == 48000); + RTC_DCHECK(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 || + fs_hz_ == 48000); // Not adapted for multi-channel yet. if (output->Channels() != 1) { RTC_LOG(LS_ERROR) << "No multi-channel support"; diff --git a/modules/audio_coding/neteq/decision_logic.cc b/modules/audio_coding/neteq/decision_logic.cc index cb6daf062e..d702729881 100644 --- a/modules/audio_coding/neteq/decision_logic.cc +++ b/modules/audio_coding/neteq/decision_logic.cc @@ -96,7 +96,8 @@ void DecisionLogic::SoftReset() { void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) { // TODO(hlundin): Change to an enumerator and skip assert. - assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000); + RTC_DCHECK(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || + fs_hz == 48000); sample_rate_ = fs_hz; output_size_samples_ = output_size_samples; } diff --git a/modules/audio_coding/neteq/dsp_helper.cc b/modules/audio_coding/neteq/dsp_helper.cc index 05b0f70bcf..91979f2d48 100644 --- a/modules/audio_coding/neteq/dsp_helper.cc +++ b/modules/audio_coding/neteq/dsp_helper.cc @@ -89,7 +89,7 @@ int DspHelper::RampSignal(AudioMultiVector* signal, size_t length, int factor, int increment) { - assert(start_index + length <= signal->Size()); + RTC_DCHECK_LE(start_index + length, signal->Size()); if (start_index + length > signal->Size()) { // Wrong parameters. Do nothing and return the scale factor unaltered. return factor; @@ -355,7 +355,7 @@ int DspHelper::DownsampleTo4kHz(const int16_t* input, break; } default: { - assert(false); + RTC_NOTREACHED(); return -1; } } diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc index 8df2c7afde..ffaa4c74aa 100644 --- a/modules/audio_coding/neteq/expand.cc +++ b/modules/audio_coding/neteq/expand.cc @@ -48,9 +48,10 @@ Expand::Expand(BackgroundNoise* background_noise, stop_muting_(false), expand_duration_samples_(0), channel_parameters_(new ChannelParameters[num_channels_]) { - assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000); - assert(fs <= static_cast(kMaxSampleRate)); // Should not be possible. - assert(num_channels_ > 0); + RTC_DCHECK(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000); + RTC_DCHECK_LE(fs, + static_cast(kMaxSampleRate)); // Should not be possible. 
+ RTC_DCHECK_GT(num_channels_, 0); memset(expand_lags_, 0, sizeof(expand_lags_)); Reset(); } @@ -91,7 +92,7 @@ int Expand::Process(AudioMultiVector* output) { // Extract a noise segment. size_t rand_length = max_lag_; // This only applies to SWB where length could be larger than 256. - assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30); + RTC_DCHECK_LE(rand_length, kMaxSampleRate / 8000 * 120 + 30); GenerateRandomVector(2, rand_length, random_vector); } @@ -110,8 +111,8 @@ int Expand::Process(AudioMultiVector* output) { ChannelParameters& parameters = channel_parameters_[channel_ix]; if (current_lag_index_ == 0) { // Use only expand_vector0. - assert(expansion_vector_position + temp_length <= - parameters.expand_vector0.Size()); + RTC_DCHECK_LE(expansion_vector_position + temp_length, + parameters.expand_vector0.Size()); parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position, voiced_vector_storage); } else if (current_lag_index_ == 1) { @@ -126,10 +127,10 @@ int Expand::Process(AudioMultiVector* output) { voiced_vector_storage, temp_length); } else if (current_lag_index_ == 2) { // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1. - assert(expansion_vector_position + temp_length <= - parameters.expand_vector0.Size()); - assert(expansion_vector_position + temp_length <= - parameters.expand_vector1.Size()); + RTC_DCHECK_LE(expansion_vector_position + temp_length, + parameters.expand_vector0.Size()); + RTC_DCHECK_LE(expansion_vector_position + temp_length, + parameters.expand_vector1.Size()); std::unique_ptr temp_0(new int16_t[temp_length]); parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position, @@ -303,7 +304,7 @@ int Expand::Process(AudioMultiVector* output) { if (channel_ix == 0) { output->AssertSize(current_lag); } else { - assert(output->Size() == current_lag); + RTC_DCHECK_EQ(output->Size(), current_lag); } (*output)[channel_ix].OverwriteAt(temp_data, current_lag, 0); } @@ -465,7 +466,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { size_t start_index = std::min(distortion_lag, correlation_lag); size_t correlation_lags = static_cast( WEBRTC_SPL_ABS_W16((distortion_lag - correlation_lag)) + 1); - assert(correlation_lags <= static_cast(99 * fs_mult + 1)); + RTC_DCHECK_LE(correlation_lags, static_cast(99 * fs_mult + 1)); for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) { ChannelParameters& parameters = channel_parameters_[channel_ix]; @@ -659,7 +660,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { // |kRandomTableSize|. memcpy(random_vector, RandomVector::kRandomTable, sizeof(int16_t) * RandomVector::kRandomTableSize); - assert(noise_length <= kMaxSampleRate / 8000 * 120 + 30); + RTC_DCHECK_LE(noise_length, kMaxSampleRate / 8000 * 120 + 30); random_vector_->IncreaseSeedIncrement(2); random_vector_->Generate( noise_length - RandomVector::kRandomTableSize, diff --git a/modules/audio_coding/neteq/expand.h b/modules/audio_coding/neteq/expand.h index 45d78d0823..3b0cea3d93 100644 --- a/modules/audio_coding/neteq/expand.h +++ b/modules/audio_coding/neteq/expand.h @@ -59,7 +59,7 @@ class Expand { // Returns the mute factor for |channel|. 
int16_t MuteFactor(size_t channel) const { - assert(channel < num_channels_); + RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].mute_factor; } diff --git a/modules/audio_coding/neteq/merge.cc b/modules/audio_coding/neteq/merge.cc index 5bf239bfc5..770e2e3590 100644 --- a/modules/audio_coding/neteq/merge.cc +++ b/modules/audio_coding/neteq/merge.cc @@ -38,7 +38,7 @@ Merge::Merge(int fs_hz, expand_(expand), sync_buffer_(sync_buffer), expanded_(num_channels_) { - assert(num_channels_ > 0); + RTC_DCHECK_GT(num_channels_, 0); } Merge::~Merge() = default; @@ -47,9 +47,9 @@ size_t Merge::Process(int16_t* input, size_t input_length, AudioMultiVector* output) { // TODO(hlundin): Change to an enumerator and skip assert. - assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 || - fs_hz_ == 48000); - assert(fs_hz_ <= kMaxSampleRate); // Should not be possible. + RTC_DCHECK(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 || + fs_hz_ == 48000); + RTC_DCHECK_LE(fs_hz_, kMaxSampleRate); // Should not be possible. if (input_length == 0) { return 0; } @@ -64,7 +64,7 @@ size_t Merge::Process(int16_t* input, input_vector.PushBackInterleaved( rtc::ArrayView(input, input_length)); size_t input_length_per_channel = input_vector.Size(); - assert(input_length_per_channel == input_length / num_channels_); + RTC_DCHECK_EQ(input_length_per_channel, input_length / num_channels_); size_t best_correlation_index = 0; size_t output_length = 0; @@ -142,10 +142,10 @@ size_t Merge::Process(int16_t* input, output_length = best_correlation_index + input_length_per_channel; if (channel == 0) { - assert(output->Empty()); // Output should be empty at this point. + RTC_DCHECK(output->Empty()); // Output should be empty at this point. output->AssertSize(output_length); } else { - assert(output->Size() == output_length); + RTC_DCHECK_EQ(output->Size(), output_length); } (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0); } @@ -165,7 +165,7 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) { // Check how much data that is left since earlier. *old_length = sync_buffer_->FutureLength(); // Should never be less than overlap_length. - assert(*old_length >= expand_->overlap_length()); + RTC_DCHECK_GE(*old_length, expand_->overlap_length()); // Generate data to merge the overlap with using expand. expand_->SetParametersForMergeAfterExpand(); @@ -182,7 +182,7 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) { // This is the truncated length. } // This assert should always be true thanks to the if statement above. - assert(210 * kMaxSampleRate / 8000 >= *old_length); + RTC_DCHECK_GE(210 * kMaxSampleRate / 8000, *old_length); AudioMultiVector expanded_temp(num_channels_); expand_->Process(&expanded_temp); @@ -191,8 +191,8 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) { expanded_.Clear(); // Copy what is left since earlier into the expanded vector. expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index()); - assert(expanded_.Size() == *old_length); - assert(expanded_temp.Size() > 0); + RTC_DCHECK_EQ(expanded_.Size(), *old_length); + RTC_DCHECK_GT(expanded_temp.Size(), 0); // Do "ugly" copy and paste from the expanded in order to generate more data // to correlate (but not interpolate) with. 
const size_t required_length = static_cast((120 + 80 + 2) * fs_mult_); @@ -204,7 +204,7 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) { // Trim the length to exactly |required_length|. expanded_.PopBack(expanded_.Size() - required_length); } - assert(expanded_.Size() >= required_length); + RTC_DCHECK_GE(expanded_.Size(), required_length); return required_length; } @@ -373,7 +373,7 @@ size_t Merge::CorrelateAndPeakSearch(size_t start_position, while (((best_correlation_index + input_length) < (timestamps_per_call_ + expand_->overlap_length())) || ((best_correlation_index + input_length) < start_position)) { - assert(false); // Should never happen. + RTC_NOTREACHED(); // Should never happen. best_correlation_index += expand_period; // Jump one lag ahead. } return best_correlation_index; diff --git a/modules/audio_coding/neteq/nack_tracker.cc b/modules/audio_coding/neteq/nack_tracker.cc index 8358769804..9a873eee07 100644 --- a/modules/audio_coding/neteq/nack_tracker.cc +++ b/modules/audio_coding/neteq/nack_tracker.cc @@ -44,7 +44,7 @@ NackTracker* NackTracker::Create(int nack_threshold_packets) { } void NackTracker::UpdateSampleRate(int sample_rate_hz) { - assert(sample_rate_hz > 0); + RTC_DCHECK_GT(sample_rate_hz, 0); sample_rate_khz_ = sample_rate_hz / 1000; } @@ -120,9 +120,9 @@ uint32_t NackTracker::EstimateTimestamp(uint16_t sequence_num) { } void NackTracker::AddToList(uint16_t sequence_number_current_received_rtp) { - assert(!any_rtp_decoded_ || - IsNewerSequenceNumber(sequence_number_current_received_rtp, - sequence_num_last_decoded_rtp_)); + RTC_DCHECK(!any_rtp_decoded_ || + IsNewerSequenceNumber(sequence_number_current_received_rtp, + sequence_num_last_decoded_rtp_)); // Packets with sequence numbers older than |upper_bound_missing| are // considered missing, and the rest are considered late. @@ -164,7 +164,7 @@ void NackTracker::UpdateLastDecodedPacket(uint16_t sequence_number, ++it) it->second.time_to_play_ms = TimeToPlay(it->second.estimated_timestamp); } else { - assert(sequence_number == sequence_num_last_decoded_rtp_); + RTC_DCHECK_EQ(sequence_number, sequence_num_last_decoded_rtp_); // Same sequence number as before. 10 ms is elapsed, update estimations for // time-to-play. 
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc index 6ac157fc12..8b07d7e47c 100644 --- a/modules/audio_coding/neteq/neteq_impl.cc +++ b/modules/audio_coding/neteq/neteq_impl.cc @@ -258,6 +258,7 @@ void SetAudioFrameActivityAndType(bool vad_enabled, int NetEqImpl::GetAudio(AudioFrame* audio_frame, bool* muted, + int* current_sample_rate_hz, absl::optional action_override) { TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio"); MutexLock lock(&mutex_); @@ -296,6 +297,11 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame, } } + if (current_sample_rate_hz) { + *current_sample_rate_hz = delayed_last_output_sample_rate_hz_.value_or( + last_output_sample_rate_hz_); + } + return kOK; } @@ -337,7 +343,7 @@ void NetEqImpl::RemoveAllPayloadTypes() { bool NetEqImpl::SetMinimumDelay(int delay_ms) { MutexLock lock(&mutex_); if (delay_ms >= 0 && delay_ms <= 10000) { - assert(controller_.get()); + RTC_DCHECK(controller_.get()); return controller_->SetMinimumDelay( std::max(delay_ms - output_delay_chain_ms_, 0)); } @@ -347,7 +353,7 @@ bool NetEqImpl::SetMinimumDelay(int delay_ms) { bool NetEqImpl::SetMaximumDelay(int delay_ms) { MutexLock lock(&mutex_); if (delay_ms >= 0 && delay_ms <= 10000) { - assert(controller_.get()); + RTC_DCHECK(controller_.get()); return controller_->SetMaximumDelay( std::max(delay_ms - output_delay_chain_ms_, 0)); } @@ -386,7 +392,7 @@ int NetEqImpl::FilteredCurrentDelayMs() const { int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) { MutexLock lock(&mutex_); - assert(decoder_database_.get()); + RTC_DCHECK(decoder_database_.get()); *stats = CurrentNetworkStatisticsInternal(); stats_->GetNetworkStatistics(decoder_frame_length_, stats); // Compensate for output delay chain. @@ -403,13 +409,13 @@ NetEqNetworkStatistics NetEqImpl::CurrentNetworkStatistics() const { } NetEqNetworkStatistics NetEqImpl::CurrentNetworkStatisticsInternal() const { - assert(decoder_database_.get()); + RTC_DCHECK(decoder_database_.get()); NetEqNetworkStatistics stats; const size_t total_samples_in_buffers = packet_buffer_->NumSamplesInBuffer(decoder_frame_length_) + sync_buffer_->FutureLength(); - assert(controller_.get()); + RTC_DCHECK(controller_.get()); stats.preferred_buffer_size_ms = controller_->TargetLevelMs(); stats.jitter_peaks_found = controller_->PeakFound(); RTC_DCHECK_GT(fs_hz_, 0); @@ -443,13 +449,13 @@ NetEqOperationsAndState NetEqImpl::GetOperationsAndState() const { void NetEqImpl::EnableVad() { MutexLock lock(&mutex_); - assert(vad_.get()); + RTC_DCHECK(vad_.get()); vad_->Enable(); } void NetEqImpl::DisableVad() { MutexLock lock(&mutex_); - assert(vad_.get()); + RTC_DCHECK(vad_.get()); vad_->Disable(); } @@ -500,8 +506,8 @@ void NetEqImpl::FlushBuffers() { MutexLock lock(&mutex_); RTC_LOG(LS_VERBOSE) << "FlushBuffers"; packet_buffer_->Flush(stats_.get()); - assert(sync_buffer_.get()); - assert(expand_.get()); + RTC_DCHECK(sync_buffer_.get()); + RTC_DCHECK(expand_.get()); sync_buffer_->Flush(); sync_buffer_->set_next_index(sync_buffer_->next_index() - expand_->overlap_length()); @@ -622,8 +628,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header, if (update_sample_rate_and_channels) { nack_->Reset(); } - nack_->UpdateLastReceivedPacket(rtp_header.sequenceNumber, - rtp_header.timestamp); + nack_->UpdateLastReceivedPacket(main_sequence_number, main_timestamp); } // Check for RED payload type, and separate payloads into several packets. 
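The hunk above adds an optional `current_sample_rate_hz` out-parameter to NetEqImpl::GetAudio() and fills it from the delayed/last output sample rate. A minimal caller-side sketch (illustrative only; the `neteq` instance and the surrounding setup are assumed, not taken from this patch):

  webrtc::AudioFrame frame;
  bool muted = false;
  int current_sample_rate_hz = 0;
  // Passing nullptr (the default) keeps the previous behavior.
  if (neteq->GetAudio(&frame, &muted, &current_sample_rate_hz) ==
      webrtc::NetEq::kOK) {
    // current_sample_rate_hz now matches the rate of `frame`, with no extra
    // call back into NetEq.
  }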
@@ -792,12 +797,12 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header, size_t channels = 1; if (!decoder_database_->IsComfortNoise(payload_type)) { AudioDecoder* decoder = decoder_database_->GetDecoder(payload_type); - assert(decoder); // Payloads are already checked to be valid. + RTC_DCHECK(decoder); // Payloads are already checked to be valid. channels = decoder->Channels(); } const DecoderDatabase::DecoderInfo* decoder_info = decoder_database_->GetDecoderInfo(payload_type); - assert(decoder_info); + RTC_DCHECK(decoder_info); if (decoder_info->SampleRateHz() != fs_hz_ || channels != algorithm_buffer_->Channels()) { SetSampleRateAndChannels(decoder_info->SampleRateHz(), channels); @@ -811,7 +816,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header, const DecoderDatabase::DecoderInfo* dec_info = decoder_database_->GetDecoderInfo(main_payload_type); - assert(dec_info); // Already checked that the payload type is known. + RTC_DCHECK(dec_info); // Already checked that the payload type is known. NetEqController::PacketArrivedInfo info; info.is_cng_or_dtmf = dec_info->IsComfortNoise() || dec_info->IsDtmf(); @@ -889,7 +894,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, int decode_return_value = Decode(&packet_list, &operation, &length, &speech_type); - assert(vad_.get()); + RTC_DCHECK(vad_.get()); bool sid_frame_available = (operation == Operation::kRfc3389Cng && !packet_list.empty()); vad_->Update(decoded_buffer_.get(), static_cast(length), speech_type, @@ -960,7 +965,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, } case Operation::kUndefined: { RTC_LOG(LS_ERROR) << "Invalid operation kUndefined."; - assert(false); // This should not happen. + RTC_NOTREACHED(); // This should not happen. last_mode_ = Mode::kError; return kInvalidOperation; } @@ -1096,7 +1101,7 @@ int NetEqImpl::GetDecision(Operation* operation, *play_dtmf = false; *operation = Operation::kUndefined; - assert(sync_buffer_.get()); + RTC_DCHECK(sync_buffer_.get()); uint32_t end_timestamp = sync_buffer_->end_timestamp(); if (!new_codec_) { const uint32_t five_seconds_samples = 5 * fs_hz_; @@ -1123,7 +1128,7 @@ int NetEqImpl::GetDecision(Operation* operation, // Don't use this packet, discard it. if (packet_buffer_->DiscardNextPacket(stats_.get()) != PacketBuffer::kOK) { - assert(false); // Must be ok by design. + RTC_NOTREACHED(); // Must be ok by design. } // Check buffer again. if (!new_codec_) { @@ -1134,7 +1139,7 @@ int NetEqImpl::GetDecision(Operation* operation, } } - assert(expand_.get()); + RTC_DCHECK(expand_.get()); const int samples_left = static_cast(sync_buffer_->FutureLength() - expand_->overlap_length()); if (last_mode_ == Mode::kAccelerateSuccess || @@ -1154,8 +1159,8 @@ int NetEqImpl::GetDecision(Operation* operation, } // Get instruction. - assert(sync_buffer_.get()); - assert(expand_.get()); + RTC_DCHECK(sync_buffer_.get()); + RTC_DCHECK(expand_.get()); generated_noise_samples = generated_noise_stopwatch_ ? generated_noise_stopwatch_->ElapsedTicks() * output_size_samples_ + @@ -1223,7 +1228,7 @@ int NetEqImpl::GetDecision(Operation* operation, // Check conditions for reset. if (new_codec_ || *operation == Operation::kUndefined) { // The only valid reason to get kUndefined is that new_codec_ is set. 
- assert(new_codec_); + RTC_DCHECK(new_codec_); if (*play_dtmf && !packet) { timestamp_ = dtmf_event->timestamp; } else { @@ -1395,7 +1400,7 @@ int NetEqImpl::Decode(PacketList* packet_list, uint8_t payload_type = packet.payload_type; if (!decoder_database_->IsComfortNoise(payload_type)) { decoder = decoder_database_->GetDecoder(payload_type); - assert(decoder); + RTC_DCHECK(decoder); if (!decoder) { RTC_LOG(LS_WARNING) << "Unknown payload type " << static_cast(payload_type); @@ -1408,7 +1413,7 @@ int NetEqImpl::Decode(PacketList* packet_list, // We have a new decoder. Re-init some values. const DecoderDatabase::DecoderInfo* decoder_info = decoder_database_->GetDecoderInfo(payload_type); - assert(decoder_info); + RTC_DCHECK(decoder_info); if (!decoder_info) { RTC_LOG(LS_WARNING) << "Unknown payload type " << static_cast(payload_type); @@ -1480,8 +1485,8 @@ int NetEqImpl::Decode(PacketList* packet_list, // Don't increment timestamp if codec returned CNG speech type // since in this case, the we will increment the CNGplayedTS counter. // Increase with number of samples per channel. - assert(*decoded_length == 0 || - (decoder && decoder->Channels() == sync_buffer_->Channels())); + RTC_DCHECK(*decoded_length == 0 || + (decoder && decoder->Channels() == sync_buffer_->Channels())); sync_buffer_->IncreaseEndTimestamp( *decoded_length / static_cast(sync_buffer_->Channels())); } @@ -1530,16 +1535,16 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, // Do decoding. while (!packet_list->empty() && !decoder_database_->IsComfortNoise( packet_list->front().payload_type)) { - assert(decoder); // At this point, we must have a decoder object. + RTC_DCHECK(decoder); // At this point, we must have a decoder object. // The number of channels in the |sync_buffer_| should be the same as the // number decoder channels. - assert(sync_buffer_->Channels() == decoder->Channels()); - assert(decoded_buffer_length_ >= kMaxFrameSize * decoder->Channels()); - assert(operation == Operation::kNormal || - operation == Operation::kAccelerate || - operation == Operation::kFastAccelerate || - operation == Operation::kMerge || - operation == Operation::kPreemptiveExpand); + RTC_DCHECK_EQ(sync_buffer_->Channels(), decoder->Channels()); + RTC_DCHECK_GE(decoded_buffer_length_, kMaxFrameSize * decoder->Channels()); + RTC_DCHECK(operation == Operation::kNormal || + operation == Operation::kAccelerate || + operation == Operation::kFastAccelerate || + operation == Operation::kMerge || + operation == Operation::kPreemptiveExpand); auto opt_result = packet_list->front().frame->Decode( rtc::ArrayView(&decoded_buffer_[*decoded_length], @@ -1576,9 +1581,10 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, // If the list is not empty at this point, either a decoding error terminated // the while-loop, or list must hold exactly one CNG packet. 
- assert(packet_list->empty() || *decoded_length < 0 || - (packet_list->size() == 1 && decoder_database_->IsComfortNoise( - packet_list->front().payload_type))); + RTC_DCHECK( + packet_list->empty() || *decoded_length < 0 || + (packet_list->size() == 1 && + decoder_database_->IsComfortNoise(packet_list->front().payload_type))); return 0; } @@ -1586,7 +1592,7 @@ void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length, AudioDecoder::SpeechType speech_type, bool play_dtmf) { - assert(normal_.get()); + RTC_DCHECK(normal_.get()); normal_->Process(decoded_buffer, decoded_length, last_mode_, algorithm_buffer_.get()); if (decoded_length != 0) { @@ -1609,7 +1615,7 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length, AudioDecoder::SpeechType speech_type, bool play_dtmf) { - assert(merge_.get()); + RTC_DCHECK(merge_.get()); size_t new_length = merge_->Process(decoded_buffer, decoded_length, algorithm_buffer_.get()); // Correction can be negative. @@ -1770,7 +1776,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, sync_buffer_->Size() - borrowed_samples_per_channel); sync_buffer_->PushFrontZeros(borrowed_samples_per_channel - length); algorithm_buffer_->PopFront(length); - assert(algorithm_buffer_->Empty()); + RTC_DCHECK(algorithm_buffer_->Empty()); } else { sync_buffer_->ReplaceAtIndex( *algorithm_buffer_, borrowed_samples_per_channel, @@ -1859,7 +1865,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer, int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) { if (!packet_list->empty()) { // Must have exactly one SID frame at this point. - assert(packet_list->size() == 1); + RTC_DCHECK_EQ(packet_list->size(), 1); const Packet& packet = packet_list->front(); if (!decoder_database_->IsComfortNoise(packet.payload_type)) { RTC_LOG(LS_ERROR) << "Trying to decode non-CNG payload as CNG."; @@ -1942,14 +1948,14 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) { // // it must be copied to the speech buffer. // // TODO(hlundin): This code seems incorrect. (Legacy.) Write test and // // verify correct operation. - // assert(false); + // RTC_NOTREACHED(); // // Must generate enough data to replace all of the |sync_buffer_| // // "future". // int required_length = sync_buffer_->FutureLength(); - // assert(dtmf_tone_generator_->initialized()); + // RTC_DCHECK(dtmf_tone_generator_->initialized()); // dtmf_return_value = dtmf_tone_generator_->Generate(required_length, // algorithm_buffer_); - // assert((size_t) required_length == algorithm_buffer_->Size()); + // RTC_DCHECK((size_t) required_length == algorithm_buffer_->Size()); // if (dtmf_return_value < 0) { // algorithm_buffer_->Zeros(output_size_samples_); // return dtmf_return_value; @@ -1959,7 +1965,7 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) { // // data. // // TODO(hlundin): It seems that this overwriting has gone lost. // // Not adapted for multi-channel yet. 
- // assert(algorithm_buffer_->Channels() == 1); + // RTC_DCHECK(algorithm_buffer_->Channels() == 1); // if (algorithm_buffer_->Channels() != 1) { // RTC_LOG(LS_WARNING) << "DTMF not supported for more than one channel"; // return kStereoNotSupported; @@ -2001,7 +2007,7 @@ int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, if (dtmf_return_value == 0) { dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length, &dtmf_output); - assert(overdub_length == dtmf_output.Size()); + RTC_DCHECK_EQ(overdub_length, dtmf_output.Size()); } dtmf_output.ReadInterleaved(overdub_length, &output[out_index]); return dtmf_return_value < 0 ? dtmf_return_value : 0; @@ -2032,7 +2038,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples, next_packet = nullptr; if (!packet) { RTC_LOG(LS_ERROR) << "Should always be able to extract a packet here"; - assert(false); // Should always be able to extract a packet here. + RTC_NOTREACHED(); // Should always be able to extract a packet here. return -1; } const uint64_t waiting_time_ms = packet->waiting_time->ElapsedMs(); @@ -2125,8 +2131,9 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) { RTC_LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " " << channels; // TODO(hlundin): Change to an enumerator and skip assert. - assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000); - assert(channels > 0); + RTC_DCHECK(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || + fs_hz == 48000); + RTC_DCHECK_GT(channels, 0); // Before changing the sample rate, end and report any ongoing expand event. stats_->EndExpandEvent(fs_hz_); @@ -2142,7 +2149,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) { cng_decoder->Reset(); // Reinit post-decode VAD with new sample rate. - assert(vad_.get()); // Cannot be NULL here. + RTC_DCHECK(vad_.get()); // Cannot be NULL here. vad_->Init(); // Delete algorithm buffer and create a new one. @@ -2185,8 +2192,8 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) { } NetEqImpl::OutputType NetEqImpl::LastOutputType() { - assert(vad_.get()); - assert(expand_.get()); + RTC_DCHECK(vad_.get()); + RTC_DCHECK(expand_.get()); if (last_mode_ == Mode::kCodecInternalCng || last_mode_ == Mode::kRfc3389Cng) { return OutputType::kCNG; diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h index e130422a30..88da6dcbd5 100644 --- a/modules/audio_coding/neteq/neteq_impl.h +++ b/modules/audio_coding/neteq/neteq_impl.h @@ -133,6 +133,7 @@ class NetEqImpl : public webrtc::NetEq { int GetAudio( AudioFrame* audio_frame, bool* muted, + int* current_sample_rate_hz = nullptr, absl::optional action_override = absl::nullopt) override; void SetCodecs(const std::map& codecs) override; diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc index b3c25cae2d..53b4dae17d 100644 --- a/modules/audio_coding/neteq/neteq_impl_unittest.cc +++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc @@ -736,6 +736,15 @@ class NetEqImplTestSampleRateParameter const int initial_sample_rate_hz_; }; +class NetEqImplTestSdpFormatParameter + : public NetEqImplTest, + public testing::WithParamInterface { + protected: + NetEqImplTestSdpFormatParameter() + : NetEqImplTest(), sdp_format_(GetParam()) {} + const SdpAudioFormat sdp_format_; +}; + // This test does the following: // 0. Set up NetEq with initial sample rate given by test parameter, and a codec // sample rate of 16000. 
@@ -919,6 +928,67 @@ INSTANTIATE_TEST_SUITE_P(SampleRates, NetEqImplTestSampleRateParameter, testing::Values(8000, 16000, 32000, 48000)); +TEST_P(NetEqImplTestSdpFormatParameter, GetNackListScaledTimestamp) { + UseNoMocks(); + CreateInstance(); + + neteq_->EnableNack(128); + + const uint8_t kPayloadType = 17; // Just an arbitrary number. + const int kPayloadSampleRateHz = sdp_format_.clockrate_hz; + const size_t kPayloadLengthSamples = + static_cast(10 * kPayloadSampleRateHz / 1000); // 10 ms. + const size_t kPayloadLengthBytes = kPayloadLengthSamples * 2; + std::vector payload(kPayloadLengthBytes, 0); + RTPHeader rtp_header; + rtp_header.payloadType = kPayloadType; + rtp_header.sequenceNumber = 0x1234; + rtp_header.timestamp = 0x12345678; + rtp_header.ssrc = 0x87654321; + + EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType, sdp_format_)); + + auto insert_packet = [&](bool lost = false) { + rtp_header.sequenceNumber++; + rtp_header.timestamp += kPayloadLengthSamples; + if (!lost) + EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload)); + }; + + // Insert and decode 10 packets. + for (size_t i = 0; i < 10; ++i) { + insert_packet(); + } + AudioFrame output; + size_t count_loops = 0; + do { + bool muted; + // Make sure we don't hang the test if we never go to PLC. + ASSERT_LT(++count_loops, 100u); + EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted)); + } while (output.speech_type_ == AudioFrame::kNormalSpeech); + + insert_packet(); + + insert_packet(/*lost=*/true); + + // Ensure packet gets marked as missing. + for (int i = 0; i < 5; ++i) { + insert_packet(); + } + + // Missing packet recoverable with 5ms RTT. + EXPECT_THAT(neteq_->GetNackList(5), Not(IsEmpty())); + + // No packets should have TimeToPlay > 500ms. + EXPECT_THAT(neteq_->GetNackList(500), IsEmpty()); +} + +INSTANTIATE_TEST_SUITE_P(GetNackList, + NetEqImplTestSdpFormatParameter, + testing::Values(SdpAudioFormat("g722", 8000, 1), + SdpAudioFormat("opus", 48000, 2))); + // This test verifies that NetEq can handle comfort noise and enters/quits codec // internal CNG mode properly. TEST_F(NetEqImplTest, CodecInternalCng) { diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc index 1369ead63c..bdd90e96cc 100644 --- a/modules/audio_coding/neteq/neteq_unittest.cc +++ b/modules/audio_coding/neteq/neteq_unittest.cc @@ -34,7 +34,6 @@ #include "rtc_base/ignore_wundef.h" #include "rtc_base/message_digest.h" #include "rtc_base/numerics/safe_conversions.h" -#include "rtc_base/string_encode.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/system/arch.h" #include "test/field_trial.h" @@ -83,17 +82,29 @@ TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) { const std::string input_rtp_file = webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"); - const std::string output_checksum = - PlatformChecksum("6c35140ce4d75874bdd60aa1872400b05fd05ca2", - "ab451bb8301d9a92fbf4de91556b56f1ea38b4ce", "not used", - "6c35140ce4d75874bdd60aa1872400b05fd05ca2", - "64b46bb3c1165537a880ae8404afce2efba456c0"); - - const std::string network_stats_checksum = - PlatformChecksum("90594d85fa31d3d9584d79293bf7aa4ee55ed751", - "77b9c3640b81aff6a38d69d07dd782d39c15321d", "not used", - "90594d85fa31d3d9584d79293bf7aa4ee55ed751", - "90594d85fa31d3d9584d79293bf7aa4ee55ed751"); + const std::string output_checksum = PlatformChecksum( +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. 
+#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86) + "8d9c177b7f2f9398c0944a851edffae214de2c56", +#else + "6c35140ce4d75874bdd60aa1872400b05fd05ca2", +#endif + "ab451bb8301d9a92fbf4de91556b56f1ea38b4ce", "not used", + "6c35140ce4d75874bdd60aa1872400b05fd05ca2", + "64b46bb3c1165537a880ae8404afce2efba456c0"); + + const std::string network_stats_checksum = PlatformChecksum( +// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different +// checksum. +#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86) + "8cc08e3cd6801dcba4fcc15eb4036c19296a140d", +#else + "90594d85fa31d3d9584d79293bf7aa4ee55ed751", +#endif + "77b9c3640b81aff6a38d69d07dd782d39c15321d", "not used", + "90594d85fa31d3d9584d79293bf7aa4ee55ed751", + "90594d85fa31d3d9584d79293bf7aa4ee55ed751"); DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum, absl::GetFlag(FLAGS_gen_ref)); @@ -1066,7 +1077,7 @@ TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) { expected_target_delay += neteq_->TargetDelayMs() * 2 * kSamples; // We have two packets in the buffer and kAccelerate operation will // extract 20 ms of data. - neteq_->GetAudio(&out_frame_, &muted, NetEq::Operation::kAccelerate); + neteq_->GetAudio(&out_frame_, &muted, nullptr, NetEq::Operation::kAccelerate); // Check jitter buffer delay. NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics(); diff --git a/modules/audio_coding/neteq/red_payload_splitter.cc b/modules/audio_coding/neteq/red_payload_splitter.cc index f5cd9c29e4..2f21a5ff6c 100644 --- a/modules/audio_coding/neteq/red_payload_splitter.cc +++ b/modules/audio_coding/neteq/red_payload_splitter.cc @@ -41,7 +41,7 @@ bool RedPayloadSplitter::SplitRed(PacketList* packet_list) { PacketList::iterator it = packet_list->begin(); while (it != packet_list->end()) { const Packet& red_packet = *it; - assert(!red_packet.payload.empty()); + RTC_DCHECK(!red_packet.payload.empty()); const uint8_t* payload_ptr = red_packet.payload.data(); size_t payload_length = red_packet.payload.size(); diff --git a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc index 1cf616748f..7275232daa 100644 --- a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc +++ b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc @@ -103,7 +103,7 @@ Packet CreateRedPayload(size_t num_payloads, rtc::checked_cast((num_payloads - i - 1) * timestamp_offset); *payload_ptr = this_offset >> 6; ++payload_ptr; - assert(kPayloadLength <= 1023); // Max length described by 10 bits. + RTC_DCHECK_LE(kPayloadLength, 1023); // Max length described by 10 bits. *payload_ptr = ((this_offset & 0x3F) << 2) | (kPayloadLength >> 8); ++payload_ptr; *payload_ptr = kPayloadLength & 0xFF; diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc index 708780a8a8..12a0e3c9ec 100644 --- a/modules/audio_coding/neteq/statistics_calculator.cc +++ b/modules/audio_coding/neteq/statistics_calculator.cc @@ -375,7 +375,7 @@ uint16_t StatisticsCalculator::CalculateQ14Ratio(size_t numerator, return 0; } else if (numerator < denominator) { // Ratio must be smaller than 1 in Q14. - assert((numerator << 14) / denominator < (1 << 14)); + RTC_DCHECK_LT((numerator << 14) / denominator, (1 << 14)); return static_cast((numerator << 14) / denominator); } else { // Will not produce a ratio larger than 1, since this is probably an error. 
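As a quick sanity check on the Q14 arithmetic guarded by the RTC_DCHECK_LT in CalculateQ14Ratio() above (values purely illustrative): 1.0 in Q14 is 1 << 14 = 16384, so a numerator/denominator pair of 40/160 gives (40 << 14) / 160 = 4096, i.e. 0.25, which is below 16384 whenever numerator < denominator.

  static_assert((40 << 14) / 160 == 4096, "0.25 expressed in Q14");
  static_assert(4096 < (1 << 14), "ratio below 1.0, as the DCHECK requires");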
diff --git a/modules/audio_coding/neteq/sync_buffer.cc b/modules/audio_coding/neteq/sync_buffer.cc index 4949bb201f..73e0628ea6 100644 --- a/modules/audio_coding/neteq/sync_buffer.cc +++ b/modules/audio_coding/neteq/sync_buffer.cc @@ -28,7 +28,7 @@ void SyncBuffer::PushBack(const AudioMultiVector& append_this) { next_index_ -= samples_added; } else { // This means that we are pushing out future data that was never used. - // assert(false); + // RTC_NOTREACHED(); // TODO(hlundin): This assert must be disabled to support 60 ms frames. // This should not happen even for 60 ms frames, but it does. Investigate // why. diff --git a/modules/audio_coding/neteq/time_stretch.cc b/modules/audio_coding/neteq/time_stretch.cc index ba24e0bfc3..b7680292bd 100644 --- a/modules/audio_coding/neteq/time_stretch.cc +++ b/modules/audio_coding/neteq/time_stretch.cc @@ -66,7 +66,7 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input, DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks, fs_mult_, &peak_index, &peak_value); // Assert that |peak_index| stays within boundaries. - assert(peak_index <= (2 * kCorrelationLen - 1) * fs_mult_); + RTC_DCHECK_LE(peak_index, (2 * kCorrelationLen - 1) * fs_mult_); // Compensate peak_index for displaced starting position. The displacement // happens in AutoCorrelation(). Here, |kMinLag| is in the down-sampled 4 kHz @@ -74,8 +74,9 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input, // multiplication by fs_mult_ * 2. peak_index += kMinLag * fs_mult_ * 2; // Assert that |peak_index| stays within boundaries. - assert(peak_index >= static_cast(20 * fs_mult_)); - assert(peak_index <= 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_); + RTC_DCHECK_GE(peak_index, static_cast(20 * fs_mult_)); + RTC_DCHECK_LE(peak_index, + 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_); // Calculate scaling to ensure that |peak_index| samples can be square-summed // without overflowing. diff --git a/modules/audio_coding/neteq/time_stretch.h b/modules/audio_coding/neteq/time_stretch.h index aede9cadf3..26d295f669 100644 --- a/modules/audio_coding/neteq/time_stretch.h +++ b/modules/audio_coding/neteq/time_stretch.h @@ -42,9 +42,9 @@ class TimeStretch { num_channels_(num_channels), background_noise_(background_noise), max_input_value_(0) { - assert(sample_rate_hz_ == 8000 || sample_rate_hz_ == 16000 || - sample_rate_hz_ == 32000 || sample_rate_hz_ == 48000); - assert(num_channels_ > 0); + RTC_DCHECK(sample_rate_hz_ == 8000 || sample_rate_hz_ == 16000 || + sample_rate_hz_ == 32000 || sample_rate_hz_ == 48000); + RTC_DCHECK_GT(num_channels_, 0); memset(auto_correlation_, 0, sizeof(auto_correlation_)); } diff --git a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc index 6b325b6c5c..6cbba20e5f 100644 --- a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc +++ b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc @@ -37,14 +37,15 @@ ConstantPcmPacketSource::ConstantPcmPacketSource(size_t payload_len_samples, std::unique_ptr ConstantPcmPacketSource::NextPacket() { RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes); - uint8_t* packet_memory = new uint8_t[packet_len_bytes_]; + rtc::CopyOnWriteBuffer packet_buffer(packet_len_bytes_); + uint8_t* packet_memory = packet_buffer.MutableData(); // Fill the payload part of the packet memory with the pre-encoded value. 
for (unsigned i = 0; i < 2 * payload_len_samples_; ++i) packet_memory[kHeaderLenBytes + i] = encoded_sample_[i % 2]; WriteHeader(packet_memory); // |packet| assumes ownership of |packet_memory|. - std::unique_ptr packet( - new Packet(packet_memory, packet_len_bytes_, next_arrival_time_ms_)); + auto packet = + std::make_unique(std::move(packet_buffer), next_arrival_time_ms_); next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_; return packet; } diff --git a/modules/audio_coding/neteq/tools/neteq_test.cc b/modules/audio_coding/neteq/tools/neteq_test.cc index 0988d2c8e5..22f5ad6931 100644 --- a/modules/audio_coding/neteq/tools/neteq_test.cc +++ b/modules/audio_coding/neteq/tools/neteq_test.cc @@ -172,7 +172,7 @@ NetEqTest::SimulationStepResult NetEqTest::RunToNextGetAudio() { } AudioFrame out_frame; bool muted; - int error = neteq_->GetAudio(&out_frame, &muted, + int error = neteq_->GetAudio(&out_frame, &muted, nullptr, ActionToOperations(next_action_)); next_action_ = absl::nullopt; RTC_CHECK(!muted) << "The code does not handle enable_muted_state"; diff --git a/modules/audio_coding/neteq/tools/output_audio_file.h b/modules/audio_coding/neteq/tools/output_audio_file.h index d729c9cbeb..7220a36d69 100644 --- a/modules/audio_coding/neteq/tools/output_audio_file.h +++ b/modules/audio_coding/neteq/tools/output_audio_file.h @@ -36,7 +36,7 @@ class OutputAudioFile : public AudioSink { } bool WriteArray(const int16_t* audio, size_t num_samples) override { - assert(out_file_); + RTC_DCHECK(out_file_); return fwrite(audio, sizeof(*audio), num_samples, out_file_) == num_samples; } diff --git a/modules/audio_coding/neteq/tools/packet.cc b/modules/audio_coding/neteq/tools/packet.cc index 48959e4f62..e540173f43 100644 --- a/modules/audio_coding/neteq/tools/packet.cc +++ b/modules/audio_coding/neteq/tools/packet.cc @@ -10,30 +10,22 @@ #include "modules/audio_coding/neteq/tools/packet.h" -#include - -#include - -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "api/array_view.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "rtc_base/checks.h" +#include "rtc_base/copy_on_write_buffer.h" namespace webrtc { namespace test { -using webrtc::RtpUtility::RtpHeaderParser; - -Packet::Packet(uint8_t* packet_memory, - size_t allocated_bytes, +Packet::Packet(rtc::CopyOnWriteBuffer packet, size_t virtual_packet_length_bytes, double time_ms, - const RtpUtility::RtpHeaderParser& parser, - const RtpHeaderExtensionMap* extension_map /*= nullptr*/) - : payload_memory_(packet_memory), - packet_length_bytes_(allocated_bytes), + const RtpHeaderExtensionMap* extension_map) + : packet_(std::move(packet)), virtual_packet_length_bytes_(virtual_packet_length_bytes), - virtual_payload_length_bytes_(0), time_ms_(time_ms), - valid_header_(ParseHeader(parser, extension_map)) {} + valid_header_(ParseHeader(extension_map)) {} Packet::Packet(const RTPHeader& header, size_t virtual_packet_length_bytes, @@ -45,23 +37,6 @@ Packet::Packet(const RTPHeader& header, time_ms_(time_ms), valid_header_(true) {} -Packet::Packet(uint8_t* packet_memory, size_t allocated_bytes, double time_ms) - : Packet(packet_memory, - allocated_bytes, - allocated_bytes, - time_ms, - RtpUtility::RtpHeaderParser(packet_memory, allocated_bytes)) {} - -Packet::Packet(uint8_t* packet_memory, - size_t allocated_bytes, - size_t virtual_packet_length_bytes, - double time_ms) - : Packet(packet_memory, - allocated_bytes, - virtual_packet_length_bytes, - time_ms, - RtpUtility::RtpHeaderParser(packet_memory, allocated_bytes)) {} - 
Packet::~Packet() = default;
bool Packet::ExtractRedHeaders(std::list<RTPHeader*>* headers) const {
@@ -77,9 +52,8 @@ bool Packet::ExtractRedHeaders(std::list<RTPHeader*>* headers) const {
 //                +-+-+-+-+-+-+-+-+
 //
- RTC_DCHECK(payload_);
- const uint8_t* payload_ptr = payload_;
- const uint8_t* payload_end_ptr = payload_ptr + payload_length_bytes_;
+ const uint8_t* payload_ptr = payload();
+ const uint8_t* payload_end_ptr = payload_ptr + payload_length_bytes();
 // Find all RED headers with the extension bit set to 1. That is, all headers
 // but the last one.
@@ -111,27 +85,43 @@ void Packet::DeleteRedHeaders(std::list<RTPHeader*>* headers) {
 }
 }
-bool Packet::ParseHeader(const RtpHeaderParser& parser,
- const RtpHeaderExtensionMap* extension_map) {
- bool valid_header = parser.Parse(&header_, extension_map);
-
- // Special case for dummy packets that have padding marked in the RTP header.
- // This causes the RTP header parser to report failure, but is fine in this
- // context.
- const bool header_only_with_padding =
- (header_.headerLength == packet_length_bytes_ &&
- header_.paddingLength > 0);
- if (!valid_header && !header_only_with_padding) {
- return false;
+bool Packet::ParseHeader(const RtpHeaderExtensionMap* extension_map) {
+ // Use RtpPacketReceived instead of RtpPacket because the former already has
+ // a converter into the legacy RTPHeader.
+ webrtc::RtpPacketReceived rtp_packet(extension_map);
+
+ // Dummy packets may have the padding bit set in the RTP header even though
+ // the payload (and hence the padding bytes) is absent, so handle padding
+ // manually. The regular RTP packet parser reports a failure for such packets,
+ // but that is fine in this context.
+ bool padding = (packet_[0] & 0b0010'0000);
+ size_t padding_size = 0;
+ if (padding) {
+  // Clear the padding bit to prevent failure when the RTP payload is omitted.
+ rtc::CopyOnWriteBuffer packet(packet_); + packet.MutableData()[0] &= ~0b0010'0000; + if (!rtp_packet.Parse(std::move(packet))) { + return false; + } + if (rtp_packet.payload_size() > 0) { + padding_size = rtp_packet.data()[rtp_packet.size() - 1]; + } + if (padding_size > rtp_packet.payload_size()) { + return false; + } + } else { + if (!rtp_packet.Parse(packet_)) { + return false; + } } - RTC_DCHECK_LE(header_.headerLength, packet_length_bytes_); - payload_ = &payload_memory_[header_.headerLength]; - RTC_DCHECK_GE(packet_length_bytes_, header_.headerLength); - payload_length_bytes_ = packet_length_bytes_ - header_.headerLength; - RTC_CHECK_GE(virtual_packet_length_bytes_, packet_length_bytes_); - RTC_DCHECK_GE(virtual_packet_length_bytes_, header_.headerLength); + rtp_payload_ = rtc::MakeArrayView(packet_.data() + rtp_packet.headers_size(), + rtp_packet.payload_size() - padding_size); + rtp_packet.GetHeader(&header_); + + RTC_CHECK_GE(virtual_packet_length_bytes_, rtp_packet.size()); + RTC_DCHECK_GE(virtual_packet_length_bytes_, rtp_packet.headers_size()); virtual_payload_length_bytes_ = - virtual_packet_length_bytes_ - header_.headerLength; + virtual_packet_length_bytes_ - rtp_packet.headers_size(); return true; } diff --git a/modules/audio_coding/neteq/tools/packet.h b/modules/audio_coding/neteq/tools/packet.h index f4189aae10..ef118d9f0b 100644 --- a/modules/audio_coding/neteq/tools/packet.h +++ b/modules/audio_coding/neteq/tools/packet.h @@ -12,62 +12,46 @@ #define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_ #include -#include -#include "api/rtp_headers.h" // NOLINT(build/include) +#include "api/array_view.h" +#include "api/rtp_headers.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "rtc_base/constructor_magic.h" +#include "rtc_base/copy_on_write_buffer.h" namespace webrtc { - -namespace RtpUtility { -class RtpHeaderParser; -} // namespace RtpUtility - namespace test { // Class for handling RTP packets in test applications. class Packet { public: // Creates a packet, with the packet payload (including header bytes) in - // |packet_memory|. The length of |packet_memory| is |allocated_bytes|. - // The new object assumes ownership of |packet_memory| and will delete it - // when the Packet object is deleted. The |time_ms| is an extra time - // associated with this packet, typically used to denote arrival time. - // The first bytes in |packet_memory| will be parsed using |parser|. - // |virtual_packet_length_bytes| is typically used when reading RTP dump files + // `packet`. The `time_ms` is an extra time associated with this packet, + // typically used to denote arrival time. + // `virtual_packet_length_bytes` is typically used when reading RTP dump files // that only contain the RTP headers, and no payload (a.k.a RTP dummy files or - // RTP light). The |virtual_packet_length_bytes| tells what size the packet - // had on wire, including the now discarded payload, whereas |allocated_bytes| - // is the length of the remaining payload (typically only the RTP header). - Packet(uint8_t* packet_memory, - size_t allocated_bytes, + // RTP light). The `virtual_packet_length_bytes` tells what size the packet + // had on wire, including the now discarded payload. 
+ Packet(rtc::CopyOnWriteBuffer packet, size_t virtual_packet_length_bytes, double time_ms, - const RtpUtility::RtpHeaderParser& parser, const RtpHeaderExtensionMap* extension_map = nullptr); + Packet(rtc::CopyOnWriteBuffer packet, + double time_ms, + const RtpHeaderExtensionMap* extension_map = nullptr) + : Packet(packet, packet.size(), time_ms, extension_map) {} + // Same as above, but creates the packet from an already parsed RTPHeader. // This is typically used when reading RTP dump files that only contain the - // RTP headers, and no payload. The |virtual_packet_length_bytes| tells what + // RTP headers, and no payload. The `virtual_packet_length_bytes` tells what // size the packet had on wire, including the now discarded payload, - // The |virtual_payload_length_bytes| tells the size of the payload. + // The `virtual_payload_length_bytes` tells the size of the payload. Packet(const RTPHeader& header, size_t virtual_packet_length_bytes, size_t virtual_payload_length_bytes, double time_ms); - // The following constructors are the same as the first two, but without a - // parser. Note that when the object is constructed using any of these - // methods, the header will be parsed using a default RtpHeaderParser object. - // In particular, RTP header extensions won't be parsed. - Packet(uint8_t* packet_memory, size_t allocated_bytes, double time_ms); - - Packet(uint8_t* packet_memory, - size_t allocated_bytes, - size_t virtual_packet_length_bytes, - double time_ms); - virtual ~Packet(); // Parses the first bytes of the RTP payload, interpreting them as RED headers @@ -80,11 +64,11 @@ class Packet { // itself. static void DeleteRedHeaders(std::list* headers); - const uint8_t* payload() const { return payload_; } + const uint8_t* payload() const { return rtp_payload_.data(); } - size_t packet_length_bytes() const { return packet_length_bytes_; } + size_t packet_length_bytes() const { return packet_.size(); } - size_t payload_length_bytes() const { return payload_length_bytes_; } + size_t payload_length_bytes() const { return rtp_payload_.size(); } size_t virtual_packet_length_bytes() const { return virtual_packet_length_bytes_; @@ -100,21 +84,17 @@ class Packet { bool valid_header() const { return valid_header_; } private: - bool ParseHeader(const webrtc::RtpUtility::RtpHeaderParser& parser, - const RtpHeaderExtensionMap* extension_map); + bool ParseHeader(const RtpHeaderExtensionMap* extension_map); void CopyToHeader(RTPHeader* destination) const; RTPHeader header_; - const std::unique_ptr payload_memory_; - const uint8_t* payload_ = nullptr; // First byte after header. - const size_t packet_length_bytes_ = 0; // Total length of packet. - size_t payload_length_bytes_ = 0; // Length of the payload, after RTP header. - // Zero for dummy RTP packets. + const rtc::CopyOnWriteBuffer packet_; + rtc::ArrayView rtp_payload_; // Empty for dummy RTP packets. // Virtual lengths are used when parsing RTP header files (dummy RTP files). const size_t virtual_packet_length_bytes_; size_t virtual_payload_length_bytes_ = 0; const double time_ms_; // Used to denote a packet's arrival time. - const bool valid_header_; // Set by the RtpHeaderParser. 
+ const bool valid_header_; RTC_DISALLOW_COPY_AND_ASSIGN(Packet); }; diff --git a/modules/audio_coding/neteq/tools/packet_unittest.cc b/modules/audio_coding/neteq/tools/packet_unittest.cc index 7f3d6630c3..7cc9a48ee6 100644 --- a/modules/audio_coding/neteq/tools/packet_unittest.cc +++ b/modules/audio_coding/neteq/tools/packet_unittest.cc @@ -42,16 +42,15 @@ void MakeRtpHeader(int payload_type, TEST(TestPacket, RegularPacket) { const size_t kPacketLengthBytes = 100; - uint8_t* packet_memory = new uint8_t[kPacketLengthBytes]; + rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes); const uint8_t kPayloadType = 17; const uint16_t kSequenceNumber = 4711; const uint32_t kTimestamp = 47114711; const uint32_t kSsrc = 0x12345678; MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc, - packet_memory); + packet_memory.MutableData()); const double kPacketTime = 1.0; - // Hand over ownership of |packet_memory| to |packet|. - Packet packet(packet_memory, kPacketLengthBytes, kPacketTime); + Packet packet(std::move(packet_memory), kPacketTime); ASSERT_TRUE(packet.valid_header()); EXPECT_EQ(kPayloadType, packet.header().payloadType); EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber); @@ -70,16 +69,44 @@ TEST(TestPacket, RegularPacket) { TEST(TestPacket, DummyPacket) { const size_t kPacketLengthBytes = kHeaderLengthBytes; // Only RTP header. const size_t kVirtualPacketLengthBytes = 100; - uint8_t* packet_memory = new uint8_t[kPacketLengthBytes]; + rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes); const uint8_t kPayloadType = 17; const uint16_t kSequenceNumber = 4711; const uint32_t kTimestamp = 47114711; const uint32_t kSsrc = 0x12345678; MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc, - packet_memory); + packet_memory.MutableData()); const double kPacketTime = 1.0; - // Hand over ownership of |packet_memory| to |packet|. - Packet packet(packet_memory, kPacketLengthBytes, kVirtualPacketLengthBytes, + Packet packet(std::move(packet_memory), kVirtualPacketLengthBytes, + kPacketTime); + ASSERT_TRUE(packet.valid_header()); + EXPECT_EQ(kPayloadType, packet.header().payloadType); + EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber); + EXPECT_EQ(kTimestamp, packet.header().timestamp); + EXPECT_EQ(kSsrc, packet.header().ssrc); + EXPECT_EQ(0, packet.header().numCSRCs); + EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes()); + EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes, + packet.payload_length_bytes()); + EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes()); + EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes, + packet.virtual_payload_length_bytes()); + EXPECT_EQ(kPacketTime, packet.time_ms()); +} + +TEST(TestPacket, DummyPaddingPacket) { + const size_t kPacketLengthBytes = kHeaderLengthBytes; // Only RTP header. + const size_t kVirtualPacketLengthBytes = 100; + rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes); + const uint8_t kPayloadType = 17; + const uint16_t kSequenceNumber = 4711; + const uint32_t kTimestamp = 47114711; + const uint32_t kSsrc = 0x12345678; + MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc, + packet_memory.MutableData()); + packet_memory.MutableData()[0] |= 0b0010'0000; // Set the padding bit. 
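  // (Annotation, not part of the patch: 0b0010'0000 is the RTP padding (P)
  // bit, bit 5 of the first octet |V=2|P|X|CC|. Setting it on this header-only
  // packet recreates the dummy-packet-with-padding case that the new
  // Packet::ParseHeader() padding handling above is written to accept.)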
+ const double kPacketTime = 1.0; + Packet packet(std::move(packet_memory), kVirtualPacketLengthBytes, kPacketTime); ASSERT_TRUE(packet.valid_header()); EXPECT_EQ(kPayloadType, packet.header().payloadType); @@ -133,19 +160,19 @@ int MakeRedHeader(int payload_type, TEST(TestPacket, RED) { const size_t kPacketLengthBytes = 100; - uint8_t* packet_memory = new uint8_t[kPacketLengthBytes]; + rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes); const uint8_t kRedPayloadType = 17; const uint16_t kSequenceNumber = 4711; const uint32_t kTimestamp = 47114711; const uint32_t kSsrc = 0x12345678; MakeRtpHeader(kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc, - packet_memory); + packet_memory.MutableData()); // Create four RED headers. // Payload types are just the same as the block index the offset is 100 times // the block index. const int kRedBlocks = 4; - uint8_t* payload_ptr = - &packet_memory[kHeaderLengthBytes]; // First byte after header. + uint8_t* payload_ptr = packet_memory.MutableData() + + kHeaderLengthBytes; // First byte after header. for (int i = 0; i < kRedBlocks; ++i) { int payload_type = i; // Offset value is not used for the last block. diff --git a/modules/audio_coding/neteq/tools/rtp_analyze.cc b/modules/audio_coding/neteq/tools/rtp_analyze.cc index dad3750940..46fc2d744e 100644 --- a/modules/audio_coding/neteq/tools/rtp_analyze.cc +++ b/modules/audio_coding/neteq/tools/rtp_analyze.cc @@ -56,7 +56,7 @@ int main(int argc, char* argv[]) { printf("Input file: %s\n", args[1]); std::unique_ptr file_source( webrtc::test::RtpFileSource::Create(args[1])); - assert(file_source.get()); + RTC_DCHECK(file_source.get()); // Set RTP extension IDs. bool print_audio_level = false; if (absl::GetFlag(FLAGS_audio_level) != -1) { @@ -151,7 +151,7 @@ int main(int argc, char* argv[]) { packet->ExtractRedHeaders(&red_headers); while (!red_headers.empty()) { webrtc::RTPHeader* red = red_headers.front(); - assert(red); + RTC_DCHECK(red); fprintf(out_file, "* %5u %10u %10u %5i\n", red->sequenceNumber, red->timestamp, static_cast(packet->time_ms()), red->payloadType); diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.cc b/modules/audio_coding/neteq/tools/rtp_file_source.cc index 78523308e3..16b225e5df 100644 --- a/modules/audio_coding/neteq/tools/rtp_file_source.cc +++ b/modules/audio_coding/neteq/tools/rtp_file_source.cc @@ -62,12 +62,9 @@ std::unique_ptr RtpFileSource::NextPacket() { // Read the next one. 
continue; } - std::unique_ptr packet_memory(new uint8_t[temp_packet.length]); - memcpy(packet_memory.get(), temp_packet.data, temp_packet.length); - RtpUtility::RtpHeaderParser parser(packet_memory.get(), temp_packet.length); auto packet = std::make_unique( - packet_memory.release(), temp_packet.length, - temp_packet.original_length, temp_packet.time_ms, parser, + rtc::CopyOnWriteBuffer(temp_packet.data, temp_packet.length), + temp_packet.original_length, temp_packet.time_ms, &rtp_header_extension_map_); if (!packet->valid_header()) { continue; diff --git a/modules/audio_coding/neteq/tools/rtp_generator.cc b/modules/audio_coding/neteq/tools/rtp_generator.cc index accd1635b5..a37edef20a 100644 --- a/modules/audio_coding/neteq/tools/rtp_generator.cc +++ b/modules/audio_coding/neteq/tools/rtp_generator.cc @@ -18,7 +18,7 @@ namespace test { uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type, size_t payload_length_samples, RTPHeader* rtp_header) { - assert(rtp_header); + RTC_DCHECK(rtp_header); if (!rtp_header) { return 0; } @@ -31,7 +31,7 @@ uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type, rtp_header->numCSRCs = 0; uint32_t this_send_time = next_send_time_ms_; - assert(samples_per_ms_ > 0); + RTC_DCHECK_GT(samples_per_ms_, 0); next_send_time_ms_ += ((1.0 + drift_factor_) * payload_length_samples) / samples_per_ms_; return this_send_time; diff --git a/modules/audio_coding/test/Channel.cc b/modules/audio_coding/test/Channel.cc index 9456145d8c..d7bd6a968b 100644 --- a/modules/audio_coding/test/Channel.cc +++ b/modules/audio_coding/test/Channel.cc @@ -125,7 +125,7 @@ void Channel::CalcStatistics(const RTPHeader& rtp_header, size_t payloadSize) { (uint32_t)((uint32_t)rtp_header.timestamp - (uint32_t)currentPayloadStr->lastTimestamp); } - assert(_lastFrameSizeSample > 0); + RTC_DCHECK_GT(_lastFrameSizeSample, 0); int k = 0; for (; k < MAX_NUM_FRAMESIZES; ++k) { if ((currentPayloadStr->frameSizeStats[k].frameSizeSample == diff --git a/modules/audio_device/linux/audio_device_alsa_linux.cc b/modules/audio_device/linux/audio_device_alsa_linux.cc index 9e6bd168fc..60e01e1239 100644 --- a/modules/audio_device/linux/audio_device_alsa_linux.cc +++ b/modules/audio_device/linux/audio_device_alsa_linux.cc @@ -1490,7 +1490,7 @@ bool AudioDeviceLinuxALSA::PlayThreadProcess() { Lock(); _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer); - assert(_playoutFramesLeft == _playoutFramesIn10MS); + RTC_DCHECK_EQ(_playoutFramesLeft, _playoutFramesIn10MS); } if (static_cast(avail_frames) > _playoutFramesLeft) @@ -1509,7 +1509,7 @@ bool AudioDeviceLinuxALSA::PlayThreadProcess() { UnLock(); return true; } else { - assert(frames == avail_frames); + RTC_DCHECK_EQ(frames, avail_frames); _playoutFramesLeft -= frames; } @@ -1559,7 +1559,7 @@ bool AudioDeviceLinuxALSA::RecThreadProcess() { UnLock(); return true; } else if (frames > 0) { - assert(frames == avail_frames); + RTC_DCHECK_EQ(frames, avail_frames); int left_size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesLeft); diff --git a/modules/audio_device/linux/latebindingsymboltable_linux.h b/modules/audio_device/linux/latebindingsymboltable_linux.h index edb62aede8..6cfb659749 100644 --- a/modules/audio_device/linux/latebindingsymboltable_linux.h +++ b/modules/audio_device/linux/latebindingsymboltable_linux.h @@ -11,10 +11,10 @@ #ifndef AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_ #define AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_ -#include #include // for NULL #include +#include "rtc_base/checks.h" #include 
"rtc_base/constructor_magic.h" // This file provides macros for creating "symbol table" classes to simplify the @@ -59,7 +59,7 @@ class LateBindingSymbolTable { // We do not use this, but we offer it for theoretical convenience. static const char* GetSymbolName(int index) { - assert(index < NumSymbols()); + RTC_DCHECK_LT(index, NumSymbols()); return kSymbolNames[index]; } @@ -100,8 +100,8 @@ class LateBindingSymbolTable { // Retrieves the given symbol. NOTE: Recommended to use LATESYM_GET below // instead of this. void* GetSymbol(int index) const { - assert(IsLoaded()); - assert(index < NumSymbols()); + RTC_DCHECK(IsLoaded()); + RTC_DCHECK_LT(index, NumSymbols()); return symbols_[index]; } diff --git a/modules/audio_device/mac/audio_mixer_manager_mac.cc b/modules/audio_device/mac/audio_mixer_manager_mac.cc index fe963746ac..942e7db3b3 100644 --- a/modules/audio_device/mac/audio_mixer_manager_mac.cc +++ b/modules/audio_device/mac/audio_mixer_manager_mac.cc @@ -225,7 +225,7 @@ int32_t AudioMixerManagerMac::SetSpeakerVolume(uint32_t volume) { // volume range is 0.0 - 1.0, convert from 0 -255 const Float32 vol = (Float32)(volume / 255.0); - assert(vol <= 1.0 && vol >= 0.0); + RTC_DCHECK(vol <= 1.0 && vol >= 0.0); // Does the capture device have a master volume control? // If so, use it exclusively. @@ -311,7 +311,7 @@ int32_t AudioMixerManagerMac::SpeakerVolume(uint32_t& volume) const { return -1; } - assert(channels > 0); + RTC_DCHECK_GT(channels, 0); // vol 0.0 to 1.0 -> convert to 0 - 255 volume = static_cast(255 * vol / channels + 0.5); } @@ -522,7 +522,7 @@ int32_t AudioMixerManagerMac::SpeakerMute(bool& enabled) const { return -1; } - assert(channels > 0); + RTC_DCHECK_GT(channels, 0); // 1 means muted enabled = static_cast(muted); } @@ -690,7 +690,7 @@ int32_t AudioMixerManagerMac::MicrophoneMute(bool& enabled) const { return -1; } - assert(channels > 0); + RTC_DCHECK_GT(channels, 0); // 1 means muted enabled = static_cast(muted); } @@ -757,7 +757,7 @@ int32_t AudioMixerManagerMac::SetMicrophoneVolume(uint32_t volume) { // volume range is 0.0 - 1.0, convert from 0 - 255 const Float32 vol = (Float32)(volume / 255.0); - assert(vol <= 1.0 && vol >= 0.0); + RTC_DCHECK(vol <= 1.0 && vol >= 0.0); // Does the capture device have a master volume control? // If so, use it exclusively. @@ -843,7 +843,7 @@ int32_t AudioMixerManagerMac::MicrophoneVolume(uint32_t& volume) const { return -1; } - assert(channels > 0); + RTC_DCHECK_GT(channels, 0); // vol 0.0 to 1.0 -> convert to 0 - 255 volume = static_cast(255 * volFloat32 / channels + 0.5); } diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc index 328fefa027..a3723edb56 100644 --- a/modules/audio_device/win/audio_device_core_win.cc +++ b/modules/audio_device/win/audio_device_core_win.cc @@ -281,7 +281,7 @@ bool AudioDeviceWindowsCore::CoreAudioIsSupported() { DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText, MAXERRORLENGTH, NULL); - assert(messageLength <= MAXERRORLENGTH); + RTC_DCHECK_LE(messageLength, MAXERRORLENGTH); // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.). 
for (; messageLength && ::isspace(errorText[messageLength - 1]); @@ -469,7 +469,7 @@ AudioDeviceWindowsCore::AudioDeviceWindowsCore() CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), reinterpret_cast(&_ptrEnumerator)); - assert(NULL != _ptrEnumerator); + RTC_DCHECK(_ptrEnumerator); // DMO initialization for built-in WASAPI AEC. { @@ -1411,7 +1411,7 @@ int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index) { HRESULT hr(S_OK); - assert(_ptrRenderCollection != NULL); + RTC_DCHECK(_ptrRenderCollection); // Select an endpoint rendering device given the specified index SAFE_RELEASE(_ptrDeviceOut); @@ -1461,7 +1461,7 @@ int32_t AudioDeviceWindowsCore::SetPlayoutDevice( HRESULT hr(S_OK); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(_ptrEnumerator); // Select an endpoint rendering device given the specified role SAFE_RELEASE(_ptrDeviceOut); @@ -1677,7 +1677,7 @@ int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index) { HRESULT hr(S_OK); - assert(_ptrCaptureCollection != NULL); + RTC_DCHECK(_ptrCaptureCollection); // Select an endpoint capture device given the specified index SAFE_RELEASE(_ptrDeviceIn); @@ -1727,7 +1727,7 @@ int32_t AudioDeviceWindowsCore::SetRecordingDevice( HRESULT hr(S_OK); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(_ptrEnumerator); // Select an endpoint capture device given the specified role SAFE_RELEASE(_ptrDeviceIn); @@ -2036,8 +2036,8 @@ int32_t AudioDeviceWindowsCore::InitPlayout() { // handles device initialization itself. // Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx int32_t AudioDeviceWindowsCore::InitRecordingDMO() { - assert(_builtInAecEnabled); - assert(_dmo != NULL); + RTC_DCHECK(_builtInAecEnabled); + RTC_DCHECK(_dmo); if (SetDMOProperties() == -1) { return -1; @@ -2356,7 +2356,7 @@ int32_t AudioDeviceWindowsCore::StartRecording() { } } - assert(_hRecThread == NULL); + RTC_DCHECK(_hRecThread); _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL); if (_hRecThread == NULL) { RTC_LOG(LS_ERROR) << "failed to create the recording thread"; @@ -2421,8 +2421,8 @@ int32_t AudioDeviceWindowsCore::StopRecording() { ResetEvent(_hShutdownCaptureEvent); // Must be manually reset. // Ensure that the thread has released these interfaces properly. - assert(err == -1 || _ptrClientIn == NULL); - assert(err == -1 || _ptrCaptureClient == NULL); + RTC_DCHECK(err == -1 || _ptrClientIn == NULL); + RTC_DCHECK(err == -1 || _ptrCaptureClient == NULL); _recIsInitialized = false; _recording = false; @@ -2433,7 +2433,7 @@ int32_t AudioDeviceWindowsCore::StopRecording() { _hRecThread = NULL; if (_builtInAecEnabled) { - assert(_dmo != NULL); + RTC_DCHECK(_dmo); // This is necessary. Otherwise the DMO can generate garbage render // audio even after rendering has stopped. HRESULT hr = _dmo->FreeStreamingResources(); @@ -2493,7 +2493,7 @@ int32_t AudioDeviceWindowsCore::StartPlayout() { MutexLock lockScoped(&mutex_); // Create thread which will drive the rendering. - assert(_hPlayThread == NULL); + RTC_DCHECK(_hPlayThread); _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL); if (_hPlayThread == NULL) { RTC_LOG(LS_ERROR) << "failed to create the playout thread"; @@ -2954,7 +2954,7 @@ void AudioDeviceWindowsCore::RevertCaptureThreadPriority() { } DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { - assert(_mediaBuffer != NULL); + RTC_DCHECK(_mediaBuffer); bool keepRecording = true; // Initialize COM as MTA in this thread. 
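Note: the audio-device hunks in this patch mechanically replace `assert()` with the macros from `rtc_base/checks.h`. A short sketch of the mapping used throughout (the function and its arguments are illustrative):

#include "rtc_base/checks.h"

void DcheckSketch(void* handle, int frames, int avail_frames) {
  RTC_DCHECK(handle);                   // was: assert(handle != NULL);
  RTC_DCHECK_EQ(frames, avail_frames);  // was: assert(frames == avail_frames);
  RTC_DCHECK_GT(avail_frames, 0);       // was: assert(avail_frames > 0);
  if (frames < 0) {
    RTC_NOTREACHED();                   // was: assert(false);
  }
  // RTC_DCHECK* compiles to a no-op unless RTC_DCHECK_IS_ON (debug builds or
  // DCHECK_ALWAYS_ON); the _EQ/_GT/_LE/... forms log both operands on failure.
}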
@@ -3010,7 +3010,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { if (FAILED(hr)) { _TraceCOMError(hr); keepRecording = false; - assert(false); + RTC_NOTREACHED(); break; } @@ -3022,7 +3022,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { if (FAILED(hr)) { _TraceCOMError(hr); keepRecording = false; - assert(false); + RTC_NOTREACHED(); break; } @@ -3031,8 +3031,8 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { // TODO(andrew): verify that this is always satisfied. It might // be that ProcessOutput will try to return more than 10 ms if // we fail to call it frequently enough. - assert(kSamplesProduced == static_cast(_recBlockSize)); - assert(sizeof(BYTE) == sizeof(int8_t)); + RTC_DCHECK_EQ(kSamplesProduced, static_cast(_recBlockSize)); + RTC_DCHECK_EQ(sizeof(BYTE), sizeof(int8_t)); _ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast(data), kSamplesProduced); _ptrAudioBuffer->SetVQEData(0, 0); @@ -3047,7 +3047,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { if (FAILED(hr)) { _TraceCOMError(hr); keepRecording = false; - assert(false); + RTC_NOTREACHED(); break; } @@ -3228,7 +3228,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread() { pData = NULL; } - assert(framesAvailable != 0); + RTC_DCHECK_NE(framesAvailable, 0); if (pData) { CopyMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], pData, @@ -3237,8 +3237,8 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread() { ZeroMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], framesAvailable * _recAudioFrameSize); } - assert(syncBufferSize >= (syncBufIndex * _recAudioFrameSize) + - framesAvailable * _recAudioFrameSize); + RTC_DCHECK_GE(syncBufferSize, (syncBufIndex * _recAudioFrameSize) + + framesAvailable * _recAudioFrameSize); // Release the capture buffer // @@ -3377,7 +3377,7 @@ void AudioDeviceWindowsCore::_UnLock() RTC_NO_THREAD_SAFETY_ANALYSIS { int AudioDeviceWindowsCore::SetDMOProperties() { HRESULT hr = S_OK; - assert(_dmo != NULL); + RTC_DCHECK(_dmo); rtc::scoped_refptr ps; { @@ -3517,8 +3517,8 @@ int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir) { HRESULT hr = S_OK; IMMDeviceCollection* pCollection = NULL; - assert(dir == eRender || dir == eCapture); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(dir == eRender || dir == eCapture); + RTC_DCHECK(_ptrEnumerator); // Create a fresh list of devices using the specified direction hr = _ptrEnumerator->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE, @@ -3553,7 +3553,7 @@ int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir) { HRESULT hr = S_OK; UINT count = 0; - assert(eRender == dir || eCapture == dir); + RTC_DCHECK(eRender == dir || eCapture == dir); if (eRender == dir && NULL != _ptrRenderCollection) { hr = _ptrRenderCollection->GetCount(&count); @@ -3589,7 +3589,7 @@ int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, HRESULT hr = S_OK; IMMDevice* pDevice = NULL; - assert(dir == eRender || dir == eCapture); + RTC_DCHECK(dir == eRender || dir == eCapture); if (eRender == dir && NULL != _ptrRenderCollection) { hr = _ptrRenderCollection->Item(index, &pDevice); @@ -3626,9 +3626,9 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, HRESULT hr = S_OK; IMMDevice* pDevice = NULL; - assert(dir == eRender || dir == eCapture); - assert(role == eConsole || role == eCommunications); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(dir == eRender || dir == eCapture); + RTC_DCHECK(role == eConsole || role == eCommunications); + RTC_DCHECK(_ptrEnumerator); hr = 
_ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice); @@ -3663,7 +3663,7 @@ int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, HRESULT hr = S_OK; IMMDevice* pDevice = NULL; - assert(dir == eRender || dir == eCapture); + RTC_DCHECK(dir == eRender || dir == eCapture); if (eRender == dir && NULL != _ptrRenderCollection) { hr = _ptrRenderCollection->Item(index, &pDevice); @@ -3700,9 +3700,9 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, HRESULT hr = S_OK; IMMDevice* pDevice = NULL; - assert(dir == eRender || dir == eCapture); - assert(role == eConsole || role == eCommunications); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(dir == eRender || dir == eCapture); + RTC_DCHECK(role == eConsole || role == eCommunications); + RTC_DCHECK(_ptrEnumerator); hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice); @@ -3727,8 +3727,8 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir, WCHAR szDeviceID[MAX_PATH] = {0}; const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]); - assert(kDeviceIDLength == - sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0])); + RTC_DCHECK_EQ(kDeviceIDLength, + sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0])); if (_GetDefaultDeviceID(dir, role, szDefaultDeviceID, kDeviceIDLength) == -1) { @@ -3801,8 +3801,8 @@ int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice, IPropertyStore* pProps = NULL; PROPVARIANT varName; - assert(pszBuffer != NULL); - assert(bufferLen > 0); + RTC_DCHECK(pszBuffer); + RTC_DCHECK_GT(bufferLen, 0); if (pDevice != NULL) { hr = pDevice->OpenPropertyStore(STGM_READ, &pProps); @@ -3867,8 +3867,8 @@ int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, HRESULT hr = E_FAIL; LPWSTR pwszID = NULL; - assert(pszBuffer != NULL); - assert(bufferLen > 0); + RTC_DCHECK(pszBuffer); + RTC_DCHECK_GT(bufferLen, 0); if (pDevice != NULL) { hr = pDevice->GetId(&pwszID); @@ -3897,7 +3897,7 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, HRESULT hr(S_OK); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(_ptrEnumerator); hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, ppDevice); if (FAILED(hr)) { @@ -3917,7 +3917,7 @@ int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, IMMDevice** ppDevice) { HRESULT hr(S_OK); - assert(_ptrEnumerator != NULL); + RTC_DCHECK(_ptrEnumerator); IMMDeviceCollection* pCollection = NULL; @@ -3951,7 +3951,7 @@ int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll( EDataFlow dataFlow) const { RTC_DLOG(LS_VERBOSE) << __FUNCTION__; - assert(_ptrEnumerator != NULL); + RTC_DCHECK(_ptrEnumerator); HRESULT hr = S_OK; IMMDeviceCollection* pCollection = NULL; @@ -4143,7 +4143,7 @@ void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const { DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText, MAXERRORLENGTH, NULL); - assert(messageLength <= MAXERRORLENGTH); + RTC_DCHECK_LE(messageLength, MAXERRORLENGTH); // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.). for (; messageLength && ::isspace(errorText[messageLength - 1]); diff --git a/modules/audio_processing/aec3/aec_state.h b/modules/audio_processing/aec3/aec_state.h index 125ae83a2b..e2f70a4c68 100644 --- a/modules/audio_processing/aec3/aec_state.h +++ b/modules/audio_processing/aec3/aec_state.h @@ -75,6 +75,12 @@ class AecState { return erle_estimator_.Erle(onset_compensated); } + // Returns the non-capped ERLE. 
+ rtc::ArrayView> ErleUnbounded() + const { + return erle_estimator_.ErleUnbounded(); + } + // Returns the fullband ERLE estimate in log2 units. float FullBandErleLog2() const { return erle_estimator_.FullbandErleLog2(); } diff --git a/modules/audio_processing/aec3/echo_canceller3.cc b/modules/audio_processing/aec3/echo_canceller3.cc index 4e4632ddaf..181b649f6d 100644 --- a/modules/audio_processing/aec3/echo_canceller3.cc +++ b/modules/audio_processing/aec3/echo_canceller3.cc @@ -49,7 +49,11 @@ void RetrieveFieldTrialValue(const char* trial_name, ParseFieldTrial({&field_trial_param}, field_trial_str); float field_trial_value = static_cast(field_trial_param.Get()); - if (field_trial_value >= min && field_trial_value <= max) { + if (field_trial_value >= min && field_trial_value <= max && + field_trial_value != *value_to_update) { + RTC_LOG(LS_INFO) << "Key " << trial_name + << " changing AEC3 parameter value from " + << *value_to_update << " to " << field_trial_value; *value_to_update = field_trial_value; } } @@ -65,7 +69,11 @@ void RetrieveFieldTrialValue(const char* trial_name, ParseFieldTrial({&field_trial_param}, field_trial_str); float field_trial_value = field_trial_param.Get(); - if (field_trial_value >= min && field_trial_value <= max) { + if (field_trial_value >= min && field_trial_value <= max && + field_trial_value != *value_to_update) { + RTC_LOG(LS_INFO) << "Key " << trial_name + << " changing AEC3 parameter value from " + << *value_to_update << " to " << field_trial_value; *value_to_update = field_trial_value; } } @@ -737,6 +745,10 @@ EchoCanceller3::EchoCanceller3(const EchoCanceller3Config& config, std::vector>>( 1, std::vector>(num_capture_channels_)); } + + RTC_LOG(LS_INFO) << "AEC3 created with sample rate: " << sample_rate_hz_ + << " Hz, num render channels: " << num_render_channels_ + << ", num capture channels: " << num_capture_channels_; } EchoCanceller3::~EchoCanceller3() = default; diff --git a/modules/audio_processing/aec3/echo_remover.cc b/modules/audio_processing/aec3/echo_remover.cc index 6c177c9a10..2bfaa951d8 100644 --- a/modules/audio_processing/aec3/echo_remover.cc +++ b/modules/audio_processing/aec3/echo_remover.cc @@ -172,6 +172,7 @@ class EchoRemoverImpl final : public EchoRemover { std::vector> Y2_heap_; std::vector> E2_heap_; std::vector> R2_heap_; + std::vector> R2_unbounded_heap_; std::vector> S2_linear_heap_; std::vector Y_heap_; std::vector E_heap_; @@ -218,6 +219,7 @@ EchoRemoverImpl::EchoRemoverImpl(const EchoCanceller3Config& config, Y2_heap_(NumChannelsOnHeap(num_capture_channels_)), E2_heap_(NumChannelsOnHeap(num_capture_channels_)), R2_heap_(NumChannelsOnHeap(num_capture_channels_)), + R2_unbounded_heap_(NumChannelsOnHeap(num_capture_channels_)), S2_linear_heap_(NumChannelsOnHeap(num_capture_channels_)), Y_heap_(NumChannelsOnHeap(num_capture_channels_)), E_heap_(NumChannelsOnHeap(num_capture_channels_)), @@ -264,6 +266,8 @@ void EchoRemoverImpl::ProcessCapture( E2_stack; std::array, kMaxNumChannelsOnStack> R2_stack; + std::array, kMaxNumChannelsOnStack> + R2_unbounded_stack; std::array, kMaxNumChannelsOnStack> S2_linear_stack; std::array Y_stack; @@ -280,6 +284,8 @@ void EchoRemoverImpl::ProcessCapture( E2_stack.data(), num_capture_channels_); rtc::ArrayView> R2( R2_stack.data(), num_capture_channels_); + rtc::ArrayView> R2_unbounded( + R2_unbounded_stack.data(), num_capture_channels_); rtc::ArrayView> S2_linear( S2_linear_stack.data(), num_capture_channels_); rtc::ArrayView Y(Y_stack.data(), num_capture_channels_); @@ -301,6 +307,8 @@ void 
EchoRemoverImpl::ProcessCapture( E2_heap_.data(), num_capture_channels_); R2 = rtc::ArrayView>( R2_heap_.data(), num_capture_channels_); + R2_unbounded = rtc::ArrayView>( + R2_unbounded_heap_.data(), num_capture_channels_); S2_linear = rtc::ArrayView>( S2_linear_heap_.data(), num_capture_channels_); Y = rtc::ArrayView(Y_heap_.data(), num_capture_channels_); @@ -406,8 +414,8 @@ void EchoRemoverImpl::ProcessCapture( if (capture_output_used_) { // Estimate the residual echo power. residual_echo_estimator_.Estimate(aec_state_, *render_buffer, S2_linear, Y2, - suppression_gain_.IsDominantNearend(), - R2); + suppression_gain_.IsDominantNearend(), R2, + R2_unbounded); // Suppressor nearend estimate. if (aec_state_.UsableLinearEstimate()) { @@ -430,7 +438,7 @@ void EchoRemoverImpl::ProcessCapture( // Compute preferred gains. float high_bands_gain; - suppression_gain_.GetGain(nearend_spectrum, echo_spectrum, R2, + suppression_gain_.GetGain(nearend_spectrum, echo_spectrum, R2, R2_unbounded, cng_.NoiseSpectrum(), render_signal_analyzer_, aec_state_, x, clock_drift, &high_bands_gain, &G); diff --git a/modules/audio_processing/aec3/erle_estimator.h b/modules/audio_processing/aec3/erle_estimator.h index cae896e82c..55797592a9 100644 --- a/modules/audio_processing/aec3/erle_estimator.h +++ b/modules/audio_processing/aec3/erle_estimator.h @@ -62,6 +62,18 @@ class ErleEstimator { : subband_erle_estimator_.Erle(onset_compensated); } + // Returns the non-capped subband ERLE. + rtc::ArrayView> ErleUnbounded() + const { + // Unbounded ERLE is only used with the subband erle estimator where the + // ERLE is often capped at low values. When the signal dependent ERLE + // estimator is used the capped ERLE is returned. + return !signal_dependent_erle_estimator_ + ? subband_erle_estimator_.ErleUnbounded() + : signal_dependent_erle_estimator_->Erle( + /*onset_compensated=*/false); + } + // Returns the subband ERLE that are estimated during onsets (only used for // testing). 
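Note: ERLE (echo return loss enhancement) is the ratio between the echo power reaching the microphone and the residual echo power after the linear filter, and the new `ErleUnbounded()` accessors expose an estimate that is not clamped to the configured per-band ceilings. The following is a simplified conceptual sketch of that distinction, not the actual SubbandErleEstimator update rule; the smoothing factor, the floor of 1 and the names are illustrative, while kUnboundedErleMax matches the constant added in subband_erle_estimator.cc.

#include <algorithm>

// One smoothed ERLE estimate per subband, kept twice: once clamped to the
// configured maximum and once with a "virtually unbounded" ceiling.
struct ErlePair {
  float bounded = 1.f;
  float unbounded = 1.f;
};

void UpdateErle(float echo_power_y2,
                float residual_power_e2,
                float max_erle,
                ErlePair& erle) {
  constexpr float kSmoothing = 0.05f;            // Illustrative.
  constexpr float kUnboundedErleMax = 100000.f;  // Matches the new constant.
  if (residual_power_e2 <= 0.f) {
    return;  // Nothing to learn from an empty frame.
  }
  const float instantaneous = echo_power_y2 / residual_power_e2;
  erle.bounded += kSmoothing * (instantaneous - erle.bounded);
  erle.bounded = std::min(std::max(erle.bounded, 1.f), max_erle);
  erle.unbounded += kSmoothing * (instantaneous - erle.unbounded);
  erle.unbounded = std::min(std::max(erle.unbounded, 1.f), kUnboundedErleMax);
}

The new VerifyErleGreaterOrEqual() checks in erle_estimator_unittest.cc assert exactly this ordering: unbounded >= onset-uncompensated >= onset-compensated.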
rtc::ArrayView> ErleDuringOnsets() diff --git a/modules/audio_processing/aec3/erle_estimator_unittest.cc b/modules/audio_processing/aec3/erle_estimator_unittest.cc index 6df71424bc..e38f2386f7 100644 --- a/modules/audio_processing/aec3/erle_estimator_unittest.cc +++ b/modules/audio_processing/aec3/erle_estimator_unittest.cc @@ -50,6 +50,16 @@ void VerifyErle( EXPECT_NEAR(kTrueErle, erle_time_domain, 0.5); } +void VerifyErleGreaterOrEqual( + rtc::ArrayView> erle1, + rtc::ArrayView> erle2) { + for (size_t ch = 0; ch < erle1.size(); ++ch) { + for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) { + EXPECT_GE(erle1[ch][i], erle2[ch][i]); + } + } +} + void FormFarendTimeFrame(std::vector>>* x) { const std::array frame = { 7459.88, 17209.6, 17383, 20768.9, 16816.7, 18386.3, 4492.83, 9675.85, @@ -156,9 +166,10 @@ TEST_P(ErleEstimatorMultiChannel, VerifyErleIncreaseAndHold) { kNumBands, std::vector>( num_render_channels, std::vector(kBlockSize, 0.f))); std::vector>> - filter_frequency_response( - config.filter.refined.length_blocks, - std::vector>(num_capture_channels)); + filter_frequency_response( + config.filter.refined.length_blocks, + std::vector>( + num_capture_channels)); std::unique_ptr render_delay_buffer( RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels)); @@ -181,6 +192,10 @@ TEST_P(ErleEstimatorMultiChannel, VerifyErleIncreaseAndHold) { VerifyErle(estimator.Erle(/*onset_compensated=*/true), std::pow(2.f, estimator.FullbandErleLog2()), config.erle.max_l, config.erle.max_h); + VerifyErleGreaterOrEqual(estimator.Erle(/*onset_compensated=*/false), + estimator.Erle(/*onset_compensated=*/true)); + VerifyErleGreaterOrEqual(estimator.ErleUnbounded(), + estimator.Erle(/*onset_compensated=*/false)); FormNearendFrame(&x, &X2, E2, Y2); // Verifies that the ERLE is not immediately decreased during nearend @@ -194,6 +209,10 @@ TEST_P(ErleEstimatorMultiChannel, VerifyErleIncreaseAndHold) { VerifyErle(estimator.Erle(/*onset_compensated=*/true), std::pow(2.f, estimator.FullbandErleLog2()), config.erle.max_l, config.erle.max_h); + VerifyErleGreaterOrEqual(estimator.Erle(/*onset_compensated=*/false), + estimator.Erle(/*onset_compensated=*/true)); + VerifyErleGreaterOrEqual(estimator.ErleUnbounded(), + estimator.Erle(/*onset_compensated=*/false)); } TEST_P(ErleEstimatorMultiChannel, VerifyErleTrackingOnOnsets) { @@ -212,9 +231,10 @@ TEST_P(ErleEstimatorMultiChannel, VerifyErleTrackingOnOnsets) { kNumBands, std::vector>( num_render_channels, std::vector(kBlockSize, 0.f))); std::vector>> - filter_frequency_response( - config.filter.refined.length_blocks, - std::vector>(num_capture_channels)); + filter_frequency_response( + config.filter.refined.length_blocks, + std::vector>( + num_capture_channels)); std::unique_ptr render_delay_buffer( RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels)); diff --git a/modules/audio_processing/aec3/residual_echo_estimator.cc b/modules/audio_processing/aec3/residual_echo_estimator.cc index 0688429d47..15bebecb5f 100644 --- a/modules/audio_processing/aec3/residual_echo_estimator.cc +++ b/modules/audio_processing/aec3/residual_echo_estimator.cc @@ -177,7 +177,8 @@ void ResidualEchoEstimator::Estimate( rtc::ArrayView> S2_linear, rtc::ArrayView> Y2, bool dominant_nearend, - rtc::ArrayView> R2) { + rtc::ArrayView> R2, + rtc::ArrayView> R2_unbounded) { RTC_DCHECK_EQ(R2.size(), Y2.size()); RTC_DCHECK_EQ(R2.size(), S2_linear.size()); @@ -193,14 +194,18 @@ void ResidualEchoEstimator::Estimate( if (aec_state.SaturatedEcho()) { for (size_t ch = 0; ch < 
num_capture_channels; ++ch) { std::copy(Y2[ch].begin(), Y2[ch].end(), R2[ch].begin()); + std::copy(Y2[ch].begin(), Y2[ch].end(), R2_unbounded[ch].begin()); } } else { const bool onset_compensated = erle_onset_compensation_in_dominant_nearend_ || !dominant_nearend; LinearEstimate(S2_linear, aec_state.Erle(onset_compensated), R2); + LinearEstimate(S2_linear, aec_state.ErleUnbounded(), R2_unbounded); } - AddReverb(ReverbType::kLinear, aec_state, render_buffer, R2); + UpdateReverb(ReverbType::kLinear, aec_state, render_buffer); + AddReverb(R2); + AddReverb(R2_unbounded); } else { const float echo_path_gain = GetEchoPathGain(aec_state, /*gain_for_early_reflections=*/true); @@ -210,6 +215,7 @@ void ResidualEchoEstimator::Estimate( if (aec_state.SaturatedEcho()) { for (size_t ch = 0; ch < num_capture_channels; ++ch) { std::copy(Y2[ch].begin(), Y2[ch].end(), R2[ch].begin()); + std::copy(Y2[ch].begin(), Y2[ch].end(), R2_unbounded[ch].begin()); } } else { // Estimate the echo generating signal power. @@ -229,11 +235,14 @@ void ResidualEchoEstimator::Estimate( } NonLinearEstimate(echo_path_gain, X2, R2); + NonLinearEstimate(echo_path_gain, X2, R2_unbounded); } if (config_.echo_model.model_reverb_in_nonlinear_mode && !aec_state.TransparentModeActive()) { - AddReverb(ReverbType::kNonLinear, aec_state, render_buffer, R2); + UpdateReverb(ReverbType::kNonLinear, aec_state, render_buffer); + AddReverb(R2); + AddReverb(R2_unbounded); } } @@ -244,6 +253,7 @@ void ResidualEchoEstimator::Estimate( for (size_t ch = 0; ch < num_capture_channels; ++ch) { for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { R2[ch][k] *= residual_scaling[k]; + R2_unbounded[ch][k] *= residual_scaling[k]; } } } @@ -292,14 +302,10 @@ void ResidualEchoEstimator::UpdateRenderNoisePower( } } -// Adds the estimated power of the reverb to the residual echo power. -void ResidualEchoEstimator::AddReverb( - ReverbType reverb_type, - const AecState& aec_state, - const RenderBuffer& render_buffer, - rtc::ArrayView> R2) { - const size_t num_capture_channels = R2.size(); - +// Updates the reverb estimation. +void ResidualEchoEstimator::UpdateReverb(ReverbType reverb_type, + const AecState& aec_state, + const RenderBuffer& render_buffer) { // Choose reverb partition based on what type of echo power model is used. const size_t first_reverb_partition = reverb_type == ReverbType::kLinear @@ -334,6 +340,11 @@ void ResidualEchoEstimator::AddReverb( echo_reverb_.UpdateReverbNoFreqShaping(render_power, echo_path_gain, aec_state.ReverbDecay()); } +} +// Adds the estimated power of the reverb to the residual echo power. +void ResidualEchoEstimator::AddReverb( + rtc::ArrayView> R2) const { + const size_t num_capture_channels = R2.size(); // Add the reverb power. rtc::ArrayView reverb_power = diff --git a/modules/audio_processing/aec3/residual_echo_estimator.h b/modules/audio_processing/aec3/residual_echo_estimator.h index 9e977766cb..c071854c4a 100644 --- a/modules/audio_processing/aec3/residual_echo_estimator.h +++ b/modules/audio_processing/aec3/residual_echo_estimator.h @@ -40,7 +40,8 @@ class ResidualEchoEstimator { rtc::ArrayView> S2_linear, rtc::ArrayView> Y2, bool dominant_nearend, - rtc::ArrayView> R2); + rtc::ArrayView> R2, + rtc::ArrayView> R2_unbounded); private: enum class ReverbType { kLinear, kNonLinear }; @@ -52,12 +53,15 @@ class ResidualEchoEstimator { // render signal. void UpdateRenderNoisePower(const RenderBuffer& render_buffer); + // Updates the reverb estimation. 
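Note: the residual_echo_estimator.cc changes split the old `AddReverb()` into a state update (`UpdateReverb()`) that runs once per capture block and a const `AddReverb()` that can then be applied to both the capped (R2) and uncapped (R2_unbounded) residual spectra. A schematic sketch of that control flow, with the spectra reduced to one float per channel for brevity and all names illustrative:

#include <vector>

class ReverbSketch {
 public:
  // Advances the reverb model once per block, independent of how many output
  // spectra it will later be added to.
  void UpdateReverb(float render_power, float decay) {
    reverb_power_ = reverb_power_ * decay + render_power;
  }
  // Adds the current reverb estimate; callable repeatedly without changing
  // state (here: once for R2, once for R2_unbounded).
  void AddReverb(std::vector<float>& residual_echo) const {
    for (float& r2 : residual_echo) {
      r2 += reverb_power_;
    }
  }

 private:
  float reverb_power_ = 0.f;
};

void EstimateSketch(ReverbSketch& reverb,
                    float render_power,
                    float decay,
                    std::vector<float>& R2,
                    std::vector<float>& R2_unbounded) {
  reverb.UpdateReverb(render_power, decay);  // State mutated exactly once.
  reverb.AddReverb(R2);
  reverb.AddReverb(R2_unbounded);
}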
+ void UpdateReverb(ReverbType reverb_type, + const AecState& aec_state, + const RenderBuffer& render_buffer); + // Adds the estimated unmodelled echo power to the residual echo power // estimate. - void AddReverb(ReverbType reverb_type, - const AecState& aec_state, - const RenderBuffer& render_buffer, - rtc::ArrayView> R2); + void AddReverb( + rtc::ArrayView> R2) const; // Gets the echo path gain to apply. float GetEchoPathGain(const AecState& aec_state, diff --git a/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc b/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc index e80838b5f6..3d760b7dda 100644 --- a/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc +++ b/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc @@ -48,6 +48,8 @@ TEST_P(ResidualEchoEstimatorMultiChannel, BasicTest) { num_capture_channels); std::vector> Y2(num_capture_channels); std::vector> R2(num_capture_channels); + std::vector> R2_unbounded( + num_capture_channels); std::vector>> x( kNumBands, std::vector>( num_render_channels, std::vector(kBlockSize, 0.f))); @@ -100,7 +102,8 @@ TEST_P(ResidualEchoEstimatorMultiChannel, BasicTest) { output); estimator.Estimate(aec_state, *render_delay_buffer->GetRenderBuffer(), - S2_linear, Y2, /*dominant_nearend=*/false, R2); + S2_linear, Y2, /*dominant_nearend=*/false, R2, + R2_unbounded); } } diff --git a/modules/audio_processing/aec3/subband_erle_estimator.cc b/modules/audio_processing/aec3/subband_erle_estimator.cc index 1e957f23ac..dc7f92fd99 100644 --- a/modules/audio_processing/aec3/subband_erle_estimator.cc +++ b/modules/audio_processing/aec3/subband_erle_estimator.cc @@ -49,6 +49,7 @@ SubbandErleEstimator::SubbandErleEstimator(const EchoCanceller3Config& config, accum_spectra_(num_capture_channels), erle_(num_capture_channels), erle_onset_compensated_(num_capture_channels), + erle_unbounded_(num_capture_channels), erle_during_onsets_(num_capture_channels), coming_onset_(num_capture_channels), hold_counters_(num_capture_channels) { @@ -62,6 +63,7 @@ void SubbandErleEstimator::Reset() { for (size_t ch = 0; ch < num_capture_channels; ++ch) { erle_[ch].fill(min_erle_); erle_onset_compensated_[ch].fill(min_erle_); + erle_unbounded_[ch].fill(min_erle_); erle_during_onsets_[ch].fill(min_erle_); coming_onset_[ch].fill(true); hold_counters_[ch].fill(0); @@ -90,6 +92,10 @@ void SubbandErleEstimator::Update( auto& erle_oc = erle_onset_compensated_[ch]; erle_oc[0] = erle_oc[1]; erle_oc[kFftLengthBy2] = erle_oc[kFftLengthBy2 - 1]; + + auto& erle_u = erle_unbounded_[ch]; + erle_u[0] = erle_u[1]; + erle_u[kFftLengthBy2] = erle_u[kFftLengthBy2 - 1]; } } @@ -163,6 +169,11 @@ void SubbandErleEstimator::UpdateBands( update_erle_band(erle_onset_compensated_[ch][k], new_erle[k], low_render_energy, min_erle_, max_erle_[k]); } + + // Virtually unbounded ERLE. + constexpr float kUnboundedErleMax = 100000.0f; + update_erle_band(erle_unbounded_[ch][k], new_erle[k], low_render_energy, + min_erle_, kUnboundedErleMax); } } } diff --git a/modules/audio_processing/aec3/subband_erle_estimator.h b/modules/audio_processing/aec3/subband_erle_estimator.h index ffed6a57a5..8bf9c4d645 100644 --- a/modules/audio_processing/aec3/subband_erle_estimator.h +++ b/modules/audio_processing/aec3/subband_erle_estimator.h @@ -47,6 +47,12 @@ class SubbandErleEstimator { : erle_; } + // Returns the non-capped ERLE estimate. + rtc::ArrayView> ErleUnbounded() + const { + return erle_unbounded_; + } + // Returns the ERLE estimate at onsets (only used for testing). 
rtc::ArrayView> ErleDuringOnsets() const { @@ -88,6 +94,7 @@ class SubbandErleEstimator { std::vector> erle_; // ERLE lowered during render onsets. std::vector> erle_onset_compensated_; + std::vector> erle_unbounded_; // Estimation of ERLE during render onsets. std::vector> erle_during_onsets_; std::vector> coming_onset_; diff --git a/modules/audio_processing/aec3/subtractor.cc b/modules/audio_processing/aec3/subtractor.cc index d10e4ffc52..2eae686752 100644 --- a/modules/audio_processing/aec3/subtractor.cc +++ b/modules/audio_processing/aec3/subtractor.cc @@ -91,7 +91,20 @@ Subtractor::Subtractor(const EchoCanceller3Config& config, std::vector(GetTimeDomainLength(std::max( config_.filter.refined_initial.length_blocks, config_.filter.refined.length_blocks)), - 0.f)) { + 0.f)), + coarse_impulse_responses_(0) { + // Set up the storing of coarse impulse responses if data dumping is + // available. + if (ApmDataDumper::IsAvailable()) { + coarse_impulse_responses_.resize(num_capture_channels_); + const size_t filter_size = GetTimeDomainLength( + std::max(config_.filter.coarse_initial.length_blocks, + config_.filter.coarse.length_blocks)); + for (std::vector& impulse_response : coarse_impulse_responses_) { + impulse_response.resize(filter_size, 0.f); + } + } + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { refined_filters_[ch] = std::make_unique( config_.filter.refined.length_blocks, @@ -285,7 +298,14 @@ void Subtractor::Process(const RenderBuffer& render_buffer, config_.filter.coarse_reset_hangover_blocks; } - coarse_filter_[ch]->Adapt(render_buffer, G); + if (ApmDataDumper::IsAvailable()) { + RTC_DCHECK_LT(ch, coarse_impulse_responses_.size()); + coarse_filter_[ch]->Adapt(render_buffer, G, + &coarse_impulse_responses_[ch]); + } else { + coarse_filter_[ch]->Adapt(render_buffer, G); + } + if (ch == 0) { data_dumper_->DumpRaw("aec3_subtractor_G_coarse", G.re); data_dumper_->DumpRaw("aec3_subtractor_G_coarse", G.im); diff --git a/modules/audio_processing/aec3/subtractor.h b/modules/audio_processing/aec3/subtractor.h index 560f6568eb..767e4aad46 100644 --- a/modules/audio_processing/aec3/subtractor.h +++ b/modules/audio_processing/aec3/subtractor.h @@ -78,6 +78,15 @@ class Subtractor { refined_impulse_responses_[0].data(), GetTimeDomainLength( refined_filters_[0]->max_filter_size_partitions()))); + if (ApmDataDumper::IsAvailable()) { + RTC_DCHECK_GT(coarse_impulse_responses_.size(), 0); + data_dumper_->DumpRaw( + "aec3_subtractor_h_coarse", + rtc::ArrayView( + coarse_impulse_responses_[0].data(), + GetTimeDomainLength( + coarse_filter_[0]->max_filter_size_partitions()))); + } refined_filters_[0]->DumpFilter("aec3_subtractor_H_refined"); coarse_filter_[0]->DumpFilter("aec3_subtractor_H_coarse"); @@ -132,6 +141,7 @@ class Subtractor { std::vector>> refined_frequency_responses_; std::vector> refined_impulse_responses_; + std::vector> coarse_impulse_responses_; }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/suppression_gain.cc b/modules/audio_processing/aec3/suppression_gain.cc index 5b01c52908..6405d71c2d 100644 --- a/modules/audio_processing/aec3/suppression_gain.cc +++ b/modules/audio_processing/aec3/suppression_gain.cc @@ -23,10 +23,15 @@ #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" +#include "system_wrappers/include/field_trial.h" namespace webrtc { namespace { +bool UseUnboundedEchoSpectrum() { + return field_trial::IsEnabled("WebRTC-Aec3UseUnboundedEchoSpectrum"); +} + void 
LimitLowFrequencyGains(std::array* gain) { // Limit the low frequency gains to avoid the impact of the high-pass filter // on the lower-frequency gain influencing the overall achieved gain. @@ -230,16 +235,20 @@ void SuppressionGain::GetMinGain( min_gain[k] = std::min(min_gain[k], 1.f); } - const bool is_nearend_state = dominant_nearend_detector_->IsNearendState(); - for (size_t k = 0; k < 6; ++k) { - const auto& dec = is_nearend_state ? nearend_params_.max_dec_factor_lf - : normal_params_.max_dec_factor_lf; - - // Make sure the gains of the low frequencies do not decrease too - // quickly after strong nearend. - if (last_nearend[k] > last_echo[k]) { - min_gain[k] = std::max(min_gain[k], last_gain_[k] * dec); - min_gain[k] = std::min(min_gain[k], 1.f); + if (!initial_state_ || + config_.suppressor.lf_smoothing_during_initial_phase) { + const float& dec = dominant_nearend_detector_->IsNearendState() + ? nearend_params_.max_dec_factor_lf + : normal_params_.max_dec_factor_lf; + + for (int k = 0; k <= config_.suppressor.last_lf_smoothing_band; ++k) { + // Make sure the gains of the low frequencies do not decrease too + // quickly after strong nearend. + if (last_nearend[k] > last_echo[k] || + k <= config_.suppressor.last_permanent_lf_smoothing_band) { + min_gain[k] = std::max(min_gain[k], last_gain_[k] * dec); + min_gain[k] = std::min(min_gain[k], 1.f); + } } } } else { @@ -333,8 +342,13 @@ SuppressionGain::SuppressionGain(const EchoCanceller3Config& config, num_capture_channels_, aec3::MovingAverage(kFftLengthBy2Plus1, config.suppressor.nearend_average_blocks)), - nearend_params_(config_.suppressor.nearend_tuning), - normal_params_(config_.suppressor.normal_tuning) { + nearend_params_(config_.suppressor.last_lf_band, + config_.suppressor.first_hf_band, + config_.suppressor.nearend_tuning), + normal_params_(config_.suppressor.last_lf_band, + config_.suppressor.first_hf_band, + config_.suppressor.normal_tuning), + use_unbounded_echo_spectrum_(UseUnboundedEchoSpectrum()) { RTC_DCHECK_LT(0, state_change_duration_blocks_); last_gain_.fill(1.f); if (config_.suppressor.use_subband_nearend_detection) { @@ -355,6 +369,8 @@ void SuppressionGain::GetGain( rtc::ArrayView> echo_spectrum, rtc::ArrayView> residual_echo_spectrum, + rtc::ArrayView> + residual_echo_spectrum_unbounded, rtc::ArrayView> comfort_noise_spectrum, const RenderSignalAnalyzer& render_signal_analyzer, @@ -366,8 +382,13 @@ void SuppressionGain::GetGain( RTC_DCHECK(high_bands_gain); RTC_DCHECK(low_band_gain); + // Choose residual echo spectrum for the dominant nearend detector. + const auto echo = use_unbounded_echo_spectrum_ + ? residual_echo_spectrum_unbounded + : residual_echo_spectrum; + // Update the nearend state selection. - dominant_nearend_detector_->Update(nearend_spectrum, residual_echo_spectrum, + dominant_nearend_detector_->Update(nearend_spectrum, echo, comfort_noise_spectrum, initial_state_); // Compute gain for the lower band. 
@@ -383,6 +404,9 @@ void SuppressionGain::GetGain( *high_bands_gain = UpperBandsGain(echo_spectrum, comfort_noise_spectrum, narrow_peak_band, aec_state.SaturatedEcho(), render, *low_band_gain); + + data_dumper_->DumpRaw("aec3_dominant_nearend", + dominant_nearend_detector_->IsNearendState()); } void SuppressionGain::SetInitialState(bool state) { @@ -419,23 +443,23 @@ bool SuppressionGain::LowNoiseRenderDetector::Detect( } SuppressionGain::GainParameters::GainParameters( + int last_lf_band, + int first_hf_band, const EchoCanceller3Config::Suppressor::Tuning& tuning) : max_inc_factor(tuning.max_inc_factor), max_dec_factor_lf(tuning.max_dec_factor_lf) { // Compute per-band masking thresholds. - constexpr size_t kLastLfBand = 5; - constexpr size_t kFirstHfBand = 8; - RTC_DCHECK_LT(kLastLfBand, kFirstHfBand); + RTC_DCHECK_LT(last_lf_band, first_hf_band); auto& lf = tuning.mask_lf; auto& hf = tuning.mask_hf; RTC_DCHECK_LT(lf.enr_transparent, lf.enr_suppress); RTC_DCHECK_LT(hf.enr_transparent, hf.enr_suppress); - for (size_t k = 0; k < kFftLengthBy2Plus1; k++) { + for (int k = 0; k < static_cast(kFftLengthBy2Plus1); k++) { float a; - if (k <= kLastLfBand) { + if (k <= last_lf_band) { a = 0.f; - } else if (k < kFirstHfBand) { - a = (k - kLastLfBand) / static_cast(kFirstHfBand - kLastLfBand); + } else if (k < first_hf_band) { + a = (k - last_lf_band) / static_cast(first_hf_band - last_lf_band); } else { a = 1.f; } diff --git a/modules/audio_processing/aec3/suppression_gain.h b/modules/audio_processing/aec3/suppression_gain.h index d049baeaaf..7c4a1c9f7d 100644 --- a/modules/audio_processing/aec3/suppression_gain.h +++ b/modules/audio_processing/aec3/suppression_gain.h @@ -42,6 +42,8 @@ class SuppressionGain { rtc::ArrayView> echo_spectrum, rtc::ArrayView> residual_echo_spectrum, + rtc::ArrayView> + residual_echo_spectrum_unbounded, rtc::ArrayView> comfort_noise_spectrum, const RenderSignalAnalyzer& render_signal_analyzer, @@ -103,6 +105,8 @@ class SuppressionGain { struct GainParameters { explicit GainParameters( + int last_lf_band, + int first_hf_band, const EchoCanceller3Config::Suppressor::Tuning& tuning); const float max_inc_factor; const float max_dec_factor_lf; @@ -126,6 +130,9 @@ class SuppressionGain { std::vector nearend_smoothers_; const GainParameters nearend_params_; const GainParameters normal_params_; + // Determines if the dominant nearend detector uses the unbounded residual + // echo spectrum. + const bool use_unbounded_echo_spectrum_; std::unique_ptr dominant_nearend_detector_; RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionGain); diff --git a/modules/audio_processing/aec3/suppression_gain_unittest.cc b/modules/audio_processing/aec3/suppression_gain_unittest.cc index 26bfc24ebb..999b0f27ab 100644 --- a/modules/audio_processing/aec3/suppression_gain_unittest.cc +++ b/modules/audio_processing/aec3/suppression_gain_unittest.cc @@ -26,29 +26,30 @@ namespace aec3 { // Verifies that the check for non-null output gains works. 
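Note: the `GainParameters` constructor now takes the LF/HF band limits from the config instead of the hard-coded `kLastLfBand = 5` and `kFirstHfBand = 8`. The per-band blend factor `a` it computes is 0 at or below the last LF band, 1 at or above the first HF band, and ramps linearly in between; a small sketch with the values produced for the old defaults (the standalone function is illustrative):

#include <cstdio>

// Blend factor between the low- and high-frequency tuning, as computed in
// GainParameters.
float BlendFactor(int k, int last_lf_band, int first_hf_band) {
  if (k <= last_lf_band) {
    return 0.f;
  }
  if (k >= first_hf_band) {
    return 1.f;
  }
  return static_cast<float>(k - last_lf_band) /
         static_cast<float>(first_hf_band - last_lf_band);
}

int main() {
  // With the previous hard-coded limits (5 and 8): bands 0-5 -> 0,
  // band 6 -> 0.333, band 7 -> 0.667, bands 8 and above -> 1.
  for (int k = 4; k <= 9; ++k) {
    std::printf("band %d: a = %.3f\n", k, BlendFactor(k, 5, 8));
  }
  return 0;
}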
TEST(SuppressionGainDeathTest, NullOutputGains) { - std::vector> E2(1, {0.f}); - std::vector> R2(1, {0.f}); + std::vector> E2(1, {0.0f}); + std::vector> R2(1, {0.0f}); + std::vector> R2_unbounded(1, {0.0f}); std::vector> S2(1); - std::vector> N2(1, {0.f}); + std::vector> N2(1, {0.0f}); for (auto& S2_k : S2) { - S2_k.fill(.1f); + S2_k.fill(0.1f); } FftData E; FftData Y; - E.re.fill(0.f); - E.im.fill(0.f); - Y.re.fill(0.f); - Y.im.fill(0.f); + E.re.fill(0.0f); + E.im.fill(0.0f); + Y.re.fill(0.0f); + Y.im.fill(0.0f); float high_bands_gain; AecState aec_state(EchoCanceller3Config{}, 1); EXPECT_DEATH( SuppressionGain(EchoCanceller3Config{}, DetectOptimization(), 16000, 1) - .GetGain(E2, S2, R2, N2, + .GetGain(E2, S2, R2, R2_unbounded, N2, RenderSignalAnalyzer((EchoCanceller3Config{})), aec_state, std::vector>>( 3, std::vector>( - 1, std::vector(kBlockSize, 0.f))), + 1, std::vector(kBlockSize, 0.0f))), false, &high_bands_gain, nullptr), ""); } @@ -67,15 +68,17 @@ TEST(SuppressionGain, BasicGainComputation) { float high_bands_gain; std::vector> E2(kNumCaptureChannels); std::vector> S2(kNumCaptureChannels, - {0.f}); + {0.0f}); std::vector> Y2(kNumCaptureChannels); std::vector> R2(kNumCaptureChannels); + std::vector> R2_unbounded( + kNumCaptureChannels); std::vector> N2(kNumCaptureChannels); std::array g; std::vector output(kNumCaptureChannels); std::vector>> x( kNumBands, std::vector>( - kNumRenderChannels, std::vector(kBlockSize, 0.f))); + kNumRenderChannels, std::vector(kBlockSize, 0.0f))); EchoCanceller3Config config; AecState aec_state(config, kNumCaptureChannels); ApmDataDumper data_dumper(42); @@ -89,8 +92,9 @@ TEST(SuppressionGain, BasicGainComputation) { for (size_t ch = 0; ch < kNumCaptureChannels; ++ch) { E2[ch].fill(10.f); Y2[ch].fill(10.f); - R2[ch].fill(.1f); - N2[ch].fill(100.f); + R2[ch].fill(0.1f); + R2_unbounded[ch].fill(0.1f); + N2[ch].fill(100.0f); } for (auto& subtractor_output : output) { subtractor_output.Reset(); @@ -107,17 +111,18 @@ TEST(SuppressionGain, BasicGainComputation) { aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(), subtractor.FilterImpulseResponses(), *render_delay_buffer->GetRenderBuffer(), E2, Y2, output); - suppression_gain.GetGain(E2, S2, R2, N2, analyzer, aec_state, x, false, - &high_bands_gain, &g); + suppression_gain.GetGain(E2, S2, R2, R2_unbounded, N2, analyzer, aec_state, + x, false, &high_bands_gain, &g); } std::for_each(g.begin(), g.end(), - [](float a) { EXPECT_NEAR(1.f, a, 0.001); }); + [](float a) { EXPECT_NEAR(1.0f, a, 0.001f); }); // Ensure that a strong nearend is detected to mask any echoes. for (size_t ch = 0; ch < kNumCaptureChannels; ++ch) { E2[ch].fill(100.f); Y2[ch].fill(100.f); R2[ch].fill(0.1f); + R2_unbounded[ch].fill(0.1f); S2[ch].fill(0.1f); N2[ch].fill(0.f); } @@ -126,22 +131,23 @@ TEST(SuppressionGain, BasicGainComputation) { aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(), subtractor.FilterImpulseResponses(), *render_delay_buffer->GetRenderBuffer(), E2, Y2, output); - suppression_gain.GetGain(E2, S2, R2, N2, analyzer, aec_state, x, false, - &high_bands_gain, &g); + suppression_gain.GetGain(E2, S2, R2, R2_unbounded, N2, analyzer, aec_state, + x, false, &high_bands_gain, &g); } std::for_each(g.begin(), g.end(), - [](float a) { EXPECT_NEAR(1.f, a, 0.001); }); + [](float a) { EXPECT_NEAR(1.0f, a, 0.001f); }); // Add a strong echo to one of the channels and ensure that it is suppressed. 
- E2[1].fill(1000000000.f); - R2[1].fill(10000000000000.f); + E2[1].fill(1000000000.0f); + R2[1].fill(10000000000000.0f); + R2_unbounded[1].fill(10000000000000.0f); for (int k = 0; k < 10; ++k) { - suppression_gain.GetGain(E2, S2, R2, N2, analyzer, aec_state, x, false, - &high_bands_gain, &g); + suppression_gain.GetGain(E2, S2, R2, R2_unbounded, N2, analyzer, aec_state, + x, false, &high_bands_gain, &g); } std::for_each(g.begin(), g.end(), - [](float a) { EXPECT_NEAR(0.f, a, 0.001); }); + [](float a) { EXPECT_NEAR(0.0f, a, 0.001f); }); } } // namespace aec3 diff --git a/modules/audio_processing/aec3/transparent_mode.cc b/modules/audio_processing/aec3/transparent_mode.cc index 7cfa3e8eae..489f53f4f1 100644 --- a/modules/audio_processing/aec3/transparent_mode.cc +++ b/modules/audio_processing/aec3/transparent_mode.cc @@ -11,6 +11,7 @@ #include "modules/audio_processing/aec3/transparent_mode.h" #include "rtc_base/checks.h" +#include "rtc_base/logging.h" #include "system_wrappers/include/field_trial.h" namespace webrtc { @@ -228,11 +229,14 @@ class LegacyTransparentModeImpl : public TransparentMode { std::unique_ptr TransparentMode::Create( const EchoCanceller3Config& config) { if (config.ep_strength.bounded_erl || DeactivateTransparentMode()) { + RTC_LOG(LS_INFO) << "AEC3 Transparent Mode: Disabled"; return nullptr; } if (ActivateTransparentModeHmm()) { + RTC_LOG(LS_INFO) << "AEC3 Transparent Mode: HMM"; return std::make_unique(); } + RTC_LOG(LS_INFO) << "AEC3 Transparent Mode: Legacy"; return std::make_unique(config); } diff --git a/modules/audio_processing/agc/BUILD.gn b/modules/audio_processing/agc/BUILD.gn index 3b2b205385..4bb8c5494b 100644 --- a/modules/audio_processing/agc/BUILD.gn +++ b/modules/audio_processing/agc/BUILD.gn @@ -20,6 +20,7 @@ rtc_library("agc") { configs += [ "..:apm_debug_dump" ] deps = [ ":clipping_predictor", + ":clipping_predictor_evaluator", ":gain_control_interface", ":gain_map", ":level_estimation", @@ -58,6 +59,18 @@ rtc_library("clipping_predictor") { absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } +rtc_library("clipping_predictor_evaluator") { + sources = [ + "clipping_predictor_evaluator.cc", + "clipping_predictor_evaluator.h", + ] + deps = [ + "../../../rtc_base:checks", + "../../../rtc_base:logging", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + rtc_library("clipping_predictor_level_buffer") { sources = [ "clipping_predictor_level_buffer.cc", @@ -66,6 +79,7 @@ rtc_library("clipping_predictor_level_buffer") { deps = [ "../../../rtc_base:checks", "../../../rtc_base:logging", + "../../../rtc_base:rtc_base_approved", ] absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } @@ -128,6 +142,7 @@ if (rtc_include_tests) { testonly = true sources = [ "agc_manager_direct_unittest.cc", + "clipping_predictor_evaluator_unittest.cc", "clipping_predictor_level_buffer_unittest.cc", "clipping_predictor_unittest.cc", "loudness_histogram_unittest.cc", @@ -138,15 +153,19 @@ if (rtc_include_tests) { deps = [ ":agc", ":clipping_predictor", + ":clipping_predictor_evaluator", ":clipping_predictor_level_buffer", ":gain_control_interface", ":level_estimation", "..:mocks", "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:safe_conversions", "../../../test:field_trial", "../../../test:fileutils", "../../../test:test_support", "//testing/gtest", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } diff --git a/modules/audio_processing/agc/agc_manager_direct.cc 
b/modules/audio_processing/agc/agc_manager_direct.cc index 46304d2819..e2a5b998a4 100644 --- a/modules/audio_processing/agc/agc_manager_direct.cc +++ b/modules/audio_processing/agc/agc_manager_direct.cc @@ -49,6 +49,10 @@ constexpr int kMaxResidualGainChange = 15; // restrictions from clipping events. constexpr int kSurplusCompressionGain = 6; +// History size for the clipping predictor evaluator (unit: number of 10 ms +// frames). +constexpr int kClippingPredictorEvaluatorHistorySize = 32; + using ClippingPredictorConfig = AudioProcessing::Config::GainController1:: AnalogGainController::ClippingPredictor; @@ -129,26 +133,40 @@ float ComputeClippedRatio(const float* const* audio, return static_cast(num_clipped) / (samples_per_channel); } -std::unique_ptr CreateClippingPredictor( - int num_capture_channels, - const ClippingPredictorConfig& config) { - if (config.enabled) { - RTC_LOG(LS_INFO) << "[agc] Clipping prediction enabled."; - switch (config.mode) { - case ClippingPredictorConfig::kClippingEventPrediction: - return CreateClippingEventPredictor(num_capture_channels, config); - case ClippingPredictorConfig::kAdaptiveStepClippingPeakPrediction: - return CreateAdaptiveStepClippingPeakPredictor(num_capture_channels, - config); - case ClippingPredictorConfig::kFixedStepClippingPeakPrediction: - return CreateFixedStepClippingPeakPredictor(num_capture_channels, - config); +void LogClippingPredictorMetrics(const ClippingPredictorEvaluator& evaluator) { + RTC_LOG(LS_INFO) << "Clipping predictor metrics: TP " + << evaluator.true_positives() << " TN " + << evaluator.true_negatives() << " FP " + << evaluator.false_positives() << " FN " + << evaluator.false_negatives(); + const float precision_denominator = + evaluator.true_positives() + evaluator.false_positives(); + const float recall_denominator = + evaluator.true_positives() + evaluator.false_negatives(); + if (precision_denominator > 0 && recall_denominator > 0) { + const float precision = evaluator.true_positives() / precision_denominator; + const float recall = evaluator.true_positives() / recall_denominator; + RTC_LOG(LS_INFO) << "Clipping predictor metrics: P " << precision << " R " + << recall; + const float f1_score_denominator = precision + recall; + if (f1_score_denominator > 0.0f) { + const float f1_score = 2 * precision * recall / f1_score_denominator; + RTC_LOG(LS_INFO) << "Clipping predictor metrics: F1 " << f1_score; + RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.ClippingPredictor.F1Score", + std::round(f1_score * 100.0f), /*min=*/0, + /*max=*/100, + /*bucket_count=*/50); } - } else { - return nullptr; } } +void LogClippingMetrics(int clipping_rate) { + RTC_LOG(LS_INFO) << "Input clipping rate: " << clipping_rate << "%"; + RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.InputClippingRate", + clipping_rate, /*min=*/0, /*max=*/100, + /*bucket_count=*/50); +} + } // namespace MonoAgc::MonoAgc(ApmDataDumper* data_dumper, @@ -418,14 +436,15 @@ void MonoAgc::UpdateCompressor() { int AgcManagerDirect::instance_counter_ = 0; -AgcManagerDirect::AgcManagerDirect(Agc* agc, - int startup_min_level, - int clipped_level_min, - int sample_rate_hz, - int clipped_level_step, - float clipped_ratio_threshold, - int clipped_wait_frames, - const ClippingPredictorConfig& clipping_cfg) +AgcManagerDirect::AgcManagerDirect( + Agc* agc, + int startup_min_level, + int clipped_level_min, + int sample_rate_hz, + int clipped_level_step, + float clipped_ratio_threshold, + int clipped_wait_frames, + const ClippingPredictorConfig& clipping_config) : 
AgcManagerDirect(/*num_capture_channels*/ 1, startup_min_level, clipped_level_min, @@ -434,21 +453,22 @@ AgcManagerDirect::AgcManagerDirect(Agc* agc, clipped_level_step, clipped_ratio_threshold, clipped_wait_frames, - clipping_cfg) { + clipping_config) { RTC_DCHECK(channel_agcs_[0]); RTC_DCHECK(agc); channel_agcs_[0]->set_agc(agc); } -AgcManagerDirect::AgcManagerDirect(int num_capture_channels, - int startup_min_level, - int clipped_level_min, - bool disable_digital_adaptive, - int sample_rate_hz, - int clipped_level_step, - float clipped_ratio_threshold, - int clipped_wait_frames, - const ClippingPredictorConfig& clipping_cfg) +AgcManagerDirect::AgcManagerDirect( + int num_capture_channels, + int startup_min_level, + int clipped_level_min, + bool disable_digital_adaptive, + int sample_rate_hz, + int clipped_level_step, + float clipped_ratio_threshold, + int clipped_wait_frames, + const ClippingPredictorConfig& clipping_config) : data_dumper_( new ApmDataDumper(rtc::AtomicOps::Increment(&instance_counter_))), use_min_channel_level_(!UseMaxAnalogChannelLevel()), @@ -463,7 +483,13 @@ AgcManagerDirect::AgcManagerDirect(int num_capture_channels, channel_agcs_(num_capture_channels), new_compressions_to_set_(num_capture_channels), clipping_predictor_( - CreateClippingPredictor(num_capture_channels, clipping_cfg)) { + CreateClippingPredictor(num_capture_channels, clipping_config)), + use_clipping_predictor_step_(!!clipping_predictor_ && + clipping_config.use_predicted_step), + clipping_predictor_evaluator_(kClippingPredictorEvaluatorHistorySize), + clipping_predictor_log_counter_(0), + clipping_rate_log_(0.0f), + clipping_rate_log_counter_(0) { const int min_mic_level = GetMinMicLevel(); for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) { ApmDataDumper* data_dumper_ch = ch == 0 ? data_dumper_.get() : nullptr; @@ -492,6 +518,10 @@ void AgcManagerDirect::Initialize() { capture_output_used_ = true; AggregateChannelLevels(); + clipping_predictor_evaluator_.Reset(); + clipping_predictor_log_counter_ = 0; + clipping_rate_log_ = 0.0f; + clipping_rate_log_counter_ = 0; } void AgcManagerDirect::SetupDigitalGainControl( @@ -531,12 +561,7 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio, if (!!clipping_predictor_) { AudioFrameView frame = AudioFrameView( audio, num_capture_channels_, static_cast(samples_per_channel)); - clipping_predictor_->Process(frame); - } - - if (frames_since_clipped_ < clipped_wait_frames_) { - ++frames_since_clipped_; - return; + clipping_predictor_->Analyze(frame); } // Check for clipped samples, as the AGC has difficulty detecting pitch @@ -550,6 +575,20 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio, // gain is increased, through SetMaxLevel(). 
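Note: `LogClippingPredictorMetrics()` above derives precision, recall and an F1 score from the evaluator's TP/FP/FN counts before logging them to a histogram. The arithmetic is standard, with one worked example (the counts are chosen purely for illustration):

#include <cstdio>

// Precision = TP / (TP + FP), Recall = TP / (TP + FN),
// F1 = 2 * P * R / (P + R); each is undefined when its denominator is zero,
// which is why the code above guards every division.
void LogF1(int tp, int fp, int fn) {
  const float precision_den = tp + fp;
  const float recall_den = tp + fn;
  if (precision_den > 0 && recall_den > 0) {
    const float precision = tp / precision_den;
    const float recall = tp / recall_den;
    if (precision + recall > 0.f) {
      const float f1 = 2.f * precision * recall / (precision + recall);
      std::printf("P=%.2f R=%.2f F1=%.2f\n", precision, recall, f1);
    }
  }
}

int main() {
  // Example: 30 true positives, 10 false positives, 20 false negatives
  // -> P = 0.75, R = 0.60, F1 = 2 * 0.75 * 0.60 / 1.35 = 0.667.
  LogF1(/*tp=*/30, /*fp=*/10, /*fn=*/20);
  return 0;
}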
float clipped_ratio = ComputeClippedRatio(audio, num_capture_channels_, samples_per_channel); + clipping_rate_log_ = std::max(clipped_ratio, clipping_rate_log_); + clipping_rate_log_counter_++; + constexpr int kNumFramesIn30Seconds = 3000; + if (clipping_rate_log_counter_ == kNumFramesIn30Seconds) { + LogClippingMetrics(std::round(100.0f * clipping_rate_log_)); + clipping_rate_log_ = 0.0f; + clipping_rate_log_counter_ = 0; + } + + if (frames_since_clipped_ < clipped_wait_frames_) { + ++frames_since_clipped_; + return; + } + const bool clipping_detected = clipped_ratio > clipped_ratio_threshold_; bool clipping_predicted = false; int predicted_step = 0; @@ -558,11 +597,26 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio, const auto step = clipping_predictor_->EstimateClippedLevelStep( channel, stream_analog_level_, clipped_level_step_, channel_agcs_[channel]->min_mic_level(), kMaxMicLevel); - if (step.has_value()) { + if (use_clipping_predictor_step_ && step.has_value()) { predicted_step = std::max(predicted_step, step.value()); clipping_predicted = true; } } + // Clipping prediction evaluation. + absl::optional prediction_interval = + clipping_predictor_evaluator_.Observe(clipping_detected, + clipping_predicted); + if (prediction_interval.has_value()) { + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.Agc.ClippingPredictor.PredictionInterval", + prediction_interval.value(), /*min=*/0, + /*max=*/49, /*bucket_count=*/50); + } + clipping_predictor_log_counter_++; + if (clipping_predictor_log_counter_ == kNumFramesIn30Seconds) { + LogClippingPredictorMetrics(clipping_predictor_evaluator_); + clipping_predictor_log_counter_ = 0; + } } if (clipping_detected || clipping_predicted) { int step = clipped_level_step_; @@ -580,6 +634,7 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio, frames_since_clipped_ = 0; if (!!clipping_predictor_) { clipping_predictor_->Reset(); + clipping_predictor_evaluator_.Reset(); } } AggregateChannelLevels(); @@ -663,8 +718,4 @@ void AgcManagerDirect::AggregateChannelLevels() { } } -bool AgcManagerDirect::clipping_predictor_enabled() const { - return !!clipping_predictor_; -} - } // namespace webrtc diff --git a/modules/audio_processing/agc/agc_manager_direct.h b/modules/audio_processing/agc/agc_manager_direct.h index 55a7ffa2eb..d80a255ced 100644 --- a/modules/audio_processing/agc/agc_manager_direct.h +++ b/modules/audio_processing/agc/agc_manager_direct.h @@ -16,6 +16,7 @@ #include "absl/types/optional.h" #include "modules/audio_processing/agc/agc.h" #include "modules/audio_processing/agc/clipping_predictor.h" +#include "modules/audio_processing/agc/clipping_predictor_evaluator.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/gtest_prod_util.h" @@ -41,16 +42,17 @@ class AgcManagerDirect final { // samples required to declare a clipping event, limited to (0.f, 1.f). // `clipped_wait_frames` is the time in frames to wait after a clipping event // before checking again, limited to values higher than 0. 
- AgcManagerDirect(int num_capture_channels, - int startup_min_level, - int clipped_level_min, - bool disable_digital_adaptive, - int sample_rate_hz, - int clipped_level_step, - float clipped_ratio_threshold, - int clipped_wait_frames, - const AudioProcessing::Config::GainController1:: - AnalogGainController::ClippingPredictor& clipping_cfg); + AgcManagerDirect( + int num_capture_channels, + int startup_min_level, + int clipped_level_min, + bool disable_digital_adaptive, + int sample_rate_hz, + int clipped_level_step, + float clipped_ratio_threshold, + int clipped_wait_frames, + const AudioProcessing::Config::GainController1::AnalogGainController:: + ClippingPredictor& clipping_config); ~AgcManagerDirect(); AgcManagerDirect(const AgcManagerDirect&) = delete; @@ -72,12 +74,17 @@ class AgcManagerDirect final { int num_channels() const { return num_capture_channels_; } int sample_rate_hz() const { return sample_rate_hz_; } - // Returns true if clipping prediction was set to be used in ctor. - bool clipping_predictor_enabled() const; - // If available, returns a new compression gain for the digital gain control. absl::optional GetDigitalComressionGain(); + // Returns true if clipping prediction is enabled. + bool clipping_predictor_enabled() const { return !!clipping_predictor_; } + + // Returns true if clipping prediction is used to adjust the analog gain. + bool use_clipping_predictor_step() const { + return use_clipping_predictor_step_; + } + private: friend class AgcManagerDirectTest; @@ -99,20 +106,24 @@ class AgcManagerDirect final { ClippingParametersVerified); FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectStandaloneTest, DisableClippingPredictorDoesNotLowerVolume); + FRIEND_TEST_ALL_PREFIXES( + AgcManagerDirectStandaloneTest, + EnableClippingPredictorWithUnusedPredictedStepDoesNotLowerVolume); FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectStandaloneTest, EnableClippingPredictorLowersVolume); // Dependency injection for testing. Don't delete |agc| as the memory is owned // by the manager. 
- AgcManagerDirect(Agc* agc, - int startup_min_level, - int clipped_level_min, - int sample_rate_hz, - int clipped_level_step, - float clipped_ratio_threshold, - int clipped_wait_frames, - const AudioProcessing::Config::GainController1:: - AnalogGainController::ClippingPredictor& clipping_cfg); + AgcManagerDirect( + Agc* agc, + int startup_min_level, + int clipped_level_min, + int sample_rate_hz, + int clipped_level_step, + float clipped_ratio_threshold, + int clipped_wait_frames, + const AudioProcessing::Config::GainController1::AnalogGainController:: + ClippingPredictor& clipping_config); void AnalyzePreProcess(const float* const* audio, size_t samples_per_channel); @@ -138,6 +149,11 @@ class AgcManagerDirect final { std::vector> new_compressions_to_set_; const std::unique_ptr clipping_predictor_; + const bool use_clipping_predictor_step_; + ClippingPredictorEvaluator clipping_predictor_evaluator_; + int clipping_predictor_log_counter_; + float clipping_rate_log_; + int clipping_rate_log_counter_; }; class MonoAgc { diff --git a/modules/audio_processing/agc/agc_manager_direct_unittest.cc b/modules/audio_processing/agc/agc_manager_direct_unittest.cc index 07bb04022b..bb284f9abc 100644 --- a/modules/audio_processing/agc/agc_manager_direct_unittest.cc +++ b/modules/audio_processing/agc/agc_manager_direct_unittest.cc @@ -907,33 +907,62 @@ TEST(AgcManagerDirectStandaloneTest, kClippedWaitFrames, default_config); manager->Initialize(); EXPECT_FALSE(manager->clipping_predictor_enabled()); + EXPECT_FALSE(manager->use_clipping_predictor_step()); +} + +TEST(AgcManagerDirectStandaloneTest, ClippingPredictorDisabledByDefault) { + constexpr ClippingPredictorConfig kDefaultConfig; + EXPECT_FALSE(kDefaultConfig.enabled); } TEST(AgcManagerDirectStandaloneTest, EnableClippingPredictorEnablesClippingPredictor) { - const ClippingPredictorConfig config( - {/*enabled=*/true, ClippingPredictorConfig::kClippingEventPrediction, - /*window_length=*/5, /*reference_window_length=*/5, - /*reference_window_delay=*/5, /*clipping_threshold=*/-1.0f, - /*crest_factor_margin=*/3.0f}); + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + ClippingPredictorConfig config; + config.enabled = true; + config.use_predicted_step = true; std::unique_ptr manager = CreateAgcManagerDirect( kInitialVolume, kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, config); manager->Initialize(); EXPECT_TRUE(manager->clipping_predictor_enabled()); + EXPECT_TRUE(manager->use_clipping_predictor_step()); } TEST(AgcManagerDirectStandaloneTest, DisableClippingPredictorDoesNotLowerVolume) { - const ClippingPredictorConfig default_config; - EXPECT_FALSE(default_config.enabled); + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. 
+ constexpr ClippingPredictorConfig kConfig{/*enabled=*/false}; AgcManagerDirect manager(new ::testing::NiceMock(), kInitialVolume, kClippedMin, kSampleRateHz, kClippedLevelStep, - kClippedRatioThreshold, kClippedWaitFrames, - default_config); + kClippedRatioThreshold, kClippedWaitFrames, kConfig); manager.Initialize(); manager.set_stream_analog_level(/*level=*/255); EXPECT_FALSE(manager.clipping_predictor_enabled()); + EXPECT_FALSE(manager.use_clipping_predictor_step()); + EXPECT_EQ(manager.stream_analog_level(), 255); + manager.Process(nullptr); + CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager); + EXPECT_EQ(manager.stream_analog_level(), 255); + CallPreProcessAudioBuffer(/*num_calls=*/300, /*peak_ratio=*/0.99f, manager); + EXPECT_EQ(manager.stream_analog_level(), 255); + CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager); + EXPECT_EQ(manager.stream_analog_level(), 255); +} + +TEST(AgcManagerDirectStandaloneTest, + EnableClippingPredictorWithUnusedPredictedStepDoesNotLowerVolume) { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + ClippingPredictorConfig config; + config.enabled = true; + config.use_predicted_step = false; + AgcManagerDirect manager(new ::testing::NiceMock(), kInitialVolume, + kClippedMin, kSampleRateHz, kClippedLevelStep, + kClippedRatioThreshold, kClippedWaitFrames, config); + manager.Initialize(); + manager.set_stream_analog_level(/*level=*/255); + EXPECT_TRUE(manager.clipping_predictor_enabled()); + EXPECT_FALSE(manager.use_clipping_predictor_step()); EXPECT_EQ(manager.stream_analog_level(), 255); manager.Process(nullptr); CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager); @@ -945,17 +974,17 @@ TEST(AgcManagerDirectStandaloneTest, } TEST(AgcManagerDirectStandaloneTest, EnableClippingPredictorLowersVolume) { - const ClippingPredictorConfig config( - {/*enabled=*/true, ClippingPredictorConfig::kClippingEventPrediction, - /*window_length=*/5, /*reference_window_length=*/5, - /*reference_window_delay=*/5, /*clipping_threshold=*/-1.0f, - /*crest_factor_margin=*/3.0f}); + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + ClippingPredictorConfig config; + config.enabled = true; + config.use_predicted_step = true; AgcManagerDirect manager(new ::testing::NiceMock(), kInitialVolume, kClippedMin, kSampleRateHz, kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, config); manager.Initialize(); manager.set_stream_analog_level(/*level=*/255); EXPECT_TRUE(manager.clipping_predictor_enabled()); + EXPECT_TRUE(manager.use_clipping_predictor_step()); EXPECT_EQ(manager.stream_analog_level(), 255); manager.Process(nullptr); CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager); diff --git a/modules/audio_processing/agc/clipping_predictor.cc b/modules/audio_processing/agc/clipping_predictor.cc index deb95f633e..982bbca2ee 100644 --- a/modules/audio_processing/agc/clipping_predictor.cc +++ b/modules/audio_processing/agc/clipping_predictor.cc @@ -25,9 +25,6 @@ namespace { constexpr int kClippingPredictorMaxGainChange = 15; -using ClippingPredictorConfig = AudioProcessing::Config::GainController1:: - AnalogGainController::ClippingPredictor; - // Estimates the new level from the gain error; a copy of the function // `LevelFromGainError` in agc_manager_direct.cc. 
int LevelFromGainError(int gain_error, @@ -110,7 +107,7 @@ class ClippingEventPredictor : public ClippingPredictor { // Analyzes a frame of audio and stores the framewise metrics in // `ch_buffers_`. - void Process(const AudioFrameView& frame) { + void Analyze(const AudioFrameView& frame) { const int num_channels = frame.num_channels(); RTC_DCHECK_EQ(num_channels, ch_buffers_.size()); const int samples_per_channel = frame.samples_per_channel(); @@ -249,7 +246,7 @@ class ClippingPeakPredictor : public ClippingPredictor { // Analyzes a frame of audio and stores the framewise metrics in // `ch_buffers_`. - void Process(const AudioFrameView& frame) { + void Analyze(const AudioFrameView& frame) { const int num_channels = frame.num_channels(); RTC_DCHECK_EQ(num_channels, ch_buffers_.size()); const int samples_per_channel = frame.samples_per_channel(); @@ -352,31 +349,35 @@ class ClippingPeakPredictor : public ClippingPredictor { } // namespace -std::unique_ptr CreateClippingEventPredictor( +std::unique_ptr CreateClippingPredictor( int num_channels, - const ClippingPredictorConfig& config) { - return std::make_unique( - num_channels, config.window_length, config.reference_window_length, - config.reference_window_delay, config.clipping_threshold, - config.crest_factor_margin); -} - -std::unique_ptr CreateFixedStepClippingPeakPredictor( - int num_channels, - const ClippingPredictorConfig& config) { - return std::make_unique( - num_channels, config.window_length, config.reference_window_length, - config.reference_window_delay, config.clipping_threshold, - /*adaptive_step_estimation=*/false); -} - -std::unique_ptr CreateAdaptiveStepClippingPeakPredictor( - int num_channels, - const ClippingPredictorConfig& config) { - return std::make_unique( - num_channels, config.window_length, config.reference_window_length, - config.reference_window_delay, config.clipping_threshold, - /*adaptive_step_estimation=*/true); + const AudioProcessing::Config::GainController1::AnalogGainController:: + ClippingPredictor& config) { + if (!config.enabled) { + RTC_LOG(LS_INFO) << "[agc] Clipping prediction disabled."; + return nullptr; + } + RTC_LOG(LS_INFO) << "[agc] Clipping prediction enabled."; + using ClippingPredictorMode = AudioProcessing::Config::GainController1:: + AnalogGainController::ClippingPredictor::Mode; + switch (config.mode) { + case ClippingPredictorMode::kClippingEventPrediction: + return std::make_unique( + num_channels, config.window_length, config.reference_window_length, + config.reference_window_delay, config.clipping_threshold, + config.crest_factor_margin); + case ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction: + return std::make_unique( + num_channels, config.window_length, config.reference_window_length, + config.reference_window_delay, config.clipping_threshold, + /*adaptive_step_estimation=*/true); + case ClippingPredictorMode::kFixedStepClippingPeakPrediction: + return std::make_unique( + num_channels, config.window_length, config.reference_window_length, + config.reference_window_delay, config.clipping_threshold, + /*adaptive_step_estimation=*/false); + } + RTC_NOTREACHED(); } } // namespace webrtc diff --git a/modules/audio_processing/agc/clipping_predictor.h b/modules/audio_processing/agc/clipping_predictor.h index 0fe98273fb..ee2b6ef1e7 100644 --- a/modules/audio_processing/agc/clipping_predictor.h +++ b/modules/audio_processing/agc/clipping_predictor.h @@ -20,19 +20,26 @@ namespace webrtc { -// Frame-wise clipping prediction and clipped level step estimation. 
Processing -// is done in two steps: Calling `Process` analyses a frame of audio and stores -// the frame metrics and `EstimateClippedLevelStep` produces an estimate for the -// required analog gain level decrease if clipping is predicted. +// Frame-wise clipping prediction and clipped level step estimation. Analyzes +// 10 ms multi-channel frames and estimates an analog mic level decrease step +// to possibly avoid clipping when predicted. `Analyze()` and +// `EstimateClippedLevelStep()` can be called in any order. class ClippingPredictor { public: virtual ~ClippingPredictor() = default; virtual void Reset() = 0; - // Estimates the analog gain clipped level step for channel `channel`. - // Returns absl::nullopt if clipping is not predicted, otherwise returns the - // suggested decrease in the analog gain level. + // Analyzes a 10 ms multi-channel audio frame. + virtual void Analyze(const AudioFrameView& frame) = 0; + + // Predicts if clipping is going to occur for the specified `channel` in the + // near-future and, if so, it returns a recommended analog mic level decrease + // step. Returns absl::nullopt if clipping is not predicted. + // `level` is the current analog mic level, `default_step` is the amount the + // mic level is lowered by the analog controller with every clipping event and + // `min_mic_level` and `max_mic_level` is the range of allowed analog mic + // levels. virtual absl::optional EstimateClippedLevelStep( int channel, int level, @@ -40,27 +47,13 @@ class ClippingPredictor { int min_mic_level, int max_mic_level) const = 0; - // Analyses a frame of audio and stores the resulting metrics in `data_`. - virtual void Process(const AudioFrameView& frame) = 0; }; -// Creates a ClippingPredictor based on crest factor-based clipping event -// prediction. -std::unique_ptr CreateClippingEventPredictor( - int num_channels, - const AudioProcessing::Config::GainController1::AnalogGainController:: - ClippingPredictor& config); - -// Creates a ClippingPredictor based on crest factor-based peak estimation and -// fixed-step clipped level step estimation. -std::unique_ptr CreateFixedStepClippingPeakPredictor( - int num_channels, - const AudioProcessing::Config::GainController1::AnalogGainController:: - ClippingPredictor& config); - -// Creates a ClippingPredictor based on crest factor-based peak estimation and -// adaptive-step clipped level step estimation. -std::unique_ptr CreateAdaptiveStepClippingPeakPredictor( +// Creates a ClippingPredictor based on the provided `config`. When enabled, +// the following must hold for `config`: +// `window_length < reference_window_length + reference_window_delay`. +// Returns `nullptr` if `config.enabled` is false. +std::unique_ptr CreateClippingPredictor( int num_channels, const AudioProcessing::Config::GainController1::AnalogGainController:: ClippingPredictor& config); diff --git a/modules/audio_processing/agc/clipping_predictor_evaluator.cc b/modules/audio_processing/agc/clipping_predictor_evaluator.cc new file mode 100644 index 0000000000..2a4ea922cf --- /dev/null +++ b/modules/audio_processing/agc/clipping_predictor_evaluator.cc @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/agc/clipping_predictor_evaluator.h" + +#include <algorithm> + +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { + +// Returns the index of the oldest item in the ring buffer for a non-empty +// ring buffer with given `size`, `tail` index and `capacity`. +int OldestExpectedDetectionIndex(int size, int tail, int capacity) { + RTC_DCHECK_GT(size, 0); + return tail - size + (tail < size ? capacity : 0); +} + +} // namespace + +ClippingPredictorEvaluator::ClippingPredictorEvaluator(int history_size) + : history_size_(history_size), + ring_buffer_capacity_(history_size + 1), + ring_buffer_(ring_buffer_capacity_), + true_positives_(0), + true_negatives_(0), + false_positives_(0), + false_negatives_(0) { + RTC_DCHECK_GT(history_size_, 0); + Reset(); +} + +ClippingPredictorEvaluator::~ClippingPredictorEvaluator() = default; + +absl::optional<int> ClippingPredictorEvaluator::Observe( + bool clipping_detected, + bool clipping_predicted) { + RTC_DCHECK_GE(ring_buffer_size_, 0); + RTC_DCHECK_LE(ring_buffer_size_, ring_buffer_capacity_); + RTC_DCHECK_GE(ring_buffer_tail_, 0); + RTC_DCHECK_LT(ring_buffer_tail_, ring_buffer_capacity_); + + DecreaseTimesToLive(); + if (clipping_predicted) { + // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed. + Push(/*expected_detection=*/{/*ttl=*/history_size_, /*detected=*/false}); + } + // Clipping is expected if there are expected detections regardless of + // whether all the expected detections have been previously matched - i.e., + // `ExpectedDetection::detected` is true. + const bool clipping_expected = ring_buffer_size_ > 0; + + absl::optional<int> prediction_interval; + if (clipping_expected && clipping_detected) { + prediction_interval = FindEarliestPredictionInterval(); + // Add a true positive for each unexpired expected detection. + const int num_modified_items = MarkExpectedDetectionAsDetected(); + true_positives_ += num_modified_items; + RTC_DCHECK(prediction_interval.has_value() || num_modified_items == 0); + RTC_DCHECK(!prediction_interval.has_value() || num_modified_items > 0); + } else if (clipping_expected && !clipping_detected) { + // Add a false positive if there is one expected detection that has expired + // and that has never been matched before. Note that there is at most one + // unmatched expired detection. + if (HasExpiredUnmatchedExpectedDetection()) { + false_positives_++; + } + } else if (!clipping_expected && clipping_detected) { + false_negatives_++; + } else { + RTC_DCHECK(!clipping_expected && !clipping_detected); + true_negatives_++; + } + return prediction_interval; +} + +void ClippingPredictorEvaluator::Reset() { + // Empty the ring buffer of expected detections. + ring_buffer_tail_ = 0; + ring_buffer_size_ = 0; +} + +// Cost: O(1). +void ClippingPredictorEvaluator::Push(ExpectedDetection value) { + ring_buffer_[ring_buffer_tail_] = value; + ring_buffer_tail_++; + if (ring_buffer_tail_ == ring_buffer_capacity_) { + ring_buffer_tail_ = 0; + } + ring_buffer_size_ = std::min(ring_buffer_capacity_, ring_buffer_size_ + 1); +} + +// Cost: O(N). +void ClippingPredictorEvaluator::DecreaseTimesToLive() { + bool expired_found = false; + for (int i = ring_buffer_tail_ - ring_buffer_size_; i < ring_buffer_tail_; + ++i) { + int index = i >= 0 ?
i : ring_buffer_capacity_ + i; + RTC_DCHECK_GE(index, 0); + RTC_DCHECK_LT(index, ring_buffer_.size()); + RTC_DCHECK_GE(ring_buffer_[index].ttl, 0); + if (ring_buffer_[index].ttl == 0) { + RTC_DCHECK(!expired_found) + << "There must be at most one expired item in the ring buffer."; + expired_found = true; + RTC_DCHECK_EQ(index, OldestExpectedDetectionIndex(ring_buffer_size_, + ring_buffer_tail_, + ring_buffer_capacity_)) + << "The expired item must be the oldest in the ring buffer."; + } + ring_buffer_[index].ttl--; + } + if (expired_found) { + ring_buffer_size_--; + } +} + +// Cost: O(N). +absl::optional ClippingPredictorEvaluator::FindEarliestPredictionInterval() + const { + absl::optional prediction_interval; + for (int i = ring_buffer_tail_ - ring_buffer_size_; i < ring_buffer_tail_; + ++i) { + int index = i >= 0 ? i : ring_buffer_capacity_ + i; + RTC_DCHECK_GE(index, 0); + RTC_DCHECK_LT(index, ring_buffer_.size()); + if (!ring_buffer_[index].detected) { + prediction_interval = std::max(prediction_interval.value_or(0), + history_size_ - ring_buffer_[index].ttl); + } + } + return prediction_interval; +} + +// Cost: O(N). +int ClippingPredictorEvaluator::MarkExpectedDetectionAsDetected() { + int num_modified_items = 0; + for (int i = ring_buffer_tail_ - ring_buffer_size_; i < ring_buffer_tail_; + ++i) { + int index = i >= 0 ? i : ring_buffer_capacity_ + i; + RTC_DCHECK_GE(index, 0); + RTC_DCHECK_LT(index, ring_buffer_.size()); + if (!ring_buffer_[index].detected) { + num_modified_items++; + } + ring_buffer_[index].detected = true; + } + return num_modified_items; +} + +// Cost: O(1). +bool ClippingPredictorEvaluator::HasExpiredUnmatchedExpectedDetection() const { + if (ring_buffer_size_ == 0) { + return false; + } + // If an expired item, that is `ttl` equal to 0, exists, it must be the + // oldest. + const int oldest_index = OldestExpectedDetectionIndex( + ring_buffer_size_, ring_buffer_tail_, ring_buffer_capacity_); + RTC_DCHECK_GE(oldest_index, 0); + RTC_DCHECK_LT(oldest_index, ring_buffer_.size()); + return ring_buffer_[oldest_index].ttl == 0 && + !ring_buffer_[oldest_index].detected; +} + +} // namespace webrtc diff --git a/modules/audio_processing/agc/clipping_predictor_evaluator.h b/modules/audio_processing/agc/clipping_predictor_evaluator.h new file mode 100644 index 0000000000..e76f25d5e1 --- /dev/null +++ b/modules/audio_processing/agc/clipping_predictor_evaluator.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_ +#define MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_ + +#include + +#include "absl/types/optional.h" + +namespace webrtc { + +// Counts true/false positives/negatives while observing sequences of flag pairs +// that indicate whether clipping has been detected and/or if clipping is +// predicted. When a true positive is found measures the time interval between +// prediction and detection events. +// From the time a prediction is observed and for a period equal to +// `history_size` calls to `Observe()`, one or more detections are expected. 
If +// the expectation is met, a true positive is added and the time interval +// between the earliest prediction and the detection is recorded; otherwise, +// when the deadline is reached, a false positive is added. Note that one +// detection matches all the expected detections that have not expired - i.e., +// one detection counts as multiple true positives. +// If a detection is observed, but no prediction has been observed over the past +// `history_size` calls to `Observe()`, then a false negative is added; +// otherwise, a true negative is added. +class ClippingPredictorEvaluator { + public: + // Ctor. `history_size` indicates how long to wait for a call to `Observe()` + // having `clipping_detected` set to true from the time clipping is predicted. + explicit ClippingPredictorEvaluator(int history_size); + ClippingPredictorEvaluator(const ClippingPredictorEvaluator&) = delete; + ClippingPredictorEvaluator& operator=(const ClippingPredictorEvaluator&) = + delete; + ~ClippingPredictorEvaluator(); + + // Observes whether clipping has been detected and/or if clipping is + // predicted. When predicted, one or more detections are expected in the next + // `history_size_` calls to `Observe()`. When true positives are found, returns + // the prediction interval between the earliest prediction and the detection. + absl::optional<int> Observe(bool clipping_detected, bool clipping_predicted); + + // Removes any expectation recently set after a call to `Observe()` having + // `clipping_predicted` set to true. + void Reset(); + + // Metrics getters. + int true_positives() const { return true_positives_; } + int true_negatives() const { return true_negatives_; } + int false_positives() const { return false_positives_; } + int false_negatives() const { return false_negatives_; } + + private: + const int history_size_; + + // State of a detection expected to be observed after a prediction. + struct ExpectedDetection { + // Time to live (TTL); remaining number of `Observe()` calls to match a call + // having `clipping_detected` set to true. + int ttl; + // True if an `Observe()` call having `clipping_detected` set to true has + // been observed. + bool detected; + }; + // Ring buffer of expected detections. + const int ring_buffer_capacity_; + std::vector<ExpectedDetection> ring_buffer_; + int ring_buffer_tail_; + int ring_buffer_size_; + + // Pushes `expected_detection` into `ring_buffer_`. + void Push(ExpectedDetection expected_detection); + // Decreases the TTLs in `ring_buffer_` and removes expired + // items. + void DecreaseTimesToLive(); + // Returns the prediction interval for the earliest unexpired expected + // detection if any. + absl::optional<int> FindEarliestPredictionInterval() const; + // Marks all the items in `ring_buffer_` as `detected` and + // returns the number of updated items. + int MarkExpectedDetectionAsDetected(); + // Returns true if `ring_buffer_` has an item having `ttl` + // equal to 0 (expired) and `detected` equal to false (unmatched). + bool HasExpiredUnmatchedExpectedDetection() const; + + // Metrics.
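A minimal walk-through of the bookkeeping described above, assuming `history_size` = 3; the function name is illustrative and the counter values follow from the unit tests further below (`PredictedOnceAndDetectedBeforeDeadline`, `PredictedOnceAndNeverDetectedBeforeDeadline`):

    #include "absl/types/optional.h"
    #include "modules/audio_processing/agc/clipping_predictor_evaluator.h"

    void ExampleTrace() {  // Hypothetical helper, for illustration only.
      webrtc::ClippingPredictorEvaluator evaluator(/*history_size=*/3);
      // A prediction arms an expected detection with TTL = 3.
      evaluator.Observe(/*clipping_detected=*/false, /*clipping_predicted=*/true);
      evaluator.Observe(false, false);  // TTL decreases to 2.
      // A detection within the deadline: one true positive is counted and the
      // prediction interval (2 calls) is returned.
      absl::optional<int> interval = evaluator.Observe(true, false);
      (void)interval;  // evaluator.true_positives() == 1 and interval == 2 here.
      // Had no detection arrived within three calls of the prediction,
      // false_positives() would have become 1 instead and Observe() would have
      // kept returning absl::nullopt.
    }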
+ int true_positives_; + int true_negatives_; + int false_positives_; + int false_negatives_; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_ diff --git a/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc b/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc new file mode 100644 index 0000000000..1eb83eae61 --- /dev/null +++ b/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/agc/clipping_predictor_evaluator.h" + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/random.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using testing::Eq; +using testing::Optional; + +constexpr bool kDetected = true; +constexpr bool kNotDetected = false; + +constexpr bool kPredicted = true; +constexpr bool kNotPredicted = false; + +int SumTrueFalsePositivesNegatives( + const ClippingPredictorEvaluator& evaluator) { + return evaluator.true_positives() + evaluator.true_negatives() + + evaluator.false_positives() + evaluator.false_negatives(); +} + +// Checks the metrics after init - i.e., no call to `Observe()`. +TEST(ClippingPredictorEvaluatorTest, Init) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +class ClippingPredictorEvaluatorParameterization + : public ::testing::TestWithParam> { + protected: + uint64_t seed() const { + return rtc::checked_cast(std::get<0>(GetParam())); + } + int history_size() const { return std::get<1>(GetParam()); } +}; + +// Checks that after each call to `Observe()` at most one metric changes. +TEST_P(ClippingPredictorEvaluatorParameterization, AtMostOneMetricChanges) { + constexpr int kNumCalls = 123; + Random random_generator(seed()); + ClippingPredictorEvaluator evaluator(history_size()); + + for (int i = 0; i < kNumCalls; ++i) { + SCOPED_TRACE(i); + // Read metrics before `Observe()` is called. + const int last_tp = evaluator.true_positives(); + const int last_tn = evaluator.true_negatives(); + const int last_fp = evaluator.false_positives(); + const int last_fn = evaluator.false_negatives(); + // `Observe()` a random observation. + bool clipping_detected = random_generator.Rand(); + bool clipping_predicted = random_generator.Rand(); + evaluator.Observe(clipping_detected, clipping_predicted); + + // Check that at most one metric has changed. + int num_changes = 0; + num_changes += last_tp == evaluator.true_positives() ? 0 : 1; + num_changes += last_tn == evaluator.true_negatives() ? 0 : 1; + num_changes += last_fp == evaluator.false_positives() ? 0 : 1; + num_changes += last_fn == evaluator.false_negatives() ? 
0 : 1; + EXPECT_GE(num_changes, 0); + EXPECT_LE(num_changes, 1); + } +} + +// Checks that after each call to `Observe()` each metric either remains +// unchanged or grows. +TEST_P(ClippingPredictorEvaluatorParameterization, MetricsAreWeaklyMonotonic) { + constexpr int kNumCalls = 123; + Random random_generator(seed()); + ClippingPredictorEvaluator evaluator(history_size()); + + for (int i = 0; i < kNumCalls; ++i) { + SCOPED_TRACE(i); + // Read metrics before `Observe()` is called. + const int last_tp = evaluator.true_positives(); + const int last_tn = evaluator.true_negatives(); + const int last_fp = evaluator.false_positives(); + const int last_fn = evaluator.false_negatives(); + // `Observe()` a random observation. + bool clipping_detected = random_generator.Rand(); + bool clipping_predicted = random_generator.Rand(); + evaluator.Observe(clipping_detected, clipping_predicted); + + // Check that metrics are weakly monotonic. + EXPECT_GE(evaluator.true_positives(), last_tp); + EXPECT_GE(evaluator.true_negatives(), last_tn); + EXPECT_GE(evaluator.false_positives(), last_fp); + EXPECT_GE(evaluator.false_negatives(), last_fn); + } +} + +// Checks that after each call to `Observe()` the growth speed of each metrics +// is bounded. +TEST_P(ClippingPredictorEvaluatorParameterization, BoundedMetricsGrowth) { + constexpr int kNumCalls = 123; + Random random_generator(seed()); + ClippingPredictorEvaluator evaluator(history_size()); + + for (int i = 0; i < kNumCalls; ++i) { + SCOPED_TRACE(i); + // Read metrics before `Observe()` is called. + const int last_tp = evaluator.true_positives(); + const int last_tn = evaluator.true_negatives(); + const int last_fp = evaluator.false_positives(); + const int last_fn = evaluator.false_negatives(); + // `Observe()` a random observation. + bool clipping_detected = random_generator.Rand(); + bool clipping_predicted = random_generator.Rand(); + evaluator.Observe(clipping_detected, clipping_predicted); + + // Check that TPs grow by at most `history_size() + 1`. Such an upper bound + // is reached when multiple predictions are matched by a single detection. + EXPECT_LE(evaluator.true_positives() - last_tp, history_size() + 1); + // Check that TNs, FPs and FNs grow by at most one. `max_growth`. + EXPECT_LE(evaluator.true_negatives() - last_tn, 1); + EXPECT_LE(evaluator.false_positives() - last_fp, 1); + EXPECT_LE(evaluator.false_negatives() - last_fn, 1); + } +} + +// Checks that `Observe()` returns a prediction interval if and only if one or +// more true positives are found. +TEST_P(ClippingPredictorEvaluatorParameterization, + PredictionIntervalIfAndOnlyIfTruePositives) { + constexpr int kNumCalls = 123; + Random random_generator(seed()); + ClippingPredictorEvaluator evaluator(history_size()); + + for (int i = 0; i < kNumCalls; ++i) { + SCOPED_TRACE(i); + // Read true positives before `Observe()` is called. + const int last_tp = evaluator.true_positives(); + // `Observe()` a random observation. + bool clipping_detected = random_generator.Rand(); + bool clipping_predicted = random_generator.Rand(); + absl::optional prediction_interval = + evaluator.Observe(clipping_detected, clipping_predicted); + + // Check that the prediction interval is returned when a true positive is + // found. 
+ if (evaluator.true_positives() == last_tp) { + EXPECT_FALSE(prediction_interval.has_value()); + } else { + EXPECT_TRUE(prediction_interval.has_value()); + } + } +} + +INSTANTIATE_TEST_SUITE_P( + ClippingPredictorEvaluatorTest, + ClippingPredictorEvaluatorParameterization, + ::testing::Combine(::testing::Values(4, 8, 15, 16, 23, 42), + ::testing::Values(1, 10, 21))); + +// Checks that, observing a detection and a prediction after init, produces a +// true positive. +TEST(ClippingPredictorEvaluatorTest, OneTruePositiveAfterInit) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kDetected, kPredicted); + EXPECT_EQ(evaluator.true_positives(), 1); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that, observing a detection but no prediction after init, produces a +// false negative. +TEST(ClippingPredictorEvaluatorTest, OneFalseNegativeAfterInit) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_negatives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); +} + +// Checks that, observing no detection but a prediction after init, produces a +// false positive after expiration. +TEST(ClippingPredictorEvaluatorTest, OneFalsePositiveAfterInit) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that, observing no detection and no prediction after init, produces a +// true negative. +TEST(ClippingPredictorEvaluatorTest, OneTrueNegativeAfterInit) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_negatives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that the evaluator detects true negatives when clipping is neither +// predicted nor detected. +TEST(ClippingPredictorEvaluatorTest, NeverDetectedAndNotPredicted) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_negatives(), 4); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that the evaluator detects a false negative when clipping is detected +// but not predicted. 
+TEST(ClippingPredictorEvaluatorTest, DetectedButNotPredicted) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_negatives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.true_negatives(), 3); + EXPECT_EQ(evaluator.false_positives(), 0); +} + +// Checks that the evaluator does not detect a false positive when clipping is +// predicted but not detected until the observation period expires. +TEST(ClippingPredictorEvaluatorTest, + PredictedOnceAndNeverDetectedBeforeDeadline) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that the evaluator detects a false positive when clipping is predicted +// but detected after the observation period expires. +TEST(ClippingPredictorEvaluatorTest, PredictedOnceButDetectedAfterDeadline) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 0); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 1); +} + +// Checks that a prediction followed by a detection counts as true positive. +TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndThenImmediatelyDetected) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_positives(), 1); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that a prediction followed by a delayed detection counts as true +// positive if the delay is within the observation period. +TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndDetectedBeforeDeadline) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_positives(), 1); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that a prediction followed by a delayed detection counts as true +// positive if the delay equals the observation period. 
+TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndDetectedAtDeadline) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_positives(), 1); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that a prediction followed by a multiple adjacent detections within +// the deadline counts as a single true positive and that, after the deadline, +// a detection counts as a false negative. +TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndDetectedMultipleTimes) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + // Multiple detections. + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_positives(), 1); + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_positives(), 1); + // A detection outside of the observation period counts as false negative. + evaluator.Observe(kDetected, kNotPredicted); + EXPECT_EQ(evaluator.false_negatives(), 1); + EXPECT_EQ(SumTrueFalsePositivesNegatives(evaluator), 2); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); +} + +// Checks that a false positive is added when clipping is detected after a too +// early prediction. +TEST(ClippingPredictorEvaluatorTest, + PredictedMultipleTimesAndDetectedOnceAfterDeadline) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); // ---+ + evaluator.Observe(kNotDetected, kPredicted); // | + evaluator.Observe(kNotDetected, kPredicted); // | + evaluator.Observe(kNotDetected, kPredicted); // <--+ Not matched. + // The time to match a detection after the first prediction expired. + EXPECT_EQ(evaluator.false_positives(), 1); + evaluator.Observe(kDetected, kNotPredicted); + // The detection above does not match the first prediction because it happened + // after the deadline of the 1st prediction. + EXPECT_EQ(evaluator.false_positives(), 1); + + EXPECT_EQ(evaluator.true_positives(), 3); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that multiple consecutive predictions match the first detection +// observed before the expected detection deadline expires. +TEST(ClippingPredictorEvaluatorTest, PredictedMultipleTimesAndDetectedOnce) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); // --+ + evaluator.Observe(kNotDetected, kPredicted); // | --+ + evaluator.Observe(kNotDetected, kPredicted); // | | --+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+ + EXPECT_EQ(evaluator.true_positives(), 3); + // The following observations do not generate any true negatives as they + // belong to the observation period of the last prediction - for which a + // detection has already been matched. 
+ const int true_negatives = evaluator.true_negatives(); + evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_negatives(), true_negatives); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that multiple consecutive predictions match the multiple detections +// observed before the expected detection deadline expires. +TEST(ClippingPredictorEvaluatorTest, + PredictedMultipleTimesAndDetectedMultipleTimes) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); // --+ + evaluator.Observe(kNotDetected, kPredicted); // | --+ + evaluator.Observe(kNotDetected, kPredicted); // | | --+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ + EXPECT_EQ(evaluator.true_positives(), 3); + // The following observation does not generate a true negative as it belongs + // to the observation period of the last prediction - for which two detections + // have already been matched. + const int true_negatives = evaluator.true_negatives(); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.true_negatives(), true_negatives); + + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that multiple consecutive predictions match all the detections +// observed before the expected detection deadline expires. +TEST(ClippingPredictorEvaluatorTest, PredictedMultipleTimesAndAllDetected) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); // --+ + evaluator.Observe(kNotDetected, kPredicted); // | --+ + evaluator.Observe(kNotDetected, kPredicted); // | | --+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ + EXPECT_EQ(evaluator.true_positives(), 3); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +// Checks that multiple non-consecutive predictions match all the detections +// observed before the expected detection deadline expires. +TEST(ClippingPredictorEvaluatorTest, + PredictedMultipleTimesWithGapAndAllDetected) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kNotDetected, kPredicted); // --+ + evaluator.Observe(kNotDetected, kNotPredicted); // | + evaluator.Observe(kNotDetected, kPredicted); // | --+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ + evaluator.Observe(kDetected, kNotPredicted); // <-+ + EXPECT_EQ(evaluator.true_positives(), 2); + EXPECT_EQ(evaluator.true_negatives(), 0); + EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.false_negatives(), 0); +} + +class ClippingPredictorEvaluatorPredictionIntervalParameterization + : public ::testing::TestWithParam> { + protected: + int num_extra_observe_calls() const { return std::get<0>(GetParam()); } + int history_size() const { return std::get<1>(GetParam()); } +}; + +// Checks that the minimum prediction interval is returned if clipping is +// correctly predicted as soon as detected - i.e., no anticipation. 
+TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, + MinimumPredictionInterval) { + ClippingPredictorEvaluator evaluator(history_size()); + for (int i = 0; i < num_extra_observe_calls(); ++i) { + EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt); + } + absl::optional prediction_interval = + evaluator.Observe(kDetected, kPredicted); + EXPECT_THAT(prediction_interval, Optional(Eq(0))); +} + +// Checks that a prediction interval between the minimum and the maximum is +// returned if clipping is correctly predicted before it is detected but not as +// early as possible. +TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, + IntermediatePredictionInterval) { + ClippingPredictorEvaluator evaluator(history_size()); + for (int i = 0; i < num_extra_observe_calls(); ++i) { + EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt); + } + EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); + EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); + EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); + absl::optional prediction_interval = + evaluator.Observe(kDetected, kPredicted); + EXPECT_THAT(prediction_interval, Optional(Eq(3))); +} + +// Checks that the maximum prediction interval is returned if clipping is +// correctly predicted as early as possible. +TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, + MaximumPredictionInterval) { + ClippingPredictorEvaluator evaluator(history_size()); + for (int i = 0; i < num_extra_observe_calls(); ++i) { + EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt); + } + for (int i = 0; i < history_size(); ++i) { + EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); + } + absl::optional prediction_interval = + evaluator.Observe(kDetected, kPredicted); + EXPECT_THAT(prediction_interval, Optional(Eq(history_size()))); +} + +// Checks that `Observe()` returns the prediction interval as soon as a true +// positive is found and never again while ongoing detections are matched to a +// previously observed prediction. +TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, + PredictionIntervalReturnedOnce) { + ASSERT_LT(num_extra_observe_calls(), history_size()); + ClippingPredictorEvaluator evaluator(history_size()); + // Observe predictions before detection. + for (int i = 0; i < num_extra_observe_calls(); ++i) { + EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); + } + // Observe a detection. + absl::optional prediction_interval = + evaluator.Observe(kDetected, kPredicted); + EXPECT_TRUE(prediction_interval.has_value()); + // `Observe()` does not return a prediction interval anymore during ongoing + // detections observed while a detection is still expected. + for (int i = 0; i < history_size(); ++i) { + EXPECT_EQ(evaluator.Observe(kDetected, kNotPredicted), absl::nullopt); + } +} + +INSTANTIATE_TEST_SUITE_P( + ClippingPredictorEvaluatorTest, + ClippingPredictorEvaluatorPredictionIntervalParameterization, + ::testing::Combine(::testing::Values(0, 3, 5), ::testing::Values(7, 11))); + +// Checks that, when a detection is expected, the expectation is removed if and +// only if `Reset()` is called after a prediction is observed. 
+TEST(ClippingPredictorEvaluatorTest, NoFalsePositivesAfterReset) { + constexpr int kHistorySize = 2; + + ClippingPredictorEvaluator with_reset(kHistorySize); + with_reset.Observe(kNotDetected, kPredicted); + with_reset.Reset(); + with_reset.Observe(kNotDetected, kNotPredicted); + with_reset.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(with_reset.true_positives(), 0); + EXPECT_EQ(with_reset.true_negatives(), 2); + EXPECT_EQ(with_reset.false_positives(), 0); + EXPECT_EQ(with_reset.false_negatives(), 0); + + ClippingPredictorEvaluator no_reset(kHistorySize); + no_reset.Observe(kNotDetected, kPredicted); + no_reset.Observe(kNotDetected, kNotPredicted); + no_reset.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(no_reset.true_positives(), 0); + EXPECT_EQ(no_reset.true_negatives(), 0); + EXPECT_EQ(no_reset.false_positives(), 1); + EXPECT_EQ(no_reset.false_negatives(), 0); +} + +} // namespace +} // namespace webrtc diff --git a/modules/audio_processing/agc/clipping_predictor_unittest.cc b/modules/audio_processing/agc/clipping_predictor_unittest.cc index ab76abab68..e848e1a724 100644 --- a/modules/audio_processing/agc/clipping_predictor_unittest.cc +++ b/modules/audio_processing/agc/clipping_predictor_unittest.cc @@ -10,6 +10,8 @@ #include "modules/audio_processing/agc/clipping_predictor.h" +#include +#include #include #include "rtc_base/checks.h" @@ -21,57 +23,61 @@ namespace { using ::testing::Eq; using ::testing::Optional; +using ClippingPredictorConfig = AudioProcessing::Config::GainController1:: + AnalogGainController::ClippingPredictor; +using ClippingPredictorMode = AudioProcessing::Config::GainController1:: + AnalogGainController::ClippingPredictor::Mode; constexpr int kSampleRateHz = 32000; constexpr int kNumChannels = 1; constexpr int kSamplesPerChannel = kSampleRateHz / 100; -constexpr int kWindowLength = 5; -constexpr int kReferenceWindowLength = 5; -constexpr int kReferenceWindowDelay = 5; constexpr int kMaxMicLevel = 255; constexpr int kMinMicLevel = 12; constexpr int kDefaultClippedLevelStep = 15; +constexpr float kMaxSampleS16 = + static_cast(std::numeric_limits::max()); -using ClippingPredictorConfig = AudioProcessing::Config::GainController1:: - AnalogGainController::ClippingPredictor; +// Threshold in dB corresponding to a signal with an amplitude equal to 99% of +// the dynamic range - i.e., computed as `20*log10(0.99)`. +constexpr float kClippingThresholdDb = -0.08729610804900176f; -void CallProcess(int num_calls, +void CallAnalyze(int num_calls, const AudioFrameView& frame, ClippingPredictor& predictor) { for (int i = 0; i < num_calls; ++i) { - predictor.Process(frame); + predictor.Analyze(frame); } } -// Creates and processes an audio frame with a non-zero (approx. 4.15dB) crest +// Creates and analyzes an audio frame with a non-zero (approx. 4.15dB) crest // factor. 
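For reference, the "approx. 4.15dB" figure above can be checked directly: the helper below repeats the ramp 0.1·peak, 0.2·peak, ..., 1.0·peak, so the mean square is (0.01 + 0.04 + ... + 1.00) / 10 = 0.385, the RMS is sqrt(0.385) ≈ 0.62·peak, and the crest factor is 20·log10(1 / 0.62) ≈ 4.15 dB.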
-void ProcessNonZeroCrestFactorAudio(int num_calls, +void AnalyzeNonZeroCrestFactorAudio(int num_calls, int num_channels, float peak_ratio, ClippingPredictor& predictor) { RTC_DCHECK_GT(num_calls, 0); RTC_DCHECK_GT(num_channels, 0); - RTC_DCHECK_LE(peak_ratio, 1.f); + RTC_DCHECK_LE(peak_ratio, 1.0f); std::vector audio(num_channels); - std::vector audio_data(num_channels * kSamplesPerChannel, 0.f); + std::vector audio_data(num_channels * kSamplesPerChannel, 0.0f); for (int channel = 0; channel < num_channels; ++channel) { audio[channel] = &audio_data[channel * kSamplesPerChannel]; for (int sample = 0; sample < kSamplesPerChannel; sample += 10) { - audio[channel][sample] = 0.1f * peak_ratio * 32767.f; - audio[channel][sample + 1] = 0.2f * peak_ratio * 32767.f; - audio[channel][sample + 2] = 0.3f * peak_ratio * 32767.f; - audio[channel][sample + 3] = 0.4f * peak_ratio * 32767.f; - audio[channel][sample + 4] = 0.5f * peak_ratio * 32767.f; - audio[channel][sample + 5] = 0.6f * peak_ratio * 32767.f; - audio[channel][sample + 6] = 0.7f * peak_ratio * 32767.f; - audio[channel][sample + 7] = 0.8f * peak_ratio * 32767.f; - audio[channel][sample + 8] = 0.9f * peak_ratio * 32767.f; - audio[channel][sample + 9] = 1.f * peak_ratio * 32767.f; + audio[channel][sample] = 0.1f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 1] = 0.2f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 2] = 0.3f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 3] = 0.4f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 4] = 0.5f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 5] = 0.6f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 6] = 0.7f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 7] = 0.8f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 8] = 0.9f * peak_ratio * kMaxSampleS16; + audio[channel][sample + 9] = 1.0f * peak_ratio * kMaxSampleS16; } } - auto frame = AudioFrameView(audio.data(), num_channels, - kSamplesPerChannel); - CallProcess(num_calls, frame, predictor); + AudioFrameView frame(audio.data(), num_channels, + kSamplesPerChannel); + CallAnalyze(num_calls, frame, predictor); } void CheckChannelEstimatesWithValue(int num_channels, @@ -82,6 +88,7 @@ void CheckChannelEstimatesWithValue(int num_channels, const ClippingPredictor& predictor, int expected) { for (int i = 0; i < num_channels; ++i) { + SCOPED_TRACE(i); EXPECT_THAT(predictor.EstimateClippedLevelStep( i, level, default_step, min_mic_level, max_mic_level), Optional(Eq(expected))); @@ -95,14 +102,15 @@ void CheckChannelEstimatesWithoutValue(int num_channels, int max_mic_level, const ClippingPredictor& predictor) { for (int i = 0; i < num_channels; ++i) { + SCOPED_TRACE(i); EXPECT_EQ(predictor.EstimateClippedLevelStep(i, level, default_step, min_mic_level, max_mic_level), absl::nullopt); } } -// Creates and processes an audio frame with a zero crest factor. -void ProcessZeroCrestFactorAudio(int num_calls, +// Creates and analyzes an audio frame with a zero crest factor. 
+void AnalyzeZeroCrestFactorAudio(int num_calls, int num_channels, float peak_ratio, ClippingPredictor& predictor) { @@ -114,131 +122,156 @@ void ProcessZeroCrestFactorAudio(int num_calls, for (int channel = 0; channel < num_channels; ++channel) { audio[channel] = &audio_data[channel * kSamplesPerChannel]; for (int sample = 0; sample < kSamplesPerChannel; ++sample) { - audio[channel][sample] = peak_ratio * 32767.f; + audio[channel][sample] = peak_ratio * kMaxSampleS16; } } auto frame = AudioFrameView(audio.data(), num_channels, kSamplesPerChannel); - CallProcess(num_calls, frame, predictor); + CallAnalyze(num_calls, frame, predictor); +} + +TEST(ClippingPeakPredictorTest, NoPredictorCreated) { + auto predictor = + CreateClippingPredictor(kNumChannels, /*config=*/{/*enabled=*/false}); + EXPECT_FALSE(predictor); +} + +TEST(ClippingPeakPredictorTest, ClippingEventPredictionCreated) { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + auto predictor = CreateClippingPredictor( + kNumChannels, + /*config=*/{/*enabled=*/true, + /*mode=*/ClippingPredictorMode::kClippingEventPrediction}); + EXPECT_TRUE(predictor); +} + +TEST(ClippingPeakPredictorTest, AdaptiveStepClippingPeakPredictionCreated) { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + auto predictor = CreateClippingPredictor( + kNumChannels, /*config=*/{ + /*enabled=*/true, + /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction}); + EXPECT_TRUE(predictor); +} + +TEST(ClippingPeakPredictorTest, FixedStepClippingPeakPredictionCreated) { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + auto predictor = CreateClippingPredictor( + kNumChannels, /*config=*/{ + /*enabled=*/true, + /*mode=*/ClippingPredictorMode::kFixedStepClippingPeakPrediction}); + EXPECT_TRUE(predictor); } class ClippingPredictorParameterization : public ::testing::TestWithParam> { protected: int num_channels() const { return std::get<0>(GetParam()); } - int window_length() const { return std::get<1>(GetParam()); } - int reference_window_length() const { return std::get<2>(GetParam()); } - int reference_window_delay() const { return std::get<3>(GetParam()); } -}; - -class ClippingEventPredictorParameterization - : public ::testing::TestWithParam> { - protected: - float clipping_threshold() const { return std::get<0>(GetParam()); } - float crest_factor_margin() const { return std::get<1>(GetParam()); } -}; - -class ClippingPeakPredictorParameterization - : public ::testing::TestWithParam> { - protected: - float adaptive_step_estimation() const { return std::get<0>(GetParam()); } - float clipping_threshold() const { return std::get<1>(GetParam()); } + ClippingPredictorConfig GetConfig(ClippingPredictorMode mode) const { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. 
+ return {/*enabled=*/true, + /*mode=*/mode, + /*window_length=*/std::get<1>(GetParam()), + /*reference_window_length=*/std::get<2>(GetParam()), + /*reference_window_delay=*/std::get<3>(GetParam()), + /*clipping_threshold=*/-1.0f, + /*crest_factor_margin=*/0.5f}; + } }; TEST_P(ClippingPredictorParameterization, CheckClippingEventPredictorEstimateAfterCrestFactorDrop) { - if (reference_window_length() + reference_window_delay() > window_length()) { - ClippingPredictorConfig config; - config.window_length = window_length(); - config.reference_window_length = reference_window_length(); - config.reference_window_delay = reference_window_delay(); - config.clipping_threshold = -1.0f; - config.crest_factor_margin = 0.5f; - auto predictor = CreateClippingEventPredictor(num_channels(), config); - ProcessNonZeroCrestFactorAudio( - reference_window_length() + reference_window_delay() - window_length(), - num_channels(), /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); - ProcessZeroCrestFactorAudio(window_length(), num_channels(), - /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithValue( - num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor, kDefaultClippedLevelStep); + const ClippingPredictorConfig config = + GetConfig(ClippingPredictorMode::kClippingEventPrediction); + if (config.reference_window_length + config.reference_window_delay <= + config.window_length) { + return; } + auto predictor = CreateClippingPredictor(num_channels(), config); + AnalyzeNonZeroCrestFactorAudio( + /*num_calls=*/config.reference_window_length + + config.reference_window_delay - config.window_length, + num_channels(), /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); + AnalyzeZeroCrestFactorAudio(config.window_length, num_channels(), + /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithValue( + num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor, kDefaultClippedLevelStep); } TEST_P(ClippingPredictorParameterization, CheckClippingEventPredictorNoEstimateAfterConstantCrestFactor) { - if (reference_window_length() + reference_window_delay() > window_length()) { - ClippingPredictorConfig config; - config.window_length = window_length(); - config.reference_window_length = reference_window_length(); - config.reference_window_delay = reference_window_delay(); - config.clipping_threshold = -1.0f; - config.crest_factor_margin = 0.5f; - auto predictor = CreateClippingEventPredictor(num_channels(), config); - ProcessNonZeroCrestFactorAudio( - reference_window_length() + reference_window_delay() - window_length(), - num_channels(), /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); - ProcessNonZeroCrestFactorAudio(window_length(), num_channels(), - /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); + const ClippingPredictorConfig config = + GetConfig(ClippingPredictorMode::kClippingEventPrediction); + if (config.reference_window_length + config.reference_window_delay <= + config.window_length) { + return; } + auto predictor = 
CreateClippingPredictor(num_channels(), config); + AnalyzeNonZeroCrestFactorAudio( + /*num_calls=*/config.reference_window_length + + config.reference_window_delay - config.window_length, + num_channels(), /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length, + num_channels(), + /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); } TEST_P(ClippingPredictorParameterization, CheckClippingPeakPredictorEstimateAfterHighCrestFactor) { - if (reference_window_length() + reference_window_delay() > window_length()) { - ClippingPredictorConfig config; - config.window_length = window_length(); - config.reference_window_length = reference_window_length(); - config.reference_window_delay = reference_window_delay(); - config.clipping_threshold = -1.0f; - auto predictor = - CreateAdaptiveStepClippingPeakPredictor(num_channels(), config); - ProcessNonZeroCrestFactorAudio( - reference_window_length() + reference_window_delay() - window_length(), - num_channels(), /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); - ProcessNonZeroCrestFactorAudio(window_length(), num_channels(), - /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithValue( - num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor, kDefaultClippedLevelStep); + const ClippingPredictorConfig config = + GetConfig(ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction); + if (config.reference_window_length + config.reference_window_delay <= + config.window_length) { + return; } + auto predictor = CreateClippingPredictor(num_channels(), config); + AnalyzeNonZeroCrestFactorAudio( + /*num_calls=*/config.reference_window_length + + config.reference_window_delay - config.window_length, + num_channels(), /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length, + num_channels(), + /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithValue( + num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor, kDefaultClippedLevelStep); } TEST_P(ClippingPredictorParameterization, CheckClippingPeakPredictorNoEstimateAfterLowCrestFactor) { - if (reference_window_length() + reference_window_delay() > window_length()) { - ClippingPredictorConfig config; - config.window_length = window_length(); - config.reference_window_length = reference_window_length(); - config.reference_window_delay = reference_window_delay(); - config.clipping_threshold = -1.0f; - auto predictor = - CreateAdaptiveStepClippingPeakPredictor(num_channels(), config); - ProcessZeroCrestFactorAudio( - reference_window_length() + reference_window_delay() - window_length(), - num_channels(), /*peak_ratio=*/0.99f, *predictor); - CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); - ProcessNonZeroCrestFactorAudio(window_length(), num_channels(), - /*peak_ratio=*/0.99f, *predictor); - 
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); + const ClippingPredictorConfig config = + GetConfig(ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction); + if (config.reference_window_length + config.reference_window_delay <= + config.window_length) { + return; } + auto predictor = CreateClippingPredictor(num_channels(), config); + AnalyzeZeroCrestFactorAudio( + /*num_calls=*/config.reference_window_length + + config.reference_window_delay - config.window_length, + num_channels(), /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length, + num_channels(), + /*peak_ratio=*/0.99f, *predictor); + CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); } INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor, @@ -248,24 +281,37 @@ INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor, ::testing::Values(1, 5), ::testing::Values(0, 1, 5))); +class ClippingEventPredictorParameterization + : public ::testing::TestWithParam> { + protected: + ClippingPredictorConfig GetConfig() const { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + return {/*enabled=*/true, + /*mode=*/ClippingPredictorMode::kClippingEventPrediction, + /*window_length=*/5, + /*reference_window_length=*/5, + /*reference_window_delay=*/5, + /*clipping_threshold=*/std::get<0>(GetParam()), + /*crest_factor_margin=*/std::get<1>(GetParam())}; + } +}; + TEST_P(ClippingEventPredictorParameterization, CheckEstimateAfterCrestFactorDrop) { - ClippingPredictorConfig config; - config.window_length = kWindowLength; - config.reference_window_length = kReferenceWindowLength; - config.reference_window_delay = kReferenceWindowDelay; - config.clipping_threshold = clipping_threshold(); - config.crest_factor_margin = crest_factor_margin(); - auto predictor = CreateClippingEventPredictor(kNumChannels, config); - ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels, - /*peak_ratio=*/0.99f, *predictor); + const ClippingPredictorConfig config = GetConfig(); + auto predictor = CreateClippingPredictor(kNumChannels, config); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, + *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor); - ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, /*peak_ratio=*/0.99f, - *predictor); - if (clipping_threshold() < 20 * std::log10f(0.99f) && - crest_factor_margin() < 4.15f) { + AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels, + /*peak_ratio=*/0.99f, *predictor); + // TODO(bugs.webrtc.org/12774): Add clarifying comment. + // TODO(bugs.webrtc.org/12774): Remove 4.15f threshold and split tests. 
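Here kClippingThresholdDb stands in for the inline expression 20 * std::log10f(0.99f) that the old test compared against, so it presumably keeps that value. A quick check of the numbers involved, to make the branch below easier to follow (only the constant name is an assumption, the rest is arithmetic):

#include <cmath>

// dBFS level of a peak at 99% of full scale, i.e. what the removed inline
// expression 20 * std::log10f(0.99f) evaluated to: roughly -0.087 dBFS.
const float kPeakLevelDbfs = 20.0f * std::log10(0.99f);  // ~= -0.0873f.
// A configured clipping_threshold of -1.0f therefore lies below the analyzed
// peak level (an estimate is expected), while 0.0f lies above it (no
// estimate). The 4.15f crest-factor bound is kept as-is pending the TODO.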
+ if (config.clipping_threshold < kClippingThresholdDb && + config.crest_factor_margin < 4.15f) { CheckChannelEstimatesWithValue( kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor, kDefaultClippedLevelStep); @@ -281,62 +327,90 @@ INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor, ::testing::Combine(::testing::Values(-1.0f, 0.0f), ::testing::Values(3.0f, 4.16f))); -TEST_P(ClippingPeakPredictorParameterization, - CheckEstimateAfterHighCrestFactor) { - ClippingPredictorConfig config; - config.window_length = kWindowLength; - config.reference_window_length = kReferenceWindowLength; - config.reference_window_delay = kReferenceWindowDelay; - config.clipping_threshold = clipping_threshold(); - auto predictor = - adaptive_step_estimation() - ? CreateAdaptiveStepClippingPeakPredictor(kNumChannels, config) - : CreateFixedStepClippingPeakPredictor(kNumChannels, config); - ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels, +class ClippingPredictorModeParameterization + : public ::testing::TestWithParam { + protected: + ClippingPredictorConfig GetConfig(float clipping_threshold_dbfs) const { + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + return {/*enabled=*/true, + /*mode=*/GetParam(), + /*window_length=*/5, + /*reference_window_length=*/5, + /*reference_window_delay=*/5, + /*clipping_threshold=*/clipping_threshold_dbfs, + /*crest_factor_margin=*/3.0f}; + } +}; + +TEST_P(ClippingPredictorModeParameterization, + CheckEstimateAfterHighCrestFactorWithNoClippingMargin) { + const ClippingPredictorConfig config = GetConfig( + /*clipping_threshold_dbfs=*/0.0f); + auto predictor = CreateClippingPredictor(kNumChannels, config); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, + *predictor); + CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); + AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels, + /*peak_ratio=*/0.99f, *predictor); + // Since the clipping threshold is set to 0 dBFS, `EstimateClippedLevelStep()` + // is expected to return an unavailable value. 
+ CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor); +} + +TEST_P(ClippingPredictorModeParameterization, + CheckEstimateAfterHighCrestFactorWithClippingMargin) { + const ClippingPredictorConfig config = + GetConfig(/*clipping_threshold_dbfs=*/-1.0f); + auto predictor = CreateClippingPredictor(kNumChannels, config); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor); - ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, + AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels, /*peak_ratio=*/0.99f, *predictor); - if (clipping_threshold() < 20 * std::log10(0.99f)) { - if (adaptive_step_estimation()) { - CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor, - /*expected=*/17); - } else { - CheckChannelEstimatesWithValue( - kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor, kDefaultClippedLevelStep); - } - } else { - CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, - kDefaultClippedLevelStep, kMinMicLevel, - kMaxMicLevel, *predictor); - } + // TODO(bugs.webrtc.org/12774): Add clarifying comment. + const float expected_step = + config.mode == ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction + ? 17 + : kDefaultClippedLevelStep; + CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255, + kDefaultClippedLevelStep, kMinMicLevel, + kMaxMicLevel, *predictor, expected_step); } -INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor, - ClippingPeakPredictorParameterization, - ::testing::Combine(::testing::Values(true, false), - ::testing::Values(-1.0f, 0.0f))); +INSTANTIATE_TEST_SUITE_P( + GainController1ClippingPredictor, + ClippingPredictorModeParameterization, + ::testing::Values( + ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction, + ClippingPredictorMode::kFixedStepClippingPeakPrediction)); TEST(ClippingEventPredictorTest, CheckEstimateAfterReset) { - ClippingPredictorConfig config; - config.window_length = kWindowLength; - config.reference_window_length = kReferenceWindowLength; - config.reference_window_delay = kReferenceWindowDelay; - config.clipping_threshold = -1.0f; - config.crest_factor_margin = 3.0f; - auto predictor = CreateClippingEventPredictor(kNumChannels, config); - ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels, + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. 
+ constexpr ClippingPredictorConfig kConfig{ + /*enabled=*/true, + /*mode=*/ClippingPredictorMode::kClippingEventPrediction, + /*window_length=*/5, + /*reference_window_length=*/5, + /*reference_window_delay=*/5, + /*clipping_threshold=*/-1.0f, + /*crest_factor_margin=*/3.0f}; + auto predictor = CreateClippingPredictor(kNumChannels, kConfig); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor); predictor->Reset(); - ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, + AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, @@ -344,21 +418,23 @@ TEST(ClippingEventPredictorTest, CheckEstimateAfterReset) { } TEST(ClippingPeakPredictorTest, CheckNoEstimateAfterReset) { - ClippingPredictorConfig config; - config.window_length = kWindowLength; - config.reference_window_length = kReferenceWindowLength; - config.reference_window_delay = kReferenceWindowDelay; - config.clipping_threshold = -1.0f; - config.crest_factor_margin = 3.0f; - auto predictor = - CreateAdaptiveStepClippingPeakPredictor(kNumChannels, config); - ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels, + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + constexpr ClippingPredictorConfig kConfig{ + /*enabled=*/true, + /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction, + /*window_length=*/5, + /*reference_window_length=*/5, + /*reference_window_delay=*/5, + /*clipping_threshold=*/-1.0f}; + auto predictor = CreateClippingPredictor(kNumChannels, kConfig); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor); predictor->Reset(); - ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, + AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, @@ -366,19 +442,22 @@ TEST(ClippingPeakPredictorTest, CheckNoEstimateAfterReset) { } TEST(ClippingPeakPredictorTest, CheckAdaptiveStepEstimate) { - ClippingPredictorConfig config; - config.window_length = kWindowLength; - config.reference_window_length = kReferenceWindowLength; - config.reference_window_delay = kReferenceWindowDelay; - config.clipping_threshold = -1.0f; - auto predictor = - CreateAdaptiveStepClippingPeakPredictor(kNumChannels, config); - ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels, - /*peak_ratio=*/0.99f, *predictor); + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. 
+ constexpr ClippingPredictorConfig kConfig{ + /*enabled=*/true, + /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction, + /*window_length=*/5, + /*reference_window_length=*/5, + /*reference_window_delay=*/5, + /*clipping_threshold=*/-1.0f}; + auto predictor = CreateClippingPredictor(kNumChannels, kConfig); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, + *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor); - ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, + AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, @@ -386,18 +465,22 @@ TEST(ClippingPeakPredictorTest, CheckAdaptiveStepEstimate) { } TEST(ClippingPeakPredictorTest, CheckFixedStepEstimate) { - ClippingPredictorConfig config; - config.window_length = kWindowLength; - config.reference_window_length = kReferenceWindowLength; - config.reference_window_delay = kReferenceWindowDelay; - config.clipping_threshold = -1.0f; - auto predictor = CreateFixedStepClippingPeakPredictor(kNumChannels, config); - ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels, - /*peak_ratio=*/0.99f, *predictor); + // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. + constexpr ClippingPredictorConfig kConfig{ + /*enabled=*/true, + /*mode=*/ClippingPredictorMode::kFixedStepClippingPeakPrediction, + /*window_length=*/5, + /*reference_window_length=*/5, + /*reference_window_delay=*/5, + /*clipping_threshold=*/-1.0f}; + auto predictor = CreateClippingPredictor(kNumChannels, kConfig); + AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length, + kNumChannels, /*peak_ratio=*/0.99f, + *predictor); CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, kMaxMicLevel, *predictor); - ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, + AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels, /*peak_ratio=*/0.99f, *predictor); CheckChannelEstimatesWithValue( kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel, diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc b/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc index b04807f19f..ecbb198c96 100644 --- a/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc +++ b/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc @@ -56,10 +56,8 @@ rtc::FunctionView GetActivationFunction( switch (activation_function) { case ActivationFunction::kTansigApproximated: return ::rnnoise::TansigApproximated; - break; case ActivationFunction::kSigmoidApproximated: return ::rnnoise::SigmoidApproximated; - break; } } diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h index 1f3c19c464..64b1b5d107 100644 --- a/modules/audio_processing/include/audio_processing.h +++ b/modules/audio_processing/include/audio_processing.h @@ -348,31 +348,28 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { struct ClippingPredictor { bool enabled = false; enum Mode { - // Sets clipping prediction for clipping event prediction with fixed - // step estimation. + // Clipping event prediction mode with fixed step estimation. 
kClippingEventPrediction, - // Sets clipping prediction for clipped peak estimation with - // adaptive step estimation. + // Clipped peak estimation mode with adaptive step estimation. kAdaptiveStepClippingPeakPrediction, - // Sets clipping prediction for clipped peak estimation with fixed - // step estimation. + // Clipped peak estimation mode with fixed step estimation. kFixedStepClippingPeakPrediction, }; Mode mode = kClippingEventPrediction; - // Number of frames in the sliding analysis window. Limited to values - // higher than zero. + // Number of frames in the sliding analysis window. int window_length = 5; - // Number of frames in the sliding reference window. Limited to values - // higher than zero. + // Number of frames in the sliding reference window. int reference_window_length = 5; - // Number of frames the reference window is delayed. Limited to values - // zero and higher. An additional requirement: - // |window_length < reference_window_length + reference_window_delay|. + // Reference window delay (unit: number of frames). int reference_window_delay = 5; - // Clipping predictor ste estimation threshold (dB). + // Clipping prediction threshold (dBFS). float clipping_threshold = -1.0f; // Crest factor drop threshold (dB). float crest_factor_margin = 3.0f; + // If true, the recommended clipped level step is used to modify the + // analog gain. Otherwise, the predictor runs without affecting the + // analog gain. + bool use_predicted_step = true; } clipping_predictor; } analog_gain_controller; } gain_controller1; diff --git a/modules/audio_processing/logging/apm_data_dumper.h b/modules/audio_processing/logging/apm_data_dumper.h index 6d32b32ab5..9c2ac3be5d 100644 --- a/modules/audio_processing/logging/apm_data_dumper.h +++ b/modules/audio_processing/logging/apm_data_dumper.h @@ -65,6 +65,15 @@ class ApmDataDumper { #endif } + // Returns whether dumping functionality is enabled/available. + static bool IsAvailable() { +#if WEBRTC_APM_DEBUG_DUMP == 1 + return true; +#else + return false; +#endif + } + // Default dump set. 
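The new IsAvailable() helper above makes the dump build flag queryable at the call site. A minimal usage sketch for a caller that wants to skip dump-only bookkeeping entirely; the function name and the data it would collect are illustrative, not part of this patch:

#include "modules/audio_processing/logging/apm_data_dumper.h"

// Illustrative guard: when WEBRTC_APM_DEBUG_DUMP is not compiled in,
// IsAvailable() is a compile-time false, so dump-only work can be skipped
// (and typically optimized away) without sprinkling #if checks around.
void MaybeCollectDebugData(webrtc::ApmDataDumper& dumper) {
  if (!webrtc::ApmDataDumper::IsAvailable()) {
    return;
  }
  // ... assemble and dump debug-only data here, e.g. via the dumper's
  // DumpRaw() overloads (left as a comment to keep this a sketch).
}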
static constexpr size_t kDefaultDumpSet = 0; diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control.cc b/modules/congestion_controller/goog_cc/goog_cc_network_control.cc index 0a0b1801f2..2344f45a65 100644 --- a/modules/congestion_controller/goog_cc/goog_cc_network_control.cc +++ b/modules/congestion_controller/goog_cc/goog_cc_network_control.cc @@ -465,7 +465,7 @@ NetworkControlUpdate GoogCcNetworkController::OnTransportPacketsFeedback( expected_packets_since_last_loss_update_ += report.PacketsWithFeedback().size(); for (const auto& packet_feedback : report.PacketsWithFeedback()) { - if (packet_feedback.receive_time.IsInfinite()) + if (!packet_feedback.IsReceived()) lost_packets_since_last_loss_update_ += 1; } if (report.feedback_time > next_loss_update_) { diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc b/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc index 8eb4a00431..7e8d7b9ac6 100644 --- a/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc +++ b/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc @@ -124,6 +124,35 @@ void UpdatesTargetRateBasedOnLinkCapacity(std::string test_name = "") { truth->PrintRow(); EXPECT_NEAR(client->target_rate().kbps(), 90, 25); } + +DataRate RunRembDipScenario(std::string test_name) { + Scenario s(test_name); + NetworkSimulationConfig net_conf; + net_conf.bandwidth = DataRate::KilobitsPerSec(2000); + net_conf.delay = TimeDelta::Millis(50); + auto* client = s.CreateClient("send", [&](CallClientConfig* c) { + c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000); + }); + auto send_net = {s.CreateSimulationNode(net_conf)}; + auto ret_net = {s.CreateSimulationNode(net_conf)}; + auto* route = s.CreateRoutes( + client, send_net, s.CreateClient("return", CallClientConfig()), ret_net); + s.CreateVideoStream(route->forward(), VideoStreamConfig()); + + s.RunFor(TimeDelta::Seconds(10)); + EXPECT_GT(client->send_bandwidth().kbps(), 1500); + + DataRate RembLimit = DataRate::KilobitsPerSec(250); + client->SetRemoteBitrate(RembLimit); + s.RunFor(TimeDelta::Seconds(1)); + EXPECT_EQ(client->send_bandwidth(), RembLimit); + + DataRate RembLimitLifted = DataRate::KilobitsPerSec(10000); + client->SetRemoteBitrate(RembLimitLifted); + s.RunFor(TimeDelta::Seconds(10)); + + return client->send_bandwidth(); +} } // namespace class GoogCcNetworkControllerTest : public ::testing::Test { @@ -850,33 +879,17 @@ TEST_F(GoogCcNetworkControllerTest, IsFairToTCP) { EXPECT_LT(client->send_bandwidth().kbps(), 750); } -TEST(GoogCcScenario, RampupOnRembCapLifted) { +TEST(GoogCcScenario, FastRampupOnRembCapLiftedWithFieldTrial) { ScopedFieldTrials trial("WebRTC-Bwe-ReceiverLimitCapsOnly/Enabled/"); - Scenario s("googcc_unit/rampup_ramb_cap_lifted"); - NetworkSimulationConfig net_conf; - net_conf.bandwidth = DataRate::KilobitsPerSec(2000); - net_conf.delay = TimeDelta::Millis(50); - auto* client = s.CreateClient("send", [&](CallClientConfig* c) { - c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000); - }); - auto send_net = {s.CreateSimulationNode(net_conf)}; - auto ret_net = {s.CreateSimulationNode(net_conf)}; - auto* route = s.CreateRoutes( - client, send_net, s.CreateClient("return", CallClientConfig()), ret_net); - s.CreateVideoStream(route->forward(), VideoStreamConfig()); - - s.RunFor(TimeDelta::Seconds(10)); - EXPECT_GT(client->send_bandwidth().kbps(), 1500); - - DataRate RembLimit = DataRate::KilobitsPerSec(250); - 
client->SetRemoteBitrate(RembLimit); - s.RunFor(TimeDelta::Seconds(1)); - EXPECT_EQ(client->send_bandwidth(), RembLimit); + DataRate final_estimate = + RunRembDipScenario("googcc_unit/fast_rampup_on_remb_cap_lifted"); + EXPECT_GT(final_estimate.kbps(), 1500); +} - DataRate RembLimitLifted = DataRate::KilobitsPerSec(10000); - client->SetRemoteBitrate(RembLimitLifted); - s.RunFor(TimeDelta::Seconds(10)); - EXPECT_GT(client->send_bandwidth().kbps(), 1500); +TEST(GoogCcScenario, SlowRampupOnRembCapLifted) { + DataRate final_estimate = + RunRembDipScenario("googcc_unit/default_slow_rampup_on_remb_cap_lifted"); + EXPECT_LT(final_estimate.kbps(), 1000); } } // namespace test diff --git a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc index 2211d26f0a..c7f53c62f2 100644 --- a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc +++ b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc @@ -36,7 +36,7 @@ double GetIncreaseFactor(const LossBasedControlConfig& config, TimeDelta rtt) { } auto rtt_range = config.increase_high_rtt.Get() - config.increase_low_rtt; if (rtt_range <= TimeDelta::Zero()) { - RTC_DCHECK(false); // Only on misconfiguration. + RTC_NOTREACHED(); // Only on misconfiguration. return config.min_increase_factor; } auto rtt_offset = rtt - config.increase_low_rtt; @@ -57,7 +57,7 @@ DataRate BitrateFromLoss(double loss, DataRate loss_bandwidth_balance, double exponent) { if (exponent <= 0) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return DataRate::Infinity(); } if (loss < 1e-5) @@ -69,7 +69,7 @@ double ExponentialUpdate(TimeDelta window, TimeDelta interval) { // Use the convention that exponential window length (which is really // infinite) is the time it takes to dampen to 1/e. if (window <= TimeDelta::Zero()) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return 1.0f; } return 1.0f - exp(interval / window * -1.0); @@ -134,12 +134,12 @@ void LossBasedBandwidthEstimation::UpdateLossStatistics( const std::vector& packet_results, Timestamp at_time) { if (packet_results.empty()) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return; } int loss_count = 0; for (const auto& pkt : packet_results) { - loss_count += pkt.receive_time.IsInfinite() ? 1 : 0; + loss_count += !pkt.IsReceived() ? 
1 : 0; } last_loss_ratio_ = static_cast(loss_count) / packet_results.size(); const TimeDelta time_passed = last_loss_packet_report_.IsFinite() diff --git a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc index a2865d9f5a..c5f51df99b 100644 --- a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc +++ b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc @@ -587,8 +587,7 @@ DataRate SendSideBandwidthEstimation::GetUpperLimit() const { DataRate upper_limit = delay_based_limit_; if (!receiver_limit_caps_only_) upper_limit = std::min(upper_limit, receiver_limit_); - upper_limit = std::min(upper_limit, max_bitrate_configured_); - return upper_limit; + return std::min(upper_limit, max_bitrate_configured_); } void SendSideBandwidthEstimation::MaybeLogLowBitrateWarning(DataRate bitrate, diff --git a/modules/congestion_controller/pcc/monitor_interval.cc b/modules/congestion_controller/pcc/monitor_interval.cc index c8efd5b59a..6bc9f4a7ef 100644 --- a/modules/congestion_controller/pcc/monitor_interval.cc +++ b/modules/congestion_controller/pcc/monitor_interval.cc @@ -47,7 +47,7 @@ void PccMonitorInterval::OnPacketsFeedback( feedback_collection_done_ = true; return; } - if (packet_result.receive_time.IsInfinite()) { + if (!packet_result.IsReceived()) { lost_packets_sent_time_.push_back(packet_result.sent_packet.send_time); } else { received_packets_.push_back( diff --git a/modules/congestion_controller/pcc/rtt_tracker.cc b/modules/congestion_controller/pcc/rtt_tracker.cc index 0814912b49..af9dc8f11b 100644 --- a/modules/congestion_controller/pcc/rtt_tracker.cc +++ b/modules/congestion_controller/pcc/rtt_tracker.cc @@ -23,7 +23,7 @@ void RttTracker::OnPacketsFeedback( Timestamp feedback_received_time) { TimeDelta packet_rtt = TimeDelta::MinusInfinity(); for (const PacketResult& packet_result : packet_feedbacks) { - if (packet_result.receive_time.IsInfinite()) + if (!packet_result.IsReceived()) continue; packet_rtt = std::max( packet_rtt, diff --git a/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc b/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc index 3849cb3707..933abd9bf0 100644 --- a/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc +++ b/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc @@ -27,9 +27,9 @@ using ::testing::_; using ::testing::Invoke; namespace webrtc { -namespace webrtc_cc { namespace { +constexpr uint32_t kSsrc = 8492; const PacedPacketInfo kPacingInfo0(0, 5, 2000); const PacedPacketInfo kPacingInfo1(1, 8, 4000); const PacedPacketInfo kPacingInfo2(2, 14, 7000); @@ -49,8 +49,8 @@ void ComparePacketFeedbackVectors(const std::vector& truth, // equal. However, the difference must be the same for all x. 
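Several controllers in this patch (the GoogCC loss statistics, the loss-based estimator above, PCC's monitor interval and RTT tracker) switch from checking receive_time.IsInfinite() to PacketResult::IsReceived(). A minimal sketch of the shared counting pattern, under the assumption that IsReceived() is equivalent to the finite-receive-time check it replaces and that PacketResult lives in api/transport/network_types.h:

#include <vector>

#include "api/transport/network_types.h"

// Counts packets reported as lost in a transport feedback batch. A packet
// with no (finite) receive time is treated as lost, matching the
// IsInfinite() logic this patch replaces.
int CountLostPackets(const std::vector<webrtc::PacketResult>& feedback) {
  int lost = 0;
  for (const webrtc::PacketResult& packet : feedback) {
    if (!packet.IsReceived()) {
      ++lost;
    }
  }
  return lost;
}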
TimeDelta arrival_time_delta = truth[0].receive_time - input[0].receive_time; for (size_t i = 0; i < len; ++i) { - RTC_CHECK(truth[i].receive_time.IsFinite()); - if (input[i].receive_time.IsFinite()) { + RTC_CHECK(truth[i].IsReceived()); + if (input[i].IsReceived()) { EXPECT_EQ(truth[i].receive_time - input[i].receive_time, arrival_time_delta); } @@ -77,10 +77,6 @@ PacketResult CreatePacket(int64_t receive_time_ms, return res; } -} // namespace - -namespace test { - class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver { public: MOCK_METHOD(void, @@ -89,6 +85,8 @@ class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver { (override)); }; +} // namespace + class TransportFeedbackAdapterTest : public ::testing::Test { public: TransportFeedbackAdapterTest() : clock_(0) {} @@ -108,7 +106,7 @@ class TransportFeedbackAdapterTest : public ::testing::Test { void OnSentPacket(const PacketResult& packet_feedback) { RtpPacketSendInfo packet_info; - packet_info.ssrc = kSsrc; + packet_info.media_ssrc = kSsrc; packet_info.transport_sequence_number = packet_feedback.sent_packet.sequence_number; packet_info.rtp_sequence_number = 0; @@ -122,8 +120,6 @@ class TransportFeedbackAdapterTest : public ::testing::Test { packet_feedback.sent_packet.send_time.ms(), rtc::PacketInfo())); } - static constexpr uint32_t kSsrc = 8492; - SimulatedClock clock_; std::unique_ptr adapter_; }; @@ -393,7 +389,7 @@ TEST_F(TransportFeedbackAdapterTest, IgnoreDuplicatePacketSentCalls) { // Add a packet and then mark it as sent. RtpPacketSendInfo packet_info; - packet_info.ssrc = kSsrc; + packet_info.media_ssrc = kSsrc; packet_info.transport_sequence_number = packet.sent_packet.sequence_number; packet_info.length = packet.sent_packet.size.bytes(); packet_info.pacing_info = packet.sent_packet.pacing_info; @@ -412,6 +408,4 @@ TEST_F(TransportFeedbackAdapterTest, IgnoreDuplicatePacketSentCalls) { EXPECT_FALSE(duplicate_packet.has_value()); } -} // namespace test -} // namespace webrtc_cc } // namespace webrtc diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer.cc b/modules/congestion_controller/rtp/transport_feedback_demuxer.cc index c958a1c3cb..6ab3ad80fa 100644 --- a/modules/congestion_controller/rtp/transport_feedback_demuxer.cc +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer.cc @@ -38,15 +38,16 @@ void TransportFeedbackDemuxer::DeRegisterStreamFeedbackObserver( void TransportFeedbackDemuxer::AddPacket(const RtpPacketSendInfo& packet_info) { MutexLock lock(&lock_); - if (packet_info.ssrc != 0) { - StreamFeedbackObserver::StreamPacketInfo info; - info.ssrc = packet_info.ssrc; - info.rtp_sequence_number = packet_info.rtp_sequence_number; - info.received = false; - history_.insert( - {seq_num_unwrapper_.Unwrap(packet_info.transport_sequence_number), - info}); - } + + StreamFeedbackObserver::StreamPacketInfo info; + info.ssrc = packet_info.media_ssrc; + info.rtp_sequence_number = packet_info.rtp_sequence_number; + info.received = false; + info.is_retransmission = + packet_info.packet_type == RtpPacketMediaType::kRetransmission; + history_.insert( + {seq_num_unwrapper_.Unwrap(packet_info.transport_sequence_number), info}); + while (history_.size() > kMaxPacketsInHistory) { history_.erase(history_.begin()); } diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc b/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc index 6514a4eda7..482f58d1bb 100644 --- 
a/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc @@ -16,7 +16,11 @@ namespace webrtc { namespace { -using ::testing::_; +using ::testing::AllOf; +using ::testing::ElementsAre; +using ::testing::Field; +using PacketInfo = StreamFeedbackObserver::StreamPacketInfo; + static constexpr uint32_t kSsrc = 8492; class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver { @@ -28,41 +32,65 @@ class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver { }; RtpPacketSendInfo CreatePacket(uint32_t ssrc, - int16_t rtp_sequence_number, - int64_t transport_sequence_number) { + uint16_t rtp_sequence_number, + int64_t transport_sequence_number, + bool is_retransmission) { RtpPacketSendInfo res; - res.ssrc = ssrc; + res.media_ssrc = ssrc; res.transport_sequence_number = transport_sequence_number; res.rtp_sequence_number = rtp_sequence_number; + res.packet_type = is_retransmission ? RtpPacketMediaType::kRetransmission + : RtpPacketMediaType::kVideo; return res; } } // namespace + TEST(TransportFeedbackDemuxerTest, ObserverSanity) { TransportFeedbackDemuxer demuxer; MockStreamFeedbackObserver mock; demuxer.RegisterStreamFeedbackObserver({kSsrc}, &mock); - demuxer.AddPacket(CreatePacket(kSsrc, 55, 1)); - demuxer.AddPacket(CreatePacket(kSsrc, 56, 2)); - demuxer.AddPacket(CreatePacket(kSsrc, 57, 3)); + const uint16_t kRtpStartSeq = 55; + const int64_t kTransportStartSeq = 1; + demuxer.AddPacket(CreatePacket(kSsrc, kRtpStartSeq, kTransportStartSeq, + /*is_retransmit=*/false)); + demuxer.AddPacket(CreatePacket(kSsrc, kRtpStartSeq + 1, + kTransportStartSeq + 1, + /*is_retransmit=*/false)); + demuxer.AddPacket(CreatePacket( + kSsrc, kRtpStartSeq + 2, kTransportStartSeq + 2, /*is_retransmit=*/true)); rtcp::TransportFeedback feedback; - feedback.SetBase(1, 1000); - ASSERT_TRUE(feedback.AddReceivedPacket(1, 1000)); - ASSERT_TRUE(feedback.AddReceivedPacket(2, 2000)); - ASSERT_TRUE(feedback.AddReceivedPacket(3, 3000)); + feedback.SetBase(kTransportStartSeq, 1000); + ASSERT_TRUE(feedback.AddReceivedPacket(kTransportStartSeq, 1000)); + // Drop middle packet. 
+ ASSERT_TRUE(feedback.AddReceivedPacket(kTransportStartSeq + 2, 3000)); - EXPECT_CALL(mock, OnPacketFeedbackVector(_)).Times(1); + EXPECT_CALL( + mock, OnPacketFeedbackVector(ElementsAre( + AllOf(Field(&PacketInfo::received, true), + Field(&PacketInfo::ssrc, kSsrc), + Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq), + Field(&PacketInfo::is_retransmission, false)), + AllOf(Field(&PacketInfo::received, false), + Field(&PacketInfo::ssrc, kSsrc), + Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq + 1), + Field(&PacketInfo::is_retransmission, false)), + AllOf(Field(&PacketInfo::received, true), + Field(&PacketInfo::ssrc, kSsrc), + Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq + 2), + Field(&PacketInfo::is_retransmission, true))))); demuxer.OnTransportFeedback(feedback); demuxer.DeRegisterStreamFeedbackObserver(&mock); - demuxer.AddPacket(CreatePacket(kSsrc, 58, 4)); + demuxer.AddPacket( + CreatePacket(kSsrc, kRtpStartSeq + 3, kTransportStartSeq + 3, false)); rtcp::TransportFeedback second_feedback; - second_feedback.SetBase(4, 4000); - ASSERT_TRUE(second_feedback.AddReceivedPacket(4, 4000)); + second_feedback.SetBase(kTransportStartSeq + 3, 4000); + ASSERT_TRUE(second_feedback.AddReceivedPacket(kTransportStartSeq + 3, 4000)); - EXPECT_CALL(mock, OnPacketFeedbackVector(_)).Times(0); + EXPECT_CALL(mock, OnPacketFeedbackVector).Times(0); demuxer.OnTransportFeedback(second_feedback); } } // namespace webrtc diff --git a/modules/desktop_capture/cropping_window_capturer_win.cc b/modules/desktop_capture/cropping_window_capturer_win.cc index de36adb01e..31ddbe1b33 100644 --- a/modules/desktop_capture/cropping_window_capturer_win.cc +++ b/modules/desktop_capture/cropping_window_capturer_win.cc @@ -118,7 +118,7 @@ struct TopWindowVerifierContext : public SelectedWindowContext { // firing an assert when enabled, report that the selected window isn't // topmost to avoid inadvertent capture of other windows. RTC_LOG(LS_ERROR) << "Failed to enumerate windows: " << lastError; - RTC_DCHECK(false); + RTC_NOTREACHED(); return false; } } @@ -130,6 +130,8 @@ class CroppingWindowCapturerWin : public CroppingWindowCapturer { public: explicit CroppingWindowCapturerWin(const DesktopCaptureOptions& options) : CroppingWindowCapturer(options), + enumerate_current_process_windows_( + options.enumerate_current_process_windows()), full_screen_window_detector_(options.full_screen_window_detector()) {} void CaptureFrame() override; @@ -148,6 +150,8 @@ class CroppingWindowCapturerWin : public CroppingWindowCapturer { WindowCaptureHelperWin window_capture_helper_; + bool enumerate_current_process_windows_; + rtc::scoped_refptr full_screen_window_detector_; }; @@ -164,7 +168,12 @@ void CroppingWindowCapturerWin::CaptureFrame() { // it uses responsiveness check which could lead to performance // issues. SourceList result; - if (!webrtc::GetWindowList(GetWindowListFlags::kNone, &result)) + int window_list_flags = + enumerate_current_process_windows_ + ? 
GetWindowListFlags::kNone + : GetWindowListFlags::kIgnoreCurrentProcessWindows; + + if (!webrtc::GetWindowList(window_list_flags, &result)) return false; // Filter out windows not visible on current desktop diff --git a/modules/desktop_capture/desktop_capture_options.h b/modules/desktop_capture/desktop_capture_options.h index 4cb19a56d6..a693803aa0 100644 --- a/modules/desktop_capture/desktop_capture_options.h +++ b/modules/desktop_capture/desktop_capture_options.h @@ -98,6 +98,24 @@ class RTC_EXPORT DesktopCaptureOptions { } #if defined(WEBRTC_WIN) + // Enumerating windows owned by the current process on Windows has some + // complications due to |GetWindowText*()| APIs potentially causing a + // deadlock (see the comments in the |GetWindowListHandler()| function in + // window_capture_utils.cc for more details on the deadlock). + // To avoid this issue, consumers can either ensure that the thread that runs + // their message loop never waits on |GetSourceList()|, or they can set this + // flag to false which will prevent windows running in the current process + // from being enumerated and included in the results. Consumers can still + // provide the WindowId for their own windows to |SelectSource()| and capture + // them. + bool enumerate_current_process_windows() const { + return enumerate_current_process_windows_; + } + void set_enumerate_current_process_windows( + bool enumerate_current_process_windows) { + enumerate_current_process_windows_ = enumerate_current_process_windows; + } + bool allow_use_magnification_api() const { return allow_use_magnification_api_; } @@ -158,6 +176,7 @@ class RTC_EXPORT DesktopCaptureOptions { rtc::scoped_refptr full_screen_window_detector_; #if defined(WEBRTC_WIN) + bool enumerate_current_process_windows_ = true; bool allow_use_magnification_api_ = false; bool allow_directx_capturer_ = false; bool allow_cropping_window_capturer_ = false; diff --git a/modules/desktop_capture/desktop_region.cc b/modules/desktop_capture/desktop_region.cc index befbcc6f41..96f142d3dd 100644 --- a/modules/desktop_capture/desktop_region.cc +++ b/modules/desktop_capture/desktop_region.cc @@ -10,11 +10,11 @@ #include "modules/desktop_capture/desktop_region.h" -#include - #include #include +#include "rtc_base/checks.h" + namespace webrtc { DesktopRegion::RowSpan::RowSpan(int32_t left, int32_t right) @@ -109,7 +109,7 @@ void DesktopRegion::AddRect(const DesktopRect& rect) { // If the |top| falls in the middle of the |row| then split |row| into // two, at |top|, and leave |row| referring to the lower of the two, // ready to insert a new span into. - assert(top <= row->second->bottom); + RTC_DCHECK_LE(top, row->second->bottom); Rows::iterator new_row = rows_.insert( row, Rows::value_type(top, new Row(row->second->top, top))); row->second->top = top; @@ -148,7 +148,7 @@ void DesktopRegion::AddRects(const DesktopRect* rects, int count) { } void DesktopRegion::MergeWithPrecedingRow(Rows::iterator row) { - assert(row != rows_.end()); + RTC_DCHECK(row != rows_.end()); if (row != rows_.begin()) { Rows::iterator previous_row = row; @@ -230,7 +230,7 @@ void DesktopRegion::IntersectRows(const RowSpanSet& set1, RowSpanSet::const_iterator end1 = set1.end(); RowSpanSet::const_iterator it2 = set2.begin(); RowSpanSet::const_iterator end2 = set2.end(); - assert(it1 != end1 && it2 != end2); + RTC_DCHECK(it1 != end1 && it2 != end2); do { // Arrange for |it1| to always be the left-most of the spans. 
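The comment block in desktop_capture_options.h above describes the consumer-facing knob. A hedged usage sketch for a client whose capture thread also pumps the message loop of its own windows; DesktopCapturer::CreateWindowCapturer is assumed to be the usual factory and is not part of this patch:

#include <memory>

#include "modules/desktop_capture/desktop_capture_options.h"
#include "modules/desktop_capture/desktop_capturer.h"

// Opt out of enumerating windows owned by this process so GetSourceList()
// never makes a GetWindowText* call that could deadlock against our own
// message loop; our windows can still be captured by passing their WindowId
// to SelectSource() directly.
std::unique_ptr<webrtc::DesktopCapturer> CreateWindowCapturerForOwnLoop() {
  webrtc::DesktopCaptureOptions options =
      webrtc::DesktopCaptureOptions::CreateDefault();
#if defined(WEBRTC_WIN)
  options.set_enumerate_current_process_windows(false);
#endif
  return webrtc::DesktopCapturer::CreateWindowCapturer(options);
}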
@@ -247,7 +247,7 @@ void DesktopRegion::IntersectRows(const RowSpanSet& set1, int32_t left = it2->left; int32_t right = std::min(it1->right, it2->right); - assert(left < right); + RTC_DCHECK_LT(left, right); output->push_back(RowSpan(left, right)); @@ -302,7 +302,7 @@ void DesktopRegion::Subtract(const DesktopRegion& region) { // If |top| falls in the middle of |row_a| then split |row_a| into two, at // |top|, and leave |row_a| referring to the lower of the two, ready to // subtract spans from. - assert(top <= row_a->second->bottom); + RTC_DCHECK_LE(top, row_a->second->bottom); Rows::iterator new_row = rows_.insert( row_a, Rows::value_type(top, new Row(row_a->second->top, top))); row_a->second->top = top; @@ -420,7 +420,7 @@ void DesktopRegion::AddSpanToRow(Row* row, int left, int right) { // Find the first span that ends at or after |left|. RowSpanSet::iterator start = std::lower_bound( row->spans.begin(), row->spans.end(), left, CompareSpanRight); - assert(start < row->spans.end()); + RTC_DCHECK(start < row->spans.end()); // Find the first span that starts after |right|. RowSpanSet::iterator end = @@ -467,7 +467,7 @@ bool DesktopRegion::IsSpanInRow(const Row& row, const RowSpan& span) { void DesktopRegion::SubtractRows(const RowSpanSet& set_a, const RowSpanSet& set_b, RowSpanSet* output) { - assert(!set_a.empty() && !set_b.empty()); + RTC_DCHECK(!set_a.empty() && !set_b.empty()); RowSpanSet::const_iterator it_b = set_b.begin(); @@ -503,7 +503,7 @@ DesktopRegion::Iterator::Iterator(const DesktopRegion& region) row_(region.rows_.begin()), previous_row_(region.rows_.end()) { if (!IsAtEnd()) { - assert(row_->second->spans.size() > 0); + RTC_DCHECK_GT(row_->second->spans.size(), 0); row_span_ = row_->second->spans.begin(); UpdateCurrentRect(); } @@ -516,7 +516,7 @@ bool DesktopRegion::Iterator::IsAtEnd() const { } void DesktopRegion::Iterator::Advance() { - assert(!IsAtEnd()); + RTC_DCHECK(!IsAtEnd()); while (true) { ++row_span_; @@ -524,7 +524,7 @@ void DesktopRegion::Iterator::Advance() { previous_row_ = row_; ++row_; if (row_ != region_.rows_.end()) { - assert(row_->second->spans.size() > 0); + RTC_DCHECK_GT(row_->second->spans.size(), 0); row_span_ = row_->second->spans.begin(); } } @@ -544,7 +544,7 @@ void DesktopRegion::Iterator::Advance() { break; } - assert(!IsAtEnd()); + RTC_DCHECK(!IsAtEnd()); UpdateCurrentRect(); } diff --git a/modules/desktop_capture/linux/base_capturer_pipewire.cc b/modules/desktop_capture/linux/base_capturer_pipewire.cc index c302a086ea..e5d001e476 100644 --- a/modules/desktop_capture/linux/base_capturer_pipewire.cc +++ b/modules/desktop_capture/linux/base_capturer_pipewire.cc @@ -772,37 +772,27 @@ void BaseCapturerPipeWire::HandleBuffer(pw_buffer* buffer) { // Use video metadata when video size from metadata is set and smaller than // video stream size, so we need to adjust it. - bool video_is_full_width = true; - bool video_is_full_height = true; + bool video_metadata_use = false; + #if PW_CHECK_VERSION(0, 3, 0) - if (video_metadata && video_metadata->region.size.width != 0 && - video_metadata->region.size.height != 0) { - if (video_metadata->region.size.width < - static_cast(desktop_size_.width())) { - video_is_full_width = false; - } else if (video_metadata->region.size.height < - static_cast(desktop_size_.height())) { - video_is_full_height = false; - } - } + const struct spa_rectangle* video_metadata_size = + video_metadata ? 
&video_metadata->region.size : nullptr; #else - if (video_metadata && video_metadata->width != 0 && - video_metadata->height != 0) { - if (video_metadata->width < desktop_size_.width()) { - } else if (video_metadata->height < desktop_size_.height()) { - video_is_full_height = false; - } - } + const struct spa_meta_video_crop* video_metadata_size = video_metadata; #endif + if (video_metadata_size && video_metadata_size->width != 0 && + video_metadata_size->height != 0 && + (static_cast(video_metadata_size->width) < desktop_size_.width() || + static_cast(video_metadata_size->height) < + desktop_size_.height())) { + video_metadata_use = true; + } + DesktopSize video_size_prev = video_size_; - if (!video_is_full_height || !video_is_full_width) { -#if PW_CHECK_VERSION(0, 3, 0) - video_size_ = DesktopSize(video_metadata->region.size.width, - video_metadata->region.size.height); -#else - video_size_ = DesktopSize(video_metadata->width, video_metadata->height); -#endif + if (video_metadata_use) { + video_size_ = + DesktopSize(video_metadata_size->width, video_metadata_size->height); } else { video_size_ = desktop_size_; } @@ -827,25 +817,25 @@ void BaseCapturerPipeWire::HandleBuffer(pw_buffer* buffer) { // Adjust source content based on metadata video position #if PW_CHECK_VERSION(0, 3, 0) - if (!video_is_full_height && + if (video_metadata_use && (video_metadata->region.position.y + video_size_.height() <= desktop_size_.height())) { src += src_stride * video_metadata->region.position.y; } const int x_offset = - !video_is_full_width && + video_metadata_use && (video_metadata->region.position.x + video_size_.width() <= desktop_size_.width()) ? video_metadata->region.position.x * kBytesPerPixel : 0; #else - if (!video_is_full_height && + if (video_metadata_use && (video_metadata->y + video_size_.height() <= desktop_size_.height())) { src += src_stride * video_metadata->y; } const int x_offset = - !video_is_full_width && + video_metadata_use && (video_metadata->x + video_size_.width() <= desktop_size_.width()) ? video_metadata->x * kBytesPerPixel : 0; @@ -1036,6 +1026,23 @@ void BaseCapturerPipeWire::SourcesRequest() { // We don't want to allow selection of multiple sources. g_variant_builder_add(&builder, "{sv}", "multiple", g_variant_new_boolean(false)); + + Scoped variant( + g_dbus_proxy_get_cached_property(proxy_, "AvailableCursorModes")); + if (variant.get()) { + uint32_t modes = 0; + g_variant_get(variant.get(), "u", &modes); + // Request mouse cursor to be embedded as part of the stream, otherwise it + // is hidden by default. Make request only if this mode is advertised by + // the portal implementation. 
+ if (modes & + static_cast(BaseCapturerPipeWire::CursorMode::kEmbedded)) { + g_variant_builder_add(&builder, "{sv}", "cursor_mode", + g_variant_new_uint32(static_cast( + BaseCapturerPipeWire::CursorMode::kEmbedded))); + } + } + variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT)); g_variant_builder_add(&builder, "{sv}", "handle_token", g_variant_new_string(variant_string.get())); diff --git a/modules/desktop_capture/linux/base_capturer_pipewire.h b/modules/desktop_capture/linux/base_capturer_pipewire.h index 75d20dbf1d..52264188a7 100644 --- a/modules/desktop_capture/linux/base_capturer_pipewire.h +++ b/modules/desktop_capture/linux/base_capturer_pipewire.h @@ -47,6 +47,12 @@ class BaseCapturerPipeWire : public DesktopCapturer { kAny = 0b11 }; + enum class CursorMode : uint32_t { + kHidden = 0b01, + kEmbedded = 0b10, + kMetadata = 0b100 + }; + explicit BaseCapturerPipeWire(CaptureSourceType source_type); ~BaseCapturerPipeWire() override; diff --git a/modules/desktop_capture/linux/x_error_trap.cc b/modules/desktop_capture/linux/x_error_trap.cc index 53c907fc45..13233d8274 100644 --- a/modules/desktop_capture/linux/x_error_trap.cc +++ b/modules/desktop_capture/linux/x_error_trap.cc @@ -10,55 +10,40 @@ #include "modules/desktop_capture/linux/x_error_trap.h" -#include #include -#if defined(TOOLKIT_GTK) -#include -#endif // !defined(TOOLKIT_GTK) +#include "rtc_base/checks.h" namespace webrtc { namespace { -#if !defined(TOOLKIT_GTK) - // TODO(sergeyu): This code is not thread safe. Fix it. Bug 2202. static bool g_xserver_error_trap_enabled = false; static int g_last_xserver_error_code = 0; int XServerErrorHandler(Display* display, XErrorEvent* error_event) { - assert(g_xserver_error_trap_enabled); + RTC_DCHECK(g_xserver_error_trap_enabled); g_last_xserver_error_code = error_event->error_code; return 0; } -#endif // !defined(TOOLKIT_GTK) - } // namespace XErrorTrap::XErrorTrap(Display* display) : original_error_handler_(NULL), enabled_(true) { -#if defined(TOOLKIT_GTK) - gdk_error_trap_push(); -#else // !defined(TOOLKIT_GTK) - assert(!g_xserver_error_trap_enabled); + RTC_DCHECK(!g_xserver_error_trap_enabled); original_error_handler_ = XSetErrorHandler(&XServerErrorHandler); g_xserver_error_trap_enabled = true; g_last_xserver_error_code = 0; -#endif // !defined(TOOLKIT_GTK) } int XErrorTrap::GetLastErrorAndDisable() { enabled_ = false; -#if defined(TOOLKIT_GTK) - return gdk_error_trap_push(); -#else // !defined(TOOLKIT_GTK) - assert(g_xserver_error_trap_enabled); + RTC_DCHECK(g_xserver_error_trap_enabled); XSetErrorHandler(original_error_handler_); g_xserver_error_trap_enabled = false; return g_last_xserver_error_code; -#endif // !defined(TOOLKIT_GTK) } XErrorTrap::~XErrorTrap() { diff --git a/modules/desktop_capture/mouse_cursor.cc b/modules/desktop_capture/mouse_cursor.cc index 3b61e10a8b..e826552b0f 100644 --- a/modules/desktop_capture/mouse_cursor.cc +++ b/modules/desktop_capture/mouse_cursor.cc @@ -10,9 +10,8 @@ #include "modules/desktop_capture/mouse_cursor.h" -#include - #include "modules/desktop_capture/desktop_frame.h" +#include "rtc_base/checks.h" namespace webrtc { @@ -20,8 +19,8 @@ MouseCursor::MouseCursor() {} MouseCursor::MouseCursor(DesktopFrame* image, const DesktopVector& hotspot) : image_(image), hotspot_(hotspot) { - assert(0 <= hotspot_.x() && hotspot_.x() <= image_->size().width()); - assert(0 <= hotspot_.y() && hotspot_.y() <= image_->size().height()); + RTC_DCHECK(0 <= hotspot_.x() && hotspot_.x() <= image_->size().width()); + RTC_DCHECK(0 <= 
hotspot_.y() && hotspot_.y() <= image_->size().height()); } MouseCursor::~MouseCursor() {} diff --git a/modules/desktop_capture/mouse_cursor_monitor_unittest.cc b/modules/desktop_capture/mouse_cursor_monitor_unittest.cc index ee2dff32af..268e5e3475 100644 --- a/modules/desktop_capture/mouse_cursor_monitor_unittest.cc +++ b/modules/desktop_capture/mouse_cursor_monitor_unittest.cc @@ -65,7 +65,7 @@ TEST_F(MouseCursorMonitorTest, MAYBE(FromScreen)) { MouseCursorMonitor::CreateForScreen( DesktopCaptureOptions::CreateDefault(), webrtc::kFullDesktopScreenId)); - assert(capturer.get()); + RTC_DCHECK(capturer.get()); capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION); capturer->Capture(); @@ -102,7 +102,7 @@ TEST_F(MouseCursorMonitorTest, MAYBE(FromWindow)) { std::unique_ptr capturer( MouseCursorMonitor::CreateForWindow( DesktopCaptureOptions::CreateDefault(), sources[i].id)); - assert(capturer.get()); + RTC_DCHECK(capturer.get()); capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION); capturer->Capture(); @@ -118,7 +118,7 @@ TEST_F(MouseCursorMonitorTest, MAYBE(ShapeOnly)) { MouseCursorMonitor::CreateForScreen( DesktopCaptureOptions::CreateDefault(), webrtc::kFullDesktopScreenId)); - assert(capturer.get()); + RTC_DCHECK(capturer.get()); capturer->Init(this, MouseCursorMonitor::SHAPE_ONLY); capturer->Capture(); diff --git a/modules/desktop_capture/mouse_cursor_monitor_win.cc b/modules/desktop_capture/mouse_cursor_monitor_win.cc index bf0d8534e3..5a10ee1251 100644 --- a/modules/desktop_capture/mouse_cursor_monitor_win.cc +++ b/modules/desktop_capture/mouse_cursor_monitor_win.cc @@ -77,7 +77,7 @@ MouseCursorMonitorWin::MouseCursorMonitorWin(ScreenId screen) callback_(NULL), mode_(SHAPE_AND_POSITION), desktop_dc_(NULL) { - assert(screen >= kFullDesktopScreenId); + RTC_DCHECK_GE(screen, kFullDesktopScreenId); memset(&last_cursor_, 0, sizeof(CURSORINFO)); } @@ -87,8 +87,8 @@ MouseCursorMonitorWin::~MouseCursorMonitorWin() { } void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) { - assert(!callback_); - assert(callback); + RTC_DCHECK(!callback_); + RTC_DCHECK(callback); callback_ = callback; mode_ = mode; @@ -97,7 +97,7 @@ void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) { } void MouseCursorMonitorWin::Capture() { - assert(callback_); + RTC_DCHECK(callback_); CURSORINFO cursor_info; cursor_info.cbSize = sizeof(CURSORINFO); @@ -158,7 +158,7 @@ void MouseCursorMonitorWin::Capture() { position = position.subtract(cropped_rect.top_left()); } } else { - assert(screen_ != kInvalidScreenId); + RTC_DCHECK_NE(screen_, kInvalidScreenId); DesktopRect rect = GetScreenRect(); if (inside) inside = rect.Contains(position); @@ -169,7 +169,7 @@ void MouseCursorMonitorWin::Capture() { } DesktopRect MouseCursorMonitorWin::GetScreenRect() { - assert(screen_ != kInvalidScreenId); + RTC_DCHECK_NE(screen_, kInvalidScreenId); if (screen_ == kFullDesktopScreenId) { return DesktopRect::MakeXYWH(GetSystemMetrics(SM_XVIRTUALSCREEN), GetSystemMetrics(SM_YVIRTUALSCREEN), diff --git a/modules/desktop_capture/screen_capturer_helper.cc b/modules/desktop_capture/screen_capturer_helper.cc index 535b653c08..e8bd3fc450 100644 --- a/modules/desktop_capture/screen_capturer_helper.cc +++ b/modules/desktop_capture/screen_capturer_helper.cc @@ -74,7 +74,7 @@ static int UpToMultiple(int x, int n, int nMask) { void ScreenCapturerHelper::ExpandToGrid(const DesktopRegion& region, int log_grid_size, DesktopRegion* result) { - assert(log_grid_size >= 1); + RTC_DCHECK_GE(log_grid_size, 1); int grid_size = 1 
<< log_grid_size; int grid_size_mask = ~(grid_size - 1); diff --git a/modules/desktop_capture/screen_capturer_unittest.cc b/modules/desktop_capture/screen_capturer_unittest.cc index ea77069278..ba6b8bfe3d 100644 --- a/modules/desktop_capture/screen_capturer_unittest.cc +++ b/modules/desktop_capture/screen_capturer_unittest.cc @@ -99,7 +99,13 @@ ACTION_P(SaveUniquePtrArg, dest) { *dest = std::move(*arg1); } -TEST_F(ScreenCapturerTest, GetScreenListAndSelectScreen) { +// TODO(bugs.webrtc.org/12950): Re-enable when libc++ issue is fixed. +#if defined(WEBRTC_LINUX) && defined(MEMORY_SANITIZER) +#define MAYBE_GetScreenListAndSelectScreen DISABLED_GetScreenListAndSelectScreen +#else +#define MAYBE_GetScreenListAndSelectScreen GetScreenListAndSelectScreen +#endif +TEST_F(ScreenCapturerTest, MAYBE_GetScreenListAndSelectScreen) { webrtc::DesktopCapturer::SourceList screens; EXPECT_TRUE(capturer_->GetSourceList(&screens)); for (const auto& screen : screens) { diff --git a/modules/desktop_capture/win/wgc_capturer_win.cc b/modules/desktop_capture/win/wgc_capturer_win.cc index 88859b6e84..442c827a67 100644 --- a/modules/desktop_capture/win/wgc_capturer_win.cc +++ b/modules/desktop_capture/win/wgc_capturer_win.cc @@ -57,7 +57,8 @@ std::unique_ptr WgcCapturerWin::CreateRawWindowCapturer( const DesktopCaptureOptions& options) { return std::make_unique( std::make_unique(), - std::make_unique()); + std::make_unique( + options.enumerate_current_process_windows())); } // static diff --git a/modules/desktop_capture/win/wgc_capturer_win.h b/modules/desktop_capture/win/wgc_capturer_win.h index 9d461d38a1..58f3fc318a 100644 --- a/modules/desktop_capture/win/wgc_capturer_win.h +++ b/modules/desktop_capture/win/wgc_capturer_win.h @@ -38,7 +38,8 @@ class SourceEnumerator { class WindowEnumerator final : public SourceEnumerator { public: - WindowEnumerator() = default; + explicit WindowEnumerator(bool enumerate_current_process_windows) + : enumerate_current_process_windows_(enumerate_current_process_windows) {} WindowEnumerator(const WindowEnumerator&) = delete; WindowEnumerator& operator=(const WindowEnumerator&) = delete; @@ -48,12 +49,13 @@ class WindowEnumerator final : public SourceEnumerator { bool FindAllSources(DesktopCapturer::SourceList* sources) override { // WGC fails to capture windows with the WS_EX_TOOLWINDOW style, so we // provide it as a filter to ensure windows with the style are not returned. 
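The WS_EX_TOOLWINDOW value passed below is an extended-style filter: windows whose extended styles match a bit in the filter are dropped from the results. The actual check lives in GetWindowListHandler() in window_capture_utils.cc; the sketch below only illustrates the kind of test involved, using standard Win32 calls, and the helper name is made up:

#include <windows.h>

// Illustrative extended-style test: returns true if |hwnd| carries any of the
// styles in |ex_style_filters| (e.g. WS_EX_TOOLWINDOW) and should therefore
// be excluded from the capturable-window list.
bool HasFilteredExStyle(HWND hwnd, LONG ex_style_filters) {
  const LONG ex_styles = GetWindowLong(hwnd, GWL_EXSTYLE);
  return (ex_styles & ex_style_filters) != 0;
}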
- return window_capture_helper_.EnumerateCapturableWindows(sources, - WS_EX_TOOLWINDOW); + return window_capture_helper_.EnumerateCapturableWindows( + sources, enumerate_current_process_windows_, WS_EX_TOOLWINDOW); } private: WindowCaptureHelperWin window_capture_helper_; + bool enumerate_current_process_windows_; }; class ScreenEnumerator final : public SourceEnumerator { diff --git a/modules/desktop_capture/win/window_capture_utils.cc b/modules/desktop_capture/win/window_capture_utils.cc index 7c5cc70087..aaaef0a80d 100644 --- a/modules/desktop_capture/win/window_capture_utils.cc +++ b/modules/desktop_capture/win/window_capture_utils.cc @@ -32,26 +32,21 @@ struct GetWindowListParams { DesktopCapturer::SourceList* result) : ignore_untitled(flags & GetWindowListFlags::kIgnoreUntitled), ignore_unresponsive(flags & GetWindowListFlags::kIgnoreUnresponsive), + ignore_current_process_windows( + flags & GetWindowListFlags::kIgnoreCurrentProcessWindows), ex_style_filters(ex_style_filters), result(result) {} const bool ignore_untitled; const bool ignore_unresponsive; + const bool ignore_current_process_windows; const LONG ex_style_filters; DesktopCapturer::SourceList* const result; }; -// If a window is owned by the current process and unresponsive, then making a -// blocking call such as GetWindowText may lead to a deadlock. -// -// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getwindowtexta#remarks -bool CanSafelyMakeBlockingCalls(HWND hwnd) { +bool IsWindowOwnedByCurrentProcess(HWND hwnd) { DWORD process_id; GetWindowThreadProcessId(hwnd, &process_id); - if (process_id != GetCurrentProcessId() || IsWindowResponding(hwnd)) { - return true; - } - - return false; + return process_id == GetCurrentProcessId(); } BOOL CALLBACK GetWindowListHandler(HWND hwnd, LPARAM param) { @@ -85,11 +80,26 @@ BOOL CALLBACK GetWindowListHandler(HWND hwnd, LPARAM param) { window.id = reinterpret_cast(hwnd); // GetWindowText* are potentially blocking operations if |hwnd| is - // owned by the current process, and can lead to a deadlock if the message - // pump is waiting on this thread. If we've filtered out unresponsive - // windows, this is not a concern, but otherwise we need to check if we can - // safely make blocking calls. - if (params->ignore_unresponsive || CanSafelyMakeBlockingCalls(hwnd)) { + // owned by the current process. The APIs will send messages to the window's + // message loop, and if the message loop is waiting on this operation we will + // enter a deadlock. + // https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getwindowtexta#remarks + // + // To help consumers avoid this, there is a DesktopCaptureOption to ignore + // windows owned by the current process. Consumers should either ensure that + // the thread running their message loop never waits on this operation, or use + // the option to exclude these windows from the source list. + bool owned_by_current_process = IsWindowOwnedByCurrentProcess(hwnd); + if (owned_by_current_process && params->ignore_current_process_windows) { + return TRUE; + } + + // Even if consumers request to enumerate windows owned by the current + // process, we should not call GetWindowText* on unresponsive windows owned by + // the current process because we will hang. Unfortunately, we could still + // hang if the window becomes unresponsive after this check, hence the option + // to avoid these completely. 
+ if (!owned_by_current_process || IsWindowResponding(hwnd)) { const size_t kTitleLength = 500; WCHAR window_title[kTitleLength] = L""; if (GetWindowTextLength(hwnd) != 0 && @@ -445,10 +455,15 @@ bool WindowCaptureHelperWin::IsWindowCloaked(HWND hwnd) { bool WindowCaptureHelperWin::EnumerateCapturableWindows( DesktopCapturer::SourceList* results, + bool enumerate_current_process_windows, LONG ex_style_filters) { - if (!webrtc::GetWindowList((GetWindowListFlags::kIgnoreUntitled | - GetWindowListFlags::kIgnoreUnresponsive), - results, ex_style_filters)) { + int flags = (GetWindowListFlags::kIgnoreUntitled | + GetWindowListFlags::kIgnoreUnresponsive); + if (!enumerate_current_process_windows) { + flags |= GetWindowListFlags::kIgnoreCurrentProcessWindows; + } + + if (!webrtc::GetWindowList(flags, results, ex_style_filters)) { return false; } diff --git a/modules/desktop_capture/win/window_capture_utils.h b/modules/desktop_capture/win/window_capture_utils.h index 11b2c2c1b5..a6a295d068 100644 --- a/modules/desktop_capture/win/window_capture_utils.h +++ b/modules/desktop_capture/win/window_capture_utils.h @@ -78,6 +78,7 @@ enum GetWindowListFlags { kNone = 0x00, kIgnoreUntitled = 1 << 0, kIgnoreUnresponsive = 1 << 1, + kIgnoreCurrentProcessWindows = 1 << 2, }; // Retrieves the list of top-level windows on the screen. @@ -85,7 +86,8 @@ enum GetWindowListFlags { // - Those that are invisible or minimized. // - Program Manager & Start menu. // - [with kIgnoreUntitled] windows with no title. -// - [with kIgnoreUnresponsive] windows that unresponsive. +// - [with kIgnoreUnresponsive] windows that are unresponsive. +// - [with kIgnoreCurrentProcessWindows] windows owned by the current process. // - Any windows with extended styles that match |ex_style_filters|. // Returns false if native APIs failed. bool GetWindowList(int flags, @@ -115,6 +117,7 @@ class WindowCaptureHelperWin { // extended window styles (e.g. WS_EX_TOOLWINDOW) and prevent windows that // match from being included in |results|. 
bool EnumerateCapturableWindows(DesktopCapturer::SourceList* results, + bool enumerate_current_process_windows, LONG ex_style_filters = 0); private: diff --git a/modules/desktop_capture/win/window_capture_utils_unittest.cc b/modules/desktop_capture/win/window_capture_utils_unittest.cc index 52f6714383..4b426fc464 100644 --- a/modules/desktop_capture/win/window_capture_utils_unittest.cc +++ b/modules/desktop_capture/win/window_capture_utils_unittest.cc @@ -137,4 +137,18 @@ TEST(WindowCaptureUtilsTest, IgnoreUntitledWindows) { DestroyTestWindow(info); } +TEST(WindowCaptureUtilsTest, IgnoreCurrentProcessWindows) { + WindowInfo info = CreateTestWindow(kWindowTitle); + DesktopCapturer::SourceList window_list; + ASSERT_TRUE(GetWindowList(GetWindowListFlags::kIgnoreCurrentProcessWindows, + &window_list)); + EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + DestroyTestWindow(info); +} + } // namespace webrtc diff --git a/modules/desktop_capture/win/window_capturer_win_gdi.cc b/modules/desktop_capture/win/window_capturer_win_gdi.cc index 277c02e7c5..25677e9868 100644 --- a/modules/desktop_capture/win/window_capturer_win_gdi.cc +++ b/modules/desktop_capture/win/window_capturer_win_gdi.cc @@ -95,11 +95,14 @@ BOOL CALLBACK OwnedWindowCollector(HWND hwnd, LPARAM param) { return TRUE; } -WindowCapturerWinGdi::WindowCapturerWinGdi() {} +WindowCapturerWinGdi::WindowCapturerWinGdi( + bool enumerate_current_process_windows) + : enumerate_current_process_windows_(enumerate_current_process_windows) {} WindowCapturerWinGdi::~WindowCapturerWinGdi() {} bool WindowCapturerWinGdi::GetSourceList(SourceList* sources) { - if (!window_capture_helper_.EnumerateCapturableWindows(sources)) + if (!window_capture_helper_.EnumerateCapturableWindows( + sources, enumerate_current_process_windows_)) return false; std::map new_map; @@ -350,7 +353,8 @@ WindowCapturerWinGdi::CaptureResults WindowCapturerWinGdi::CaptureFrame( if (!owned_windows_.empty()) { if (!owned_window_capturer_) { - owned_window_capturer_ = std::make_unique(); + owned_window_capturer_ = std::make_unique( + enumerate_current_process_windows_); } // Owned windows are stored in top-down z-order, so this iterates in @@ -389,7 +393,8 @@ WindowCapturerWinGdi::CaptureResults WindowCapturerWinGdi::CaptureFrame( // static std::unique_ptr WindowCapturerWinGdi::CreateRawWindowCapturer( const DesktopCaptureOptions& options) { - return std::unique_ptr(new WindowCapturerWinGdi()); + return std::unique_ptr( + new WindowCapturerWinGdi(options.enumerate_current_process_windows())); } } // namespace webrtc diff --git a/modules/desktop_capture/win/window_capturer_win_gdi.h b/modules/desktop_capture/win/window_capturer_win_gdi.h index c954c230c9..5091458a12 100644 --- a/modules/desktop_capture/win/window_capturer_win_gdi.h +++ b/modules/desktop_capture/win/window_capturer_win_gdi.h @@ -24,7 +24,7 @@ namespace webrtc { class WindowCapturerWinGdi : public DesktopCapturer { public: - WindowCapturerWinGdi(); + explicit WindowCapturerWinGdi(bool enumerate_current_process_windows); // Disallow copy and assign WindowCapturerWinGdi(const WindowCapturerWinGdi&) = delete; @@ -61,6 +61,8 @@ class WindowCapturerWinGdi : public DesktopCapturer { WindowCaptureHelperWin window_capture_helper_; + bool enumerate_current_process_windows_; + // This map is used to avoid flickering for the case when SelectWindow() calls // are interleaved with Capture() calls. 
std::map window_size_map_; diff --git a/modules/desktop_capture/window_capturer_null.cc b/modules/desktop_capture/window_capturer_null.cc index 66e76a50fb..e7c7b0a134 100644 --- a/modules/desktop_capture/window_capturer_null.cc +++ b/modules/desktop_capture/window_capturer_null.cc @@ -8,10 +8,9 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include - #include "modules/desktop_capture/desktop_capturer.h" #include "modules/desktop_capture/desktop_frame.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" namespace webrtc { @@ -49,8 +48,8 @@ bool WindowCapturerNull::SelectSource(SourceId id) { } void WindowCapturerNull::Start(Callback* callback) { - assert(!callback_); - assert(callback); + RTC_DCHECK(!callback_); + RTC_DCHECK(callback); callback_ = callback; } diff --git a/modules/desktop_capture/window_capturer_unittest.cc b/modules/desktop_capture/window_capturer_unittest.cc index 8a611e760a..519c04601b 100644 --- a/modules/desktop_capture/window_capturer_unittest.cc +++ b/modules/desktop_capture/window_capturer_unittest.cc @@ -44,7 +44,13 @@ class WindowCapturerTest : public ::testing::Test, }; // Verify that we can enumerate windows. -TEST_F(WindowCapturerTest, Enumerate) { +// TODO(bugs.webrtc.org/12950): Re-enable when libc++ issue is fixed +#if defined(WEBRTC_LINUX) && defined(MEMORY_SANITIZER) +#define MAYBE_Enumerate DISABLED_Enumerate +#else +#define MAYBE_Enumerate Enumerate +#endif +TEST_F(WindowCapturerTest, MAYBE_Enumerate) { DesktopCapturer::SourceList sources; EXPECT_TRUE(capturer_->GetSourceList(&sources)); @@ -54,8 +60,9 @@ TEST_F(WindowCapturerTest, Enumerate) { } } -// Flaky on Linux. See: crbug.com/webrtc/7830 -#if defined(WEBRTC_LINUX) +// Flaky on Linux. See: crbug.com/webrtc/7830. +// Failing on macOS 11: See bugs.webrtc.org/12801 +#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC) #define MAYBE_Capture DISABLED_Capture #else #define MAYBE_Capture Capture diff --git a/modules/remote_bitrate_estimator/BUILD.gn b/modules/remote_bitrate_estimator/BUILD.gn index 85077038aa..923f00a74c 100644 --- a/modules/remote_bitrate_estimator/BUILD.gn +++ b/modules/remote_bitrate_estimator/BUILD.gn @@ -47,6 +47,8 @@ rtc_library("remote_bitrate_estimator") { "../../api/transport:network_control", "../../api/transport:webrtc_key_value_config", "../../api/units:data_rate", + "../../api/units:data_size", + "../../api/units:time_delta", "../../api/units:timestamp", "../../modules:module_api", "../../modules:module_api_public", @@ -76,10 +78,9 @@ if (!build_with_chromium) { "tools/bwe_rtp.h", ] deps = [ - ":remote_bitrate_estimator", "../../rtc_base:rtc_base_approved", "../../test:rtp_test_utils", - "../rtp_rtcp", + "../rtp_rtcp:rtp_rtcp_format", ] absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", @@ -92,10 +93,10 @@ if (!build_with_chromium) { sources = [ "tools/rtp_to_text.cc" ] deps = [ ":bwe_rtp", - "../../modules/rtp_rtcp", "../../rtc_base:macromagic", "../../rtc_base:stringutils", "../../test:rtp_test_utils", + "../rtp_rtcp:rtp_rtcp_format", ] } } diff --git a/modules/remote_bitrate_estimator/aimd_rate_control.cc b/modules/remote_bitrate_estimator/aimd_rate_control.cc index 2ca298b7fa..bf7119cc7d 100644 --- a/modules/remote_bitrate_estimator/aimd_rate_control.cc +++ b/modules/remote_bitrate_estimator/aimd_rate_control.cc @@ -362,7 +362,7 @@ void AimdRateControl::ChangeBitrate(const RateControlInput& input, break; } default: - assert(false); + RTC_NOTREACHED(); } current_bitrate_ = ClampBitrate(new_bitrate.value_or(current_bitrate_)); 
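The assert() replacements in this and the following hunks all follow the same pattern: RTC_DCHECK* mirrors assert() by compiling out of release builds, and RTC_NOTREACHED() documents a switch arm that should be impossible. A small sketch with an illustrative enum:

    #include "rtc_base/checks.h"

    enum class RateControlPhase { kHold, kIncrease, kDecrease };  // Illustrative.

    int DirectionOf(RateControlPhase phase) {
      switch (phase) {
        case RateControlPhase::kHold:
          return 0;
        case RateControlPhase::kIncrease:
          return 1;
        case RateControlPhase::kDecrease:
          return -1;
        default:
          RTC_NOTREACHED();  // Fires in debug builds if a new enum value is missed.
          return 0;
      }
    }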
@@ -417,7 +417,7 @@ void AimdRateControl::ChangeState(const RateControlInput& input, rate_control_state_ = RateControlState::kRcHold; break; default: - assert(false); + RTC_NOTREACHED(); } } diff --git a/modules/remote_bitrate_estimator/inter_arrival.cc b/modules/remote_bitrate_estimator/inter_arrival.cc index b8e683b89a..a8cf47fbfe 100644 --- a/modules/remote_bitrate_estimator/inter_arrival.cc +++ b/modules/remote_bitrate_estimator/inter_arrival.cc @@ -37,9 +37,9 @@ bool InterArrival::ComputeDeltas(uint32_t timestamp, uint32_t* timestamp_delta, int64_t* arrival_time_delta_ms, int* packet_size_delta) { - assert(timestamp_delta != NULL); - assert(arrival_time_delta_ms != NULL); - assert(packet_size_delta != NULL); + RTC_DCHECK(timestamp_delta); + RTC_DCHECK(arrival_time_delta_ms); + RTC_DCHECK(packet_size_delta); bool calculated_deltas = false; if (current_timestamp_group_.IsFirstPacket()) { // We don't have enough data to update the filter, so we store it until we @@ -85,7 +85,7 @@ bool InterArrival::ComputeDeltas(uint32_t timestamp, } else { num_consecutive_reordered_packets_ = 0; } - assert(*arrival_time_delta_ms >= 0); + RTC_DCHECK_GE(*arrival_time_delta_ms, 0); *packet_size_delta = static_cast(current_timestamp_group_.size) - static_cast(prev_timestamp_group_.size); calculated_deltas = true; @@ -141,7 +141,7 @@ bool InterArrival::BelongsToBurst(int64_t arrival_time_ms, if (!burst_grouping_) { return false; } - assert(current_timestamp_group_.complete_time_ms >= 0); + RTC_DCHECK_GE(current_timestamp_group_.complete_time_ms, 0); int64_t arrival_time_delta_ms = arrival_time_ms - current_timestamp_group_.complete_time_ms; uint32_t timestamp_diff = timestamp - current_timestamp_group_.timestamp; diff --git a/modules/remote_bitrate_estimator/overuse_estimator.cc b/modules/remote_bitrate_estimator/overuse_estimator.cc index 74449bec66..3427d5880c 100644 --- a/modules/remote_bitrate_estimator/overuse_estimator.cc +++ b/modules/remote_bitrate_estimator/overuse_estimator.cc @@ -110,7 +110,7 @@ void OveruseEstimator::Update(int64_t t_delta, bool positive_semi_definite = E_[0][0] + E_[1][1] >= 0 && E_[0][0] * E_[1][1] - E_[0][1] * E_[1][0] >= 0 && E_[0][0] >= 0; - assert(positive_semi_definite); + RTC_DCHECK(positive_semi_definite); if (!positive_semi_definite) { RTC_LOG(LS_ERROR) << "The over-use estimator's covariance matrix is no longer " diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc index 4196f6dc57..ae960ab960 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc @@ -13,18 +13,36 @@ #include #include +#include +#include #include "api/transport/field_trial_based_config.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/logging.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/metrics.h" namespace webrtc { namespace { + +constexpr TimeDelta kMinClusterDelta = TimeDelta::Millis(1); +constexpr TimeDelta kInitialProbingInterval = TimeDelta::Seconds(2); +constexpr int kTimestampGroupLengthMs = 5; +constexpr int 
kAbsSendTimeInterArrivalUpshift = 8; +constexpr int kInterArrivalShift = + RTPHeaderExtension::kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift; +constexpr int kMinClusterSize = 4; +constexpr int kMaxProbePackets = 15; +constexpr int kExpectedNumberOfProbes = 3; +constexpr double kTimestampToMs = + 1000.0 / static_cast(1 << kInterArrivalShift); + absl::optional OptionalRateFromOptionalBps( absl::optional bitrate_bps) { if (bitrate_bps) { @@ -33,62 +51,48 @@ absl::optional OptionalRateFromOptionalBps( return absl::nullopt; } } -} // namespace - -enum { - kTimestampGroupLengthMs = 5, - kAbsSendTimeInterArrivalUpshift = 8, - kInterArrivalShift = RTPHeaderExtension::kAbsSendTimeFraction + - kAbsSendTimeInterArrivalUpshift, - kInitialProbingIntervalMs = 2000, - kMinClusterSize = 4, - kMaxProbePackets = 15, - kExpectedNumberOfProbes = 3 -}; - -static const double kTimestampToMs = - 1000.0 / static_cast(1 << kInterArrivalShift); template std::vector Keys(const std::map& map) { std::vector keys; keys.reserve(map.size()); - for (typename std::map::const_iterator it = map.begin(); - it != map.end(); ++it) { - keys.push_back(it->first); + for (const auto& kv_pair : map) { + keys.push_back(kv_pair.first); } return keys; } -uint32_t ConvertMsTo24Bits(int64_t time_ms) { - uint32_t time_24_bits = - static_cast(((static_cast(time_ms) - << RTPHeaderExtension::kAbsSendTimeFraction) + - 500) / - 1000) & - 0x00FFFFFF; - return time_24_bits; -} +} // namespace RemoteBitrateEstimatorAbsSendTime::~RemoteBitrateEstimatorAbsSendTime() = default; bool RemoteBitrateEstimatorAbsSendTime::IsWithinClusterBounds( - int send_delta_ms, + TimeDelta send_delta, const Cluster& cluster_aggregate) { if (cluster_aggregate.count == 0) return true; - float cluster_mean = cluster_aggregate.send_mean_ms / - static_cast(cluster_aggregate.count); - return fabs(static_cast(send_delta_ms) - cluster_mean) < 2.5f; + TimeDelta cluster_mean = + cluster_aggregate.send_mean / cluster_aggregate.count; + return (send_delta - cluster_mean).Abs() < TimeDelta::Micros(2'500); } -void RemoteBitrateEstimatorAbsSendTime::AddCluster(std::list* clusters, - Cluster* cluster) { - cluster->send_mean_ms /= static_cast(cluster->count); - cluster->recv_mean_ms /= static_cast(cluster->count); - cluster->mean_size /= cluster->count; - clusters->push_back(*cluster); +void RemoteBitrateEstimatorAbsSendTime::MaybeAddCluster( + const Cluster& cluster_aggregate, + std::list& clusters) { + if (cluster_aggregate.count < kMinClusterSize || + cluster_aggregate.send_mean <= TimeDelta::Zero() || + cluster_aggregate.recv_mean <= TimeDelta::Zero()) { + return; + } + + Cluster cluster; + cluster.send_mean = cluster_aggregate.send_mean / cluster_aggregate.count; + cluster.recv_mean = cluster_aggregate.recv_mean / cluster_aggregate.count; + cluster.mean_size = cluster_aggregate.mean_size / cluster_aggregate.count; + cluster.count = cluster_aggregate.count; + cluster.num_above_min_delta = cluster_aggregate.num_above_min_delta; + clusters.push_back(cluster); } RemoteBitrateEstimatorAbsSendTime::RemoteBitrateEstimatorAbsSendTime( @@ -96,91 +100,77 @@ RemoteBitrateEstimatorAbsSendTime::RemoteBitrateEstimatorAbsSendTime( Clock* clock) : clock_(clock), observer_(observer), - inter_arrival_(), - estimator_(), detector_(&field_trials_), - incoming_bitrate_(kBitrateWindowMs, 8000), - incoming_bitrate_initialized_(false), - total_probes_received_(0), - first_packet_time_ms_(-1), - last_update_ms_(-1), - uma_recorded_(false), remote_rate_(&field_trials_) { RTC_DCHECK(clock_); 
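A worked example of the shift constants above: absolute send time travels as a 24-bit 6.18 fixed-point seconds value, so kAbsSendTimeFraction is 18, the 8-bit upshift yields a 32-bit timestamp with kInterArrivalShift == 26 fractional bits, kTimestampToMs is 1000 / 2^26, and the shifted timestamp wraps every 2^(32-26) = 64 seconds. The constants below are duplicated purely for illustration:

    #include <cstdint>

    constexpr int kExampleInterArrivalShift = 26;  // Mirrors kInterArrivalShift.
    constexpr double kExampleTimestampToMs =
        1000.0 / static_cast<double>(1 << kExampleInterArrivalShift);

    double AbsSendTimeDeltaMs(uint32_t shifted_timestamp_delta) {
      // E.g. a delta of 67'109 ticks is ~1.000 ms (2^26 ticks per second).
      return shifted_timestamp_delta * kExampleTimestampToMs;
    }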
RTC_DCHECK(observer_); RTC_LOG(LS_INFO) << "RemoteBitrateEstimatorAbsSendTime: Instantiating."; } -void RemoteBitrateEstimatorAbsSendTime::ComputeClusters( - std::list* clusters) const { - Cluster current; - int64_t prev_send_time = -1; - int64_t prev_recv_time = -1; - for (std::list::const_iterator it = probes_.begin(); - it != probes_.end(); ++it) { - if (prev_send_time >= 0) { - int send_delta_ms = it->send_time_ms - prev_send_time; - int recv_delta_ms = it->recv_time_ms - prev_recv_time; - if (send_delta_ms >= 1 && recv_delta_ms >= 1) { - ++current.num_above_min_delta; +std::list +RemoteBitrateEstimatorAbsSendTime::ComputeClusters() const { + std::list clusters; + Cluster cluster_aggregate; + Timestamp prev_send_time = Timestamp::MinusInfinity(); + Timestamp prev_recv_time = Timestamp::MinusInfinity(); + for (const Probe& probe : probes_) { + if (prev_send_time.IsFinite()) { + TimeDelta send_delta = probe.send_time - prev_send_time; + TimeDelta recv_delta = probe.recv_time - prev_recv_time; + if (send_delta >= kMinClusterDelta && recv_delta >= kMinClusterDelta) { + ++cluster_aggregate.num_above_min_delta; } - if (!IsWithinClusterBounds(send_delta_ms, current)) { - if (current.count >= kMinClusterSize && current.send_mean_ms > 0.0f && - current.recv_mean_ms > 0.0f) { - AddCluster(clusters, ¤t); - } - current = Cluster(); + if (!IsWithinClusterBounds(send_delta, cluster_aggregate)) { + MaybeAddCluster(cluster_aggregate, clusters); + cluster_aggregate = Cluster(); } - current.send_mean_ms += send_delta_ms; - current.recv_mean_ms += recv_delta_ms; - current.mean_size += it->payload_size; - ++current.count; + cluster_aggregate.send_mean += send_delta; + cluster_aggregate.recv_mean += recv_delta; + cluster_aggregate.mean_size += probe.payload_size; + ++cluster_aggregate.count; } - prev_send_time = it->send_time_ms; - prev_recv_time = it->recv_time_ms; - } - if (current.count >= kMinClusterSize && current.send_mean_ms > 0.0f && - current.recv_mean_ms > 0.0f) { - AddCluster(clusters, ¤t); + prev_send_time = probe.send_time; + prev_recv_time = probe.recv_time; } + MaybeAddCluster(cluster_aggregate, clusters); + return clusters; } -std::list::const_iterator +const RemoteBitrateEstimatorAbsSendTime::Cluster* RemoteBitrateEstimatorAbsSendTime::FindBestProbe( const std::list& clusters) const { - int highest_probe_bitrate_bps = 0; - std::list::const_iterator best_it = clusters.end(); - for (std::list::const_iterator it = clusters.begin(); - it != clusters.end(); ++it) { - if (it->send_mean_ms == 0 || it->recv_mean_ms == 0) + DataRate highest_probe_bitrate = DataRate::Zero(); + const Cluster* best = nullptr; + for (const auto& cluster : clusters) { + if (cluster.send_mean == TimeDelta::Zero() || + cluster.recv_mean == TimeDelta::Zero()) { continue; - if (it->num_above_min_delta > it->count / 2 && - (it->recv_mean_ms - it->send_mean_ms <= 2.0f && - it->send_mean_ms - it->recv_mean_ms <= 5.0f)) { - int probe_bitrate_bps = - std::min(it->GetSendBitrateBps(), it->GetRecvBitrateBps()); - if (probe_bitrate_bps > highest_probe_bitrate_bps) { - highest_probe_bitrate_bps = probe_bitrate_bps; - best_it = it; + } + if (cluster.num_above_min_delta > cluster.count / 2 && + (cluster.recv_mean - cluster.send_mean <= TimeDelta::Millis(2) && + cluster.send_mean - cluster.recv_mean <= TimeDelta::Millis(5))) { + DataRate probe_bitrate = + std::min(cluster.SendBitrate(), cluster.RecvBitrate()); + if (probe_bitrate > highest_probe_bitrate) { + highest_probe_bitrate = probe_bitrate; + best = &cluster; } } else { - int 
send_bitrate_bps = it->mean_size * 8 * 1000 / it->send_mean_ms; - int recv_bitrate_bps = it->mean_size * 8 * 1000 / it->recv_mean_ms; - RTC_LOG(LS_INFO) << "Probe failed, sent at " << send_bitrate_bps - << " bps, received at " << recv_bitrate_bps - << " bps. Mean send delta: " << it->send_mean_ms - << " ms, mean recv delta: " << it->recv_mean_ms - << " ms, num probes: " << it->count; + RTC_LOG(LS_INFO) << "Probe failed, sent at " + << cluster.SendBitrate().bps() << " bps, received at " + << cluster.RecvBitrate().bps() + << " bps. Mean send delta: " << cluster.send_mean.ms() + << " ms, mean recv delta: " << cluster.recv_mean.ms() + << " ms, num probes: " << cluster.count; break; } } - return best_it; + return best; } RemoteBitrateEstimatorAbsSendTime::ProbeResult -RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) { - std::list clusters; - ComputeClusters(&clusters); +RemoteBitrateEstimatorAbsSendTime::ProcessClusters(Timestamp now) { + std::list clusters = ComputeClusters(); if (clusters.empty()) { // If we reach the max number of probe packets and still have no clusters, // we will remove the oldest one. @@ -189,21 +179,18 @@ RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) { return ProbeResult::kNoUpdate; } - std::list::const_iterator best_it = FindBestProbe(clusters); - if (best_it != clusters.end()) { - int probe_bitrate_bps = - std::min(best_it->GetSendBitrateBps(), best_it->GetRecvBitrateBps()); + if (const Cluster* best = FindBestProbe(clusters)) { + DataRate probe_bitrate = std::min(best->SendBitrate(), best->RecvBitrate()); // Make sure that a probe sent on a lower bitrate than our estimate can't // reduce the estimate. - if (IsBitrateImproving(probe_bitrate_bps)) { + if (IsBitrateImproving(probe_bitrate)) { RTC_LOG(LS_INFO) << "Probe successful, sent at " - << best_it->GetSendBitrateBps() << " bps, received at " - << best_it->GetRecvBitrateBps() - << " bps. Mean send delta: " << best_it->send_mean_ms - << " ms, mean recv delta: " << best_it->recv_mean_ms - << " ms, num probes: " << best_it->count; - remote_rate_.SetEstimate(DataRate::BitsPerSec(probe_bitrate_bps), - Timestamp::Millis(now_ms)); + << best->SendBitrate().bps() << " bps, received at " + << best->RecvBitrate().bps() + << " bps. 
Mean send delta: " << best->send_mean.ms() + << " ms, mean recv delta: " << best->recv_mean.ms() + << " ms, num probes: " << best->count; + remote_rate_.SetEstimate(probe_bitrate, now); return ProbeResult::kBitrateUpdated; } } @@ -216,11 +203,11 @@ RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) { } bool RemoteBitrateEstimatorAbsSendTime::IsBitrateImproving( - int new_bitrate_bps) const { - bool initial_probe = !remote_rate_.ValidEstimate() && new_bitrate_bps > 0; - bool bitrate_above_estimate = - remote_rate_.ValidEstimate() && - new_bitrate_bps > remote_rate_.LatestEstimate().bps(); + DataRate probe_bitrate) const { + bool initial_probe = + !remote_rate_.ValidEstimate() && probe_bitrate > DataRate::Zero(); + bool bitrate_above_estimate = remote_rate_.ValidEstimate() && + probe_bitrate > remote_rate_.LatestEstimate(); return initial_probe || bitrate_above_estimate; } @@ -235,14 +222,15 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacket( "is missing absolute send time extension!"; return; } - IncomingPacketInfo(arrival_time_ms, header.extension.absoluteSendTime, - payload_size, header.ssrc); + IncomingPacketInfo(Timestamp::Millis(arrival_time_ms), + header.extension.absoluteSendTime, + DataSize::Bytes(payload_size), header.ssrc); } void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( - int64_t arrival_time_ms, + Timestamp arrival_time, uint32_t send_time_24bits, - size_t payload_size, + DataSize payload_size, uint32_t ssrc) { RTC_CHECK(send_time_24bits < (1ul << 24)); if (!uma_recorded_) { @@ -253,15 +241,16 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( // Shift up send time to use the full 32 bits that inter_arrival works with, // so wrapping works properly. uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift; - int64_t send_time_ms = static_cast(timestamp) * kTimestampToMs; + Timestamp send_time = + Timestamp::Millis(static_cast(timestamp) * kTimestampToMs); - int64_t now_ms = clock_->TimeInMilliseconds(); + Timestamp now = clock_->CurrentTime(); // TODO(holmer): SSRCs are only needed for REMB, should be broken out from // here. // Check if incoming bitrate estimate is valid, and if it needs to be reset. absl::optional incoming_bitrate = - incoming_bitrate_.Rate(arrival_time_ms); + incoming_bitrate_.Rate(arrival_time.ms()); if (incoming_bitrate) { incoming_bitrate_initialized_ = true; } else if (incoming_bitrate_initialized_) { @@ -271,74 +260,82 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( incoming_bitrate_.Reset(); incoming_bitrate_initialized_ = false; } - incoming_bitrate_.Update(payload_size, arrival_time_ms); + incoming_bitrate_.Update(payload_size.bytes(), arrival_time.ms()); - if (first_packet_time_ms_ == -1) - first_packet_time_ms_ = now_ms; + if (first_packet_time_.IsInfinite()) { + first_packet_time_ = now; + } uint32_t ts_delta = 0; int64_t t_delta = 0; int size_delta = 0; bool update_estimate = false; - uint32_t target_bitrate_bps = 0; + DataRate target_bitrate = DataRate::Zero(); std::vector ssrcs; { MutexLock lock(&mutex_); - TimeoutStreams(now_ms); - RTC_DCHECK(inter_arrival_.get()); - RTC_DCHECK(estimator_.get()); - ssrcs_[ssrc] = now_ms; + TimeoutStreams(now); + RTC_DCHECK(inter_arrival_); + RTC_DCHECK(estimator_); + // TODO(danilchap): Replace 5 lines below with insert_or_assign when that + // c++17 function is available. + auto inserted = ssrcs_.insert(std::make_pair(ssrc, now)); + if (!inserted.second) { + // Already inserted, update. 
+ inserted.first->second = now; + } // For now only try to detect probes while we don't have a valid estimate. // We currently assume that only packets larger than 200 bytes are paced by // the sender. - const size_t kMinProbePacketSize = 200; + static constexpr DataSize kMinProbePacketSize = DataSize::Bytes(200); if (payload_size > kMinProbePacketSize && (!remote_rate_.ValidEstimate() || - now_ms - first_packet_time_ms_ < kInitialProbingIntervalMs)) { + now - first_packet_time_ < kInitialProbingInterval)) { // TODO(holmer): Use a map instead to get correct order? if (total_probes_received_ < kMaxProbePackets) { - int send_delta_ms = -1; - int recv_delta_ms = -1; + TimeDelta send_delta = TimeDelta::Millis(-1); + TimeDelta recv_delta = TimeDelta::Millis(-1); if (!probes_.empty()) { - send_delta_ms = send_time_ms - probes_.back().send_time_ms; - recv_delta_ms = arrival_time_ms - probes_.back().recv_time_ms; + send_delta = send_time - probes_.back().send_time; + recv_delta = arrival_time - probes_.back().recv_time; } - RTC_LOG(LS_INFO) << "Probe packet received: send time=" << send_time_ms - << " ms, recv time=" << arrival_time_ms - << " ms, send delta=" << send_delta_ms - << " ms, recv delta=" << recv_delta_ms << " ms."; + RTC_LOG(LS_INFO) << "Probe packet received: send time=" + << send_time.ms() + << " ms, recv time=" << arrival_time.ms() + << " ms, send delta=" << send_delta.ms() + << " ms, recv delta=" << recv_delta.ms() << " ms."; } - probes_.push_back(Probe(send_time_ms, arrival_time_ms, payload_size)); + probes_.emplace_back(send_time, arrival_time, payload_size); ++total_probes_received_; // Make sure that a probe which updated the bitrate immediately has an // effect by calling the OnReceiveBitrateChanged callback. - if (ProcessClusters(now_ms) == ProbeResult::kBitrateUpdated) + if (ProcessClusters(now) == ProbeResult::kBitrateUpdated) update_estimate = true; } - if (inter_arrival_->ComputeDeltas(timestamp, arrival_time_ms, now_ms, - payload_size, &ts_delta, &t_delta, + if (inter_arrival_->ComputeDeltas(timestamp, arrival_time.ms(), now.ms(), + payload_size.bytes(), &ts_delta, &t_delta, &size_delta)) { double ts_delta_ms = (1000.0 * ts_delta) / (1 << kInterArrivalShift); estimator_->Update(t_delta, ts_delta_ms, size_delta, detector_.State(), - arrival_time_ms); + arrival_time.ms()); detector_.Detect(estimator_->offset(), ts_delta_ms, - estimator_->num_of_deltas(), arrival_time_ms); + estimator_->num_of_deltas(), arrival_time.ms()); } if (!update_estimate) { // Check if it's time for a periodic update or if we should update because // of an over-use. - if (last_update_ms_ == -1 || - now_ms - last_update_ms_ > remote_rate_.GetFeedbackInterval().ms()) { + if (last_update_.IsInfinite() || + now.ms() - last_update_.ms() > + remote_rate_.GetFeedbackInterval().ms()) { update_estimate = true; } else if (detector_.State() == BandwidthUsage::kBwOverusing) { absl::optional incoming_rate = - incoming_bitrate_.Rate(arrival_time_ms); + incoming_bitrate_.Rate(arrival_time.ms()); if (incoming_rate && remote_rate_.TimeToReduceFurther( - Timestamp::Millis(now_ms), - DataRate::BitsPerSec(*incoming_rate))) { + now, DataRate::BitsPerSec(*incoming_rate))) { update_estimate = true; } } @@ -349,18 +346,16 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( // We also have to update the estimate immediately if we are overusing // and the target bitrate is too high compared to what we are receiving. 
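The rewrite in this file leans on the api/units value types so that bitrate math carries its units; the Cluster helpers later in this patch compute bitrate simply as mean_size / send_mean. A self-contained sketch of that arithmetic, with illustrative values:

    #include "api/units/data_rate.h"
    #include "api/units/data_size.h"
    #include "api/units/time_delta.h"

    webrtc::DataRate ExampleProbeBitrate() {
      webrtc::DataSize mean_size = webrtc::DataSize::Bytes(1200);
      webrtc::TimeDelta send_mean = webrtc::TimeDelta::Millis(5);
      // DataSize / TimeDelta yields a DataRate: 1200 bytes / 5 ms = 1.92 Mbps.
      return mean_size / send_mean;
    }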
const RateControlInput input( - detector_.State(), - OptionalRateFromOptionalBps(incoming_bitrate_.Rate(arrival_time_ms))); - target_bitrate_bps = - remote_rate_.Update(&input, Timestamp::Millis(now_ms)) - .bps(); + detector_.State(), OptionalRateFromOptionalBps( + incoming_bitrate_.Rate(arrival_time.ms()))); + target_bitrate = remote_rate_.Update(&input, now); update_estimate = remote_rate_.ValidEstimate(); ssrcs = Keys(ssrcs_); } } if (update_estimate) { - last_update_ms_ = now_ms; - observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate_bps); + last_update_ = now; + observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate.bps()); } } @@ -371,9 +366,9 @@ int64_t RemoteBitrateEstimatorAbsSendTime::TimeUntilNextProcess() { return kDisabledModuleTime; } -void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(int64_t now_ms) { - for (Ssrcs::iterator it = ssrcs_.begin(); it != ssrcs_.end();) { - if ((now_ms - it->second) > kStreamTimeOutMs) { +void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(Timestamp now) { + for (auto it = ssrcs_.begin(); it != ssrcs_.end();) { + if (now - it->second > TimeDelta::Millis(kStreamTimeOutMs)) { ssrcs_.erase(it++); } else { ++it; @@ -381,17 +376,17 @@ void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(int64_t now_ms) { } if (ssrcs_.empty()) { // We can't update the estimate if we don't have any active streams. - inter_arrival_.reset( - new InterArrival((kTimestampGroupLengthMs << kInterArrivalShift) / 1000, - kTimestampToMs, true)); - estimator_.reset(new OveruseEstimator(OverUseDetectorOptions())); + inter_arrival_ = std::make_unique( + (kTimestampGroupLengthMs << kInterArrivalShift) / 1000, kTimestampToMs, + true); + estimator_ = std::make_unique(OverUseDetectorOptions()); // We deliberately don't reset the first_packet_time_ms_ here for now since // we only probe for bandwidth in the beginning of a call right now. 
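The first_packet_time_ and last_update_ members above use Timestamp::MinusInfinity() as a "never set" sentinel, replacing the old -1 millisecond convention, and gate the periodic update on IsInfinite(). A minimal sketch of that check; the interval value here is illustrative, the real code asks remote_rate_ for its feedback interval:

    #include "api/units/time_delta.h"
    #include "api/units/timestamp.h"

    bool ShouldSendPeriodicUpdate(webrtc::Timestamp last_update,
                                  webrtc::Timestamp now) {
      const webrtc::TimeDelta kInterval = webrtc::TimeDelta::Millis(250);
      // MinusInfinity() means no update has ever been sent.
      return last_update.IsInfinite() || now - last_update > kInterval;
    }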
} } void RemoteBitrateEstimatorAbsSendTime::OnRttUpdate(int64_t avg_rtt_ms, - int64_t max_rtt_ms) { + int64_t /*max_rtt_ms*/) { MutexLock lock(&mutex_); remote_rate_.SetRtt(TimeDelta::Millis(avg_rtt_ms)); } diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h index f42a28f8c8..4117382577 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h @@ -21,6 +21,10 @@ #include "api/rtp_headers.h" #include "api/transport/field_trial_based_config.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "modules/remote_bitrate_estimator/aimd_rate_control.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/remote_bitrate_estimator/inter_arrival.h" @@ -35,42 +39,6 @@ namespace webrtc { -struct Probe { - Probe(int64_t send_time_ms, int64_t recv_time_ms, size_t payload_size) - : send_time_ms(send_time_ms), - recv_time_ms(recv_time_ms), - payload_size(payload_size) {} - int64_t send_time_ms; - int64_t recv_time_ms; - size_t payload_size; -}; - -struct Cluster { - Cluster() - : send_mean_ms(0.0f), - recv_mean_ms(0.0f), - mean_size(0), - count(0), - num_above_min_delta(0) {} - - int GetSendBitrateBps() const { - RTC_CHECK_GT(send_mean_ms, 0.0f); - return mean_size * 8 * 1000 / send_mean_ms; - } - - int GetRecvBitrateBps() const { - RTC_CHECK_GT(recv_mean_ms, 0.0f); - return mean_size * 8 * 1000 / recv_mean_ms; - } - - float send_mean_ms; - float recv_mean_ms; - // TODO(holmer): Add some variance metric as well? - size_t mean_size; - int count; - int num_above_min_delta; -}; - class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator { public: RemoteBitrateEstimatorAbsSendTime(RemoteBitrateObserver* observer, @@ -100,32 +68,54 @@ class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator { void SetMinBitrate(int min_bitrate_bps) override; private: - typedef std::map Ssrcs; + struct Probe { + Probe(Timestamp send_time, Timestamp recv_time, DataSize payload_size) + : send_time(send_time), + recv_time(recv_time), + payload_size(payload_size) {} + + Timestamp send_time; + Timestamp recv_time; + DataSize payload_size; + }; + + struct Cluster { + DataRate SendBitrate() const { return mean_size / send_mean; } + DataRate RecvBitrate() const { return mean_size / recv_mean; } + + TimeDelta send_mean = TimeDelta::Zero(); + TimeDelta recv_mean = TimeDelta::Zero(); + // TODO(holmer): Add some variance metric as well? 
+ DataSize mean_size = DataSize::Zero(); + int count = 0; + int num_above_min_delta = 0; + }; + enum class ProbeResult { kBitrateUpdated, kNoUpdate }; - static bool IsWithinClusterBounds(int send_delta_ms, + static bool IsWithinClusterBounds(TimeDelta send_delta, const Cluster& cluster_aggregate); - static void AddCluster(std::list* clusters, Cluster* cluster); + static void MaybeAddCluster(const Cluster& cluster_aggregate, + std::list& clusters); - void IncomingPacketInfo(int64_t arrival_time_ms, + void IncomingPacketInfo(Timestamp arrival_time, uint32_t send_time_24bits, - size_t payload_size, + DataSize payload_size, uint32_t ssrc); - void ComputeClusters(std::list* clusters) const; + std::list ComputeClusters() const; - std::list::const_iterator FindBestProbe( - const std::list& clusters) const; + const Cluster* FindBestProbe(const std::list& clusters) const; // Returns true if a probe which changed the estimate was detected. - ProbeResult ProcessClusters(int64_t now_ms) + ProbeResult ProcessClusters(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); - bool IsBitrateImproving(int probe_bitrate_bps) const + bool IsBitrateImproving(DataRate probe_bitrate) const RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); - void TimeoutStreams(int64_t now_ms) RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); + void TimeoutStreams(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); rtc::RaceChecker network_race_; Clock* const clock_; @@ -134,18 +124,16 @@ class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator { std::unique_ptr inter_arrival_; std::unique_ptr estimator_; OveruseDetector detector_; - RateStatistics incoming_bitrate_; - bool incoming_bitrate_initialized_; - std::vector recent_propagation_delta_ms_; - std::vector recent_update_time_ms_; + RateStatistics incoming_bitrate_{kBitrateWindowMs, 8000}; + bool incoming_bitrate_initialized_ = false; std::list probes_; - size_t total_probes_received_; - int64_t first_packet_time_ms_; - int64_t last_update_ms_; - bool uma_recorded_; + size_t total_probes_received_ = 0; + Timestamp first_packet_time_ = Timestamp::MinusInfinity(); + Timestamp last_update_ = Timestamp::MinusInfinity(); + bool uma_recorded_ = false; mutable Mutex mutex_; - Ssrcs ssrcs_ RTC_GUARDED_BY(&mutex_); + std::map ssrcs_ RTC_GUARDED_BY(&mutex_); AimdRateControl remote_rate_ RTC_GUARDED_BY(&mutex_); }; diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc index 46d8fbc434..ddaa1de088 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc @@ -234,7 +234,7 @@ bool RemoteBitrateEstimatorSingleStream::LatestEstimate( std::vector* ssrcs, uint32_t* bitrate_bps) const { MutexLock lock(&mutex_); - assert(bitrate_bps); + RTC_DCHECK(bitrate_bps); if (!remote_rate_->ValidEstimate()) { return false; } @@ -248,7 +248,7 @@ bool RemoteBitrateEstimatorSingleStream::LatestEstimate( void RemoteBitrateEstimatorSingleStream::GetSsrcs( std::vector* ssrcs) const { - assert(ssrcs); + RTC_DCHECK(ssrcs); ssrcs->resize(overuse_detectors_.size()); int i = 0; for (SsrcOveruseEstimatorMap::const_iterator it = overuse_detectors_.begin(); diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc index 5e117942c1..66f8ca053a 100644 --- 
a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc @@ -46,7 +46,7 @@ RtpStream::RtpStream(int fps, next_rtcp_time_(rtcp_receive_time), rtp_timestamp_offset_(timestamp_offset), kNtpFracPerMs(4.294967296E6) { - assert(fps_ > 0); + RTC_DCHECK_GT(fps_, 0); } void RtpStream::set_rtp_timestamp_offset(uint32_t offset) { @@ -60,7 +60,7 @@ int64_t RtpStream::GenerateFrame(int64_t time_now_us, PacketList* packets) { if (time_now_us < next_rtp_time_) { return next_rtp_time_; } - assert(packets != NULL); + RTC_DCHECK(packets); size_t bits_per_frame = (bitrate_bps_ + fps_ / 2) / fps_; size_t n_packets = std::max((bits_per_frame + 4 * kMtu) / (8 * kMtu), 1u); @@ -173,9 +173,9 @@ void StreamGenerator::set_rtp_timestamp_offset(uint32_t ssrc, uint32_t offset) { // it possible to simulate different types of channels. int64_t StreamGenerator::GenerateFrame(RtpStream::PacketList* packets, int64_t time_now_us) { - assert(packets != NULL); - assert(packets->empty()); - assert(capacity_ > 0); + RTC_DCHECK(packets); + RTC_DCHECK(packets->empty()); + RTC_DCHECK_GT(capacity_, 0); StreamMap::iterator it = std::min_element(streams_.begin(), streams_.end(), RtpStream::Compare); (*it).second->GenerateFrame(time_now_us, packets); diff --git a/modules/remote_bitrate_estimator/tools/bwe_rtp.cc b/modules/remote_bitrate_estimator/tools/bwe_rtp.cc index c0b3a37ba5..403f81fd03 100644 --- a/modules/remote_bitrate_estimator/tools/bwe_rtp.cc +++ b/modules/remote_bitrate_estimator/tools/bwe_rtp.cc @@ -18,10 +18,8 @@ #include "absl/flags/flag.h" #include "absl/flags/parse.h" -#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h" -#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h" +#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" ABSL_FLAG(std::string, extension_type, @@ -65,14 +63,11 @@ std::set SsrcFilter() { return ssrcs; } -std::unique_ptr ParseArgsAndSetupEstimator( +bool ParseArgsAndSetupRtpReader( int argc, char** argv, - webrtc::Clock* clock, - webrtc::RemoteBitrateObserver* observer, - std::unique_ptr* rtp_reader, - std::unique_ptr* estimator, - std::string* estimator_used) { + std::unique_ptr& rtp_reader, + webrtc::RtpHeaderExtensionMap& rtp_header_extensions) { absl::ParseCommandLine(argc, argv); std::string filename = InputFile(); @@ -84,16 +79,16 @@ std::unique_ptr ParseArgsAndSetupEstimator( fprintf(stderr, "\n"); if (filename.substr(filename.find_last_of('.')) == ".pcap") { fprintf(stderr, "Opening as pcap\n"); - rtp_reader->reset(webrtc::test::RtpFileReader::Create( + rtp_reader.reset(webrtc::test::RtpFileReader::Create( webrtc::test::RtpFileReader::kPcap, filename.c_str(), SsrcFilter())); } else { fprintf(stderr, "Opening as rtp\n"); - rtp_reader->reset(webrtc::test::RtpFileReader::Create( + rtp_reader.reset(webrtc::test::RtpFileReader::Create( webrtc::test::RtpFileReader::kRtpDump, filename.c_str())); } - if (!*rtp_reader) { + if (!rtp_reader) { fprintf(stderr, "Cannot open input file %s\n", filename.c_str()); - return nullptr; + return false; } fprintf(stderr, "Input file: %s\n\n", filename.c_str()); @@ -105,31 +100,10 @@ std::unique_ptr ParseArgsAndSetupEstimator( fprintf(stderr, "Extension: abs\n"); } else { fprintf(stderr, "Unknown extension type\n"); - return nullptr; + return false; } - // Setup the RTP header parser and the bitrate estimator. 
- auto parser = webrtc::RtpHeaderParser::CreateForTest(); - parser->RegisterRtpHeaderExtension(extension, ExtensionId()); - if (estimator) { - switch (extension) { - case webrtc::kRtpExtensionAbsoluteSendTime: { - estimator->reset( - new webrtc::RemoteBitrateEstimatorAbsSendTime(observer, clock)); - *estimator_used = "AbsoluteSendTimeRemoteBitrateEstimator"; - break; - } - case webrtc::kRtpExtensionTransmissionTimeOffset: { - estimator->reset( - new webrtc::RemoteBitrateEstimatorSingleStream(observer, clock)); - *estimator_used = "RemoteBitrateEstimator"; - break; - } - default: - assert(false); - return nullptr; - } - } + rtp_header_extensions.RegisterByType(ExtensionId(), extension); - return parser; + return true; } diff --git a/modules/remote_bitrate_estimator/tools/bwe_rtp.h b/modules/remote_bitrate_estimator/tools/bwe_rtp.h index 4285f926b5..3b161db37b 100644 --- a/modules/remote_bitrate_estimator/tools/bwe_rtp.h +++ b/modules/remote_bitrate_estimator/tools/bwe_rtp.h @@ -12,25 +12,14 @@ #define MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_ #include -#include -namespace webrtc { -class Clock; -class RemoteBitrateEstimator; -class RemoteBitrateObserver; -class RtpHeaderParser; -namespace test { -class RtpFileReader; -} -} // namespace webrtc +#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" +#include "test/rtp_file_reader.h" -std::unique_ptr ParseArgsAndSetupEstimator( +bool ParseArgsAndSetupRtpReader( int argc, char** argv, - webrtc::Clock* clock, - webrtc::RemoteBitrateObserver* observer, - std::unique_ptr* rtp_reader, - std::unique_ptr* estimator, - std::string* estimator_used); + std::unique_ptr& rtp_reader, + webrtc::RtpHeaderExtensionMap& rtp_header_extensions); #endif // MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_ diff --git a/modules/remote_bitrate_estimator/tools/rtp_to_text.cc b/modules/remote_bitrate_estimator/tools/rtp_to_text.cc index 7f1e009793..98f502a42e 100644 --- a/modules/remote_bitrate_estimator/tools/rtp_to_text.cc +++ b/modules/remote_bitrate_estimator/tools/rtp_to_text.cc @@ -13,17 +13,19 @@ #include #include "modules/remote_bitrate_estimator/tools/bwe_rtp.h" +#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" +#include "modules/rtp_rtcp/source/rtp_header_extensions.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "rtc_base/format_macros.h" #include "rtc_base/strings/string_builder.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" int main(int argc, char* argv[]) { std::unique_ptr reader; - std::unique_ptr parser(ParseArgsAndSetupEstimator( - argc, argv, nullptr, nullptr, &reader, nullptr, nullptr)); - if (!parser) + webrtc::RtpHeaderExtensionMap rtp_header_extensions; + if (!ParseArgsAndSetupRtpReader(argc, argv, reader, rtp_header_extensions)) { return -1; + } bool arrival_time_only = (argc >= 5 && strncmp(argv[4], "-t", 2) == 0); @@ -35,11 +37,15 @@ int main(int argc, char* argv[]) { int non_zero_ts_offsets = 0; webrtc::test::RtpPacket packet; while (reader->NextPacket(&packet)) { - webrtc::RTPHeader header; - parser->Parse(packet.data, packet.length, &header); - if (header.extension.absoluteSendTime != 0) + webrtc::RtpPacket header(&rtp_header_extensions); + header.Parse(packet.data, packet.length); + uint32_t abs_send_time = 0; + if (header.GetExtension(&abs_send_time) && + abs_send_time != 0) ++non_zero_abs_send_time; - if (header.extension.transmissionTimeOffset != 0) + int32_t toffset = 0; + if (header.GetExtension(&toffset) && + toffset != 0) ++non_zero_ts_offsets; if 
(arrival_time_only) { rtc::StringBuilder ss; @@ -47,11 +53,9 @@ int main(int argc, char* argv[]) { fprintf(stdout, "%s\n", ss.str().c_str()); } else { fprintf(stdout, "%u %u %d %u %u %d %u %" RTC_PRIuS " %" RTC_PRIuS "\n", - header.sequenceNumber, header.timestamp, - header.extension.transmissionTimeOffset, - header.extension.absoluteSendTime, packet.time_ms, - header.markerBit, header.ssrc, packet.length, - packet.original_length); + header.SequenceNumber(), header.Timestamp(), toffset, + abs_send_time, packet.time_ms, header.Marker(), header.Ssrc(), + packet.length, packet.original_length); } ++packet_counter; } diff --git a/modules/rtp_rtcp/BUILD.gn b/modules/rtp_rtcp/BUILD.gn index 8204b6d724..778baf6e15 100644 --- a/modules/rtp_rtcp/BUILD.gn +++ b/modules/rtp_rtcp/BUILD.gn @@ -52,6 +52,7 @@ rtc_library("rtp_rtcp_format") { "source/rtp_packet.h", "source/rtp_packet_received.h", "source/rtp_packet_to_send.h", + "source/rtp_util.h", "source/rtp_video_layers_allocation_extension.h", ] sources = [ @@ -96,6 +97,7 @@ rtc_library("rtp_rtcp_format") { "source/rtp_packet.cc", "source/rtp_packet_received.cc", "source/rtp_packet_to_send.cc", + "source/rtp_util.cc", "source/rtp_video_layers_allocation_extension.cc", ] @@ -140,8 +142,6 @@ rtc_library("rtp_rtcp") { "include/ulpfec_receiver.h", "source/absolute_capture_time_interpolator.cc", "source/absolute_capture_time_interpolator.h", - "source/absolute_capture_time_receiver.cc", # DEPRECATED - "source/absolute_capture_time_receiver.h", # DEPRECATED "source/absolute_capture_time_sender.cc", "source/absolute_capture_time_sender.h", "source/active_decode_targets_helper.cc", @@ -296,6 +296,7 @@ rtc_library("rtp_rtcp") { "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_numerics", "../../rtc_base:safe_minmax", + "../../rtc_base/containers:flat_map", "../../rtc_base/experiments:field_trial_parser", "../../rtc_base/synchronization:mutex", "../../rtc_base/system:no_unique_address", @@ -303,6 +304,7 @@ rtc_library("rtp_rtcp") { "../../rtc_base/task_utils:repeating_task", "../../rtc_base/task_utils:to_queued_task", "../../rtc_base/time:timestamp_extrapolator", + "../../rtc_base/containers:flat_map", "../../system_wrappers", "../../system_wrappers:metrics", "../remote_bitrate_estimator", @@ -371,6 +373,7 @@ rtc_library("rtcp_transceiver") { "../../api:rtp_headers", "../../api:transport_api", "../../api/task_queue", + "../../api/units:timestamp", "../../api/video:video_bitrate_allocation", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", @@ -440,6 +443,14 @@ rtc_library("mock_rtp_rtcp") { absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } +rtc_library("rtp_packetizer_av1_test_helper") { + testonly = true + sources = [ + "source/rtp_packetizer_av1_test_helper.cc", + "source/rtp_packetizer_av1_test_helper.h", + ] +} + if (rtc_include_tests) { if (!build_with_chromium) { rtc_executable("test_packet_masks_metrics") { @@ -541,6 +552,7 @@ if (rtc_include_tests) { "source/rtp_sender_unittest.cc", "source/rtp_sender_video_unittest.cc", "source/rtp_sequence_number_map_unittest.cc", + "source/rtp_util_unittest.cc", "source/rtp_utility_unittest.cc", "source/rtp_video_layers_allocation_extension_unittest.cc", "source/source_tracker_unittest.cc", @@ -559,6 +571,7 @@ if (rtc_include_tests) { ":fec_test_helper", ":mock_rtp_rtcp", ":rtcp_transceiver", + ":rtp_packetizer_av1_test_helper", ":rtp_rtcp", ":rtp_rtcp_format", ":rtp_rtcp_legacy", @@ -574,6 +587,7 @@ if (rtc_include_tests) { "../../api/transport:field_trial_based_config", 
"../../api/transport/rtp:dependency_descriptor", "../../api/units:data_size", + "../../api/units:time_delta", "../../api/units:timestamp", "../../api/video:encoded_image", "../../api/video:video_bitrate_allocation", diff --git a/modules/rtp_rtcp/include/rtp_rtcp_defines.h b/modules/rtp_rtcp/include/rtp_rtcp_defines.h index d255320534..998a754cc0 100644 --- a/modules/rtp_rtcp/include/rtp_rtcp_defines.h +++ b/modules/rtp_rtcp/include/rtp_rtcp_defines.h @@ -57,6 +57,7 @@ enum RTPExtensionType : int { kRtpExtensionNone, kRtpExtensionTransmissionTimeOffset, kRtpExtensionAudioLevel, + kRtpExtensionCsrcAudioLevel, kRtpExtensionInbandComfortNoise, kRtpExtensionAbsoluteSendTime, kRtpExtensionAbsoluteCaptureTime, @@ -227,8 +228,10 @@ struct RtpPacketSendInfo { RtpPacketSendInfo() = default; uint16_t transport_sequence_number = 0; + // TODO(bugs.webrtc.org/12713): Remove once downstream usage is gone. uint32_t ssrc = 0; - uint16_t rtp_sequence_number = 0; + absl::optional media_ssrc; + uint16_t rtp_sequence_number = 0; // Only valid if |media_ssrc| is set. uint32_t rtp_timestamp = 0; size_t length = 0; absl::optional packet_type; @@ -266,9 +269,13 @@ class RtcpFeedbackSenderInterface { class StreamFeedbackObserver { public: struct StreamPacketInfo { - uint32_t ssrc; - uint16_t rtp_sequence_number; bool received; + + // |rtp_sequence_number| and |is_retransmission| are only valid if |ssrc| + // is populated. + absl::optional ssrc; + uint16_t rtp_sequence_number; + bool is_retransmission; }; virtual ~StreamFeedbackObserver() = default; diff --git a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h index d523128e38..a7707ecc19 100644 --- a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h +++ b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h @@ -34,6 +34,7 @@ class MockRtpRtcpInterface : public RtpRtcpInterface { (const uint8_t* incoming_packet, size_t packet_length), (override)); MOCK_METHOD(void, SetRemoteSSRC, (uint32_t ssrc), (override)); + MOCK_METHOD(void, SetLocalSsrc, (uint32_t ssrc), (override)); MOCK_METHOD(void, SetMaxRtpPacketSize, (size_t size), (override)); MOCK_METHOD(size_t, MaxRtpPacketSize, (), (const, override)); MOCK_METHOD(void, diff --git a/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc b/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc deleted file mode 100644 index efb75506d0..0000000000 --- a/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" - -namespace webrtc { - -AbsoluteCaptureTimeReceiver::AbsoluteCaptureTimeReceiver(Clock* clock) - : AbsoluteCaptureTimeInterpolator(clock) {} - -void AbsoluteCaptureTimeReceiver::SetRemoteToLocalClockOffset( - absl::optional value_q32x32) { - capture_clock_offset_updater_.SetRemoteToLocalClockOffset(value_q32x32); -} - -absl::optional -AbsoluteCaptureTimeReceiver::OnReceivePacket( - uint32_t source, - uint32_t rtp_timestamp, - uint32_t rtp_clock_frequency, - const absl::optional& received_extension) { - auto extension = AbsoluteCaptureTimeInterpolator::OnReceivePacket( - source, rtp_timestamp, rtp_clock_frequency, received_extension); - - if (extension.has_value()) { - extension->estimated_capture_clock_offset = - capture_clock_offset_updater_.AdjustEstimatedCaptureClockOffset( - extension->estimated_capture_clock_offset); - } - - return extension; -} - -} // namespace webrtc diff --git a/modules/rtp_rtcp/source/absolute_capture_time_receiver.h b/modules/rtp_rtcp/source/absolute_capture_time_receiver.h deleted file mode 100644 index ad1bd7eb5d..0000000000 --- a/modules/rtp_rtcp/source/absolute_capture_time_receiver.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_RECEIVER_H_ -#define MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_RECEIVER_H_ - -#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" -#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h" -#include "system_wrappers/include/clock.h" - -namespace webrtc { - -// DEPRECATED. Use `AbsoluteCaptureTimeInterpolator` instead. 
-class AbsoluteCaptureTimeReceiver : public AbsoluteCaptureTimeInterpolator { - public: - explicit AbsoluteCaptureTimeReceiver(Clock* clock); - - absl::optional OnReceivePacket( - uint32_t source, - uint32_t rtp_timestamp, - uint32_t rtp_clock_frequency, - const absl::optional& received_extension); - - void SetRemoteToLocalClockOffset(absl::optional value_q32x32); - - private: - CaptureClockOffsetUpdater capture_clock_offset_updater_; -}; - -} // namespace webrtc - -#endif // MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_RECEIVER_H_ diff --git a/modules/rtp_rtcp/source/absolute_capture_time_sender.cc b/modules/rtp_rtcp/source/absolute_capture_time_sender.cc index 83ba6cac91..28266769ff 100644 --- a/modules/rtp_rtcp/source/absolute_capture_time_sender.cc +++ b/modules/rtp_rtcp/source/absolute_capture_time_sender.cc @@ -12,7 +12,7 @@ #include -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" #include "system_wrappers/include/ntp_time.h" namespace webrtc { @@ -26,7 +26,7 @@ constexpr TimeDelta AbsoluteCaptureTimeSender::kInterpolationMaxInterval; constexpr TimeDelta AbsoluteCaptureTimeSender::kInterpolationMaxError; static_assert( - AbsoluteCaptureTimeReceiver::kInterpolationMaxInterval >= + AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval >= AbsoluteCaptureTimeSender::kInterpolationMaxInterval, "Receivers should be as willing to interpolate timestamps as senders."); @@ -36,7 +36,7 @@ AbsoluteCaptureTimeSender::AbsoluteCaptureTimeSender(Clock* clock) uint32_t AbsoluteCaptureTimeSender::GetSource( uint32_t ssrc, rtc::ArrayView csrcs) { - return AbsoluteCaptureTimeReceiver::GetSource(ssrc, csrcs); + return AbsoluteCaptureTimeInterpolator::GetSource(ssrc, csrcs); } absl::optional AbsoluteCaptureTimeSender::OnSendPacket( @@ -108,7 +108,7 @@ bool AbsoluteCaptureTimeSender::ShouldSendExtension( // Should if interpolation would introduce too much error. const uint64_t interpolated_absolute_capture_timestamp = - AbsoluteCaptureTimeReceiver::InterpolateAbsoluteCaptureTimestamp( + AbsoluteCaptureTimeInterpolator::InterpolateAbsoluteCaptureTimestamp( rtp_timestamp, rtp_clock_frequency, last_rtp_timestamp_, last_absolute_capture_timestamp_); const int64_t interpolation_error_ms = UQ32x32ToInt64Ms(std::min( diff --git a/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc b/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc index 3f7d22c498..c542557526 100644 --- a/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc +++ b/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc @@ -313,7 +313,9 @@ void DEPRECATED_RtpSenderEgress::AddPacketToTransportFeedback( } RtpPacketSendInfo packet_info; + // TODO(bugs.webrtc.org/12713): Remove once downstream usage is gone. packet_info.ssrc = ssrc_; + packet_info.media_ssrc = ssrc_; packet_info.transport_sequence_number = packet_id; packet_info.rtp_sequence_number = packet.SequenceNumber(); packet_info.length = packet_size; diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc index 8afaf3ee61..fc035047b0 100644 --- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc +++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc @@ -218,7 +218,6 @@ class RtpRtcpRtxNackTest : public ::testing::Test { if (length > 0) rtp_rtcp_module_->SendNACK(nack_list, length); fake_clock.AdvanceTimeMilliseconds(28); // 33ms - 5ms delay. - rtp_rtcp_module_->Process(); // Prepare next frame. 
timestamp += 3000; } @@ -265,7 +264,6 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) { // Prepare next frame. timestamp += 3000; fake_clock.AdvanceTimeMilliseconds(33); - rtp_rtcp_module_->Process(); } EXPECT_FALSE(transport_.expected_sequence_numbers_.empty()); EXPECT_FALSE(media_stream_.sequence_numbers_.empty()); diff --git a/modules/rtp_rtcp/source/receive_statistics_impl.h b/modules/rtp_rtcp/source/receive_statistics_impl.h index 44f5144df9..1a70fe4ad7 100644 --- a/modules/rtp_rtcp/source/receive_statistics_impl.h +++ b/modules/rtp_rtcp/source/receive_statistics_impl.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -22,6 +21,7 @@ #include "modules/include/module_common_types_public.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/source/rtcp_packet/report_block.h" +#include "rtc_base/containers/flat_map.h" #include "rtc_base/rate_statistics.h" #include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" @@ -195,8 +195,7 @@ class ReceiveStatisticsImpl : public ReceiveStatistics { size_t last_returned_ssrc_idx_; std::vector all_ssrcs_; int max_reordering_threshold_; - std::unordered_map> + flat_map> statisticians_; }; diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc index 526acf555e..3ab78df17c 100644 --- a/modules/rtp_rtcp/source/rtcp_receiver.cc +++ b/modules/rtp_rtcp/source/rtcp_receiver.cc @@ -39,6 +39,7 @@ #include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/time_util.h" #include "modules/rtp_rtcp/source/tmmbr_help.h" #include "rtc_base/checks.h" @@ -84,8 +85,14 @@ bool ResetTimestampIfExpired(const Timestamp now, } // namespace +constexpr size_t RTCPReceiver::RegisteredSsrcs::kMediaSsrcIndex; +constexpr size_t RTCPReceiver::RegisteredSsrcs::kMaxSsrcs; + RTCPReceiver::RegisteredSsrcs::RegisteredSsrcs( - const RtpRtcpInterface::Configuration& config) { + bool disable_sequence_checker, + const RtpRtcpInterface::Configuration& config) + : packet_sequence_checker_(disable_sequence_checker) { + packet_sequence_checker_.Detach(); ssrcs_.push_back(config.local_media_ssrc); if (config.rtx_send_ssrc) { ssrcs_.push_back(*config.rtx_send_ssrc); @@ -100,6 +107,21 @@ RTCPReceiver::RegisteredSsrcs::RegisteredSsrcs( RTC_DCHECK_LE(ssrcs_.size(), RTCPReceiver::RegisteredSsrcs::kMaxSsrcs); } +bool RTCPReceiver::RegisteredSsrcs::contains(uint32_t ssrc) const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + return absl::c_linear_search(ssrcs_, ssrc); +} + +uint32_t RTCPReceiver::RegisteredSsrcs::media_ssrc() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + return ssrcs_[kMediaSsrcIndex]; +} + +void RTCPReceiver::RegisteredSsrcs::set_media_ssrc(uint32_t ssrc) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + ssrcs_[kMediaSsrcIndex] = ssrc; +} + struct RTCPReceiver::PacketInformation { uint32_t packet_type_flags = 0; // RTCPPacketTypeFlags bit field. 
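The RegisteredSsrcs accessors added in the next hunk guard their state with RTC_DCHECK_RUN_ON, which in debug builds verifies that every call stays on the sequence the checker is attached to. A stripped-down sketch of the pattern; the class and member names are illustrative, and the SequenceChecker header path differs between older and newer branches:

    #include "api/sequence_checker.h"  // Older branches: rtc_base/synchronization/sequence_checker.h

    class PacketCounter {
     public:
      void OnPacket() {
        RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
        ++count_;
      }
      int count() const {
        RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
        return count_;
      }

     private:
      webrtc::SequenceChecker packet_sequence_checker_;
      int count_ = 0;
    };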
@@ -116,13 +138,47 @@ struct RTCPReceiver::PacketInformation { std::unique_ptr loss_notification; }; +RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config, + ModuleRtpRtcpImpl2* owner) + : clock_(config.clock), + receiver_only_(config.receiver_only), + rtp_rtcp_(owner), + main_ssrc_(config.local_media_ssrc), + registered_ssrcs_(false, config), + rtcp_bandwidth_observer_(config.bandwidth_callback), + rtcp_intra_frame_observer_(config.intra_frame_callback), + rtcp_loss_notification_observer_(config.rtcp_loss_notification_observer), + network_state_estimate_observer_(config.network_state_estimate_observer), + transport_feedback_observer_(config.transport_feedback_callback), + bitrate_allocation_observer_(config.bitrate_allocation_observer), + report_interval_(config.rtcp_report_interval_ms > 0 + ? TimeDelta::Millis(config.rtcp_report_interval_ms) + : (config.audio ? kDefaultAudioReportInterval + : kDefaultVideoReportInterval)), + // TODO(bugs.webrtc.org/10774): Remove fallback. + remote_ssrc_(0), + remote_sender_rtp_time_(0), + remote_sender_packet_count_(0), + remote_sender_octet_count_(0), + remote_sender_reports_count_(0), + xr_rrtr_status_(config.non_sender_rtt_measurement), + xr_rr_rtt_ms_(0), + oldest_tmmbr_info_ms_(0), + cname_callback_(config.rtcp_cname_callback), + report_block_data_observer_(config.report_block_data_observer), + packet_type_counter_observer_(config.rtcp_packet_type_counter_observer), + num_skipped_packets_(0), + last_skipped_packets_warning_ms_(clock_->TimeInMilliseconds()) { + RTC_DCHECK(owner); +} + RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config, ModuleRtpRtcp* owner) : clock_(config.clock), receiver_only_(config.receiver_only), rtp_rtcp_(owner), main_ssrc_(config.local_media_ssrc), - registered_ssrcs_(config), + registered_ssrcs_(true, config), rtcp_bandwidth_observer_(config.bandwidth_callback), rtcp_intra_frame_observer_(config.intra_frame_callback), rtcp_loss_notification_observer_(config.rtcp_loss_notification_observer), @@ -148,6 +204,19 @@ RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config, num_skipped_packets_(0), last_skipped_packets_warning_ms_(clock_->TimeInMilliseconds()) { RTC_DCHECK(owner); + // Dear reader - if you're here because of this log statement and are + // wondering what this is about, chances are that you are using an instance + // of RTCPReceiver without using the webrtc APIs. This creates a bit of a + // problem for WebRTC because this class is a part of an internal + // implementation that is constantly changing and being improved. + // The intention of this log statement is to give a heads up that changes + // are coming and encourage you to use the public APIs or be prepared that + // things might break down the line as more changes land. A thing you could + // try out for now is to replace the `CustomSequenceChecker` in the header + // with a regular `SequenceChecker` and see if that triggers an + // error in your code. If it does, chances are you have your own threading + // model that is not the same as WebRTC internally has. + RTC_LOG(LS_INFO) << "************** !!!DEPRECATION WARNING!! 
**************"; } RTCPReceiver::~RTCPReceiver() {} @@ -178,11 +247,31 @@ void RTCPReceiver::SetRemoteSSRC(uint32_t ssrc) { remote_ssrc_ = ssrc; } +void RTCPReceiver::set_local_media_ssrc(uint32_t ssrc) { + registered_ssrcs_.set_media_ssrc(ssrc); +} + +uint32_t RTCPReceiver::local_media_ssrc() const { + return registered_ssrcs_.media_ssrc(); +} + uint32_t RTCPReceiver::RemoteSSRC() const { MutexLock lock(&rtcp_receiver_lock_); return remote_ssrc_; } +void RTCPReceiver::RttStats::AddRtt(TimeDelta rtt) { + last_rtt_ = rtt; + if (rtt < min_rtt_) { + min_rtt_ = rtt; + } + if (rtt > max_rtt_) { + max_rtt_ = rtt; + } + sum_rtt_ += rtt; + ++num_rtts_; +} + int32_t RTCPReceiver::RTT(uint32_t remote_ssrc, int64_t* last_rtt_ms, int64_t* avg_rtt_ms, @@ -190,32 +279,26 @@ int32_t RTCPReceiver::RTT(uint32_t remote_ssrc, int64_t* max_rtt_ms) const { MutexLock lock(&rtcp_receiver_lock_); - auto it = received_report_blocks_.find(main_ssrc_); - if (it == received_report_blocks_.end()) - return -1; - - auto it_info = it->second.find(remote_ssrc); - if (it_info == it->second.end()) - return -1; - - const ReportBlockData* report_block_data = &it_info->second; - - if (report_block_data->num_rtts() == 0) + auto it = rtts_.find(remote_ssrc); + if (it == rtts_.end()) { return -1; + } - if (last_rtt_ms) - *last_rtt_ms = report_block_data->last_rtt_ms(); + if (last_rtt_ms) { + *last_rtt_ms = it->second.last_rtt().ms(); + } if (avg_rtt_ms) { - *avg_rtt_ms = - report_block_data->sum_rtt_ms() / report_block_data->num_rtts(); + *avg_rtt_ms = it->second.average_rtt().ms(); } - if (min_rtt_ms) - *min_rtt_ms = report_block_data->min_rtt_ms(); + if (min_rtt_ms) { + *min_rtt_ms = it->second.min_rtt().ms(); + } - if (max_rtt_ms) - *max_rtt_ms = report_block_data->max_rtt_ms(); + if (max_rtt_ms) { + *max_rtt_ms = it->second.max_rtt().ms(); + } return 0; } @@ -243,26 +326,14 @@ absl::optional RTCPReceiver::OnPeriodicRttUpdate( // amount of time. MutexLock lock(&rtcp_receiver_lock_); if (last_received_rb_.IsInfinite() || last_received_rb_ > newer_than) { - // Stow away the report block for the main ssrc. We'll use the associated - // data map to look up each sender and check the last_rtt_ms(). 
- auto main_report_it = received_report_blocks_.find(main_ssrc_); - if (main_report_it != received_report_blocks_.end()) { - const ReportBlockDataMap& main_data_map = main_report_it->second; - int64_t max_rtt = 0; - for (const auto& reports_per_receiver : received_report_blocks_) { - for (const auto& report : reports_per_receiver.second) { - const RTCPReportBlock& block = report.second.report_block(); - auto it_info = main_data_map.find(block.sender_ssrc); - if (it_info != main_data_map.end()) { - const ReportBlockData* report_block_data = &it_info->second; - if (report_block_data->num_rtts() > 0) { - max_rtt = std::max(report_block_data->last_rtt_ms(), max_rtt); - } - } - } + TimeDelta max_rtt = TimeDelta::MinusInfinity(); + for (const auto& rtt_stats : rtts_) { + if (rtt_stats.second.last_rtt() > max_rtt) { + max_rtt = rtt_stats.second.last_rtt(); } - if (max_rtt) - rtt.emplace(TimeDelta::Millis(max_rtt)); + } + if (max_rtt.IsFinite()) { + rtt = max_rtt; } } @@ -332,8 +403,7 @@ RTCPReceiver::ConsumeReceivedXrReferenceTimeInfo() { std::vector last_xr_rtis; last_xr_rtis.reserve(last_xr_rtis_size); - const uint32_t now_ntp = - CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + const uint32_t now_ntp = CompactNtp(clock_->CurrentNtpTime()); for (size_t i = 0; i < last_xr_rtis_size; ++i) { RrtrInformation& rrtr = received_rrtrs_.front(); @@ -349,9 +419,9 @@ RTCPReceiver::ConsumeReceivedXrReferenceTimeInfo() { std::vector RTCPReceiver::GetLatestReportBlockData() const { std::vector result; MutexLock lock(&rtcp_receiver_lock_); - for (const auto& reports_per_receiver : received_report_blocks_) - for (const auto& report : reports_per_receiver.second) - result.push_back(report.second); + for (const auto& report : received_report_blocks_) { + result.push_back(report.second); + } return result; } @@ -481,7 +551,7 @@ void RTCPReceiver::HandleSenderReport(const CommonHeader& rtcp_block, remote_sender_ntp_time_ = sender_report.ntp(); remote_sender_rtp_time_ = sender_report.rtp_timestamp(); - last_received_sr_ntp_ = TimeMicrosToNtp(clock_->TimeInMicroseconds()); + last_received_sr_ntp_ = clock_->CurrentNtpTime(); remote_sender_packet_count_ = sender_report.sender_packet_count(); remote_sender_octet_count_ = sender_report.sender_octet_count(); remote_sender_reports_count_++; @@ -534,7 +604,7 @@ void RTCPReceiver::HandleReportBlock(const ReportBlock& report_block, last_received_rb_ = clock_->CurrentTime(); ReportBlockData* report_block_data = - &received_report_blocks_[report_block.source_ssrc()][remote_ssrc]; + &received_report_blocks_[report_block.source_ssrc()]; RTCPReportBlock rtcp_report_block; rtcp_report_block.sender_ssrc = remote_ssrc; rtcp_report_block.source_ssrc = report_block.source_ssrc(); @@ -571,13 +641,16 @@ void RTCPReceiver::HandleReportBlock(const ReportBlock& report_block, uint32_t delay_ntp = report_block.delay_since_last_sr(); // Local NTP time. uint32_t receive_time_ntp = - CompactNtp(TimeMicrosToNtp(last_received_rb_.us())); + CompactNtp(clock_->ConvertTimestampToNtpTime(last_received_rb_)); // RTT in 1/(2^16) seconds. uint32_t rtt_ntp = receive_time_ntp - delay_ntp - send_time_ntp; // Convert to 1/1000 seconds (milliseconds). rtt_ms = CompactNtpRttToMs(rtt_ntp); report_block_data->AddRoundTripTimeSample(rtt_ms); + if (report_block.source_ssrc() == main_ssrc_) { + rtts_[remote_ssrc].AddRtt(TimeDelta::Millis(rtt_ms)); + } packet_information->rtt_ms = rtt_ms; } @@ -735,8 +808,10 @@ void RTCPReceiver::HandleBye(const CommonHeader& rtcp_block) { } // Clear our lists. 
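Editor's note: several hunks here swap TimeMicrosToNtp(clock_->TimeInMicroseconds()) for clock_->CurrentNtpTime() while keeping the same compact-NTP arithmetic, e.g. rtt_ntp = receive_time_ntp - delay_ntp - send_time_ntp in HandleReportBlock. A standalone sketch of that RFC 3550 round-trip calculation, where "compact NTP" is the middle 32 bits of an NTP timestamp, i.e. Q16.16 seconds (the conversion helper below is a simplification; the real CompactNtpRttToMs additionally guards against apparently negative intervals):

#include <cstdint>
#include <cstdio>

// RTT = arrival time of the RR - delay since last SR (DLSR) - last SR (LSR),
// all in 1/65536 s units; unsigned math keeps it wrap-safe.
uint32_t CompactNtpRtt(uint32_t receive_time_ntp,
                       uint32_t delay_ntp,
                       uint32_t send_time_ntp) {
  return receive_time_ntp - delay_ntp - send_time_ntp;
}

int64_t CompactNtpRttToMsSketch(uint32_t rtt_ntp) {
  // Q16.16 seconds -> milliseconds, rounded to nearest.
  return (static_cast<int64_t>(rtt_ntp) * 1000 + (1 << 15)) / (1 << 16);
}

int main() {
  const uint32_t last_sr = 0x00010000;  // LSR: SR stamped at 1.0 s.
  const uint32_t dlsr = 0x00008000;     // DLSR: peer held it for 0.5 s.
  const uint32_t now = 0x0001C000;      // Local compact NTP on RR arrival: 1.75 s.
  std::printf("rtt = %lld ms\n",        // Prints 250 ms.
              static_cast<long long>(
                  CompactNtpRttToMsSketch(CompactNtpRtt(now, dlsr, last_sr))));
}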
- for (auto& reports_per_receiver : received_report_blocks_) - reports_per_receiver.second.erase(bye.sender_ssrc()); + rtts_.erase(bye.sender_ssrc()); + EraseIf(received_report_blocks_, [&](const auto& elem) { + return elem.second.report_block().sender_ssrc == bye.sender_ssrc(); + }); TmmbrInformation* tmmbr_info = GetTmmbrInformation(bye.sender_ssrc()); if (tmmbr_info) @@ -774,8 +849,7 @@ void RTCPReceiver::HandleXr(const CommonHeader& rtcp_block, void RTCPReceiver::HandleXrReceiveReferenceTime(uint32_t sender_ssrc, const rtcp::Rrtr& rrtr) { uint32_t received_remote_mid_ntp_time = CompactNtp(rrtr.ntp()); - uint32_t local_receive_mid_ntp_time = - CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + uint32_t local_receive_mid_ntp_time = CompactNtp(clock_->CurrentNtpTime()); auto it = received_rrtrs_ssrc_it_.find(sender_ssrc); if (it != received_rrtrs_ssrc_it_.end()) { @@ -809,7 +883,7 @@ void RTCPReceiver::HandleXrDlrrReportBlock(const rtcp::ReceiveTimeInfo& rti) { return; uint32_t delay_ntp = rti.delay_since_last_rr; - uint32_t now_ntp = CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + uint32_t now_ntp = CompactNtp(clock_->CurrentNtpTime()); uint32_t rtt_ntp = now_ntp - delay_ntp - send_time_ntp; xr_rr_rtt_ms_ = CompactNtpRttToMs(rtt_ntp); diff --git a/modules/rtp_rtcp/source/rtcp_receiver.h b/modules/rtp_rtcp/source/rtcp_receiver.h index 429df55d49..fa9f367c9e 100644 --- a/modules/rtp_rtcp/source/rtcp_receiver.h +++ b/modules/rtp_rtcp/source/rtcp_receiver.h @@ -13,12 +13,11 @@ #include #include -#include #include -#include #include #include "api/array_view.h" +#include "api/sequence_checker.h" #include "modules/rtp_rtcp/include/report_block_data.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" @@ -26,12 +25,17 @@ #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" #include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h" #include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" +#include "rtc_base/containers/flat_map.h" #include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/ntp_time.h" namespace webrtc { + +class ModuleRtpRtcpImpl2; class VideoBitrateAllocationObserver; + namespace rtcp { class CommonHeader; class ReportBlock; @@ -57,6 +61,10 @@ class RTCPReceiver final { RTCPReceiver(const RtpRtcpInterface::Configuration& config, ModuleRtpRtcp* owner); + + RTCPReceiver(const RtpRtcpInterface::Configuration& config, + ModuleRtpRtcpImpl2* owner); + ~RTCPReceiver(); void IncomingPacket(const uint8_t* packet, size_t packet_size) { @@ -66,9 +74,14 @@ class RTCPReceiver final { int64_t LastReceivedReportBlockMs() const; + void set_local_media_ssrc(uint32_t ssrc); + uint32_t local_media_ssrc() const; + void SetRemoteSSRC(uint32_t ssrc); uint32_t RemoteSSRC() const; + bool receiver_only() const { return receiver_only_; } + // Get received NTP. // The types for the arguments below derive from the specification: // - `remote_sender_packet_count`: `RTCSentRtpStreamStats.packetsSent` [1] @@ -103,9 +116,8 @@ class RTCPReceiver final { bool sending); // A snapshot of Report Blocks with additional data of interest to statistics. - // Within this list, the sender-source SSRC pair is unique and per-pair the - // ReportBlockData represents the latest Report Block that was received for - // that pair. 
+ // Within this list, the source SSRC is unique and ReportBlockData represents + // the latest Report Block that was received for that SSRC. std::vector GetLatestReportBlockData() const; // Returns true if we haven't received an RTCP RR for several RTCP @@ -126,21 +138,48 @@ class RTCPReceiver final { void NotifyTmmbrUpdated(); private: +#if RTC_DCHECK_IS_ON + class CustomSequenceChecker : public SequenceChecker { + public: + explicit CustomSequenceChecker(bool disable_checks) + : disable_checks_(disable_checks) {} + bool IsCurrent() const { + if (disable_checks_) + return true; + return SequenceChecker::IsCurrent(); + } + + private: + const bool disable_checks_; + }; +#else + class CustomSequenceChecker : public SequenceChecker { + public: + explicit CustomSequenceChecker(bool) {} + }; +#endif + // A lightweight inlined set of local SSRCs. class RegisteredSsrcs { public: + static constexpr size_t kMediaSsrcIndex = 0; static constexpr size_t kMaxSsrcs = 3; // Initializes the set of registered local SSRCS by extracting them from the - // provided `config`. - explicit RegisteredSsrcs(const RtpRtcpInterface::Configuration& config); + // provided `config`. The `disable_sequence_checker` flag is a workaround + // to be able to use a sequence checker without breaking downstream + // code that currently doesn't follow the same threading rules as webrtc. + RegisteredSsrcs(bool disable_sequence_checker, + const RtpRtcpInterface::Configuration& config); // Indicates if `ssrc` is in the set of registered local SSRCs. - bool contains(uint32_t ssrc) const { - return absl::c_linear_search(ssrcs_, ssrc); - } + bool contains(uint32_t ssrc) const; + uint32_t media_ssrc() const; + void set_media_ssrc(uint32_t ssrc); private: - absl::InlinedVector ssrcs_; + RTC_NO_UNIQUE_ADDRESS CustomSequenceChecker packet_sequence_checker_; + absl::InlinedVector ssrcs_ + RTC_GUARDED_BY(packet_sequence_checker_); }; struct PacketInformation; @@ -184,14 +223,26 @@ class RTCPReceiver final { uint8_t sequence_number; }; - // TODO(boivie): `ReportBlockDataMap` and `ReportBlockMap` should be converted - // to std::unordered_map, but as there are too many tests that assume a - // specific order, it's not easily done. + class RttStats { + public: + RttStats() = default; + RttStats(const RttStats&) = default; + RttStats& operator=(const RttStats&) = default; + + void AddRtt(TimeDelta rtt); - // RTCP report blocks mapped by remote SSRC. - using ReportBlockDataMap = std::map; - // RTCP report blocks map mapped by source SSRC. - using ReportBlockMap = std::map; + TimeDelta last_rtt() const { return last_rtt_; } + TimeDelta min_rtt() const { return min_rtt_; } + TimeDelta max_rtt() const { return max_rtt_; } + TimeDelta average_rtt() const { return sum_rtt_ / num_rtts_; } + + private: + TimeDelta last_rtt_ = TimeDelta::Zero(); + TimeDelta min_rtt_ = TimeDelta::PlusInfinity(); + TimeDelta max_rtt_ = TimeDelta::MinusInfinity(); + TimeDelta sum_rtt_ = TimeDelta::Zero(); + size_t num_rtts_ = 0; + }; bool ParseCompoundPacket(rtc::ArrayView packet, PacketInformation* packet_information); @@ -290,7 +341,7 @@ class RTCPReceiver final { ModuleRtpRtcp* const rtp_rtcp_; const uint32_t main_ssrc_; // The set of registered local SSRCs. 
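Editor's note: the CustomSequenceChecker above is a SequenceChecker whose checks can be bypassed at construction time, as a bridge for downstream code that does not yet follow WebRTC's threading rules. A rough sketch of the same pattern built on std::thread::id (illustrative only, not the WebRTC class):

#include <cassert>
#include <cstdio>
#include <thread>

class OptionalThreadCheckerSketch {
 public:
  explicit OptionalThreadCheckerSketch(bool disable_checks)
      : disable_checks_(disable_checks) {}

  // Returns true on the attached thread, or unconditionally when disabled.
  bool IsCurrent() const {
    return disable_checks_ || attached_ == std::this_thread::get_id();
  }

 private:
  const bool disable_checks_;
  const std::thread::id attached_ = std::this_thread::get_id();
};

int main() {
  OptionalThreadCheckerSketch strict(/*disable_checks=*/false);
  OptionalThreadCheckerSketch relaxed(/*disable_checks=*/true);
  assert(strict.IsCurrent());
  std::thread other([&] {
    assert(!strict.IsCurrent());  // Wrong thread: the strict checker flags it.
    assert(relaxed.IsCurrent());  // Disabled checker always passes.
  });
  other.join();
  std::printf("checks behaved as expected\n");
}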
- const RegisteredSsrcs registered_ssrcs_; + RegisteredSsrcs registered_ssrcs_; RtcpBandwidthObserver* const rtcp_bandwidth_observer_; RtcpIntraFrameObserver* const rtcp_intra_frame_observer_; @@ -316,7 +367,7 @@ class RTCPReceiver final { std::list received_rrtrs_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Received RRTR information mapped by remote ssrc. - std::unordered_map::iterator> + flat_map::iterator> received_rrtrs_ssrc_it_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Estimated rtt, zero when there is no valid estimate. @@ -325,11 +376,16 @@ class RTCPReceiver final { int64_t oldest_tmmbr_info_ms_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Mapped by remote ssrc. - std::unordered_map tmmbr_infos_ + flat_map tmmbr_infos_ RTC_GUARDED_BY(rtcp_receiver_lock_); - ReportBlockMap received_report_blocks_ RTC_GUARDED_BY(rtcp_receiver_lock_); - std::unordered_map last_fir_ + // Round-Trip Time per remote sender ssrc. + flat_map rtts_ RTC_GUARDED_BY(rtcp_receiver_lock_); + + // Report blocks per local source ssrc. + flat_map received_report_blocks_ + RTC_GUARDED_BY(rtcp_receiver_lock_); + flat_map last_fir_ RTC_GUARDED_BY(rtcp_receiver_lock_); // The last time we received an RTCP Report block for this module. diff --git a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc index c71bbcc9a8..3065534108 100644 --- a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc @@ -250,8 +250,7 @@ TEST(RtcpReceiverTest, InjectSrPacketCalculatesRTT) { int64_t rtt_ms = 0; EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr)); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -282,8 +281,7 @@ TEST(RtcpReceiverTest, InjectSrPacketCalculatesNegativeRTTAsOne) { int64_t rtt_ms = 0; EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr)); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -313,8 +311,7 @@ TEST(RtcpReceiverTest, const uint32_t kDelayNtp = 123000; const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -500,7 +497,8 @@ TEST(RtcpReceiverTest, InjectRrPacketWithTwoReportBlocks) { kSequenceNumbers[1]))))); } -TEST(RtcpReceiverTest, InjectRrPacketsFromTwoRemoteSsrcs) { +TEST(RtcpReceiverTest, + InjectRrPacketsFromTwoRemoteSsrcsReturnsLatestReportBlock) { const uint32_t kSenderSsrc2 = 0x20304; const uint16_t kSequenceNumbers[] = {10, 12423}; const int32_t kCumLost[] = {13, 555}; @@ -555,14 +553,6 @@ TEST(RtcpReceiverTest, InjectRrPacketsFromTwoRemoteSsrcs) { EXPECT_THAT( receiver.GetLatestReportBlockData(), UnorderedElementsAre( - Property( - &ReportBlockData::report_block, - AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), - Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc), - Field(&RTCPReportBlock::fraction_lost, kFracLost[0]), - Field(&RTCPReportBlock::packets_lost, kCumLost[0]), - Field(&RTCPReportBlock::extended_highest_sequence_number, - kSequenceNumbers[0]))), 
Property( &ReportBlockData::report_block, AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), @@ -831,8 +821,7 @@ TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithSubBlock) { receiver.IncomingPacket(xr.Build()); - uint32_t compact_ntp_now = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t compact_ntp_now = CompactNtp(mocks.clock.CurrentNtpTime()); EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms)); uint32_t rtt_ntp = compact_ntp_now - kDelay - kLastRR; EXPECT_NEAR(CompactNtpRttToMs(rtt_ntp), rtt_ms, 1); @@ -856,8 +845,7 @@ TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithMultipleSubBlocks) { receiver.IncomingPacket(xr.Build()); - uint32_t compact_ntp_now = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t compact_ntp_now = CompactNtp(mocks.clock.CurrentNtpTime()); int64_t rtt_ms = 0; EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms)); uint32_t rtt_ntp = compact_ntp_now - kDelay - kLastRR; @@ -936,7 +924,7 @@ TEST(RtcpReceiverTest, RttCalculatedAfterExtendedReportsDlrr) { const int64_t kRttMs = rand.Rand(1, 9 * 3600 * 1000); const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff); const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - NtpTime now = TimeMicrosToNtp(mocks.clock.TimeInMicroseconds()); + NtpTime now = mocks.clock.CurrentNtpTime(); uint32_t sent_ntp = CompactNtp(now); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); @@ -962,7 +950,7 @@ TEST(RtcpReceiverTest, XrDlrrCalculatesNegativeRttAsOne) { const int64_t kRttMs = rand.Rand(-3600 * 1000, -1); const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff); const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - NtpTime now = TimeMicrosToNtp(mocks.clock.TimeInMicroseconds()); + NtpTime now = mocks.clock.CurrentNtpTime(); uint32_t sent_ntp = CompactNtp(now); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); @@ -1334,8 +1322,7 @@ TEST(RtcpReceiverTest, VerifyRttObtainedFromReportBlockDataObserver) { const uint32_t kDelayNtp = 123000; const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc index ba63fd036f..8f5e3b104c 100644 --- a/modules/rtp_rtcp/source/rtcp_sender.cc +++ b/modules/rtp_rtcp/source/rtcp_sender.cc @@ -16,7 +16,11 @@ #include #include +#include "absl/types/optional.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/rtp_headers.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h" #include "modules/rtp_rtcp/source/rtcp_packet/app.h" #include "modules/rtp_rtcp/source/rtcp_packet/bye.h" @@ -34,6 +38,7 @@ #include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/time_util.h" #include "modules/rtp_rtcp/source/tmmbr_help.h" #include "rtc_base/checks.h" @@ -49,7 +54,6 @@ const uint32_t kRtcpAnyExtendedReports = kRtcpXrReceiverReferenceTime | kRtcpXrTargetBitrate; constexpr int32_t kDefaultVideoReportInterval = 1000; constexpr int32_t kDefaultAudioReportInterval = 5000; - } // namespace // Helper to put several RTCP packets 
into lower layer datagram RTCP packet. @@ -103,19 +107,38 @@ class RTCPSender::RtcpContext { RtcpContext(const FeedbackState& feedback_state, int32_t nack_size, const uint16_t* nack_list, - int64_t now_us) + Timestamp now) : feedback_state_(feedback_state), nack_size_(nack_size), nack_list_(nack_list), - now_us_(now_us) {} + now_(now) {} const FeedbackState& feedback_state_; const int32_t nack_size_; const uint16_t* nack_list_; - const int64_t now_us_; + const Timestamp now_; }; -RTCPSender::RTCPSender(const RtpRtcpInterface::Configuration& config) +RTCPSender::Configuration RTCPSender::Configuration::FromRtpRtcpConfiguration( + const RtpRtcpInterface::Configuration& configuration) { + RTCPSender::Configuration result; + result.audio = configuration.audio; + result.local_media_ssrc = configuration.local_media_ssrc; + result.clock = configuration.clock; + result.outgoing_transport = configuration.outgoing_transport; + result.non_sender_rtt_measurement = configuration.non_sender_rtt_measurement; + result.event_log = configuration.event_log; + if (configuration.rtcp_report_interval_ms) { + result.rtcp_report_interval = + TimeDelta::Millis(configuration.rtcp_report_interval_ms); + } + result.receive_statistics = configuration.receive_statistics; + result.rtcp_packet_type_counter_observer = + configuration.rtcp_packet_type_counter_observer; + return result; +} + +RTCPSender::RTCPSender(Configuration config) : audio_(config.audio), ssrc_(config.local_media_ssrc), clock_(config.clock), @@ -123,15 +146,14 @@ RTCPSender::RTCPSender(const RtpRtcpInterface::Configuration& config) method_(RtcpMode::kOff), event_log_(config.event_log), transport_(config.outgoing_transport), - report_interval_ms_(config.rtcp_report_interval_ms > 0 - ? config.rtcp_report_interval_ms - : (config.audio ? kDefaultAudioReportInterval - : kDefaultVideoReportInterval)), + report_interval_(config.rtcp_report_interval.value_or( + TimeDelta::Millis(config.audio ? kDefaultAudioReportInterval + : kDefaultVideoReportInterval))), + schedule_next_rtcp_send_evaluation_function_( + std::move(config.schedule_next_rtcp_send_evaluation_function)), sending_(false), - next_time_to_send_rtcp_(0), timestamp_offset_(0), last_rtp_timestamp_(0), - last_frame_capture_time_ms_(-1), remote_ssrc_(0), receive_statistics_(config.receive_statistics), @@ -174,10 +196,11 @@ RtcpMode RTCPSender::Status() const { void RTCPSender::SetRTCPStatus(RtcpMode new_method) { MutexLock lock(&mutex_rtcp_sender_); - if (method_ == RtcpMode::kOff && new_method != RtcpMode::kOff) { + if (new_method == RtcpMode::kOff) { + next_time_to_send_rtcp_ = absl::nullopt; + } else if (method_ == RtcpMode::kOff) { // When switching on, reschedule the next packet - next_time_to_send_rtcp_ = - clock_->TimeInMilliseconds() + (report_interval_ms_ / 2); + SetNextRtcpSendEvaluationDuration(report_interval_ / 2); } method_ = new_method; } @@ -260,7 +283,7 @@ void RTCPSender::SetRemb(int64_t bitrate_bps, std::vector ssrcs) { SetFlag(kRtcpRemb, /*is_volatile=*/false); // Send a REMB immediately if we have a new REMB. The frequency of REMBs is // throttled by the caller. 
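Editor's note: in the sender hunks above, next_time_to_send_rtcp_ becomes an optional deadline (empty while RTCP is off) and every reschedule goes through SetNextRtcpSendEvaluationDuration, which can also notify an optional callback instead of relying on polling. A simplified standalone sketch of that state machine (std::chrono and std::optional standing in for the webrtc units; names are illustrative):

#include <chrono>
#include <cstdio>
#include <functional>
#include <optional>
#include <utility>

enum class RtcpModeSketch { kOff, kCompound, kReducedSize };

class SendSchedulerSketch {
 public:
  using Clock = std::chrono::steady_clock;

  SendSchedulerSketch(std::chrono::milliseconds report_interval,
                      std::function<void(std::chrono::milliseconds)> schedule)
      : report_interval_(report_interval), schedule_(std::move(schedule)) {}

  void SetMode(RtcpModeSketch new_mode) {
    if (new_mode == RtcpModeSketch::kOff) {
      next_send_.reset();                // No deadline while RTCP is off.
    } else if (mode_ == RtcpModeSketch::kOff) {
      ScheduleIn(report_interval_ / 2);  // First report after half an interval.
    }
    mode_ = new_mode;
  }

  bool TimeToSend() const {
    return next_send_.has_value() && Clock::now() >= *next_send_;
  }

 private:
  void ScheduleIn(std::chrono::milliseconds delay) {
    next_send_ = Clock::now() + delay;
    if (schedule_) schedule_(delay);     // Optional push-style notification.
  }

  RtcpModeSketch mode_ = RtcpModeSketch::kOff;
  const std::chrono::milliseconds report_interval_;
  const std::function<void(std::chrono::milliseconds)> schedule_;
  std::optional<Clock::time_point> next_send_;
};

int main() {
  SendSchedulerSketch scheduler(
      std::chrono::milliseconds(1000), [](std::chrono::milliseconds delay) {
        std::printf("evaluate again in %lld ms\n",
                    static_cast<long long>(delay.count()));
      });
  scheduler.SetMode(RtcpModeSketch::kCompound);  // Schedules the first check at 500 ms.
  scheduler.SetMode(RtcpModeSketch::kOff);       // Clears the deadline.
}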
- next_time_to_send_rtcp_ = clock_->TimeInMilliseconds(); + SetNextRtcpSendEvaluationDuration(TimeDelta::Zero()); } void RTCPSender::UnsetRemb() { @@ -285,20 +308,20 @@ void RTCPSender::SetTimestampOffset(uint32_t timestamp_offset) { } void RTCPSender::SetLastRtpTime(uint32_t rtp_timestamp, - int64_t capture_time_ms, - int8_t payload_type) { + absl::optional capture_time, + absl::optional payload_type) { MutexLock lock(&mutex_rtcp_sender_); // For compatibility with clients who don't set payload type correctly on all // calls. - if (payload_type != -1) { - last_payload_type_ = payload_type; + if (payload_type.has_value()) { + last_payload_type_ = *payload_type; } last_rtp_timestamp_ = rtp_timestamp; - if (capture_time_ms <= 0) { + if (!capture_time.has_value()) { // We don't currently get a capture time from VoiceEngine. - last_frame_capture_time_ms_ = clock_->TimeInMilliseconds(); + last_frame_capture_time_ = clock_->CurrentTime(); } else { - last_frame_capture_time_ms_ = capture_time_ms; + last_frame_capture_time_ = *capture_time; } } @@ -307,6 +330,16 @@ void RTCPSender::SetRtpClockRate(int8_t payload_type, int rtp_clock_rate_hz) { rtp_clock_rates_khz_[payload_type] = rtp_clock_rate_hz / 1000; } +uint32_t RTCPSender::SSRC() const { + MutexLock lock(&mutex_rtcp_sender_); + return ssrc_; +} + +void RTCPSender::SetSsrc(uint32_t ssrc) { + MutexLock lock(&mutex_rtcp_sender_); + ssrc_ = ssrc; +} + void RTCPSender::SetRemoteSSRC(uint32_t ssrc) { MutexLock lock(&mutex_rtcp_sender_); remote_ssrc_ = ssrc; @@ -381,25 +414,27 @@ bool RTCPSender::TimeToSendRTCPReport(bool sendKeyframeBeforeRTP) const { a value of the RTCP bandwidth below the intended average */ - int64_t now = clock_->TimeInMilliseconds(); + Timestamp now = clock_->CurrentTime(); MutexLock lock(&mutex_rtcp_sender_); - + RTC_DCHECK( + (method_ == RtcpMode::kOff && !next_time_to_send_rtcp_.has_value()) || + (method_ != RtcpMode::kOff && next_time_to_send_rtcp_.has_value())); if (method_ == RtcpMode::kOff) return false; if (!audio_ && sendKeyframeBeforeRTP) { // for video key-frames we want to send the RTCP before the large key-frame // if we have a 100 ms margin - now += RTCP_SEND_BEFORE_KEY_FRAME_MS; + now += RTCP_SEND_BEFORE_KEY_FRAME; } - return now >= next_time_to_send_rtcp_; + return now >= *next_time_to_send_rtcp_; } void RTCPSender::BuildSR(const RtcpContext& ctx, PacketSender& sender) { // Timestamp shouldn't be estimated before first media frame. - RTC_DCHECK_GE(last_frame_capture_time_ms_, 0); + RTC_DCHECK(last_frame_capture_time_.has_value()); // The timestamp of this RTCP packet should be estimated as the timestamp of // the frame being captured at this moment. 
We are calculating that // timestamp as the last frame's timestamp + the time since the last frame @@ -414,11 +449,12 @@ void RTCPSender::BuildSR(const RtcpContext& ctx, PacketSender& sender) { // when converted to milliseconds, uint32_t rtp_timestamp = timestamp_offset_ + last_rtp_timestamp_ + - ((ctx.now_us_ + 500) / 1000 - last_frame_capture_time_ms_) * rtp_rate; + ((ctx.now_.us() + 500) / 1000 - last_frame_capture_time_->ms()) * + rtp_rate; rtcp::SenderReport report; report.SetSenderSsrc(ssrc_); - report.SetNtp(TimeMicrosToNtp(ctx.now_us_)); + report.SetNtp(clock_->ConvertTimestampToNtpTime(ctx.now_)); report.SetRtpTimestamp(rtp_timestamp); report.SetPacketCount(ctx.feedback_state_.packets_sent); report.SetOctetCount(ctx.feedback_state_.media_bytes_sent); @@ -584,7 +620,7 @@ void RTCPSender::BuildExtendedReports(const RtcpContext& ctx, if (!sending_ && xr_send_receiver_reference_time_enabled_) { rtcp::Rrtr rrtr; - rrtr.SetNtp(TimeMicrosToNtp(ctx.now_us_)); + rrtr.SetNtp(clock_->ConvertTimestampToNtpTime(ctx.now_)); xr.SetRrtr(rrtr); } @@ -653,7 +689,7 @@ absl::optional RTCPSender::ComputeCompoundRTCPPacket( SetFlag(packet_type, true); // Prevent sending streams to send SR before any media has been sent. - const bool can_calculate_rtp_timestamp = (last_frame_capture_time_ms_ >= 0); + const bool can_calculate_rtp_timestamp = last_frame_capture_time_.has_value(); if (!can_calculate_rtp_timestamp) { bool consumed_sr_flag = ConsumeFlag(kRtcpSr); bool consumed_report_flag = sending_ && ConsumeFlag(kRtcpReport); @@ -673,7 +709,7 @@ absl::optional RTCPSender::ComputeCompoundRTCPPacket( // We need to send our NTP even if we haven't received any reports. RtcpContext context(feedback_state, nack_size, nack_list, - clock_->TimeInMicroseconds()); + clock_->CurrentTime()); PrepareReport(feedback_state); @@ -744,24 +780,25 @@ void RTCPSender::PrepareReport(const FeedbackState& feedback_state) { } // generate next time to send an RTCP report - int min_interval_ms = report_interval_ms_; + TimeDelta min_interval = report_interval_; if (!audio_ && sending_) { // Calculate bandwidth for video; 360 / send bandwidth in kbit/s. int send_bitrate_kbit = feedback_state.send_bitrate / 1000; if (send_bitrate_kbit != 0) { - min_interval_ms = 360000 / send_bitrate_kbit; - min_interval_ms = std::min(min_interval_ms, report_interval_ms_); + min_interval = std::min(TimeDelta::Millis(360000 / send_bitrate_kbit), + report_interval_); } } // The interval between RTCP packets is varied randomly over the // range [1/2,3/2] times the calculated interval. - int time_to_next = - random_.Rand(min_interval_ms * 1 / 2, min_interval_ms * 3 / 2); + int min_interval_int = rtc::dchecked_cast(min_interval.ms()); + TimeDelta time_to_next = TimeDelta::Millis( + random_.Rand(min_interval_int * 1 / 2, min_interval_int * 3 / 2)); - RTC_DCHECK_GT(time_to_next, 0); - next_time_to_send_rtcp_ = clock_->TimeInMilliseconds() + time_to_next; + RTC_DCHECK(!time_to_next.IsZero()); + SetNextRtcpSendEvaluationDuration(time_to_next); // RtcpSender expected to be used for sending either just sender reports // or just receiver reports. @@ -783,7 +820,7 @@ std::vector RTCPSender::CreateReportBlocks( if (!result.empty() && ((feedback_state.last_rr_ntp_secs != 0) || (feedback_state.last_rr_ntp_frac != 0))) { // Get our NTP as late as possible to avoid a race. 
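Editor's note: PrepareReport above caps the interval for a sending video stream at 360000 ms divided by the send bitrate in kbit/s, then randomizes over [1/2, 3/2] of that value as RFC 3550 suggests. A standalone approximation of that calculation (plain C++, not the WebRTC code):

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <random>

std::chrono::milliseconds NextReportIntervalSketch(
    std::chrono::milliseconds report_interval,
    int send_bitrate_bps,
    bool audio,
    bool sending,
    std::mt19937& rng) {
  std::chrono::milliseconds min_interval = report_interval;
  if (!audio && sending) {
    const int send_bitrate_kbit = send_bitrate_bps / 1000;
    if (send_bitrate_kbit != 0) {
      // "360 / send bandwidth in kbit/s", expressed in milliseconds.
      min_interval = std::min(
          std::chrono::milliseconds(360000 / send_bitrate_kbit), report_interval);
    }
  }
  // Randomize over [0.5, 1.5] times the calculated interval.
  std::uniform_int_distribution<int64_t> dist(min_interval.count() / 2,
                                              min_interval.count() * 3 / 2);
  return std::chrono::milliseconds(dist(rng));
}

int main() {
  std::mt19937 rng(42);
  const auto next = NextReportIntervalSketch(std::chrono::milliseconds(1000),
                                             /*send_bitrate_bps=*/800000,
                                             /*audio=*/false, /*sending=*/true,
                                             rng);  // Somewhere in [225, 675] ms.
  std::printf("next RTCP report in %lld ms\n",
              static_cast<long long>(next.count()));
}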
- uint32_t now = CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + uint32_t now = CompactNtp(clock_->CurrentNtpTime()); uint32_t receive_time = feedback_state.last_rr_ntp_secs & 0x0000FFFF; receive_time <<= 16; @@ -855,7 +892,7 @@ void RTCPSender::SetVideoBitrateAllocation( RTC_LOG(LS_INFO) << "Emitting TargetBitrate XR for SSRC " << ssrc_ << " with new layers enabled/disabled: " << video_bitrate_allocation_.ToString(); - next_time_to_send_rtcp_ = clock_->TimeInMilliseconds(); + SetNextRtcpSendEvaluationDuration(TimeDelta::Zero()); } else { video_bitrate_allocation_ = bitrate; } @@ -916,4 +953,12 @@ void RTCPSender::SendCombinedRtcpPacket( sender.Send(); } +void RTCPSender::SetNextRtcpSendEvaluationDuration(TimeDelta duration) { + next_time_to_send_rtcp_ = clock_->CurrentTime() + duration; + // TODO(bugs.webrtc.org/11581): make unconditional once downstream consumers + // are using the callback method. + if (schedule_next_rtcp_send_evaluation_function_) + schedule_next_rtcp_send_evaluation_function_(duration); +} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h index aab2c9051f..2d1c7da0fc 100644 --- a/modules/rtp_rtcp/source/rtcp_sender.h +++ b/modules/rtp_rtcp/source/rtcp_sender.h @@ -19,6 +19,8 @@ #include "absl/types/optional.h" #include "api/call/transport.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "api/video/video_bitrate_allocation.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/rtp_rtcp/include/receive_statistics.h" @@ -42,6 +44,43 @@ class RtcEventLog; class RTCPSender final { public: + struct Configuration { + // TODO(bugs.webrtc.org/11581): Remove this temporary conversion utility + // once rtc_rtcp_impl.cc/h are gone. + static Configuration FromRtpRtcpConfiguration( + const RtpRtcpInterface::Configuration& config); + + // True for a audio version of the RTP/RTCP module object false will create + // a video version. + bool audio = false; + // SSRCs for media and retransmission, respectively. + // FlexFec SSRC is fetched from |flexfec_sender|. + uint32_t local_media_ssrc = 0; + // The clock to use to read time. If nullptr then system clock will be used. + Clock* clock = nullptr; + // Transport object that will be called when packets are ready to be sent + // out on the network. + Transport* outgoing_transport = nullptr; + // Estimate RTT as non-sender as described in + // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5 + bool non_sender_rtt_measurement = false; + // Optional callback which, if specified, is used by RTCPSender to schedule + // the next time to evaluate if RTCP should be sent by means of + // TimeToSendRTCPReport/SendRTCP. + // The RTCPSender client still needs to call TimeToSendRTCPReport/SendRTCP + // to actually get RTCP sent. + // + // Note: It's recommended to use the callback to ensure program design that + // doesn't use polling. + // TODO(bugs.webrtc.org/11581): Make mandatory once downstream consumers + // have migrated to the callback solution. 
+ std::function schedule_next_rtcp_send_evaluation_function; + + RtcEventLog* event_log = nullptr; + absl::optional rtcp_report_interval; + ReceiveStatisticsProvider* receive_statistics = nullptr; + RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer = nullptr; + }; struct FeedbackState { FeedbackState(); FeedbackState(const FeedbackState&); @@ -63,7 +102,7 @@ class RTCPSender final { RTCPReceiver* receiver; }; - explicit RTCPSender(const RtpRtcpInterface::Configuration& config); + explicit RTCPSender(Configuration config); RTCPSender() = delete; RTCPSender(const RTCPSender&) = delete; @@ -84,17 +123,16 @@ class RTCPSender final { void SetTimestampOffset(uint32_t timestamp_offset) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - // TODO(bugs.webrtc.org/6458): Remove default parameter value when all the - // depending projects are updated to correctly set payload type. void SetLastRtpTime(uint32_t rtp_timestamp, - int64_t capture_time_ms, - int8_t payload_type = -1) + absl::optional capture_time, + absl::optional payload_type) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetRtpClockRate(int8_t payload_type, int rtp_clock_rate_hz) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - uint32_t SSRC() const { return ssrc_; } + uint32_t SSRC() const; + void SetSsrc(uint32_t ssrc); void SetRemoteSSRC(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); @@ -186,8 +224,16 @@ class RTCPSender final { void BuildNACK(const RtcpContext& context, PacketSender& sender) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + // |duration| being TimeDelta::Zero() means schedule immediately. + void SetNextRtcpSendEvaluationDuration(TimeDelta duration) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + const bool audio_; - const uint32_t ssrc_; + // TODO(bugs.webrtc.org/11581): `mutex_rtcp_sender_` shouldn't be required if + // we consistently run network related operations on the network thread. + // This is currently not possible due to callbacks from the process thread in + // ModuleRtpRtcpImpl2. + uint32_t ssrc_ RTC_GUARDED_BY(mutex_rtcp_sender_); Clock* const clock_; Random random_ RTC_GUARDED_BY(mutex_rtcp_sender_); RtcpMode method_ RTC_GUARDED_BY(mutex_rtcp_sender_); @@ -195,16 +241,22 @@ class RTCPSender final { RtcEventLog* const event_log_; Transport* const transport_; - const int report_interval_ms_; + const TimeDelta report_interval_; + // Set from + // RTCPSender::Configuration::schedule_next_rtcp_send_evaluation_function. 
+ const std::function + schedule_next_rtcp_send_evaluation_function_; mutable Mutex mutex_rtcp_sender_; bool sending_ RTC_GUARDED_BY(mutex_rtcp_sender_); - int64_t next_time_to_send_rtcp_ RTC_GUARDED_BY(mutex_rtcp_sender_); + absl::optional next_time_to_send_rtcp_ + RTC_GUARDED_BY(mutex_rtcp_sender_); uint32_t timestamp_offset_ RTC_GUARDED_BY(mutex_rtcp_sender_); uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_rtcp_sender_); - int64_t last_frame_capture_time_ms_ RTC_GUARDED_BY(mutex_rtcp_sender_); + absl::optional last_frame_capture_time_ + RTC_GUARDED_BY(mutex_rtcp_sender_); // SSRC that we receive on our RTP channel uint32_t remote_ssrc_ RTC_GUARDED_BY(mutex_rtcp_sender_); std::string cname_ RTC_GUARDED_BY(mutex_rtcp_sender_); diff --git a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc index 81eee4a5b0..347be79398 100644 --- a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc @@ -14,12 +14,12 @@ #include #include "absl/base/macros.h" +#include "api/units/time_delta.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet/bye.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" -#include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/rate_limiter.h" #include "test/gmock.h" #include "test/gtest.h" @@ -72,43 +72,50 @@ static const uint32_t kStartRtpTimestamp = 0x34567; static const uint32_t kRtpTimestamp = 0x45678; std::unique_ptr CreateRtcpSender( - const RtpRtcpInterface::Configuration& config, + const RTCPSender::Configuration& config, bool init_timestamps = true) { auto rtcp_sender = std::make_unique(config); rtcp_sender->SetRemoteSSRC(kRemoteSsrc); if (init_timestamps) { rtcp_sender->SetTimestampOffset(kStartRtpTimestamp); - rtcp_sender->SetLastRtpTime(kRtpTimestamp, - config.clock->TimeInMilliseconds(), + rtcp_sender->SetLastRtpTime(kRtpTimestamp, config.clock->CurrentTime(), /*payload_type=*/0); } return rtcp_sender; } - } // namespace class RtcpSenderTest : public ::testing::Test { protected: RtcpSenderTest() : clock_(1335900000), - receive_statistics_(ReceiveStatistics::Create(&clock_)), - retransmission_rate_limiter_(&clock_, 1000) { - RtpRtcpInterface::Configuration configuration = GetDefaultConfig(); - rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl2(configuration)); + receive_statistics_(ReceiveStatistics::Create(&clock_)) { + rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl2(GetDefaultRtpRtcpConfig())); } - RtpRtcpInterface::Configuration GetDefaultConfig() { - RtpRtcpInterface::Configuration configuration; + RTCPSender::Configuration GetDefaultConfig() { + RTCPSender::Configuration configuration; configuration.audio = false; configuration.clock = &clock_; configuration.outgoing_transport = &test_transport_; - configuration.retransmission_rate_limiter = &retransmission_rate_limiter_; - configuration.rtcp_report_interval_ms = 1000; + configuration.rtcp_report_interval = TimeDelta::Millis(1000); configuration.receive_statistics = receive_statistics_.get(); configuration.local_media_ssrc = kSenderSsrc; return configuration; } + RtpRtcpInterface::Configuration GetDefaultRtpRtcpConfig() { + RTCPSender::Configuration config = GetDefaultConfig(); + RtpRtcpInterface::Configuration result; + result.audio = config.audio; + result.clock = config.clock; + result.outgoing_transport = config.outgoing_transport; + 
result.rtcp_report_interval_ms = config.rtcp_report_interval->ms(); + result.receive_statistics = config.receive_statistics; + result.local_media_ssrc = config.local_media_ssrc; + return result; + } + void InsertIncomingPacket(uint32_t remote_ssrc, uint16_t seq_num) { RtpPacketReceived packet; packet.SetSsrc(remote_ssrc); @@ -128,7 +135,6 @@ class RtcpSenderTest : public ::testing::Test { TestTransport test_transport_; std::unique_ptr receive_statistics_; std::unique_ptr rtp_rtcp_impl_; - RateLimiter retransmission_rate_limiter_; }; TEST_F(RtcpSenderTest, SetRtcpStatus) { @@ -160,7 +166,7 @@ TEST_F(RtcpSenderTest, SendSr) { rtcp_sender->SetSendingStatus(feedback_state, true); feedback_state.packets_sent = kPacketCount; feedback_state.media_bytes_sent = kOctetCount; - NtpTime ntp = TimeMicrosToNtp(clock_.TimeInMicroseconds()); + NtpTime ntp = clock_.CurrentNtpTime(); EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr)); EXPECT_EQ(1, parser()->sender_report()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->sender_report()->sender_ssrc()); @@ -207,11 +213,11 @@ TEST_F(RtcpSenderTest, SendConsecutiveSrWithExactSlope) { } TEST_F(RtcpSenderTest, DoNotSendSrBeforeRtp) { - RtpRtcpInterface::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &test_transport_; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); config.local_media_ssrc = kSenderSsrc; auto rtcp_sender = CreateRtcpSender(config, /*init_timestamps=*/false); rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); @@ -228,11 +234,11 @@ TEST_F(RtcpSenderTest, DoNotSendSrBeforeRtp) { } TEST_F(RtcpSenderTest, DoNotSendCompundBeforeRtp) { - RtpRtcpInterface::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &test_transport_; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); config.local_media_ssrc = kSenderSsrc; auto rtcp_sender = CreateRtcpSender(config, /*init_timestamps=*/false); rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); @@ -511,12 +517,12 @@ TEST_F(RtcpSenderTest, SendXrWithMultipleDlrrSubBlocks) { } TEST_F(RtcpSenderTest, SendXrWithRrtr) { - RtpRtcpInterface::Configuration config = GetDefaultConfig(); + RTCPSender::Configuration config = GetDefaultConfig(); config.non_sender_rtt_measurement = true; auto rtcp_sender = CreateRtcpSender(config); rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); rtcp_sender->SetSendingStatus(feedback_state(), false); - NtpTime ntp = TimeMicrosToNtp(clock_.TimeInMicroseconds()); + NtpTime ntp = clock_.CurrentNtpTime(); EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); EXPECT_EQ(1, parser()->xr()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc()); @@ -526,7 +532,7 @@ TEST_F(RtcpSenderTest, SendXrWithRrtr) { } TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfSending) { - RtpRtcpInterface::Configuration config = GetDefaultConfig(); + RTCPSender::Configuration config = GetDefaultConfig(); config.non_sender_rtt_measurement = true; auto rtcp_sender = CreateRtcpSender(config); rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); @@ -536,7 +542,7 @@ TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfSending) { } TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfNotEnabled) { - RtpRtcpInterface::Configuration config = GetDefaultConfig(); + RTCPSender::Configuration config = 
GetDefaultConfig(); config.non_sender_rtt_measurement = false; auto rtcp_sender = CreateRtcpSender(config); rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); @@ -547,12 +553,12 @@ TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfNotEnabled) { TEST_F(RtcpSenderTest, TestRegisterRtcpPacketTypeObserver) { RtcpPacketTypeCounterObserverImpl observer; - RtpRtcpInterface::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &test_transport_; config.rtcp_packet_type_counter_observer = &observer; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); auto rtcp_sender = CreateRtcpSender(config); rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli)); @@ -644,16 +650,16 @@ TEST_F(RtcpSenderTest, ByeMustBeLast) { })); // Re-configure rtcp_sender with mock_transport_ - RtpRtcpInterface::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &mock_transport; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); config.local_media_ssrc = kSenderSsrc; auto rtcp_sender = CreateRtcpSender(config); rtcp_sender->SetTimestampOffset(kStartRtpTimestamp); - rtcp_sender->SetLastRtpTime(kRtpTimestamp, clock_.TimeInMilliseconds(), + rtcp_sender->SetLastRtpTime(kRtpTimestamp, clock_.CurrentTime(), /*payload_type=*/0); // Set up REMB info to be included with BYE. diff --git a/modules/rtp_rtcp/source/rtcp_transceiver.cc b/modules/rtp_rtcp/source/rtcp_transceiver.cc index 1de581849b..41fa5e6206 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver.cc @@ -14,6 +14,7 @@ #include #include +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "rtc_base/checks.h" #include "rtc_base/event.h" @@ -23,7 +24,8 @@ namespace webrtc { RtcpTransceiver::RtcpTransceiver(const RtcpTransceiverConfig& config) - : task_queue_(config.task_queue), + : clock_(config.clock), + task_queue_(config.task_queue), rtcp_transceiver_(std::make_unique(config)) { RTC_DCHECK(task_queue_); } @@ -82,9 +84,9 @@ void RtcpTransceiver::SetReadyToSend(bool ready) { void RtcpTransceiver::ReceivePacket(rtc::CopyOnWriteBuffer packet) { RTC_CHECK(rtcp_transceiver_); RtcpTransceiverImpl* ptr = rtcp_transceiver_.get(); - int64_t now_us = rtc::TimeMicros(); - task_queue_->PostTask(ToQueuedTask( - [ptr, packet, now_us] { ptr->ReceivePacket(packet, now_us); })); + Timestamp now = clock_->CurrentTime(); + task_queue_->PostTask( + ToQueuedTask([ptr, packet, now] { ptr->ReceivePacket(packet, now); })); } void RtcpTransceiver::SendCompoundPacket() { diff --git a/modules/rtp_rtcp/source/rtcp_transceiver.h b/modules/rtp_rtcp/source/rtcp_transceiver.h index 2d1f37cd44..52f4610716 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver.h +++ b/modules/rtp_rtcp/source/rtcp_transceiver.h @@ -20,6 +20,7 @@ #include "modules/rtp_rtcp/source/rtcp_transceiver_config.h" #include "modules/rtp_rtcp/source/rtcp_transceiver_impl.h" #include "rtc_base/copy_on_write_buffer.h" +#include "system_wrappers/include/clock.h" namespace webrtc { // @@ -93,6 +94,7 @@ class RtcpTransceiver : public RtcpFeedbackSenderInterface { void SendFullIntraRequest(std::vector ssrcs, bool new_request); private: + Clock* const clock_; TaskQueueBase* const 
task_queue_; std::unique_ptr rtcp_transceiver_; }; diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc index 0102616d59..5753ffd692 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc @@ -40,7 +40,7 @@ namespace webrtc { namespace { struct SenderReportTimes { - int64_t local_received_time_us; + Timestamp local_received_time; NtpTime remote_sent_time; }; @@ -92,9 +92,7 @@ RtcpTransceiverImpl::RtcpTransceiverImpl(const RtcpTransceiverConfig& config) : config_(config), ready_to_send_(config.initial_ready_to_send) { RTC_CHECK(config_.Validate()); if (ready_to_send_ && config_.schedule_periodic_compound_packets) { - config_.task_queue->PostTask(ToQueuedTask([this] { - SchedulePeriodicCompoundPackets(config_.initial_report_delay_ms); - })); + SchedulePeriodicCompoundPackets(config_.initial_report_delay_ms); } } @@ -133,13 +131,13 @@ void RtcpTransceiverImpl::SetReadyToSend(bool ready) { } void RtcpTransceiverImpl::ReceivePacket(rtc::ArrayView packet, - int64_t now_us) { + Timestamp now) { while (!packet.empty()) { rtcp::CommonHeader rtcp_block; if (!rtcp_block.Parse(packet.data(), packet.size())) return; - HandleReceivedPacket(rtcp_block, now_us); + HandleReceivedPacket(rtcp_block, now); // TODO(danilchap): Use packet.remove_prefix() when that function exists. packet = packet.subview(rtcp_block.packet_size()); @@ -228,16 +226,16 @@ void RtcpTransceiverImpl::SendFullIntraRequest( void RtcpTransceiverImpl::HandleReceivedPacket( const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us) { + Timestamp now) { switch (rtcp_packet_header.type()) { case rtcp::Bye::kPacketType: HandleBye(rtcp_packet_header); break; case rtcp::SenderReport::kPacketType: - HandleSenderReport(rtcp_packet_header, now_us); + HandleSenderReport(rtcp_packet_header, now); break; case rtcp::ExtendedReports::kPacketType: - HandleExtendedReports(rtcp_packet_header, now_us); + HandleExtendedReports(rtcp_packet_header, now); break; } } @@ -256,17 +254,14 @@ void RtcpTransceiverImpl::HandleBye( void RtcpTransceiverImpl::HandleSenderReport( const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us) { + Timestamp now) { rtcp::SenderReport sender_report; if (!sender_report.Parse(rtcp_packet_header)) return; RemoteSenderState& remote_sender = remote_senders_[sender_report.sender_ssrc()]; - absl::optional& last = - remote_sender.last_received_sender_report; - last.emplace(); - last->local_received_time_us = now_us; - last->remote_sent_time = sender_report.ntp(); + remote_sender.last_received_sender_report = + absl::optional({now, sender_report.ntp()}); for (MediaReceiverRtcpObserver* observer : remote_sender.observers) observer->OnSenderReport(sender_report.sender_ssrc(), sender_report.ntp(), @@ -275,26 +270,27 @@ void RtcpTransceiverImpl::HandleSenderReport( void RtcpTransceiverImpl::HandleExtendedReports( const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us) { + Timestamp now) { rtcp::ExtendedReports extended_reports; if (!extended_reports.Parse(rtcp_packet_header)) return; if (extended_reports.dlrr()) - HandleDlrr(extended_reports.dlrr(), now_us); + HandleDlrr(extended_reports.dlrr(), now); if (extended_reports.target_bitrate()) HandleTargetBitrate(*extended_reports.target_bitrate(), extended_reports.sender_ssrc()); } -void RtcpTransceiverImpl::HandleDlrr(const rtcp::Dlrr& dlrr, int64_t now_us) { +void RtcpTransceiverImpl::HandleDlrr(const rtcp::Dlrr& dlrr, Timestamp now) { if 
(!config_.non_sender_rtt_measurement || config_.rtt_observer == nullptr) return; // Delay and last_rr are transferred using 32bit compact ntp resolution. // Convert packet arrival time to same format through 64bit ntp format. - uint32_t receive_time_ntp = CompactNtp(TimeMicrosToNtp(now_us)); + uint32_t receive_time_ntp = + CompactNtp(config_.clock->ConvertTimestampToNtpTime(now)); for (const rtcp::ReceiveTimeInfo& rti : dlrr.sub_blocks()) { if (rti.ssrc != config_.feedback_ssrc) continue; @@ -353,13 +349,16 @@ void RtcpTransceiverImpl::SchedulePeriodicCompoundPackets(int64_t delay_ms) { void RtcpTransceiverImpl::CreateCompoundPacket(PacketSender* sender) { RTC_DCHECK(sender->IsEmpty()); const uint32_t sender_ssrc = config_.feedback_ssrc; - int64_t now_us = rtc::TimeMicros(); + Timestamp now = config_.clock->CurrentTime(); rtcp::ReceiverReport receiver_report; receiver_report.SetSenderSsrc(sender_ssrc); - receiver_report.SetReportBlocks(CreateReportBlocks(now_us)); - sender->AppendPacket(receiver_report); + receiver_report.SetReportBlocks(CreateReportBlocks(now)); + if (config_.rtcp_mode == RtcpMode::kCompound || + !receiver_report.report_blocks().empty()) { + sender->AppendPacket(receiver_report); + } - if (!config_.cname.empty()) { + if (!config_.cname.empty() && !sender->IsEmpty()) { rtcp::Sdes sdes; bool added = sdes.AddCName(config_.feedback_ssrc, config_.cname); RTC_DCHECK(added) << "Failed to add cname " << config_.cname @@ -377,7 +376,7 @@ void RtcpTransceiverImpl::CreateCompoundPacket(PacketSender* sender) { rtcp::ExtendedReports xr; rtcp::Rrtr rrtr; - rrtr.SetNtp(TimeMicrosToNtp(now_us)); + rrtr.SetNtp(config_.clock->ConvertTimestampToNtpTime(now)); xr.SetRrtr(rrtr); xr.SetSenderSsrc(sender_ssrc); @@ -428,7 +427,7 @@ void RtcpTransceiverImpl::SendImmediateFeedback( } std::vector RtcpTransceiverImpl::CreateReportBlocks( - int64_t now_us) { + Timestamp now) { if (!config_.receive_statistics) return {}; // TODO(danilchap): Support sending more than @@ -448,7 +447,7 @@ std::vector RtcpTransceiverImpl::CreateReportBlocks( *it->second.last_received_sender_report; last_sr = CompactNtp(last_sender_report.remote_sent_time); last_delay = SaturatedUsToCompactNtp( - now_us - last_sender_report.local_received_time_us); + now.us() - last_sender_report.local_received_time.us()); report_block.SetLastSr(last_sr); report_block.SetDelayLastSr(last_delay); } diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl.h b/modules/rtp_rtcp/source/rtcp_transceiver_impl.h index 6a6454662c..bcdee83e56 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_impl.h +++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl.h @@ -18,6 +18,7 @@ #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" #include "modules/rtp_rtcp/source/rtcp_packet/remb.h" @@ -48,7 +49,7 @@ class RtcpTransceiverImpl { void SetReadyToSend(bool ready); - void ReceivePacket(rtc::ArrayView packet, int64_t now_us); + void ReceivePacket(rtc::ArrayView packet, Timestamp now); void SendCompoundPacket(); @@ -76,15 +77,15 @@ class RtcpTransceiverImpl { struct RemoteSenderState; void HandleReceivedPacket(const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us); + Timestamp now); // Individual rtcp packet handlers. 
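Editor's note: CreateReportBlocks above fills LastSr from the stored remote NTP time of the last sender report and DelayLastSr from the locally measured time since that report arrived, both in 1/65536 s units. A simplified sketch of those two fields; the saturation helper below is an assumption modeled on SaturatedUsToCompactNtp, not the actual code:

#include <chrono>
#include <cstdint>
#include <cstdio>

struct SenderReportTimesSketch {
  std::chrono::steady_clock::time_point local_received_time;
  uint32_t remote_ntp_middle32;  // CompactNtp of the remote SR timestamp.
};

uint32_t SaturatedDelayToCompactNtpSketch(std::chrono::microseconds delay) {
  if (delay.count() <= 0) return 0;
  const int64_t units = delay.count() * 65536 / 1000000;  // 1 s == 65536 units.
  return units > 0xFFFFFFFFLL ? 0xFFFFFFFFu : static_cast<uint32_t>(units);
}

int main() {
  using namespace std::chrono;
  const auto now = steady_clock::now();
  // Pretend the last SR arrived 250 ms ago with this remote compact NTP stamp.
  const SenderReportTimesSketch last_sr{now - milliseconds(250), 0x12345678};
  const uint32_t last_sr_field = last_sr.remote_ntp_middle32;
  const uint32_t delay_field = SaturatedDelayToCompactNtpSketch(
      duration_cast<microseconds>(now - last_sr.local_received_time));
  std::printf("LSR=0x%08x DLSR=%u (~%.1f ms)\n", last_sr_field, delay_field,
              delay_field * 1000.0 / 65536.0);
}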
void HandleBye(const rtcp::CommonHeader& rtcp_packet_header); void HandleSenderReport(const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us); + Timestamp now); void HandleExtendedReports(const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us); + Timestamp now); // Extended Reports blocks handlers. - void HandleDlrr(const rtcp::Dlrr& dlrr, int64_t now_us); + void HandleDlrr(const rtcp::Dlrr& dlrr, Timestamp now); void HandleTargetBitrate(const rtcp::TargetBitrate& target_bitrate, uint32_t remote_ssrc); @@ -97,7 +98,7 @@ class RtcpTransceiverImpl { void SendPeriodicCompoundPacket(); void SendImmediateFeedback(const rtcp::RtcpPacket& rtcp_packet); // Generate Report Blocks to be send in Sender or Receiver Report. - std::vector CreateReportBlocks(int64_t now_us); + std::vector CreateReportBlocks(Timestamp now); const RtcpTransceiverConfig config_; diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc index b7694df1e8..06e1083aa8 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc @@ -16,6 +16,8 @@ #include "absl/memory/memory.h" #include "api/rtp_headers.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "api/video/video_bitrate_allocation.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h" @@ -24,8 +26,9 @@ #include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h" #include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/event.h" -#include "rtc_base/fake_clock.h" #include "rtc_base/task_queue_for_test.h" +#include "rtc_base/time_utils.h" +#include "system_wrappers/include/clock.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -35,6 +38,7 @@ namespace { using ::testing::_; using ::testing::ElementsAre; +using ::testing::NiceMock; using ::testing::Return; using ::testing::SizeIs; using ::testing::StrictMock; @@ -46,8 +50,10 @@ using ::webrtc::NtpTime; using ::webrtc::RtcpTransceiverConfig; using ::webrtc::RtcpTransceiverImpl; using ::webrtc::SaturatedUsToCompactNtp; +using ::webrtc::SimulatedClock; using ::webrtc::TaskQueueForTest; -using ::webrtc::TimeMicrosToNtp; +using ::webrtc::TimeDelta; +using ::webrtc::Timestamp; using ::webrtc::VideoBitrateAllocation; using ::webrtc::rtcp::Bye; using ::webrtc::rtcp::CompoundPacket; @@ -142,9 +148,11 @@ RtcpTransceiverConfig DefaultTestConfig() { } TEST(RtcpTransceiverImplTest, NeedToStopPeriodicTaskToDestroyOnTaskQueue) { + SimulatedClock clock(0); FakeRtcpTransport transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.task_queue = queue.Get(); config.schedule_periodic_compound_packets = true; config.outgoing_transport = &transport; @@ -161,10 +169,31 @@ TEST(RtcpTransceiverImplTest, NeedToStopPeriodicTaskToDestroyOnTaskQueue) { ASSERT_TRUE(done.Wait(/*milliseconds=*/1000)); } +TEST(RtcpTransceiverImplTest, CanBeDestroyedRightAfterCreation) { + SimulatedClock clock(0); + FakeRtcpTransport transport; + TaskQueueForTest queue("rtcp"); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + config.task_queue = queue.Get(); + config.schedule_periodic_compound_packets = true; + config.outgoing_transport = &transport; + + rtc::Event done; + queue.PostTask([&] { + RtcpTransceiverImpl rtcp_transceiver(config); + 
rtcp_transceiver.StopPeriodicTask(); + done.Set(); + }); + ASSERT_TRUE(done.Wait(/*milliseconds=*/1000)); +} + TEST(RtcpTransceiverImplTest, CanDestroyAfterTaskQueue) { + SimulatedClock clock(0); FakeRtcpTransport transport; auto* queue = new TaskQueueForTest("rtcp"); RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.task_queue = queue->Get(); config.schedule_periodic_compound_packets = true; config.outgoing_transport = &transport; @@ -177,9 +206,11 @@ TEST(RtcpTransceiverImplTest, CanDestroyAfterTaskQueue) { } TEST(RtcpTransceiverImplTest, DelaysSendingFirstCompondPacket) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &transport; config.initial_report_delay_ms = 10; config.task_queue = queue.Get(); @@ -202,9 +233,11 @@ TEST(RtcpTransceiverImplTest, DelaysSendingFirstCompondPacket) { } TEST(RtcpTransceiverImplTest, PeriodicallySendsPackets) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &transport; config.initial_report_delay_ms = 0; config.report_period_ms = kReportPeriodMs; @@ -236,9 +269,11 @@ TEST(RtcpTransceiverImplTest, PeriodicallySendsPackets) { } TEST(RtcpTransceiverImplTest, SendCompoundPacketDelaysPeriodicSendPackets) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &transport; config.initial_report_delay_ms = 0; config.report_period_ms = kReportPeriodMs; @@ -282,8 +317,10 @@ TEST(RtcpTransceiverImplTest, SendCompoundPacketDelaysPeriodicSendPackets) { } TEST(RtcpTransceiverImplTest, SendsNoRtcpWhenNetworkStateIsDown) { + SimulatedClock clock(0); MockTransport mock_transport; RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.initial_ready_to_send = false; config.outgoing_transport = &mock_transport; RtcpTransceiverImpl rtcp_transceiver(config); @@ -301,8 +338,10 @@ TEST(RtcpTransceiverImplTest, SendsNoRtcpWhenNetworkStateIsDown) { } TEST(RtcpTransceiverImplTest, SendsRtcpWhenNetworkStateIsUp) { + SimulatedClock clock(0); MockTransport mock_transport; RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.initial_ready_to_send = false; config.outgoing_transport = &mock_transport; RtcpTransceiverImpl rtcp_transceiver(config); @@ -322,9 +361,11 @@ TEST(RtcpTransceiverImplTest, SendsRtcpWhenNetworkStateIsUp) { } TEST(RtcpTransceiverImplTest, SendsPeriodicRtcpWhenNetworkStateIsUp) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.schedule_periodic_compound_packets = true; config.initial_ready_to_send = false; config.outgoing_transport = &transport; @@ -348,7 +389,9 @@ TEST(RtcpTransceiverImplTest, SendsPeriodicRtcpWhenNetworkStateIsUp) { TEST(RtcpTransceiverImplTest, SendsMinimalCompoundPacket) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.cname = "cname"; RtcpPacketParser rtcp_parser; @@ -369,9 +412,52 @@ TEST(RtcpTransceiverImplTest, SendsMinimalCompoundPacket) { EXPECT_EQ(rtcp_parser.sdes()->chunks()[0].cname, config.cname); } +TEST(RtcpTransceiverImplTest, 
AvoidsEmptyPacketsInReducedMode) { + MockTransport transport; + EXPECT_CALL(transport, SendRtcp).Times(0); + NiceMock receive_statistics; + SimulatedClock clock(0); + + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + config.outgoing_transport = &transport; + config.rtcp_mode = webrtc::RtcpMode::kReducedSize; + config.schedule_periodic_compound_packets = false; + config.receive_statistics = &receive_statistics; + RtcpTransceiverImpl rtcp_transceiver(config); + + rtcp_transceiver.SendCompoundPacket(); +} + +TEST(RtcpTransceiverImplTest, AvoidsEmptyReceiverReportsInReducedMode) { + RtcpPacketParser rtcp_parser; + RtcpParserTransport transport(&rtcp_parser); + NiceMock receive_statistics; + SimulatedClock clock(0); + + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + config.outgoing_transport = &transport; + config.rtcp_mode = webrtc::RtcpMode::kReducedSize; + config.schedule_periodic_compound_packets = false; + config.receive_statistics = &receive_statistics; + // Set it to produce something (RRTR) in the "periodic" rtcp packets. + config.non_sender_rtt_measurement = true; + RtcpTransceiverImpl rtcp_transceiver(config); + + // Rather than waiting for the right time to produce the periodic packet, + // trigger it manually. + rtcp_transceiver.SendCompoundPacket(); + + EXPECT_EQ(rtcp_parser.receiver_report()->num_packets(), 0); + EXPECT_GT(rtcp_parser.xr()->num_packets(), 0); +} + TEST(RtcpTransceiverImplTest, SendsNoRembInitially) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -387,7 +473,9 @@ TEST(RtcpTransceiverImplTest, SendsNoRembInitially) { TEST(RtcpTransceiverImplTest, SetRembIncludesRembInNextCompoundPacket) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -406,7 +494,9 @@ TEST(RtcpTransceiverImplTest, SetRembIncludesRembInNextCompoundPacket) { TEST(RtcpTransceiverImplTest, SetRembUpdatesValuesToSend) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -431,7 +521,9 @@ TEST(RtcpTransceiverImplTest, SetRembUpdatesValuesToSend) { TEST(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChange) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.send_remb_on_change = true; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; @@ -457,7 +549,9 @@ TEST(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChange) { TEST(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChangeReducedSize) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.send_remb_on_change = true; config.rtcp_mode = webrtc::RtcpMode::kReducedSize; config.feedback_ssrc = kSenderSsrc; @@ -475,7 +569,9 @@ TEST(RtcpTransceiverImplTest, TEST(RtcpTransceiverImplTest, SetRembIncludesRembInAllCompoundPackets) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; 
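  // Since the migration from rtc::TimeMicros() to webrtc::Timestamp the
  // transceiver reads time exclusively through config.clock (CurrentTime(),
  // ConvertTimestampToNtpTime()), which is why every test in this file now
  // injects a SimulatedClock.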
config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -493,7 +589,9 @@ TEST(RtcpTransceiverImplTest, SetRembIncludesRembInAllCompoundPackets) { TEST(RtcpTransceiverImplTest, SendsNoRembAfterUnset) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -522,7 +620,9 @@ TEST(RtcpTransceiverImplTest, ReceiverReportUsesReceiveStatistics) { EXPECT_CALL(receive_statistics, RtcpReportBlocks(_)) .WillRepeatedly(Return(report_blocks)); + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -543,9 +643,12 @@ TEST(RtcpTransceiverImplTest, ReceiverReportUsesReceiveStatistics) { TEST(RtcpTransceiverImplTest, MultipleObserversOnSameSsrc) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer2); @@ -559,14 +662,17 @@ TEST(RtcpTransceiverImplTest, MultipleObserversOnSameSsrc) { EXPECT_CALL(observer1, OnSenderReport(kRemoteSsrc, kRemoteNtp, kRemoteRtp)); EXPECT_CALL(observer2, OnSenderReport(kRemoteSsrc, kRemoteNtp, kRemoteRtp)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, DoesntCallsObserverAfterRemoved) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer2); @@ -578,15 +684,18 @@ TEST(RtcpTransceiverImplTest, DoesntCallsObserverAfterRemoved) { EXPECT_CALL(observer1, OnSenderReport(_, _, _)).Times(0); EXPECT_CALL(observer2, OnSenderReport(_, _, _)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnSenderReportBySenderSsrc) { const uint32_t kRemoteSsrc1 = 12345; const uint32_t kRemoteSsrc2 = 22345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2); @@ -600,15 +709,18 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnSenderReportBySenderSsrc) { EXPECT_CALL(observer1, OnSenderReport(kRemoteSsrc1, kRemoteNtp, kRemoteRtp)); EXPECT_CALL(observer2, OnSenderReport(_, _, _)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, 
Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnByeBySenderSsrc) { const uint32_t kRemoteSsrc1 = 12345; const uint32_t kRemoteSsrc2 = 22345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2); @@ -618,15 +730,18 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnByeBySenderSsrc) { EXPECT_CALL(observer1, OnBye(kRemoteSsrc1)); EXPECT_CALL(observer2, OnBye(_)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnTargetBitrateBySenderSsrc) { const uint32_t kRemoteSsrc1 = 12345; const uint32_t kRemoteSsrc2 = 22345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2); @@ -647,13 +762,16 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnTargetBitrateBySenderSsrc) { bitrate_allocation.SetBitrate(1, 1, /*bitrate_bps=*/80000); EXPECT_CALL(observer1, OnBitrateAllocation(kRemoteSsrc1, bitrate_allocation)); EXPECT_CALL(observer2, OnBitrateAllocation(_, _)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, SkipsIncorrectTargetBitrateEntries) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); MockMediaReceiverRtcpObserver observer; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer); webrtc::rtcp::TargetBitrate target_bitrate; @@ -669,13 +787,16 @@ TEST(RtcpTransceiverImplTest, SkipsIncorrectTargetBitrateEntries) { VideoBitrateAllocation expected_allocation; expected_allocation.SetBitrate(0, 0, /*bitrate_bps=*/10000); EXPECT_CALL(observer, OnBitrateAllocation(kRemoteSsrc, expected_allocation)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnByeBehindSenderReport) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); MockMediaReceiverRtcpObserver observer; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer); CompoundPacket compound; @@ -689,13 +810,16 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnByeBehindSenderReport) { EXPECT_CALL(observer, OnBye(kRemoteSsrc)); EXPECT_CALL(observer, OnSenderReport(kRemoteSsrc, _, _)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, 
CallsObserverOnByeBehindUnknownRtcpPacket) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); MockMediaReceiverRtcpObserver observer; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer); CompoundPacket compound; @@ -708,7 +832,7 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnByeBehindUnknownRtcpPacket) { auto raw_packet = compound.Build(); EXPECT_CALL(observer, OnBye(kRemoteSsrc)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, @@ -722,7 +846,9 @@ TEST(RtcpTransceiverImplTest, EXPECT_CALL(receive_statistics, RtcpReportBlocks(_)) .WillOnce(Return(statistics_report_blocks)); + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -736,7 +862,7 @@ TEST(RtcpTransceiverImplTest, sr.SetSenderSsrc(kRemoteSsrc1); sr.SetNtp(kRemoteNtp); auto raw_packet = sr.Build(); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); // Trigger sending ReceiverReport. rtcp_transceiver.SendCompoundPacket(); @@ -759,7 +885,7 @@ TEST(RtcpTransceiverImplTest, WhenSendsReceiverReportCalculatesDelaySinceLastSenderReport) { const uint32_t kRemoteSsrc1 = 4321; const uint32_t kRemoteSsrc2 = 5321; - rtc::ScopedFakeClock clock; + std::vector statistics_report_blocks(2); statistics_report_blocks[0].SetMediaSsrc(kRemoteSsrc1); statistics_report_blocks[1].SetMediaSsrc(kRemoteSsrc2); @@ -767,7 +893,9 @@ TEST(RtcpTransceiverImplTest, EXPECT_CALL(receive_statistics, RtcpReportBlocks(_)) .WillOnce(Return(statistics_report_blocks)); + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -775,18 +903,19 @@ TEST(RtcpTransceiverImplTest, config.receive_statistics = &receive_statistics; RtcpTransceiverImpl rtcp_transceiver(config); - auto receive_sender_report = [&rtcp_transceiver](uint32_t remote_ssrc) { + auto receive_sender_report = [&rtcp_transceiver, + &clock](uint32_t remote_ssrc) { SenderReport sr; sr.SetSenderSsrc(remote_ssrc); auto raw_packet = sr.Build(); - rtcp_transceiver.ReceivePacket(raw_packet, rtc::TimeMicros()); + rtcp_transceiver.ReceivePacket(raw_packet, clock.CurrentTime()); }; receive_sender_report(kRemoteSsrc1); - clock.AdvanceTime(webrtc::TimeDelta::Millis(100)); + clock.AdvanceTime(TimeDelta::Millis(100)); receive_sender_report(kRemoteSsrc2); - clock.AdvanceTime(webrtc::TimeDelta::Millis(100)); + clock.AdvanceTime(TimeDelta::Millis(100)); // Trigger ReceiverReport back. 
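  // At this point the sender report from kRemoteSsrc1 is 200 ms old and the
  // one from kRemoteSsrc2 is 100 ms old, so the corresponding report blocks
  // should carry those delays as DLSR values in compact NTP, matching the
  // now.us() - local_received_time.us() computation in CreateReportBlocks().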
rtcp_transceiver.SendCompoundPacket(); @@ -808,7 +937,9 @@ TEST(RtcpTransceiverImplTest, SendsNack) { const uint32_t kSenderSsrc = 1234; const uint32_t kRemoteSsrc = 4321; std::vector kMissingSequenceNumbers = {34, 37, 38}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -827,7 +958,9 @@ TEST(RtcpTransceiverImplTest, SendsNack) { TEST(RtcpTransceiverImplTest, RequestKeyFrameWithPictureLossIndication) { const uint32_t kSenderSsrc = 1234; const uint32_t kRemoteSsrc = 4321; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -846,7 +979,9 @@ TEST(RtcpTransceiverImplTest, RequestKeyFrameWithPictureLossIndication) { TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFullIntraRequest) { const uint32_t kSenderSsrc = 1234; const uint32_t kRemoteSsrcs[] = {4321, 5321}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -863,7 +998,9 @@ TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFullIntraRequest) { } TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFirIncreaseSeqNoPerSsrc) { + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -893,7 +1030,9 @@ TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFirIncreaseSeqNoPerSsrc) { } TEST(RtcpTransceiverImplTest, SendFirDoesNotIncreaseSeqNoIfOldRequest) { + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -919,7 +1058,9 @@ TEST(RtcpTransceiverImplTest, SendFirDoesNotIncreaseSeqNoIfOldRequest) { TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesCompoundPacket) { const uint32_t kRemoteSsrcs[] = {4321}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; // Turn periodic off to ensure sent rtcp packet is explicitly requested. config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -938,7 +1079,9 @@ TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesCompoundPacket) { TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesReducedSizePacket) { const uint32_t kRemoteSsrcs[] = {4321}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; // Turn periodic off to ensure sent rtcp packet is explicitly requested. 
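  // In reduced-size mode (RFC 5506) the requested feedback may go out without
  // the receiver report and SDES that a compound packet has to start with;
  // with the CreateCompoundPacket() change above, an empty receiver report is
  // only appended when rtcp_mode is kCompound.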
config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -957,8 +1100,9 @@ TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesReducedSizePacket) { TEST(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) { const uint32_t kSenderSsrc = 4321; - rtc::ScopedFakeClock clock; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -968,7 +1112,7 @@ TEST(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) { RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.SendCompoundPacket(); - NtpTime ntp_time_now = TimeMicrosToNtp(rtc::TimeMicros()); + NtpTime ntp_time_now = clock.CurrentNtpTime(); EXPECT_EQ(rtcp_parser.xr()->num_packets(), 1); EXPECT_EQ(rtcp_parser.xr()->sender_ssrc(), kSenderSsrc); @@ -977,7 +1121,9 @@ TEST(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) { } TEST(RtcpTransceiverImplTest, SendsNoXrRrtrWhenDisabled) { + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -995,9 +1141,11 @@ TEST(RtcpTransceiverImplTest, SendsNoXrRrtrWhenDisabled) { TEST(RtcpTransceiverImplTest, CalculatesRoundTripTimeOnDlrr) { const uint32_t kSenderSsrc = 4321; + SimulatedClock clock(0); MockRtcpRttStats rtt_observer; MockTransport null_transport; RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; config.outgoing_transport = &null_transport; @@ -1005,25 +1153,27 @@ TEST(RtcpTransceiverImplTest, CalculatesRoundTripTimeOnDlrr) { config.rtt_observer = &rtt_observer; RtcpTransceiverImpl rtcp_transceiver(config); - int64_t time_us = 12345678; + Timestamp time = Timestamp::Micros(12345678); webrtc::rtcp::ReceiveTimeInfo rti; rti.ssrc = kSenderSsrc; - rti.last_rr = CompactNtp(TimeMicrosToNtp(time_us)); + rti.last_rr = CompactNtp(clock.ConvertTimestampToNtpTime(time)); rti.delay_since_last_rr = SaturatedUsToCompactNtp(10 * 1000); webrtc::rtcp::ExtendedReports xr; xr.AddDlrrItem(rti); auto raw_packet = xr.Build(); EXPECT_CALL(rtt_observer, OnRttUpdate(100 /* rtt_ms */)); - rtcp_transceiver.ReceivePacket(raw_packet, time_us + 110 * 1000); + rtcp_transceiver.ReceivePacket(raw_packet, time + TimeDelta::Millis(110)); } TEST(RtcpTransceiverImplTest, IgnoresUnknownSsrcInDlrr) { const uint32_t kSenderSsrc = 4321; const uint32_t kUnknownSsrc = 4322; + SimulatedClock clock(0); MockRtcpRttStats rtt_observer; MockTransport null_transport; RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; config.outgoing_transport = &null_transport; @@ -1031,16 +1181,16 @@ TEST(RtcpTransceiverImplTest, IgnoresUnknownSsrcInDlrr) { config.rtt_observer = &rtt_observer; RtcpTransceiverImpl rtcp_transceiver(config); - int64_t time_us = 12345678; + Timestamp time = Timestamp::Micros(12345678); webrtc::rtcp::ReceiveTimeInfo rti; rti.ssrc = kUnknownSsrc; - rti.last_rr = CompactNtp(TimeMicrosToNtp(time_us)); + rti.last_rr = CompactNtp(clock.ConvertTimestampToNtpTime(time)); webrtc::rtcp::ExtendedReports xr; xr.AddDlrrItem(rti); auto raw_packet = xr.Build(); EXPECT_CALL(rtt_observer, OnRttUpdate(_)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, time_us + 100000); + rtcp_transceiver.ReceivePacket(raw_packet, time + 
TimeDelta::Millis(100)); } } // namespace diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc b/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc index 9c181c6526..290aa48ff4 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc @@ -18,6 +18,7 @@ #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "rtc_base/event.h" #include "rtc_base/task_queue_for_test.h" +#include "system_wrappers/include/clock.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -34,6 +35,7 @@ using ::testing::NiceMock; using ::webrtc::MockTransport; using ::webrtc::RtcpTransceiver; using ::webrtc::RtcpTransceiverConfig; +using ::webrtc::SimulatedClock; using ::webrtc::TaskQueueForTest; using ::webrtc::rtcp::RemoteEstimate; using ::webrtc::rtcp::RtcpPacket; @@ -57,9 +59,11 @@ void WaitPostedTasks(TaskQueueForTest* queue) { } TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOffTaskQueue) { + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); EXPECT_CALL(outgoing_transport, SendRtcp(_, _)) @@ -74,9 +78,11 @@ TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOffTaskQueue) { } TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOnTaskQueue) { + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); EXPECT_CALL(outgoing_transport, SendRtcp(_, _)) @@ -94,9 +100,11 @@ TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOnTaskQueue) { } TEST(RtcpTransceiverTest, CanBeDestroyedOnTaskQueue) { + SimulatedClock clock(0); NiceMock outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); auto rtcp_transceiver = std::make_unique(config); @@ -110,9 +118,11 @@ TEST(RtcpTransceiverTest, CanBeDestroyedOnTaskQueue) { } TEST(RtcpTransceiverTest, CanBeDestroyedWithoutBlocking) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); NiceMock outgoing_transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); auto* rtcp_transceiver = new RtcpTransceiver(config); @@ -131,9 +141,11 @@ TEST(RtcpTransceiverTest, CanBeDestroyedWithoutBlocking) { } TEST(RtcpTransceiverTest, MaySendPacketsAfterDestructor) { // i.e. Be careful! + SimulatedClock clock(0); NiceMock outgoing_transport; // Must outlive queue below. 
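  // The SimulatedClock is declared first for the same reason: tasks that run
  // after the RtcpTransceiver destructor may still read time through
  // config.clock, so it has to outlive the task queue as well.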
TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); auto* rtcp_transceiver = new RtcpTransceiver(config); @@ -162,9 +174,11 @@ rtc::CopyOnWriteBuffer CreateSenderReport(uint32_t ssrc, uint32_t rtp_time) { TEST(RtcpTransceiverTest, DoesntPostToRtcpObserverAfterCallToRemove) { const uint32_t kRemoteSsrc = 1234; + SimulatedClock clock(0); MockTransport null_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &null_transport; config.task_queue = queue.Get(); RtcpTransceiver rtcp_transceiver(config); @@ -189,9 +203,11 @@ TEST(RtcpTransceiverTest, DoesntPostToRtcpObserverAfterCallToRemove) { TEST(RtcpTransceiverTest, RemoveMediaReceiverRtcpObserverIsNonBlocking) { const uint32_t kRemoteSsrc = 1234; + SimulatedClock clock(0); MockTransport null_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &null_transport; config.task_queue = queue.Get(); RtcpTransceiver rtcp_transceiver(config); @@ -213,9 +229,11 @@ TEST(RtcpTransceiverTest, RemoveMediaReceiverRtcpObserverIsNonBlocking) { } TEST(RtcpTransceiverTest, CanCallSendCompoundPacketFromAnyThread) { + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); @@ -242,9 +260,11 @@ TEST(RtcpTransceiverTest, CanCallSendCompoundPacketFromAnyThread) { } TEST(RtcpTransceiverTest, DoesntSendPacketsAfterStopCallback) { + SimulatedClock clock(0); NiceMock outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); config.schedule_periodic_compound_packets = true; @@ -263,9 +283,11 @@ TEST(RtcpTransceiverTest, DoesntSendPacketsAfterStopCallback) { TEST(RtcpTransceiverTest, SendsCombinedRtcpPacketOnTaskQueue) { static constexpr uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); @@ -300,9 +322,11 @@ TEST(RtcpTransceiverTest, SendsCombinedRtcpPacketOnTaskQueue) { TEST(RtcpTransceiverTest, SendFrameIntraRequestDefaultsToNewRequest) { static constexpr uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc index 11d809693c..974557ce6e 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc @@ -115,5 +115,23 @@ TEST(RtpDependencyDescriptorExtensionTest, buffer, structure, active_chains, descriptor)); } +TEST(RtpDependencyDescriptorExtensionTest, FailsToWriteInvalidDescriptor) { + uint8_t buffer[256]; + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + 
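  // The structure set up here declares a single template with temporal_id 0
  // while the descriptor below asks for temporal_id 1, so FindBestTemplate()
  // cannot find a match; with the writer change further down it records
  // build_failed_ instead of CHECK-failing, making ValueSize() return 0 and
  // Write() return false.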
structure.num_chains = 2; + structure.templates = { + FrameDependencyTemplate().T(0).Dtis("SR").ChainDiffs({2, 2})}; + DependencyDescriptor descriptor; + descriptor.frame_dependencies = structure.templates[0]; + descriptor.frame_dependencies.temporal_id = 1; + + EXPECT_EQ( + RtpDependencyDescriptorExtension::ValueSize(structure, 0b11, descriptor), + 0u); + EXPECT_FALSE(RtpDependencyDescriptorExtension::Write(buffer, structure, 0b11, + descriptor)); +} + } // namespace } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc b/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc index 25d221253b..31df783064 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc @@ -66,6 +66,9 @@ RtpDependencyDescriptorWriter::RtpDependencyDescriptorWriter( } bool RtpDependencyDescriptorWriter::Write() { + if (build_failed_) { + return false; + } WriteMandatoryFields(); if (HasExtendedFields()) { WriteExtendedFields(); @@ -83,6 +86,9 @@ bool RtpDependencyDescriptorWriter::Write() { } int RtpDependencyDescriptorWriter::ValueSizeBits() const { + if (build_failed_) { + return 0; + } static constexpr int kMandatoryFields = 1 + 1 + 6 + 16; int value_size_bits = kMandatoryFields + best_template_.extra_size_bits; if (HasExtendedFields()) { @@ -172,7 +178,10 @@ void RtpDependencyDescriptorWriter::FindBestTemplate() { frame_template.temporal_id; }; auto first = absl::c_find_if(templates, same_layer); - RTC_CHECK(first != templates.end()); + if (first == templates.end()) { + build_failed_ = true; + return; + } auto last = std::find_if_not(first, templates.end(), same_layer); best_template_ = CalculateMatch(first); diff --git a/modules/rtp_rtcp/source/rtp_header_extension_map.cc b/modules/rtp_rtcp/source/rtp_header_extension_map.cc index aebe884c0f..0b5ba474c7 100644 --- a/modules/rtp_rtcp/source/rtp_header_extension_map.cc +++ b/modules/rtp_rtcp/source/rtp_header_extension_map.cc @@ -34,6 +34,7 @@ constexpr ExtensionInfo CreateExtensionInfo() { constexpr ExtensionInfo kExtensions[] = { CreateExtensionInfo(), CreateExtensionInfo(), + CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), diff --git a/modules/rtp_rtcp/source/rtp_header_extensions.cc b/modules/rtp_rtcp/source/rtp_header_extensions.cc index 1c3073e90d..1dd4f54759 100644 --- a/modules/rtp_rtcp/source/rtp_header_extensions.cc +++ b/modules/rtp_rtcp/source/rtp_header_extensions.cc @@ -13,6 +13,7 @@ #include #include +#include #include #include "modules/rtp_rtcp/include/rtp_cvo.h" @@ -186,6 +187,60 @@ bool AudioLevel::Write(rtc::ArrayView data, return true; } +// An RTP Header Extension for Mixer-to-Client Audio Level Indication +// +// https://tools.ietf.org/html/rfc6465 +// +// The form of the audio level extension block: +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ID | len=2 |0| level 1 |0| level 2 |0| level 3 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Sample Audio Level Encoding Using the One-Byte Header Format +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ID | len=3 |0| level 1 |0| level 2 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |0| level 3 | 0 (pad) | ... 
| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Sample Audio Level Encoding Using the Two-Byte Header Format +constexpr RTPExtensionType CsrcAudioLevel::kId; +constexpr uint8_t CsrcAudioLevel::kMaxValueSizeBytes; +constexpr const char CsrcAudioLevel::kUri[]; + +bool CsrcAudioLevel::Parse(rtc::ArrayView data, + std::vector* csrc_audio_levels) { + if (data.size() > kRtpCsrcSize) { + return false; + } + csrc_audio_levels->resize(data.size()); + for (size_t i = 0; i < data.size(); i++) { + (*csrc_audio_levels)[i] = data[i] & 0x7F; + } + return true; +} + +size_t CsrcAudioLevel::ValueSize( + rtc::ArrayView csrc_audio_levels) { + return csrc_audio_levels.size(); +} + +bool CsrcAudioLevel::Write(rtc::ArrayView data, + rtc::ArrayView csrc_audio_levels) { + RTC_CHECK_LE(csrc_audio_levels.size(), kRtpCsrcSize); + if (csrc_audio_levels.size() != data.size()) { + return false; + } + for (size_t i = 0; i < csrc_audio_levels.size(); i++) { + data[i] = csrc_audio_levels[i] & 0x7F; + } + return true; +} + // From RFC 5450: Transmission Time Offsets in RTP Streams. // // The transmission time is signaled to the receiver in-band using the diff --git a/modules/rtp_rtcp/source/rtp_header_extensions.h b/modules/rtp_rtcp/source/rtp_header_extensions.h index f6e7a579ab..b47824afdb 100644 --- a/modules/rtp_rtcp/source/rtp_header_extensions.h +++ b/modules/rtp_rtcp/source/rtp_header_extensions.h @@ -14,6 +14,7 @@ #include #include +#include #include "api/array_view.h" #include "api/rtp_headers.h" @@ -77,6 +78,20 @@ class AudioLevel { uint8_t audio_level); }; +class CsrcAudioLevel { + public: + static constexpr RTPExtensionType kId = kRtpExtensionCsrcAudioLevel; + static constexpr uint8_t kMaxValueSizeBytes = 15; + static constexpr const char kUri[] = + "urn:ietf:params:rtp-hdrext:csrc-audio-level"; + + static bool Parse(rtc::ArrayView data, + std::vector* csrc_audio_levels); + static size_t ValueSize(rtc::ArrayView csrc_audio_levels); + static bool Write(rtc::ArrayView data, + rtc::ArrayView csrc_audio_levels); +}; + class TransmissionOffset { public: using value_type = int32_t; diff --git a/modules/rtp_rtcp/source/rtp_packet.cc b/modules/rtp_rtcp/source/rtp_packet.cc index 84769d0f4b..8523637feb 100644 --- a/modules/rtp_rtcp/source/rtp_packet.cc +++ b/modules/rtp_rtcp/source/rtp_packet.cc @@ -27,6 +27,7 @@ constexpr size_t kFixedHeaderSize = 12; constexpr uint8_t kRtpVersion = 2; constexpr uint16_t kOneByteExtensionProfileId = 0xBEDE; constexpr uint16_t kTwoByteExtensionProfileId = 0x1000; +constexpr uint16_t kTwobyteExtensionProfileIdAppBitsFilter = 0xfff0; constexpr size_t kOneByteExtensionHeaderLength = 1; constexpr size_t kTwoByteExtensionHeaderLength = 2; constexpr size_t kDefaultPacketSize = 1500; @@ -70,8 +71,8 @@ RtpPacket::RtpPacket(const ExtensionManager* extensions, size_t capacity) RtpPacket::~RtpPacket() {} -void RtpPacket::IdentifyExtensions(const ExtensionManager& extensions) { - extensions_ = extensions; +void RtpPacket::IdentifyExtensions(ExtensionManager extensions) { + extensions_ = std::move(extensions); } bool RtpPacket::Parse(const uint8_t* buffer, size_t buffer_size) { @@ -111,8 +112,6 @@ std::vector RtpPacket::Csrcs() const { } void RtpPacket::CopyHeaderFrom(const RtpPacket& packet) { - RTC_DCHECK_GE(capacity(), packet.headers_size()); - marker_ = packet.marker_; payload_type_ = packet.payload_type_; sequence_number_ = packet.sequence_number_; @@ -186,6 +185,7 @@ void RtpPacket::ZeroMutableExtensions() { break; } case RTPExtensionType::kRtpExtensionAudioLevel: + 
case RTPExtensionType::kRtpExtensionCsrcAudioLevel: case RTPExtensionType::kRtpExtensionAbsoluteCaptureTime: case RTPExtensionType::kRtpExtensionColorSpace: case RTPExtensionType::kRtpExtensionGenericFrameDescriptor00: @@ -466,16 +466,6 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) { } payload_offset_ = kFixedHeaderSize + number_of_crcs * 4; - if (has_padding) { - padding_size_ = buffer[size - 1]; - if (padding_size_ == 0) { - RTC_LOG(LS_WARNING) << "Padding was set, but padding size is zero"; - return false; - } - } else { - padding_size_ = 0; - } - extensions_size_ = 0; extension_entries_.clear(); if (has_extension) { @@ -501,7 +491,8 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) { return false; } if (profile != kOneByteExtensionProfileId && - profile != kTwoByteExtensionProfileId) { + (profile & kTwobyteExtensionProfileIdAppBitsFilter) != + kTwoByteExtensionProfileId) { RTC_LOG(LS_WARNING) << "Unsupported rtp extension " << profile; } else { size_t extension_header_length = profile == kOneByteExtensionProfileId @@ -555,6 +546,16 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) { payload_offset_ = extension_offset + extensions_capacity; } + if (has_padding && payload_offset_ < size) { + padding_size_ = buffer[size - 1]; + if (padding_size_ == 0) { + RTC_LOG(LS_WARNING) << "Padding was set, but padding size is zero"; + return false; + } + } else { + padding_size_ = 0; + } + if (payload_offset_ + padding_size_ > size) { return false; } diff --git a/modules/rtp_rtcp/source/rtp_packet.h b/modules/rtp_rtcp/source/rtp_packet.h index aa854f35ab..e2e291cf5d 100644 --- a/modules/rtp_rtcp/source/rtp_packet.h +++ b/modules/rtp_rtcp/source/rtp_packet.h @@ -51,7 +51,7 @@ class RtpPacket { bool Parse(rtc::CopyOnWriteBuffer packet); // Maps extensions id to their types. - void IdentifyExtensions(const ExtensionManager& extensions); + void IdentifyExtensions(ExtensionManager extensions); // Header. bool Marker() const { return marker_; } @@ -65,6 +65,7 @@ class RtpPacket { // Payload. size_t payload_size() const { return payload_size_; } + bool has_padding() const { return buffer_[0] & 0x20; } size_t padding_size() const { return padding_size_; } rtc::ArrayView payload() const { return rtc::MakeArrayView(data() + payload_offset_, payload_size_); @@ -114,6 +115,11 @@ class RtpPacket { bool HasExtension() const; bool HasExtension(ExtensionType type) const; + // Returns whether there is an associated id for the extension and thus it is + // possible to set the extension. + template + bool IsRegistered() const; + template bool GetExtension(FirstValue, Values...) const; @@ -207,6 +213,11 @@ bool RtpPacket::HasExtension() const { return HasExtension(Extension::kId); } +template +bool RtpPacket::IsRegistered() const { + return extensions_.IsRegistered(Extension::kId); +} + template bool RtpPacket::GetExtension(FirstValue first, Values... 
values) const { auto raw = FindExtension(Extension::kId); diff --git a/modules/rtp_rtcp/source/rtp_packet_to_send.h b/modules/rtp_rtcp/source/rtp_packet_to_send.h index 2411deac49..12341ef6cf 100644 --- a/modules/rtp_rtcp/source/rtp_packet_to_send.h +++ b/modules/rtp_rtcp/source/rtp_packet_to_send.h @@ -59,14 +59,14 @@ class RtpPacketToSend : public RtpPacket { void set_retransmitted_sequence_number(uint16_t sequence_number) { retransmitted_sequence_number_ = sequence_number; } - absl::optional retransmitted_sequence_number() { + absl::optional retransmitted_sequence_number() const { return retransmitted_sequence_number_; } void set_allow_retransmission(bool allow_retransmission) { allow_retransmission_ = allow_retransmission; } - bool allow_retransmission() { return allow_retransmission_; } + bool allow_retransmission() const { return allow_retransmission_; } // An application can attach arbitrary data to an RTP packet using // `additional_data`. The additional data does not affect WebRTC processing. diff --git a/modules/rtp_rtcp/source/rtp_packet_unittest.cc b/modules/rtp_rtcp/source/rtp_packet_unittest.cc index f7f21af41d..8c5df1a0ad 100644 --- a/modules/rtp_rtcp/source/rtp_packet_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_packet_unittest.cc @@ -354,6 +354,35 @@ TEST(RtpPacketTest, CreateWithMaxSizeHeaderExtension) { EXPECT_EQ(read, kValue); } +TEST(RtpPacketTest, SetsRegisteredExtension) { + RtpPacketToSend::ExtensionManager extensions; + extensions.Register(kTransmissionOffsetExtensionId); + RtpPacketToSend packet(&extensions); + + EXPECT_TRUE(packet.IsRegistered()); + EXPECT_FALSE(packet.HasExtension()); + + // Try to set the extensions. + EXPECT_TRUE(packet.SetExtension(kTimeOffset)); + + EXPECT_TRUE(packet.HasExtension()); + EXPECT_EQ(packet.GetExtension(), kTimeOffset); +} + +TEST(RtpPacketTest, FailsToSetUnregisteredExtension) { + RtpPacketToSend::ExtensionManager extensions; + extensions.Register(kTransmissionOffsetExtensionId); + RtpPacketToSend packet(&extensions); + + EXPECT_FALSE(packet.IsRegistered()); + EXPECT_FALSE(packet.HasExtension()); + + EXPECT_FALSE(packet.SetExtension(42)); + + EXPECT_FALSE(packet.HasExtension()); + EXPECT_EQ(packet.GetExtension(), absl::nullopt); +} + TEST(RtpPacketTest, SetReservedExtensionsAfterPayload) { const size_t kPayloadSize = 4; RtpPacketToSend::ExtensionManager extensions; @@ -475,6 +504,76 @@ TEST(RtpPacketTest, ParseWithExtension) { EXPECT_EQ(0u, packet.padding_size()); } +TEST(RtpPacketTest, ParseHeaderOnly) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0x80, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78}; + // clang-format on + + RtpPacket packet; + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + EXPECT_EQ(packet.PayloadType(), 0x62u); + EXPECT_EQ(packet.SequenceNumber(), 0x3579u); + EXPECT_EQ(packet.Timestamp(), 0x65431278u); + EXPECT_EQ(packet.Ssrc(), 0x12345678u); + + EXPECT_FALSE(packet.has_padding()); + EXPECT_EQ(packet.padding_size(), 0u); + EXPECT_EQ(packet.payload_size(), 0u); +} + +TEST(RtpPacketTest, ParseHeaderOnlyWithPadding) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0xa0, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78}; + // clang-format on + + RtpPacket packet; + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + + EXPECT_TRUE(packet.has_padding()); + EXPECT_EQ(packet.padding_size(), 0u); + EXPECT_EQ(packet.payload_size(), 0u); +} + +TEST(RtpPacketTest, 
ParseHeaderOnlyWithExtensionAndPadding) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0xb0, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78, + 0xbe, 0xde, 0x00, 0x01, + 0x11, 0x00, 0x00, 0x00}; + // clang-format on + + RtpHeaderExtensionMap extensions; + extensions.Register(1); + RtpPacket packet(&extensions); + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + EXPECT_TRUE(packet.has_padding()); + EXPECT_TRUE(packet.HasExtension()); + EXPECT_EQ(packet.padding_size(), 0u); +} + +TEST(RtpPacketTest, ParsePaddingOnlyPacket) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0xa0, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78, + 0, 0, 3}; + // clang-format on + + RtpPacket packet; + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + EXPECT_TRUE(packet.has_padding()); + EXPECT_EQ(packet.padding_size(), 3u); +} + TEST(RtpPacketTest, GetExtensionWithoutParametersReturnsOptionalValue) { RtpPacket::ExtensionManager extensions; extensions.Register(kTransmissionOffsetExtensionId); diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc new file mode 100644 index 0000000000..3d62bcef44 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h" + +#include + +#include +#include + +namespace webrtc { + +Av1Obu::Av1Obu(uint8_t obu_type) : header_(obu_type | kAv1ObuSizePresentBit) {} + +Av1Obu& Av1Obu::WithExtension(uint8_t extension) { + extension_ = extension; + header_ |= kAv1ObuExtensionPresentBit; + return *this; +} +Av1Obu& Av1Obu::WithoutSize() { + header_ &= ~kAv1ObuSizePresentBit; + return *this; +} +Av1Obu& Av1Obu::WithPayload(std::vector payload) { + payload_ = std::move(payload); + return *this; +} + +std::vector BuildAv1Frame(std::initializer_list obus) { + std::vector raw; + for (const Av1Obu& obu : obus) { + raw.push_back(obu.header_); + if (obu.header_ & kAv1ObuExtensionPresentBit) { + raw.push_back(obu.extension_); + } + if (obu.header_ & kAv1ObuSizePresentBit) { + // write size in leb128 format. + size_t payload_size = obu.payload_.size(); + while (payload_size >= 0x80) { + raw.push_back(0x80 | (payload_size & 0x7F)); + payload_size >>= 7; + } + raw.push_back(payload_size); + } + raw.insert(raw.end(), obu.payload_.begin(), obu.payload_.end()); + } + return raw; +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h new file mode 100644 index 0000000000..04a902fe56 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_ +#define MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_ + +#include + +#include +#include +#include + +namespace webrtc { +// All obu types offset by 3 to take correct position in the obu_header. +constexpr uint8_t kAv1ObuTypeSequenceHeader = 1 << 3; +constexpr uint8_t kAv1ObuTypeTemporalDelimiter = 2 << 3; +constexpr uint8_t kAv1ObuTypeFrameHeader = 3 << 3; +constexpr uint8_t kAv1ObuTypeTileGroup = 4 << 3; +constexpr uint8_t kAv1ObuTypeMetadata = 5 << 3; +constexpr uint8_t kAv1ObuTypeFrame = 6 << 3; +constexpr uint8_t kAv1ObuTypeTileList = 8 << 3; +constexpr uint8_t kAv1ObuExtensionPresentBit = 0b0'0000'100; +constexpr uint8_t kAv1ObuSizePresentBit = 0b0'0000'010; +constexpr uint8_t kAv1ObuExtensionS1T1 = 0b001'01'000; + +class Av1Obu { + public: + explicit Av1Obu(uint8_t obu_type); + + Av1Obu& WithExtension(uint8_t extension); + Av1Obu& WithoutSize(); + Av1Obu& WithPayload(std::vector payload); + + private: + friend std::vector BuildAv1Frame(std::initializer_list obus); + uint8_t header_; + uint8_t extension_ = 0; + std::vector payload_; +}; + +std::vector BuildAv1Frame(std::initializer_list obus); + +} // namespace webrtc +#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_ diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc b/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc index 84d2b35bc6..2151a59295 100644 --- a/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc @@ -21,6 +21,7 @@ #include "api/scoped_refptr.h" #include "api/video/encoded_image.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h" #include "test/gmock.h" #include "test/gtest.h" @@ -35,17 +36,6 @@ using ::testing::Le; using ::testing::SizeIs; constexpr uint8_t kNewCodedVideoSequenceBit = 0b00'00'1000; -// All obu types offset by 3 to take correct position in the obu_header. -constexpr uint8_t kObuTypeSequenceHeader = 1 << 3; -constexpr uint8_t kObuTypeTemporalDelimiter = 2 << 3; -constexpr uint8_t kObuTypeFrameHeader = 3 << 3; -constexpr uint8_t kObuTypeTileGroup = 4 << 3; -constexpr uint8_t kObuTypeMetadata = 5 << 3; -constexpr uint8_t kObuTypeFrame = 6 << 3; -constexpr uint8_t kObuTypeTileList = 8 << 3; -constexpr uint8_t kObuExtensionPresentBit = 0b0'0000'100; -constexpr uint8_t kObuSizePresentBit = 0b0'0000'010; -constexpr uint8_t kObuExtensionS1T1 = 0b001'01'000; // Wrapper around rtp_packet to make it look like container of payload bytes. 
struct RtpPayload { @@ -109,135 +99,90 @@ Av1Frame ReassembleFrame(rtc::ArrayView rtp_payloads) { return Av1Frame(VideoRtpDepacketizerAv1().AssembleFrame(payloads)); } -class Obu { - public: - explicit Obu(uint8_t obu_type) : header_(obu_type | kObuSizePresentBit) { - EXPECT_EQ(obu_type & 0b0'1111'000, obu_type); - } - - Obu& WithExtension(uint8_t extension) { - extension_ = extension; - header_ |= kObuExtensionPresentBit; - return *this; - } - Obu& WithoutSize() { - header_ &= ~kObuSizePresentBit; - return *this; - } - Obu& WithPayload(std::vector payload) { - payload_ = std::move(payload); - return *this; - } - - private: - friend std::vector BuildAv1Frame(std::initializer_list obus); - uint8_t header_; - uint8_t extension_ = 0; - std::vector payload_; -}; - -std::vector BuildAv1Frame(std::initializer_list obus) { - std::vector raw; - for (const Obu& obu : obus) { - raw.push_back(obu.header_); - if (obu.header_ & kObuExtensionPresentBit) { - raw.push_back(obu.extension_); - } - if (obu.header_ & kObuSizePresentBit) { - // write size in leb128 format. - size_t payload_size = obu.payload_.size(); - while (payload_size >= 0x80) { - raw.push_back(0x80 | (payload_size & 0x7F)); - payload_size >>= 7; - } - raw.push_back(payload_size); - } - raw.insert(raw.end(), obu.payload_.begin(), obu.payload_.end()); - } - return raw; -} - TEST(RtpPacketizerAv1Test, PacketizeOneObuWithoutSizeAndExtension) { - auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithoutSize().WithPayload({1, 2, 3, 4, 5, 6, 7})}); + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithoutSize() + .WithPayload({1, 2, 3, 4, 5, 6, 7})}); EXPECT_THAT(Packetize(kFrame, {}), ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame, 1, 2, 3, 4, 5, 6, 7))); + kAv1ObuTypeFrame, 1, 2, 3, 4, 5, 6, 7))); } TEST(RtpPacketizerAv1Test, PacketizeOneObuWithoutSizeWithExtension) { - auto kFrame = BuildAv1Frame({Obu(kObuTypeFrame) + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) .WithoutSize() - .WithExtension(kObuExtensionS1T1) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({2, 3, 4, 5, 6, 7})}); - EXPECT_THAT(Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame | kObuExtensionPresentBit, - kObuExtensionS1T1, 2, 3, 4, 5, 6, 7))); + EXPECT_THAT( + Packetize(kFrame, {}), + ElementsAre(ElementsAre(0b00'01'0000, // aggregation header + kAv1ObuTypeFrame | kAv1ObuExtensionPresentBit, + kAv1ObuExtensionS1T1, 2, 3, 4, 5, 6, 7))); } TEST(RtpPacketizerAv1Test, RemovesObuSizeFieldWithoutExtension) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17})}); + {Av1Obu(kAv1ObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17})}); EXPECT_THAT( Packetize(kFrame, {}), ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame, 11, 12, 13, 14, 15, 16, 17))); + kAv1ObuTypeFrame, 11, 12, 13, 14, 15, 16, 17))); } TEST(RtpPacketizerAv1Test, RemovesObuSizeFieldWithExtension) { - auto kFrame = BuildAv1Frame({Obu(kObuTypeFrame) - .WithExtension(kObuExtensionS1T1) + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({1, 2, 3, 4, 5, 6, 7})}); - EXPECT_THAT(Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame | kObuExtensionPresentBit, - kObuExtensionS1T1, 1, 2, 3, 4, 5, 6, 7))); + EXPECT_THAT( + Packetize(kFrame, {}), + ElementsAre(ElementsAre(0b00'01'0000, // aggregation header + kAv1ObuTypeFrame | kAv1ObuExtensionPresentBit, + 
kAv1ObuExtensionS1T1, 1, 2, 3, 4, 5, 6, 7))); } TEST(RtpPacketizerAv1Test, OmitsSizeForLastObuWhenThreeObusFitsIntoThePacket) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), - Obu(kObuTypeMetadata).WithPayload({11, 12, 13, 14}), - Obu(kObuTypeFrame).WithPayload({21, 22, 23, 24, 25, 26})}); - EXPECT_THAT( - Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'11'0000, // aggregation header - 7, kObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // - 5, kObuTypeMetadata, 11, 12, 13, 14, // - kObuTypeFrame, 21, 22, 23, 24, 25, 26))); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), + Av1Obu(kAv1ObuTypeMetadata).WithPayload({11, 12, 13, 14}), + Av1Obu(kAv1ObuTypeFrame).WithPayload({21, 22, 23, 24, 25, 26})}); + EXPECT_THAT(Packetize(kFrame, {}), + ElementsAre(ElementsAre( + 0b00'11'0000, // aggregation header + 7, kAv1ObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // + 5, kAv1ObuTypeMetadata, 11, 12, 13, 14, // + kAv1ObuTypeFrame, 21, 22, 23, 24, 25, 26))); } TEST(RtpPacketizerAv1Test, UseSizeForAllObusWhenFourObusFitsIntoThePacket) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), - Obu(kObuTypeMetadata).WithPayload({11, 12, 13, 14}), - Obu(kObuTypeFrameHeader).WithPayload({21, 22, 23}), - Obu(kObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); - EXPECT_THAT( - Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'00'0000, // aggregation header - 7, kObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // - 5, kObuTypeMetadata, 11, 12, 13, 14, // - 4, kObuTypeFrameHeader, 21, 22, 23, // - 7, kObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), + Av1Obu(kAv1ObuTypeMetadata).WithPayload({11, 12, 13, 14}), + Av1Obu(kAv1ObuTypeFrameHeader).WithPayload({21, 22, 23}), + Av1Obu(kAv1ObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); + EXPECT_THAT(Packetize(kFrame, {}), + ElementsAre(ElementsAre( + 0b00'00'0000, // aggregation header + 7, kAv1ObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // + 5, kAv1ObuTypeMetadata, 11, 12, 13, 14, // + 4, kAv1ObuTypeFrameHeader, 21, 22, 23, // + 7, kAv1ObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); } TEST(RtpPacketizerAv1Test, DiscardsTemporalDelimiterAndTileListObu) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeTemporalDelimiter), Obu(kObuTypeMetadata), - Obu(kObuTypeTileList).WithPayload({1, 2, 3, 4, 5, 6}), - Obu(kObuTypeFrameHeader).WithPayload({21, 22, 23}), - Obu(kObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); + {Av1Obu(kAv1ObuTypeTemporalDelimiter), Av1Obu(kAv1ObuTypeMetadata), + Av1Obu(kAv1ObuTypeTileList).WithPayload({1, 2, 3, 4, 5, 6}), + Av1Obu(kAv1ObuTypeFrameHeader).WithPayload({21, 22, 23}), + Av1Obu(kAv1ObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); EXPECT_THAT( Packetize(kFrame, {}), ElementsAre(ElementsAre(0b00'11'0000, // aggregation header 1, - kObuTypeMetadata, // - 4, kObuTypeFrameHeader, 21, 22, + kAv1ObuTypeMetadata, // + 4, kAv1ObuTypeFrameHeader, 21, 22, 23, // - kObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); + kAv1ObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); } TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) { @@ -246,17 +191,17 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) { const uint8_t kExpectPayload1[6] = { 0b01'10'0000, // aggregation_header 3, - kObuTypeFrameHeader | kObuExtensionPresentBit, - kObuExtensionS1T1, + kAv1ObuTypeFrameHeader | kAv1ObuExtensionPresentBit, + kAv1ObuExtensionS1T1, 21, // - 
kObuTypeTileGroup | kObuExtensionPresentBit}; + kAv1ObuTypeTileGroup | kAv1ObuExtensionPresentBit}; const uint8_t kExpectPayload2[6] = {0b10'01'0000, // aggregation_header - kObuExtensionS1T1, 11, 12, 13, 14}; - auto kFrame = BuildAv1Frame({Obu(kObuTypeFrameHeader) - .WithExtension(kObuExtensionS1T1) + kAv1ObuExtensionS1T1, 11, 12, 13, 14}; + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrameHeader) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({21}), - Obu(kObuTypeTileGroup) - .WithExtension(kObuExtensionS1T1) + Av1Obu(kAv1ObuTypeTileGroup) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({11, 12, 13, 14})}); RtpPacketizer::PayloadSizeLimits limits; @@ -269,7 +214,7 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) { TEST(RtpPacketizerAv1Test, SetsNbitAtTheFirstPacketOfAKeyFrameWithSequenceHeader) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 6; auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameKey); @@ -280,8 +225,8 @@ TEST(RtpPacketizerAv1Test, TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfAKeyFrameWithoutSequenceHeader) { - auto kFrame = - BuildAv1Frame({Obu(kObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7})}); + auto kFrame = BuildAv1Frame( + {Av1Obu(kAv1ObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 6; auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameKey); @@ -293,7 +238,7 @@ TEST(RtpPacketizerAv1Test, TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfADeltaFrame) { // Even when that delta frame starts with a (redundant) sequence header. auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 6; auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta); @@ -308,8 +253,9 @@ TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfADeltaFrame) { // RtpDepacketizer always inserts obu_size fields in the output, use frame where // each obu has obu_size fields for more streight forward validation. 
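// Aggregation-header cheat sheet for the binary literals in these
// expectations (per the AV1 RTP payload format): bit 7 = Z (first OBU element
// continues a fragment from the previous packet), bit 6 = Y (last OBU element
// continues in the next packet), bits 5-4 = W (number of OBU elements; 0
// means every element carries a length prefix), bit 3 = N (start of a new
// coded video sequence). So 0b01'10'0000 means Y=1, W=2, and 0b10'01'0000
// means Z=1, W=1. The obu_size / length fields use leb128: 7 payload bits per
// byte with the MSB set on every byte except the last (e.g. 300 encodes as
// 0xAC 0x02), which is what BuildAv1Frame() in the new test helper writes.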
TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPackets) { - auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); + auto kFrame = + BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 8; @@ -322,7 +268,7 @@ TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPackets) { TEST(RtpPacketizerAv1Test, SplitSingleObuIntoManyPackets) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload(std::vector(1200, 27))}); + {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector(1200, 27))}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 100; @@ -336,7 +282,7 @@ TEST(RtpPacketizerAv1Test, SplitSingleObuIntoManyPackets) { TEST(RtpPacketizerAv1Test, SetMarkerBitForLastPacketInEndOfPictureFrame) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload(std::vector(200, 27))}); + {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector(200, 27))}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 100; @@ -350,7 +296,7 @@ TEST(RtpPacketizerAv1Test, SetMarkerBitForLastPacketInEndOfPictureFrame) { TEST(RtpPacketizerAv1Test, DoesntSetMarkerBitForPacketsNotInEndOfPictureFrame) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload(std::vector(200, 27))}); + {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector(200, 27))}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 100; @@ -366,8 +312,8 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPackets) { // 2nd OBU is too large to fit into one packet, so its head would be in the // same packet as the 1st OBU. auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({11, 12}), - Obu(kObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7, 8, 9})}); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({11, 12}), + Av1Obu(kAv1ObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7, 8, 9})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 8; @@ -380,8 +326,9 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPackets) { TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPacketsBecauseOfSinglePacketLimit) { - auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); + auto kFrame = + BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 10; limits.single_packet_reduction_len = 8; diff --git a/modules/rtp_rtcp/source/rtp_rtcp_config.h b/modules/rtp_rtcp/source/rtp_rtcp_config.h index 6863c4c353..66caadd578 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_config.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_config.h @@ -11,13 +11,15 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_ #define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_ +#include "api/units/time_delta.h" + // Configuration file for RTP utilities (RTPSender, RTPReceiver ...) namespace webrtc { -enum { kDefaultMaxReorderingThreshold = 50 }; // In sequence numbers. -enum { kRtcpMaxNackFields = 253 }; +constexpr int kDefaultMaxReorderingThreshold = 5; // In sequence numbers. 
+constexpr int kRtcpMaxNackFields = 253; -enum { RTCP_SEND_BEFORE_KEY_FRAME_MS = 100 }; -enum { RTCP_MAX_REPORT_BLOCKS = 31 }; // RFC 3550 page 37 +constexpr TimeDelta RTCP_SEND_BEFORE_KEY_FRAME = TimeDelta::Millis(100); +constexpr int RTCP_MAX_REPORT_BLOCKS = 31; // RFC 3550 page 37 } // namespace webrtc #endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_ diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc index 5a79f55d33..3f985e213a 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc @@ -21,7 +21,9 @@ #include "api/transport/field_trial_based_config.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" +#include "modules/rtp_rtcp/source/rtcp_sender.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "system_wrappers/include/ntp_time.h" @@ -58,7 +60,8 @@ std::unique_ptr RtpRtcp::DEPRECATED_Create( } ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration) - : rtcp_sender_(configuration), + : rtcp_sender_( + RTCPSender::Configuration::FromRtpRtcpConfiguration(configuration)), rtcp_receiver_(configuration, this), clock_(configuration.clock), last_bitrate_process_time_(clock_->TimeInMilliseconds()), @@ -378,7 +381,16 @@ bool ModuleRtpRtcpImpl::OnSendingRtpFrame(uint32_t timestamp, if (!Sending()) return false; - rtcp_sender_.SetLastRtpTime(timestamp, capture_time_ms, payload_type); + // TODO(bugs.webrtc.org/12873): Migrate this method and it's users to use + // optional Timestamps. + absl::optional capture_time; + if (capture_time_ms > 0) { + capture_time = Timestamp::Millis(capture_time_ms); + } + absl::optional payload_type_optional; + if (payload_type >= 0) + payload_type_optional = payload_type; + rtcp_sender_.SetLastRtpTime(timestamp, capture_time, payload_type_optional); // Make sure an RTCP report isn't queued behind a key frame. if (rtcp_sender_.TimeToSendRTCPReport(force_sender_report)) rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); @@ -683,6 +695,11 @@ void ModuleRtpRtcpImpl::SetRemoteSSRC(const uint32_t ssrc) { rtcp_receiver_.SetRemoteSSRC(ssrc); } +void ModuleRtpRtcpImpl::SetLocalSsrc(uint32_t local_ssrc) { + rtcp_receiver_.set_local_media_ssrc(local_ssrc); + rtcp_sender_.SetSsrc(local_ssrc); +} + RtpSendRates ModuleRtpRtcpImpl::GetSendRates() const { return rtp_sender_->packet_sender.GetSendRates(); } diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h index 5bcabc57b1..b0e0b41c48 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h @@ -63,6 +63,7 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { size_t incoming_packet_length) override; void SetRemoteSSRC(uint32_t ssrc) override; + void SetLocalSsrc(uint32_t ssrc) override; // Sender part. 
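A note on the OnSendingRtpFrame() change above (here in rtp_rtcp_impl.cc, and repeated in rtp_rtcp_impl2.cc further down): the legacy int arguments encode "unset" as a non-positive capture time and a negative payload type, and those sentinels are now translated to optionals before reaching RTCPSender::SetLastRtpTime(). A standalone sketch of that mapping, using std::optional in place of absl::optional and hypothetical helper names:

    #include <cstdint>
    #include <optional>

    // Legacy callers pass capture_time_ms <= 0 for "unknown" and a negative
    // payload type for "unset"; the RTCP sender now takes optionals instead.
    std::optional<int64_t> OptionalCaptureTimeMs(int64_t capture_time_ms) {
      if (capture_time_ms > 0) return capture_time_ms;
      return std::nullopt;
    }

    std::optional<int> OptionalPayloadType(int payload_type) {
      if (payload_type >= 0) return payload_type;
      return std::nullopt;
    }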
void RegisterSendPayloadFrequency(int payload_type, diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc index e526bac659..7fae1e3bd0 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc @@ -19,11 +19,17 @@ #include #include +#include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/transport/field_trial_based_config.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/time_utils.h" #include "system_wrappers/include/ntp_time.h" #ifdef _WIN32 @@ -33,10 +39,25 @@ namespace webrtc { namespace { -const int64_t kRtpRtcpMaxIdleTimeProcessMs = 5; const int64_t kDefaultExpectedRetransmissionTimeMs = 125; constexpr TimeDelta kRttUpdateInterval = TimeDelta::Millis(1000); + +RTCPSender::Configuration AddRtcpSendEvaluationCallback( + RTCPSender::Configuration config, + std::function send_evaluation_callback) { + config.schedule_next_rtcp_send_evaluation_function = + std::move(send_evaluation_callback); + return config; +} + +int DelayMillisForDuration(TimeDelta duration) { + // TimeDelta::ms() rounds downwards sometimes which leads to too little time + // slept. Account for this, unless |duration| is exactly representable in + // millisecs. + return (duration.us() + rtc::kNumMillisecsPerSec - 1) / + rtc::kNumMicrosecsPerMillisec; +} } // namespace ModuleRtpRtcpImpl2::RtpSenderContext::RtpSenderContext( @@ -55,12 +76,13 @@ void ModuleRtpRtcpImpl2::RtpSenderContext::AssignSequenceNumber( ModuleRtpRtcpImpl2::ModuleRtpRtcpImpl2(const Configuration& configuration) : worker_queue_(TaskQueueBase::Current()), - rtcp_sender_(configuration), + rtcp_sender_(AddRtcpSendEvaluationCallback( + RTCPSender::Configuration::FromRtpRtcpConfiguration(configuration), + [this](TimeDelta duration) { + ScheduleRtcpSendEvaluation(duration); + })), rtcp_receiver_(configuration, this), clock_(configuration.clock), - last_rtt_process_time_(clock_->TimeInMilliseconds()), - next_process_time_(clock_->TimeInMilliseconds() + - kRtpRtcpMaxIdleTimeProcessMs), packet_overhead_(28), // IPV4 UDP. nack_last_time_sent_full_ms_(0), nack_last_seq_number_sent_(0), @@ -68,7 +90,7 @@ ModuleRtpRtcpImpl2::ModuleRtpRtcpImpl2(const Configuration& configuration) rtt_stats_(configuration.rtt_stats), rtt_ms_(0) { RTC_DCHECK(worker_queue_); - process_thread_checker_.Detach(); + packet_sequence_checker_.Detach(); if (!configuration.receiver_only) { rtp_sender_ = std::make_unique(configuration); // Make sure rtcp sender use same timestamp offset as rtp sender. @@ -104,44 +126,6 @@ std::unique_ptr ModuleRtpRtcpImpl2::Create( return std::make_unique(configuration); } -// Returns the number of milliseconds until the module want a worker thread -// to call Process. -int64_t ModuleRtpRtcpImpl2::TimeUntilNextProcess() { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - return std::max(0, - next_process_time_ - clock_->TimeInMilliseconds()); -} - -// Process any pending tasks such as timeouts (non time critical events). -void ModuleRtpRtcpImpl2::Process() { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - - const Timestamp now = clock_->CurrentTime(); - - // TODO(bugs.webrtc.org/11581): Figure out why we need to call Process() 200 - // times a second. 
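About the DelayMillisForDuration() helper added earlier in this file: PostDelayedTask() takes whole milliseconds, so the conversion rounds up rather than truncating, otherwise the task could wake before the requested duration has elapsed. (The numerator adds rtc::kNumMillisecsPerSec - 1, which happens to equal rtc::kNumMicrosecsPerMillisec - 1 = 999, so the arithmetic is the ceiling division sketched below.) A plain-integer sketch, not part of the patch:

    #include <cstdint>

    constexpr int64_t kMicrosPerMilli = 1000;

    // Ceiling division: never schedule less delay than was requested.
    constexpr int DelayMillisRoundedUp(int64_t duration_us) {
      return static_cast<int>((duration_us + kMicrosPerMilli - 1) /
                              kMicrosPerMilli);
    }

    static_assert(DelayMillisRoundedUp(1999) == 2, "rounds up");
    static_assert(DelayMillisRoundedUp(2000) == 2, "exact values unchanged");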
- next_process_time_ = now.ms() + kRtpRtcpMaxIdleTimeProcessMs; - - // TODO(bugs.webrtc.org/11581): once we don't use Process() to trigger - // calls to SendRTCP(), the only remaining timer will require remote_bitrate_ - // to be not null. In that case, we can disable the timer when it is null. - if (remote_bitrate_ && rtcp_sender_.Sending() && rtcp_sender_.TMMBR()) { - unsigned int target_bitrate = 0; - std::vector ssrcs; - if (remote_bitrate_->LatestEstimate(&ssrcs, &target_bitrate)) { - if (!ssrcs.empty()) { - target_bitrate = target_bitrate / ssrcs.size(); - } - rtcp_sender_.SetTargetBitrate(target_bitrate); - } - } - - // TODO(bugs.webrtc.org/11581): Run this on a separate set of delayed tasks - // based off of next_time_to_send_rtcp_ in RTCPSender. - if (rtcp_sender_.TimeToSendRTCPReport()) - rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); -} - void ModuleRtpRtcpImpl2::SetRtxSendStatus(int mode) { rtp_sender_->packet_generator.SetRtxStatus(mode); } @@ -169,6 +153,7 @@ absl::optional ModuleRtpRtcpImpl2::FlexfecSsrc() const { void ModuleRtpRtcpImpl2::IncomingRtcpPacket(const uint8_t* rtcp_packet, const size_t length) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); rtcp_receiver_.IncomingPacket(rtcp_packet, length); } @@ -219,6 +204,12 @@ RtpState ModuleRtpRtcpImpl2::GetRtxState() const { return rtp_sender_->packet_generator.GetRtxRtpState(); } +uint32_t ModuleRtpRtcpImpl2::local_media_ssrc() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK_EQ(rtcp_receiver_.local_media_ssrc(), rtcp_sender_.SSRC()); + return rtcp_receiver_.local_media_ssrc(); +} + void ModuleRtpRtcpImpl2::SetRid(const std::string& rid) { if (rtp_sender_) { rtp_sender_->packet_generator.SetRid(rid); @@ -328,7 +319,16 @@ bool ModuleRtpRtcpImpl2::OnSendingRtpFrame(uint32_t timestamp, if (!Sending()) return false; - rtcp_sender_.SetLastRtpTime(timestamp, capture_time_ms, payload_type); + // TODO(bugs.webrtc.org/12873): Migrate this method and it's users to use + // optional Timestamps. + absl::optional capture_time; + if (capture_time_ms > 0) { + capture_time = Timestamp::Millis(capture_time_ms); + } + absl::optional payload_type_optional; + if (payload_type >= 0) + payload_type_optional = payload_type; + rtcp_sender_.SetLastRtpTime(timestamp, capture_time, payload_type_optional); // Make sure an RTCP report isn't queued behind a key frame. if (rtcp_sender_.TimeToSendRTCPReport(force_sender_report)) rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); @@ -650,8 +650,15 @@ void ModuleRtpRtcpImpl2::SetRemoteSSRC(const uint32_t ssrc) { rtcp_receiver_.SetRemoteSSRC(ssrc); } +void ModuleRtpRtcpImpl2::SetLocalSsrc(uint32_t local_ssrc) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtcp_receiver_.set_local_media_ssrc(local_ssrc); + rtcp_sender_.SetSsrc(local_ssrc); +} + RtpSendRates ModuleRtpRtcpImpl2::GetSendRates() const { - RTC_DCHECK_RUN_ON(worker_queue_); + // Typically called on the `rtp_transport_queue_` owned by an + // RtpTransportControllerSendInterface instance. return rtp_sender_->packet_sender.GetSendRates(); } @@ -735,13 +742,62 @@ void ModuleRtpRtcpImpl2::PeriodicUpdate() { rtt_stats_->OnRttUpdate(rtt->ms()); set_rtt_ms(rtt->ms()); } +} + +// RTC_RUN_ON(worker_queue_); +void ModuleRtpRtcpImpl2::MaybeSendRtcp() { + if (rtcp_sender_.TimeToSendRTCPReport()) + rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); +} + +// TODO(bugs.webrtc.org/12889): Consider removing this function when the issue +// is resolved. 
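The method defined just below, MaybeSendRtcpAtOrAfterTimestamp(), covers the case this TODO refers to: a delayed task can fire slightly before the requested instant, and when it does the evaluation is re-posted for the remainder instead of sending RTCP early. A standalone sketch of that decision, using std::chrono in place of webrtc::Timestamp/TimeDelta (illustrative names, not part of the patch):

    #include <chrono>
    #include <optional>

    using Clock = std::chrono::steady_clock;

    // Returns nullopt when the RTCP evaluation is due and may run now, or the
    // delay with which the caller should re-post itself when the task queue
    // woke us up too early.
    std::optional<Clock::duration> RemainingRtcpDelay(
        Clock::time_point execution_time, Clock::time_point now) {
      if (now >= execution_time) return std::nullopt;
      return execution_time - now;
    }

The caller either runs MaybeSendRtcp() immediately or posts another delayed task with the returned remainder, which is the loop the TODO above hopes to remove once the early-wakeup issue is resolved.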
+// RTC_RUN_ON(worker_queue_); +void ModuleRtpRtcpImpl2::MaybeSendRtcpAtOrAfterTimestamp( + Timestamp execution_time) { + Timestamp now = clock_->CurrentTime(); + if (now >= execution_time) { + MaybeSendRtcp(); + return; + } + + RTC_DLOG(LS_WARNING) + << "BUGBUG: Task queue scheduled delayed call too early."; + + ScheduleMaybeSendRtcpAtOrAfterTimestamp(execution_time, execution_time - now); +} + +void ModuleRtpRtcpImpl2::ScheduleRtcpSendEvaluation(TimeDelta duration) { + // We end up here under various sequences including the worker queue, and + // the RTCPSender lock is held. + // We're assuming that the fact that RTCPSender executes under other sequences + // than the worker queue on which it's created on implies that external + // synchronization is present and removes this activity before destruction. + if (duration.IsZero()) { + worker_queue_->PostTask(ToQueuedTask(task_safety_, [this] { + RTC_DCHECK_RUN_ON(worker_queue_); + MaybeSendRtcp(); + })); + } else { + Timestamp execution_time = clock_->CurrentTime() + duration; + ScheduleMaybeSendRtcpAtOrAfterTimestamp(execution_time, duration); + } +} - // kTmmbrTimeoutIntervalMs is 25 seconds, so an order of seconds. - // Instead of this polling approach, consider having an optional timer in the - // RTCPReceiver class that is started/stopped based on the state of - // rtcp_sender_.TMMBR(). - if (rtcp_sender_.TMMBR() && rtcp_receiver_.UpdateTmmbrTimers()) - rtcp_receiver_.NotifyTmmbrUpdated(); +void ModuleRtpRtcpImpl2::ScheduleMaybeSendRtcpAtOrAfterTimestamp( + Timestamp execution_time, + TimeDelta duration) { + // We end up here under various sequences including the worker queue, and + // the RTCPSender lock is held. + // See note in ScheduleRtcpSendEvaluation about why |worker_queue_| can be + // accessed. 
+ worker_queue_->PostDelayedTask( + ToQueuedTask(task_safety_, + [this, execution_time] { + RTC_DCHECK_RUN_ON(worker_queue_); + MaybeSendRtcpAtOrAfterTimestamp(execution_time); + }), + DelayMillisForDuration(duration)); } } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2.h b/modules/rtp_rtcp/source/rtp_rtcp_impl2.h index 00f6ff161d..0ad495593d 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl2.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2.h @@ -23,6 +23,7 @@ #include "api/rtp_headers.h" #include "api/sequence_checker.h" #include "api/task_queue/task_queue_base.h" +#include "api/units/time_delta.h" #include "api/video/video_bitrate_allocation.h" #include "modules/include/module_fec_types.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" @@ -32,7 +33,6 @@ #include "modules/rtp_rtcp/source/rtcp_sender.h" #include "modules/rtp_rtcp/source/rtp_packet_history.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" -#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/rtp_sender.h" #include "modules/rtp_rtcp/source/rtp_sender_egress.h" #include "rtc_base/gtest_prod_util.h" @@ -40,6 +40,8 @@ #include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/task_utils/repeating_task.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -48,7 +50,6 @@ struct PacedPacketInfo; struct RTPVideoHeader; class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, - public Module, public RTCPReceiver::ModuleRtpRtcp { public: explicit ModuleRtpRtcpImpl2( @@ -62,13 +63,6 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, static std::unique_ptr Create( const Configuration& configuration); - // Returns the number of milliseconds until the module want a worker thread to - // call Process. - int64_t TimeUntilNextProcess() override; - - // Process any pending tasks such as timeouts. - void Process() override; - // Receiver part. // Called when we receive an RTCP packet. @@ -77,6 +71,8 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, void SetRemoteSSRC(uint32_t ssrc) override; + void SetLocalSsrc(uint32_t local_ssrc) override; + // Sender part. void RegisterSendPayloadFrequency(int payload_type, int payload_frequency) override; @@ -110,6 +106,11 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, uint32_t SSRC() const override { return rtcp_sender_.SSRC(); } + // Semantically identical to `SSRC()` but must be called on the packet + // delivery thread/tq and returns the ssrc that maps to + // RtpRtcpInterface::Configuration::local_media_ssrc. + uint32_t local_media_ssrc() const; + void SetRid(const std::string& rid) override; void SetMid(const std::string& mid) override; @@ -193,7 +194,8 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, int64_t ExpectedRetransmissionTimeMs() const override; // Force a send of an RTCP packet. - // Normal SR and RR are triggered via the process function. + // Normal SR and RR are triggered via the task queue that's current when this + // object is created. int32_t SendRTCP(RTCPPacketType rtcpPacketType) override; void GetSendStreamDataCounters( @@ -282,18 +284,32 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, // Returns true if the module is configured to store packets. bool StorePackets() const; + // Used from RtcpSenderMediator to maybe send rtcp. 
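On the ToQueuedTask(task_safety_, ...) wrapping in the PostDelayedTask() call above: the safety flag turns a lambda posted to the worker queue into a no-op if the module is destroyed before the task runs. A rough standalone analogy using std::weak_ptr (illustrative only; the real mechanism is ScopedTaskSafety/PendingTaskSafetyFlag):

    #include <functional>
    #include <memory>

    // `alive` plays the role of the safety flag: the wrapped closure only runs
    // if the owner that created it still exists when the task finally executes.
    std::function<void()> GuardedTask(std::weak_ptr<void> alive,
                                      std::function<void()> task) {
      return [alive, task = std::move(task)] {
        if (alive.lock()) task();  // If the owner is gone, drop the task.
      };
    }

    // Usage sketch:
    //   auto alive = std::make_shared<int>(0);     // owned by the module
    //   PostDelayed(GuardedTask(alive, [] { /* MaybeSendRtcp() */ }), delay);
    //   alive.reset();  // pending-but-not-yet-run tasks now do nothing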
+ void MaybeSendRtcp() RTC_RUN_ON(worker_queue_); + + // Called when |rtcp_sender_| informs of the next RTCP instant. The method may + // be called on various sequences, and is called under a RTCPSenderLock. + void ScheduleRtcpSendEvaluation(TimeDelta duration); + + // Helper method combating too early delayed calls from task queues. + // TODO(bugs.webrtc.org/12889): Consider removing this function when the issue + // is resolved. + void MaybeSendRtcpAtOrAfterTimestamp(Timestamp execution_time) + RTC_RUN_ON(worker_queue_); + + // Schedules a call to MaybeSendRtcpAtOrAfterTimestamp delayed by |duration|. + void ScheduleMaybeSendRtcpAtOrAfterTimestamp(Timestamp execution_time, + TimeDelta duration); + TaskQueueBase* const worker_queue_; - RTC_NO_UNIQUE_ADDRESS SequenceChecker process_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; std::unique_ptr rtp_sender_; - RTCPSender rtcp_sender_; RTCPReceiver rtcp_receiver_; Clock* const clock_; - int64_t last_rtt_process_time_; - int64_t next_process_time_; uint16_t packet_overhead_; // Send side @@ -308,6 +324,8 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, // The processed RTT from RtcpRttStats. mutable Mutex mutex_rtt_; int64_t rtt_ms_ RTC_GUARDED_BY(mutex_rtt_); + + RTC_NO_UNIQUE_ADDRESS ScopedTaskSafety task_safety_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc index d38a7ffe18..c8ab15de78 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc @@ -10,18 +10,22 @@ #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include #include #include #include +#include #include "absl/types/optional.h" #include "api/transport/field_trial_based_config.h" +#include "api/units/time_delta.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/nack.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_sender_video.h" +#include "rtc_base/logging.h" #include "rtc_base/rate_limiter.h" #include "rtc_base/strings/string_builder.h" #include "test/gmock.h" @@ -42,16 +46,17 @@ using ::testing::SizeIs; namespace webrtc { namespace { -const uint32_t kSenderSsrc = 0x12345; -const uint32_t kReceiverSsrc = 0x23456; -const int64_t kOneWayNetworkDelayMs = 100; -const uint8_t kBaseLayerTid = 0; -const uint8_t kHigherLayerTid = 1; -const uint16_t kSequenceNumber = 100; -const uint8_t kPayloadType = 100; -const int kWidth = 320; -const int kHeight = 100; -const int kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock. +constexpr uint32_t kSenderSsrc = 0x12345; +constexpr uint32_t kReceiverSsrc = 0x23456; +constexpr TimeDelta kOneWayNetworkDelay = TimeDelta::Millis(100); +constexpr uint8_t kBaseLayerTid = 0; +constexpr uint8_t kHigherLayerTid = 1; +constexpr uint16_t kSequenceNumber = 100; +constexpr uint8_t kPayloadType = 100; +constexpr int kWidth = 320; +constexpr int kHeight = 100; +constexpr int kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock. +constexpr TimeDelta kDefaultReportInterval = TimeDelta::Millis(1000); // RTP header extension ids. 
enum : int { @@ -70,21 +75,25 @@ class RtcpRttStatsTestImpl : public RtcpRttStats { int64_t rtt_ms_; }; -class SendTransport : public Transport { +// TODO(bugs.webrtc.org/11581): remove inheritance once the ModuleRtpRtcpImpl2 +// Module/ProcessThread dependency is gone. +class SendTransport : public Transport, + public sim_time_impl::SimulatedSequenceRunner { public: - SendTransport() + SendTransport(TimeDelta delay, GlobalSimulatedTimeController* time_controller) : receiver_(nullptr), - time_controller_(nullptr), - delay_ms_(0), + time_controller_(time_controller), + delay_(delay), rtp_packets_sent_(0), rtcp_packets_sent_(0), - last_packet_(&header_extensions_) {} + last_packet_(&header_extensions_) { + time_controller_->Register(this); + } + + ~SendTransport() { time_controller_->Unregister(this); } void SetRtpRtcpModule(ModuleRtpRtcpImpl2* receiver) { receiver_ = receiver; } - void SimulateNetworkDelay(int64_t delay_ms, TimeController* time_controller) { - time_controller_ = time_controller; - delay_ms_ = delay_ms; - } + void SimulateNetworkDelay(TimeDelta delay) { delay_ = delay; } bool SendRtp(const uint8_t* data, size_t len, const PacketOptions& options) override { @@ -96,26 +105,49 @@ class SendTransport : public Transport { test::RtcpPacketParser parser; parser.Parse(data, len); last_nack_list_ = parser.nack()->packet_ids(); - - if (time_controller_) { - time_controller_->AdvanceTime(TimeDelta::Millis(delay_ms_)); - } - EXPECT_TRUE(receiver_); - receiver_->IncomingRtcpPacket(data, len); + Timestamp current_time = time_controller_->GetClock()->CurrentTime(); + Timestamp delivery_time = current_time + delay_; + rtcp_packets_.push_back( + Packet{delivery_time, std::vector(data, data + len)}); ++rtcp_packets_sent_; + RunReady(current_time); return true; } + // sim_time_impl::SimulatedSequenceRunner + Timestamp GetNextRunTime() const override { + if (!rtcp_packets_.empty()) + return rtcp_packets_.front().send_time; + return Timestamp::PlusInfinity(); + } + void RunReady(Timestamp at_time) override { + while (!rtcp_packets_.empty() && + rtcp_packets_.front().send_time <= at_time) { + Packet packet = std::move(rtcp_packets_.front()); + rtcp_packets_.pop_front(); + EXPECT_TRUE(receiver_); + receiver_->IncomingRtcpPacket(packet.data.data(), packet.data.size()); + } + } + TaskQueueBase* GetAsTaskQueue() override { + return reinterpret_cast(this); + } + size_t NumRtcpSent() { return rtcp_packets_sent_; } ModuleRtpRtcpImpl2* receiver_; - TimeController* time_controller_; - int64_t delay_ms_; + GlobalSimulatedTimeController* const time_controller_; + TimeDelta delay_; int rtp_packets_sent_; size_t rtcp_packets_sent_; std::vector last_nack_list_; RtpHeaderExtensionMap header_extensions_; RtpPacketReceived last_packet_; + struct Packet { + Timestamp send_time; + std::vector data; + }; + std::deque rtcp_packets_; }; struct TestConfig { @@ -166,18 +198,19 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver, uint32_t ssrc; }; - RtpRtcpModule(TimeController* time_controller, + RtpRtcpModule(GlobalSimulatedTimeController* time_controller, bool is_sender, const FieldTrialConfig& trials) - : is_sender_(is_sender), + : time_controller_(time_controller), + is_sender_(is_sender), trials_(trials), receive_statistics_( ReceiveStatistics::Create(time_controller->GetClock())), - time_controller_(time_controller) { + transport_(kOneWayNetworkDelay, time_controller) { CreateModuleImpl(); - transport_.SimulateNetworkDelay(kOneWayNetworkDelayMs, time_controller); } + TimeController* const 
time_controller_; const bool is_sender_; const FieldTrialConfig& trials_; RtcpPacketTypeCounter packets_sent_; @@ -186,7 +219,6 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver, SendTransport transport_; RtcpRttStatsTestImpl rtt_stats_; std::unique_ptr impl_; - int rtcp_report_interval_ms_ = 0; void RtcpPacketTypesCounterUpdated( uint32_t ssrc, @@ -218,8 +250,8 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver, std::vector LastNackListSent() { return transport_.last_nack_list_; } - void SetRtcpReportIntervalAndReset(int rtcp_report_interval_ms) { - rtcp_report_interval_ms_ = rtcp_report_interval_ms; + void SetRtcpReportIntervalAndReset(TimeDelta rtcp_report_interval) { + rtcp_report_interval_ = rtcp_report_interval; CreateModuleImpl(); } const RtpPacketReceived& last_packet() { return transport_.last_packet_; } @@ -228,6 +260,10 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver, transport_.header_extensions_.RegisterByUri(id, uri); transport_.last_packet_.IdentifyExtensions(transport_.header_extensions_); } + void ReinintWithFec(VideoFecGenerator* fec_generator) { + fec_generator_ = fec_generator; + CreateModuleImpl(); + } private: void CreateModuleImpl() { @@ -238,21 +274,22 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver, config.receive_statistics = receive_statistics_.get(); config.rtcp_packet_type_counter_observer = this; config.rtt_stats = &rtt_stats_; - config.rtcp_report_interval_ms = rtcp_report_interval_ms_; + config.rtcp_report_interval_ms = rtcp_report_interval_.ms(); config.local_media_ssrc = is_sender_ ? kSenderSsrc : kReceiverSsrc; config.need_rtp_packet_infos = true; config.non_sender_rtt_measurement = true; config.field_trials = &trials_; config.send_packet_observer = this; - + config.fec_generator = fec_generator_; impl_.reset(new ModuleRtpRtcpImpl2(config)); impl_->SetRemoteSSRC(is_sender_ ? 
kReceiverSsrc : kSenderSsrc); impl_->SetRTCPStatus(RtcpMode::kCompound); } - TimeController* const time_controller_; std::map counter_map_; absl::optional last_sent_packet_; + VideoFecGenerator* fec_generator_ = nullptr; + TimeDelta rtcp_report_interval_ = kDefaultReportInterval; }; } // namespace @@ -289,8 +326,27 @@ class RtpRtcpImpl2Test : public ::testing::TestWithParam { receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get()); } - void AdvanceTimeMs(int64_t milliseconds) { - time_controller_.AdvanceTime(TimeDelta::Millis(milliseconds)); + void AdvanceTime(TimeDelta duration) { + time_controller_.AdvanceTime(duration); + } + + void ReinitWithFec(VideoFecGenerator* fec_generator, + absl::optional red_payload_type) { + sender_.ReinintWithFec(fec_generator); + EXPECT_EQ(0, sender_.impl_->SetSendingStatus(true)); + sender_.impl_->SetSendingMediaStatus(true); + sender_.impl_->SetSequenceNumber(kSequenceNumber); + sender_.impl_->SetStorePacketsStatus(true, 100); + receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get()); + + RTPSenderVideo::Config video_config; + video_config.clock = time_controller_.GetClock(); + video_config.rtp_sender = sender_.impl_->RtpSender(); + video_config.field_trials = &field_trials_; + video_config.fec_overhead_bytes = fec_generator->MaxPacketOverhead(); + video_config.fec_type = fec_generator->GetFecType(); + video_config.red_payload_type = red_payload_type; + sender_video_ = std::make_unique(video_config); } GlobalSimulatedTimeController time_controller_; @@ -364,7 +420,7 @@ TEST_P(RtpRtcpImpl2Test, RetransmitsAllLayers) { EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber()); // Min required delay until retransmit = 5 + RTT ms (RTT = 0). - AdvanceTimeMs(5); + AdvanceTime(TimeDelta::Millis(5)); // Frame with kBaseLayerTid re-sent. IncomingRtcpNack(&sender_, kSequenceNumber); @@ -392,10 +448,11 @@ TEST_P(RtpRtcpImpl2Test, Rtt) { EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Sender module should send an SR. EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport)); + AdvanceTime(kOneWayNetworkDelay); // Receiver module should send a RR with a response to the last received SR. - AdvanceTimeMs(1000); EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport)); + AdvanceTime(kOneWayNetworkDelay); // Verify RTT. int64_t rtt; @@ -404,10 +461,10 @@ TEST_P(RtpRtcpImpl2Test, Rtt) { int64_t max_rtt; EXPECT_EQ( 0, sender_.impl_->RTT(kReceiverSsrc, &rtt, &avg_rtt, &min_rtt, &max_rtt)); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, rtt, 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, avg_rtt, 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, min_rtt, 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, max_rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), avg_rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), min_rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), max_rtt, 1); // No RTT from other ssrc. EXPECT_EQ(-1, sender_.impl_->RTT(kReceiverSsrc + 1, &rtt, &avg_rtt, &min_rtt, @@ -416,11 +473,11 @@ TEST_P(RtpRtcpImpl2Test, Rtt) { // Verify RTT from rtt_stats config. 
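The 2 * kOneWayNetworkDelay expectations in this test follow from how RTCP derives round-trip time from a returned report block (RFC 3550, Section 6.4.1): RTT is the report's arrival time minus the echoed last-SR timestamp (LSR) minus the receiver's delay since that SR (DLSR), which with a symmetric simulated link is twice the one-way delay. A plain-milliseconds sketch, not part of the patch (the real code works with compact NTP timestamps):

    #include <cstdint>

    // All values in milliseconds on the sender's clock / as reported by the peer.
    int64_t RoundTripTimeMs(int64_t report_arrival_ms,
                            int64_t last_sr_sent_ms,
                            int64_t delay_since_last_sr_ms) {
      return report_arrival_ms - last_sr_sent_ms - delay_since_last_sr_ms;
    }

    // With a 100 ms one-way delay and an immediate reply:
    //   RoundTripTimeMs(/*arrival=*/200, /*lsr=*/0, /*dlsr=*/0) == 200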
EXPECT_EQ(0, sender_.rtt_stats_.LastProcessedRtt()); EXPECT_EQ(0, sender_.impl_->rtt_ms()); - AdvanceTimeMs(1000); + AdvanceTime(TimeDelta::Millis(1000)); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.rtt_stats_.LastProcessedRtt(), - 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.impl_->rtt_ms(), 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), + sender_.rtt_stats_.LastProcessedRtt(), 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), sender_.impl_->rtt_ms(), 1); } TEST_P(RtpRtcpImpl2Test, RttForReceiverOnly) { @@ -428,7 +485,7 @@ TEST_P(RtpRtcpImpl2Test, RttForReceiverOnly) { EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport)); // Sender module should send a response to the last received RTRR (DLRR). - AdvanceTimeMs(1000); + AdvanceTime(TimeDelta::Millis(1000)); // Send Frame before sending a SR. EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport)); @@ -436,29 +493,26 @@ TEST_P(RtpRtcpImpl2Test, RttForReceiverOnly) { // Verify RTT. EXPECT_EQ(0, receiver_.rtt_stats_.LastProcessedRtt()); EXPECT_EQ(0, receiver_.impl_->rtt_ms()); - AdvanceTimeMs(1000); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, + AdvanceTime(TimeDelta::Millis(1000)); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), receiver_.rtt_stats_.LastProcessedRtt(), 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, receiver_.impl_->rtt_ms(), 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), receiver_.impl_->rtt_ms(), 1); } TEST_P(RtpRtcpImpl2Test, NoSrBeforeMedia) { // Ignore fake transport delays in this test. - sender_.transport_.SimulateNetworkDelay(0, &time_controller_); - receiver_.transport_.SimulateNetworkDelay(0, &time_controller_); - - sender_.impl_->Process(); - EXPECT_EQ(-1, sender_.RtcpSent().first_packet_time_ms); + sender_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); + receiver_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); + // Move ahead to the instant a rtcp is expected. // Verify no SR is sent before media has been sent, RR should still be sent // from the receiving module though. - AdvanceTimeMs(2000); + AdvanceTime(kDefaultReportInterval / 2); int64_t current_time = time_controller_.GetClock()->TimeInMilliseconds(); - sender_.impl_->Process(); - receiver_.impl_->Process(); EXPECT_EQ(-1, sender_.RtcpSent().first_packet_time_ms); EXPECT_EQ(receiver_.RtcpSent().first_packet_time_ms, current_time); + // RTCP should be triggered by the RTP send. EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, current_time); } @@ -473,6 +527,7 @@ TEST_P(RtpRtcpImpl2Test, RtcpPacketTypeCounter_Nack) { const uint16_t kNackLength = 1; uint16_t nack_list[kNackLength] = {123}; EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list, kNackLength)); + AdvanceTime(kOneWayNetworkDelay); EXPECT_EQ(1U, receiver_.RtcpSent().nack_packets); EXPECT_GT(receiver_.RtcpSent().first_packet_time_ms, -1); @@ -561,7 +616,7 @@ TEST_P(RtpRtcpImpl2Test, SendsExtendedNackList) { } TEST_P(RtpRtcpImpl2Test, ReSendsNackListAfterRttMs) { - sender_.transport_.SimulateNetworkDelay(0, &time_controller_); + sender_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); // Send module sends a NACK. const uint16_t kNackLength = 2; uint16_t nack_list[kNackLength] = {123, 125}; @@ -573,20 +628,20 @@ TEST_P(RtpRtcpImpl2Test, ReSendsNackListAfterRttMs) { EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125)); // Same list not re-send, rtt interval has not passed. 
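The gating this test exercises ("same list not re-sent until the RTT interval has passed") keeps duplicate NACKs off the wire: a sequence number is only re-requested once at least one RTT has elapsed since the previous request, because until then the retransmission triggered by the first NACK may still be in flight. A minimal sketch of that check, with hypothetical names:

    #include <cstdint>

    // May `sequence_number` be NACKed again? `last_nack_time_ms` is when we
    // last asked for it; `rtt_ms` is the current RTT estimate (the test uses a
    // 100 ms startup value before any measurement exists).
    bool MayRetryNack(int64_t now_ms, int64_t last_nack_time_ms, int64_t rtt_ms) {
      return now_ms - last_nack_time_ms > rtt_ms;
    }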
- const int kStartupRttMs = 100; - AdvanceTimeMs(kStartupRttMs); + const TimeDelta kStartupRtt = TimeDelta::Millis(100); + AdvanceTime(kStartupRtt); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(1U, sender_.RtcpSent().nack_packets); // Rtt interval passed, full list sent. - AdvanceTimeMs(1); + AdvanceTime(TimeDelta::Millis(1)); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(2U, sender_.RtcpSent().nack_packets); EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125)); } TEST_P(RtpRtcpImpl2Test, UniqueNackRequests) { - receiver_.transport_.SimulateNetworkDelay(0, &time_controller_); + receiver_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); EXPECT_EQ(0U, receiver_.RtcpSent().nack_packets); EXPECT_EQ(0U, receiver_.RtcpSent().nack_requests); EXPECT_EQ(0U, receiver_.RtcpSent().unique_nack_requests); @@ -608,8 +663,8 @@ TEST_P(RtpRtcpImpl2Test, UniqueNackRequests) { EXPECT_EQ(100, sender_.RtcpReceived().UniqueNackRequestsInPercent()); // Receive module sends new request with duplicated packets. - const int kStartupRttMs = 100; - AdvanceTimeMs(kStartupRttMs + 1); + const TimeDelta kStartupRtt = TimeDelta::Millis(100); + AdvanceTime(kStartupRtt + TimeDelta::Millis(1)); const uint16_t kNackLength2 = 4; uint16_t nack_list2[kNackLength2] = {11, 18, 20, 21}; EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list2, kNackLength2)); @@ -626,7 +681,7 @@ TEST_P(RtpRtcpImpl2Test, UniqueNackRequests) { } TEST_P(RtpRtcpImpl2Test, ConfigurableRtcpReportInterval) { - const int kVideoReportInterval = 3000; + const TimeDelta kVideoReportInterval = TimeDelta::Millis(3000); // Recreate sender impl with new configuration, and redo setup. sender_.SetRtcpReportIntervalAndReset(kVideoReportInterval); @@ -635,41 +690,34 @@ TEST_P(RtpRtcpImpl2Test, ConfigurableRtcpReportInterval) { EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Initial state - sender_.impl_->Process(); EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, -1); EXPECT_EQ(0u, sender_.transport_.NumRtcpSent()); // Move ahead to the last ms before a rtcp is expected, no action. - AdvanceTimeMs(kVideoReportInterval / 2 - 1); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval / 2 - TimeDelta::Millis(1)); EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, -1); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u); // Move ahead to the first rtcp. Send RTCP. - AdvanceTimeMs(1); - sender_.impl_->Process(); + AdvanceTime(TimeDelta::Millis(1)); EXPECT_GT(sender_.RtcpSent().first_packet_time_ms, -1); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u); EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Move ahead to the last possible second before second rtcp is expected. - AdvanceTimeMs(kVideoReportInterval * 1 / 2 - 1); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval * 1 / 2 - TimeDelta::Millis(1)); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u); // Move ahead into the range of second rtcp, the second rtcp may be sent. - AdvanceTimeMs(1); - sender_.impl_->Process(); + AdvanceTime(TimeDelta::Millis(1)); EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u); - AdvanceTimeMs(kVideoReportInterval / 2); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval / 2); EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u); // Move out the range of second rtcp, the second rtcp must have been sent. 
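The "may be sent" / "must have been sent" window this test walks through comes from RFC 3550-style interval randomization: to avoid report synchronization across senders, the next RTCP report is scheduled a uniformly random 0.5x to 1.5x of the configured interval after the previous one, so the earliest legal instant is interval/2 and the latest is 1.5 * interval. A sketch of that scheduling rule (plain integers, not the actual RTCPSender code):

    #include <cstdint>
    #include <random>

    // Next RTCP send time: somewhere in [0.5, 1.5] * report_interval from now.
    int64_t NextRtcpSendTimeMs(int64_t now_ms, int64_t report_interval_ms,
                               std::mt19937& rng) {
      std::uniform_int_distribution<int64_t> jitter(0, report_interval_ms);
      return now_ms + report_interval_ms / 2 + jitter(rng);
    }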
- AdvanceTimeMs(kVideoReportInterval / 2); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval / 2); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 2u); } @@ -691,7 +739,7 @@ TEST_P(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { packet.set_first_packet_of_frame(true); packet.SetMarker(true); sender_.impl_->TrySendPacket(&packet, pacing_info); - AdvanceTimeMs(1); + AdvanceTime(TimeDelta::Millis(1)); std::vector seqno_info = sender_.impl_->GetSentRtpPacketInfos(std::vector{1}); @@ -716,7 +764,7 @@ TEST_P(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { packet.SetMarker(true); sender_.impl_->TrySendPacket(&packet, pacing_info); - AdvanceTimeMs(1); + AdvanceTime(TimeDelta::Millis(1)); seqno_info = sender_.impl_->GetSentRtpPacketInfos(std::vector{2, 3, 4}); @@ -746,6 +794,7 @@ TEST_P(RtpRtcpImpl2Test, SenderReportStatsAvailable) { EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Send an SR. ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Not(Eq(absl::nullopt))); } @@ -796,10 +845,12 @@ TEST_P(RtpRtcpImpl2Test, SenderReportStatsCount) { EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Send the first SR. ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Optional(Field(&SenderReportStats::reports_count, Eq(1u)))); // Send the second SR. ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Optional(Field(&SenderReportStats::reports_count, Eq(2u)))); } @@ -811,6 +862,7 @@ TEST_P(RtpRtcpImpl2Test, SenderReportStatsArrivalTimestampSet) { EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Send an SR. ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); auto stats = receiver_.impl_->GetSenderReportStats(); ASSERT_THAT(stats, Not(Eq(absl::nullopt))); EXPECT_TRUE(stats->last_arrival_timestamp.Valid()); @@ -825,9 +877,10 @@ TEST_P(RtpRtcpImpl2Test, SenderReportStatsPacketByteCounters) { ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0)); // Advance time otherwise the RTCP SR report will not include any packets // generated by `SendFrame()`. - AdvanceTimeMs(1); + AdvanceTime(TimeDelta::Millis(1)); // Send an SR. ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Optional(AllOf(Field(&SenderReportStats::packets_sent, Gt(0u)), Field(&SenderReportStats::bytes_sent, Gt(0u))))); @@ -935,16 +988,16 @@ TEST_P(RtpRtcpImpl2Test, AssignsTransmissionTimeOffset) { sender_.RegisterHeaderExtension(TransmissionOffset::kUri, kTransmissionOffsetExtensionId); - constexpr int kOffsetMs = 100; + constexpr TimeDelta kOffset = TimeDelta::Millis(100); // Transmission offset is calculated from difference between capture time // and send time. 
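As the comment above says and the expectation below checks, the TransmissionOffset header extension carries the capture-to-send delay expressed in ticks of the RTP media clock, here the 90 kHz video clock (90 ticks per millisecond, which is what kCaptureTimeMsToRtpTimestamp encodes). A one-line sketch, not part of the patch:

    #include <cstdint>

    constexpr int kRtpTicksPerMs = 90;  // 90 kHz video RTP clock.

    constexpr int32_t TransmissionOffsetTicks(int64_t send_time_ms,
                                              int64_t capture_time_ms) {
      return static_cast<int32_t>((send_time_ms - capture_time_ms) *
                                  kRtpTicksPerMs);
    }

    static_assert(TransmissionOffsetTicks(100, 0) == 9000,
                  "a 100 ms delay becomes 9000 RTP ticks");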
int64_t capture_time_ms = time_controller_.GetClock()->TimeInMilliseconds(); - time_controller_.AdvanceTime(TimeDelta::Millis(kOffsetMs)); + time_controller_.AdvanceTime(kOffset); EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid, /*timestamp=*/0, capture_time_ms)); EXPECT_EQ(sender_.last_packet().GetExtension(), - kOffsetMs * kCaptureTimeMsToRtpTimestamp); + kOffset.ms() * kCaptureTimeMsToRtpTimestamp); } TEST_P(RtpRtcpImpl2Test, PropagatesSentPacketInfo) { @@ -962,6 +1015,67 @@ TEST_P(RtpRtcpImpl2Test, PropagatesSentPacketInfo) { Field(&RtpRtcpModule::SentPacket::ssrc, Eq(kSenderSsrc))))); } +TEST_P(RtpRtcpImpl2Test, GeneratesFlexfec) { + constexpr int kFlexfecPayloadType = 118; + constexpr uint32_t kFlexfecSsrc = 17; + const char kNoMid[] = ""; + const std::vector kNoRtpExtensions; + const std::vector kNoRtpExtensionSizes; + + // Make sure FlexFec sequence numbers start at a different point than media. + const uint16_t fec_start_seq = sender_.impl_->SequenceNumber() + 100; + RtpState start_state; + start_state.sequence_number = fec_start_seq; + FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexfecSsrc, kSenderSsrc, + kNoMid, kNoRtpExtensions, kNoRtpExtensionSizes, + &start_state, time_controller_.GetClock()); + ReinitWithFec(&flexfec_sender, /*red_payload_type=*/absl::nullopt); + + // Parameters selected to generate a single FEC packet per media packet. + FecProtectionParams params; + params.fec_rate = 15; + params.max_fec_frames = 1; + params.fec_mask_type = kFecMaskRandom; + sender_.impl_->SetFecProtectionParams(params, params); + + // Send a one packet frame, expect one media packet and one FEC packet. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(2)); + + const RtpPacketReceived& fec_packet = sender_.last_packet(); + EXPECT_EQ(fec_packet.SequenceNumber(), fec_start_seq); + EXPECT_EQ(fec_packet.Ssrc(), kFlexfecSsrc); + EXPECT_EQ(fec_packet.PayloadType(), kFlexfecPayloadType); +} + +TEST_P(RtpRtcpImpl2Test, GeneratesUlpfec) { + constexpr int kUlpfecPayloadType = 118; + constexpr int kRedPayloadType = 119; + UlpfecGenerator ulpfec_sender(kRedPayloadType, kUlpfecPayloadType, + time_controller_.GetClock()); + ReinitWithFec(&ulpfec_sender, kRedPayloadType); + + // Parameters selected to generate a single FEC packet per media packet. + FecProtectionParams params; + params.fec_rate = 15; + params.max_fec_frames = 1; + params.fec_mask_type = kFecMaskRandom; + sender_.impl_->SetFecProtectionParams(params, params); + + // Send a one packet frame, expect one media packet and one FEC packet. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(2)); + + // Ulpfec is sent on the media ssrc, sharing the sequene number series. + const RtpPacketReceived& fec_packet = sender_.last_packet(); + EXPECT_EQ(fec_packet.SequenceNumber(), kSequenceNumber + 1); + EXPECT_EQ(fec_packet.Ssrc(), kSenderSsrc); + // The packets are encapsulated in RED packets, check that and that the RED + // header (first byte of payload) indicates the desired FEC payload type. 
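The checks that follow rely on RFC 2198 RED framing: when only the primary encoding is present, the RED header is a single byte whose F bit is 0 and whose low 7 bits carry the payload type of the block that follows, here the ULPFEC payload type, which is why payload()[0] is compared against kUlpfecPayloadType while the outer RTP payload type is the RED one. A sketch of that single-byte header (illustrative helper, not part of the patch):

    #include <cstdint>

    // RFC 2198: the final (primary) block header is one byte, F bit = 0,
    // followed by the 7-bit payload type of the encapsulated data.
    constexpr uint8_t RedPrimaryBlockHeader(uint8_t encapsulated_payload_type) {
      return encapsulated_payload_type & 0x7F;
    }

    static_assert(RedPrimaryBlockHeader(118) == 118,
                  "a ULPFEC payload type fits in the 7-bit field");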
+ EXPECT_EQ(fec_packet.PayloadType(), kRedPayloadType); + EXPECT_EQ(fec_packet.payload()[0], kUlpfecPayloadType); +} + INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, RtpRtcpImpl2Test, ::testing::Values(TestConfig{false}, diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc index cc6b76c121..ac05584e18 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc @@ -72,11 +72,10 @@ class SendTransport : public Transport { bool SendRtp(const uint8_t* data, size_t len, const PacketOptions& options) override { - RTPHeader header; - std::unique_ptr parser(RtpHeaderParser::CreateForTest()); - EXPECT_TRUE(parser->Parse(static_cast(data), len, &header)); + RtpPacket packet; + EXPECT_TRUE(packet.Parse(data, len)); ++rtp_packets_sent_; - last_rtp_header_ = header; + last_rtp_sequence_number_ = packet.SequenceNumber(); return true; } bool SendRtcp(const uint8_t* data, size_t len) override { @@ -98,7 +97,7 @@ class SendTransport : public Transport { int64_t delay_ms_; int rtp_packets_sent_; size_t rtcp_packets_sent_; - RTPHeader last_rtp_header_; + uint16_t last_rtp_sequence_number_; std::vector last_nack_list_; }; @@ -138,7 +137,7 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver { } int RtpSent() { return transport_.rtp_packets_sent_; } uint16_t LastRtpSequenceNumber() { - return transport_.last_rtp_header_.sequenceNumber; + return transport_.last_rtp_sequence_number_; } std::vector LastNackListSent() { return transport_.last_nack_list_; diff --git a/modules/rtp_rtcp/source/rtp_rtcp_interface.h b/modules/rtp_rtcp/source/rtp_rtcp_interface.h index 457a993139..dd5744ec54 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_interface.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_interface.h @@ -180,6 +180,10 @@ class RtpRtcpInterface : public RtcpFeedbackSenderInterface { virtual void SetRemoteSSRC(uint32_t ssrc) = 0; + // Called when the local ssrc changes (post initialization) for receive + // streams to match with send. Called on the packet receive thread/tq. + virtual void SetLocalSsrc(uint32_t ssrc) = 0; + // ************************************************************************** // Sender // ************************************************************************** diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc index 8435e5f33d..80c319f4f2 100644 --- a/modules/rtp_rtcp/source/rtp_sender.cc +++ b/modules/rtp_rtcp/source/rtp_sender.cc @@ -104,6 +104,7 @@ bool IsNonVolatile(RTPExtensionType type) { switch (type) { case kRtpExtensionTransmissionTimeOffset: case kRtpExtensionAudioLevel: + case kRtpExtensionCsrcAudioLevel: case kRtpExtensionAbsoluteSendTime: case kRtpExtensionTransportSequenceNumber: case kRtpExtensionTransportSequenceNumber02: diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.cc b/modules/rtp_rtcp/source/rtp_sender_audio.cc index 8cf60aaecd..4d72211b7c 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio.cc @@ -157,7 +157,7 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, return SendAudio(frame_type, payload_type, rtp_timestamp, payload_data, payload_size, // TODO(bugs.webrtc.org/10739) replace once plumbed. 
- /*absolute_capture_timestamp_ms=*/0); + /*absolute_capture_timestamp_ms=*/-1); } bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, @@ -277,22 +277,26 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, packet->SetExtension( frame_type == AudioFrameType::kAudioFrameSpeech, audio_level_dbov); - // Send absolute capture time periodically in order to optimize and save - // network traffic. Missing absolute capture times can be interpolated on the - // receiving end if sending intervals are small enough. - auto absolute_capture_time = absolute_capture_time_sender_.OnSendPacket( - AbsoluteCaptureTimeSender::GetSource(packet->Ssrc(), packet->Csrcs()), - packet->Timestamp(), - // Replace missing value with 0 (invalid frequency), this will trigger - // absolute capture time sending. - encoder_rtp_timestamp_frequency.value_or(0), - Int64MsToUQ32x32(absolute_capture_timestamp_ms + NtpOffsetMs()), - /*estimated_capture_clock_offset=*/ - include_capture_clock_offset_ ? absl::make_optional(0) : absl::nullopt); - if (absolute_capture_time) { - // It also checks that extension was registered during SDP negotiation. If - // not then setter won't do anything. - packet->SetExtension(*absolute_capture_time); + if (absolute_capture_timestamp_ms > 0) { + // Send absolute capture time periodically in order to optimize and save + // network traffic. Missing absolute capture times can be interpolated on + // the receiving end if sending intervals are small enough. + auto absolute_capture_time = absolute_capture_time_sender_.OnSendPacket( + AbsoluteCaptureTimeSender::GetSource(packet->Ssrc(), packet->Csrcs()), + packet->Timestamp(), + // Replace missing value with 0 (invalid frequency), this will trigger + // absolute capture time sending. + encoder_rtp_timestamp_frequency.value_or(0), + Int64MsToUQ32x32(clock_->ConvertTimestampToNtpTimeInMilliseconds( + absolute_capture_timestamp_ms)), + /*estimated_capture_clock_offset=*/ + include_capture_clock_offset_ ? absl::make_optional(0) : absl::nullopt); + if (absolute_capture_time) { + // It also checks that extension was registered during SDP negotiation. If + // not then setter won't do anything. + packet->SetExtension( + *absolute_capture_time); + } } uint8_t* payload = packet->AllocatePayload(payload_size); diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.h b/modules/rtp_rtcp/source/rtp_sender_audio.h index 57b9dd7ce6..6d61facc9a 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.h +++ b/modules/rtp_rtcp/source/rtp_sender_audio.h @@ -51,6 +51,8 @@ class RTPSenderAudio { const uint8_t* payload_data, size_t payload_size); + // `absolute_capture_timestamp_ms` and `Clock::CurrentTime` + // should be using the same epoch. 
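On the absolute-capture-time change above (converting the capture timestamp to NTP via the clock instead of adding a fixed offset, and skipping the extension entirely for the new -1 "unknown" sentinel): the extension carries an NTP-style UQ32.32 fixed-point value, whole seconds in the upper 32 bits and the fraction of a second scaled by 2^32 in the lower 32 bits, which is what Int64MsToUQ32x32() produces from milliseconds. A sketch of that conversion (illustrative, matching the Q32.32 definition rather than any exact WebRTC rounding):

    #include <cstdint>

    // UQ32.32: seconds in the high word, fraction-of-second * 2^32 in the low.
    constexpr uint64_t MillisToUQ32x32(uint64_t ms) {
      return ((ms / 1000) << 32) | (((ms % 1000) << 32) / 1000);
    }

    static_assert(MillisToUQ32x32(1500) ==
                      (uint64_t{1} << 32) + (uint64_t{1} << 31),
                  "1.5 s is one second plus half of 2^32");

This only yields a meaningful wall-clock value when the capture timestamp and the clock share an epoch, which is exactly what the header comment added above requires.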
bool SendAudio(AudioFrameType frame_type, int8_t payload_type, uint32_t rtp_timestamp, diff --git a/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc index d75f4e8947..0221800ea8 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc @@ -19,7 +19,6 @@ #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" -#include "modules/rtp_rtcp/source/time_util.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" @@ -167,8 +166,10 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAbsoluteCaptureTime) { transport_.last_sent_packet() .GetExtension(); EXPECT_TRUE(absolute_capture_time); - EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs())); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); EXPECT_FALSE( absolute_capture_time->estimated_capture_clock_offset.has_value()); } @@ -201,8 +202,10 @@ TEST_F(RtpSenderAudioTest, transport_.last_sent_packet() .GetExtension(); EXPECT_TRUE(absolute_capture_time); - EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs())); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); EXPECT_TRUE( absolute_capture_time->estimated_capture_clock_offset.has_value()); EXPECT_EQ(0, *absolute_capture_time->estimated_capture_clock_offset); diff --git a/modules/rtp_rtcp/source/rtp_sender_egress.cc b/modules/rtp_rtcp/source/rtp_sender_egress.cc index 55dd9ff075..126b89c8c8 100644 --- a/modules/rtp_rtcp/source/rtp_sender_egress.cc +++ b/modules/rtp_rtcp/source/rtp_sender_egress.cc @@ -142,6 +142,9 @@ void RtpSenderEgress::SendPacket(RtpPacketToSend* packet, RTC_DCHECK(packet->packet_type().has_value()); RTC_DCHECK(HasCorrectSsrc(*packet)); + if (packet->packet_type() == RtpPacketMediaType::kRetransmission) { + RTC_DCHECK(packet->retransmitted_sequence_number().has_value()); + } const uint32_t packet_ssrc = packet->Ssrc(); const int64_t now_ms = clock_->TimeInMilliseconds(); @@ -409,13 +412,34 @@ void RtpSenderEgress::AddPacketToTransportFeedback( } RtpPacketSendInfo packet_info; - packet_info.ssrc = ssrc_; packet_info.transport_sequence_number = packet_id; - packet_info.rtp_sequence_number = packet.SequenceNumber(); packet_info.rtp_timestamp = packet.Timestamp(); packet_info.length = packet_size; packet_info.pacing_info = pacing_info; packet_info.packet_type = packet.packet_type(); + + switch (*packet_info.packet_type) { + case RtpPacketMediaType::kAudio: + case RtpPacketMediaType::kVideo: + packet_info.media_ssrc = ssrc_; + packet_info.rtp_sequence_number = packet.SequenceNumber(); + break; + case RtpPacketMediaType::kRetransmission: + // For retransmissions, we're want to remove the original media packet + // if the rentrasmit arrives - so populate that in the packet info. 
+ packet_info.media_ssrc = ssrc_; + packet_info.rtp_sequence_number = + *packet.retransmitted_sequence_number(); + break; + case RtpPacketMediaType::kPadding: + case RtpPacketMediaType::kForwardErrorCorrection: + // We're not interested in feedback about these packets being received + // or lost. + break; + } + // TODO(bugs.webrtc.org/12713): Remove once downstream usage is gone. + packet_info.ssrc = packet_info.media_ssrc.value_or(0); + transport_feedback_observer_->OnAddPacket(packet_info); } } diff --git a/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc index 8089bd8e6e..4f3990cc3e 100644 --- a/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc @@ -18,6 +18,7 @@ #include "api/units/data_size.h" #include "api/units/timestamp.h" #include "logging/rtc_event_log/mock/mock_rtc_event_log.h" +#include "modules/rtp_rtcp/include/flexfec_sender.h" #include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" @@ -31,18 +32,24 @@ namespace webrtc { namespace { +using ::testing::_; using ::testing::Field; using ::testing::NiceMock; +using ::testing::Optional; using ::testing::StrictMock; constexpr Timestamp kStartTime = Timestamp::Millis(123456789); constexpr int kDefaultPayloadType = 100; +constexpr int kFlexfectPayloadType = 110; constexpr uint16_t kStartSequenceNumber = 33; constexpr uint32_t kSsrc = 725242; constexpr uint32_t kRtxSsrc = 12345; +constexpr uint32_t kFlexFecSsrc = 23456; enum : int { kTransportSequenceNumberExtensionId = 1, - kVideoTimingExtensionExtensionId, + kAbsoluteSendTimeExtensionId, + kTransmissionOffsetExtensionId, + kVideoTimingExtensionId, }; struct TestConfig { @@ -214,7 +221,7 @@ TEST_P(RtpSenderEgressTest, TransportFeedbackObserverGetsCorrectByteCount) { EXPECT_CALL( feedback_observer_, OnAddPacket(AllOf( - Field(&RtpPacketSendInfo::ssrc, kSsrc), + Field(&RtpPacketSendInfo::media_ssrc, kSsrc), Field(&RtpPacketSendInfo::transport_sequence_number, kTransportSequenceNumber), Field(&RtpPacketSendInfo::rtp_sequence_number, kStartSequenceNumber), @@ -239,6 +246,8 @@ TEST_P(RtpSenderEgressTest, PacketOptionsIsRetransmitSetByPacketType) { std::unique_ptr retransmission = BuildRtpPacket(); retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->set_retransmitted_sequence_number( + media_packet->SequenceNumber()); sender->SendPacket(retransmission.get(), PacedPacketInfo()); EXPECT_TRUE(transport_.last_packet()->options.is_retransmit); } @@ -332,7 +341,7 @@ TEST_P(RtpSenderEgressTest, OnSendSideDelayUpdated) { TEST_P(RtpSenderEgressTest, WritesPacerExitToTimingExtension) { std::unique_ptr sender = CreateRtpSenderEgress(); - header_extensions_.RegisterByUri(kVideoTimingExtensionExtensionId, + header_extensions_.RegisterByUri(kVideoTimingExtensionId, VideoTimingExtension::kUri); std::unique_ptr packet = BuildRtpPacket(); @@ -354,7 +363,7 @@ TEST_P(RtpSenderEgressTest, WritesNetwork2ToTimingExtension) { RtpRtcpInterface::Configuration rtp_config = DefaultConfig(); rtp_config.populate_network2_timestamp = true; auto sender = std::make_unique(rtp_config, &packet_history_); - header_extensions_.RegisterByUri(kVideoTimingExtensionExtensionId, + header_extensions_.RegisterByUri(kVideoTimingExtensionId, VideoTimingExtension::kUri); const uint16_t kPacerExitMs = 1234u; @@ -400,9 +409,571 @@ TEST_P(RtpSenderEgressTest, 
OnSendPacketNotUpdatedForRetransmits) { std::unique_ptr packet = BuildRtpPacket(); packet->SetExtension(kTransportSequenceNumber); packet->set_packet_type(RtpPacketMediaType::kRetransmission); + packet->set_retransmitted_sequence_number(packet->SequenceNumber()); sender->SendPacket(packet.get(), PacedPacketInfo()); } +TEST_P(RtpSenderEgressTest, ReportsFecRate) { + constexpr int kNumPackets = 10; + constexpr TimeDelta kTimeBetweenPackets = TimeDelta::Millis(33); + + std::unique_ptr sender = CreateRtpSenderEgress(); + DataSize total_fec_data_sent = DataSize::Zero(); + // Send some packets, alternating between media and FEC. + for (size_t i = 0; i < kNumPackets; ++i) { + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->set_packet_type(RtpPacketMediaType::kVideo); + media_packet->SetPayloadSize(500); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + + std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetPayloadSize(123); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); + total_fec_data_sent += DataSize::Bytes(fec_packet->size()); + + time_controller_.AdvanceTime(kTimeBetweenPackets); + } + + EXPECT_NEAR( + (sender->GetSendRates()[RtpPacketMediaType::kForwardErrorCorrection]) + .bps(), + (total_fec_data_sent / (kTimeBetweenPackets * kNumPackets)).bps(), 500); +} + +TEST_P(RtpSenderEgressTest, BitrateCallbacks) { + class MockBitrateStaticsObserver : public BitrateStatisticsObserver { + public: + MOCK_METHOD(void, Notify, (uint32_t, uint32_t, uint32_t), (override)); + } observer; + + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.send_bitrate_observer = &observer; + auto sender = std::make_unique(config, &packet_history_); + + // Simulate kNumPackets sent with kPacketInterval intervals, with the + // number of packets selected so that we fill (but don't overflow) the one + // second averaging window. + const TimeDelta kWindowSize = TimeDelta::Seconds(1); + const TimeDelta kPacketInterval = TimeDelta::Millis(20); + const int kNumPackets = (kWindowSize - kPacketInterval) / kPacketInterval; + + DataSize total_data_sent = DataSize::Zero(); + + // Send all but on of the packets, expect a call for each packet but don't + // verify bitrate yet (noisy measurements in the beginning). + for (int i = 0; i < kNumPackets; ++i) { + std::unique_ptr packet = BuildRtpPacket(); + packet->SetPayloadSize(500); + // Mark all packets as retransmissions - will cause total and retransmission + // rates to be equal. + packet->set_packet_type(RtpPacketMediaType::kRetransmission); + packet->set_retransmitted_sequence_number(packet->SequenceNumber()); + total_data_sent += DataSize::Bytes(packet->size()); + + EXPECT_CALL(observer, Notify(_, _, kSsrc)) + .WillOnce([&](uint32_t total_bitrate_bps, + uint32_t retransmission_bitrate_bps, uint32_t /*ssrc*/) { + TimeDelta window_size = i * kPacketInterval + TimeDelta::Millis(1); + // If there is just a single data point, there is no well defined + // averaging window so a bitrate of zero will be reported. + const double expected_bitrate_bps = + i == 0 ? 
0.0 : (total_data_sent / window_size).bps(); + EXPECT_NEAR(total_bitrate_bps, expected_bitrate_bps, 500); + EXPECT_NEAR(retransmission_bitrate_bps, expected_bitrate_bps, 500); + }); + + sender->SendPacket(packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(kPacketInterval); + } +} + +TEST_P(RtpSenderEgressTest, DoesNotPutNotRetransmittablePacketsInHistory) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + std::unique_ptr packet = BuildRtpPacket(); + packet->set_allow_retransmission(false); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_FALSE( + packet_history_.GetPacketState(packet->SequenceNumber()).has_value()); +} + +TEST_P(RtpSenderEgressTest, PutsRetransmittablePacketsInHistory) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + std::unique_ptr packet = BuildRtpPacket(); + packet->set_allow_retransmission(true); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_THAT( + packet_history_.GetPacketState(packet->SequenceNumber()), + Optional( + Field(&RtpPacketHistory::PacketState::pending_transmission, false))); +} + +TEST_P(RtpSenderEgressTest, DoesNotPutNonMediaInHistory) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + // Non-media packets, even when marked as retransmittable, are not put into + // the packet history. + std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->set_allow_retransmission(true); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->set_retransmitted_sequence_number( + retransmission->SequenceNumber()); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_FALSE(packet_history_.GetPacketState(retransmission->SequenceNumber()) + .has_value()); + + std::unique_ptr fec = BuildRtpPacket(); + fec->set_allow_retransmission(true); + fec->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + sender->SendPacket(fec.get(), PacedPacketInfo()); + EXPECT_FALSE( + packet_history_.GetPacketState(fec->SequenceNumber()).has_value()); + + std::unique_ptr padding = BuildRtpPacket(); + padding->set_allow_retransmission(true); + padding->set_packet_type(RtpPacketMediaType::kPadding); + sender->SendPacket(padding.get(), PacedPacketInfo()); + EXPECT_FALSE( + packet_history_.GetPacketState(padding->SequenceNumber()).has_value()); +} + +TEST_P(RtpSenderEgressTest, UpdatesSendStatusOfRetransmittedPackets) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + // Send a packet, putting it in the history. + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->set_allow_retransmission(true); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + EXPECT_THAT( + packet_history_.GetPacketState(media_packet->SequenceNumber()), + Optional( + Field(&RtpPacketHistory::PacketState::pending_transmission, false))); + + // Simulate a retransmission, marking the packet as pending. 
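+ // (RtpPacketHistory hands out a copy of the stored packet and flags the stored entry as pending; RtpSenderEgress::SendPacket() clears that flag once the packet actually leaves, which is what the expectations below verify.)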
+ std::unique_ptr retransmission = + packet_history_.GetPacketAndMarkAsPending(media_packet->SequenceNumber()); + retransmission->set_retransmitted_sequence_number( + media_packet->SequenceNumber()); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + EXPECT_THAT(packet_history_.GetPacketState(media_packet->SequenceNumber()), + Optional(Field( + &RtpPacketHistory::PacketState::pending_transmission, true))); + + // Simulate packet leaving pacer, the packet should be marked as non-pending. + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_THAT( + packet_history_.GetPacketState(media_packet->SequenceNumber()), + Optional( + Field(&RtpPacketHistory::PacketState::pending_transmission, false))); +} + +TEST_P(RtpSenderEgressTest, StreamDataCountersCallbacks) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + const RtpPacketCounter kEmptyCounter; + RtpPacketCounter expected_transmitted_counter; + RtpPacketCounter expected_retransmission_counter; + + // Send a media packet. + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->SetPayloadSize(6); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += media_packet->payload_size(); + expected_transmitted_counter.header_bytes += media_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, + expected_retransmission_counter), + Field(&StreamDataCounters::fec, kEmptyCounter)), + kSsrc)); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send a retransmission. Retransmissions are counted into both transmitted + // and retransmitted packet statistics. + std::unique_ptr retransmission_packet = BuildRtpPacket(); + retransmission_packet->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission_packet->set_retransmitted_sequence_number( + retransmission_packet->SequenceNumber()); + media_packet->SetPayloadSize(7); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += + retransmission_packet->payload_size(); + expected_transmitted_counter.header_bytes += + retransmission_packet->headers_size(); + + expected_retransmission_counter.packets += 1; + expected_retransmission_counter.payload_bytes += + retransmission_packet->payload_size(); + expected_retransmission_counter.header_bytes += + retransmission_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, + expected_retransmission_counter), + Field(&StreamDataCounters::fec, kEmptyCounter)), + kSsrc)); + sender->SendPacket(retransmission_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send a padding packet. 
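+ // (A padding-only packet carries no payload, so its size is accounted under padding_bytes rather than payload_bytes in the expected counters below.)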
+ std::unique_ptr<RtpPacketToSend> padding_packet = BuildRtpPacket(); + padding_packet->set_packet_type(RtpPacketMediaType::kPadding); + padding_packet->SetPadding(224); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.padding_bytes += padding_packet->padding_size(); + expected_transmitted_counter.header_bytes += padding_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, + expected_retransmission_counter), + Field(&StreamDataCounters::fec, kEmptyCounter)), + kSsrc)); + sender->SendPacket(padding_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); +} + +TEST_P(RtpSenderEgressTest, StreamDataCountersCallbacksFec) { + std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress(); + + const RtpPacketCounter kEmptyCounter; + RtpPacketCounter expected_transmitted_counter; + RtpPacketCounter expected_fec_counter; + + // Send a media packet. + std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket(); + media_packet->SetPayloadSize(6); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += media_packet->payload_size(); + expected_transmitted_counter.header_bytes += media_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated( + AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, kEmptyCounter), + Field(&StreamDataCounters::fec, expected_fec_counter)), + kSsrc)); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send an FEC packet. FEC is counted into both transmitted and FEC packet + // statistics. + std::unique_ptr<RtpPacketToSend> fec_packet = BuildRtpPacket(); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetPayloadSize(6); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += fec_packet->payload_size(); + expected_transmitted_counter.header_bytes += fec_packet->headers_size(); + + expected_fec_counter.packets += 1; + expected_fec_counter.payload_bytes += fec_packet->payload_size(); + expected_fec_counter.header_bytes += fec_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated( + AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, kEmptyCounter), + Field(&StreamDataCounters::fec, expected_fec_counter)), + kSsrc)); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); +} + +TEST_P(RtpSenderEgressTest, UpdatesDataCounters) { + std::unique_ptr<RtpSenderEgress> sender = CreateRtpSenderEgress(); + + const RtpPacketCounter kEmptyCounter; + + // Send a media packet. + std::unique_ptr<RtpPacketToSend> media_packet = BuildRtpPacket(); + media_packet->SetPayloadSize(6); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send an RTX retransmission packet.
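+ // (The retransmission goes out on kRtxSsrc, and RtpSenderEgress keeps a separate StreamDataCounters instance per SSRC, so it ends up in rtx_stats rather than rtp_stats in the assertions below.)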
+ std::unique_ptr rtx_packet = BuildRtpPacket(); + rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission); + rtx_packet->SetSsrc(kRtxSsrc); + rtx_packet->SetPayloadSize(7); + rtx_packet->set_retransmitted_sequence_number(media_packet->SequenceNumber()); + sender->SendPacket(rtx_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + StreamDataCounters rtp_stats; + StreamDataCounters rtx_stats; + sender->GetDataCounters(&rtp_stats, &rtx_stats); + + EXPECT_EQ(rtp_stats.transmitted.packets, 1u); + EXPECT_EQ(rtp_stats.transmitted.payload_bytes, media_packet->payload_size()); + EXPECT_EQ(rtp_stats.transmitted.padding_bytes, media_packet->padding_size()); + EXPECT_EQ(rtp_stats.transmitted.header_bytes, media_packet->headers_size()); + EXPECT_EQ(rtp_stats.retransmitted, kEmptyCounter); + EXPECT_EQ(rtp_stats.fec, kEmptyCounter); + + // Retransmissions are counted both into transmitted and retransmitted + // packet counts. + EXPECT_EQ(rtx_stats.transmitted.packets, 1u); + EXPECT_EQ(rtx_stats.transmitted.payload_bytes, rtx_packet->payload_size()); + EXPECT_EQ(rtx_stats.transmitted.padding_bytes, rtx_packet->padding_size()); + EXPECT_EQ(rtx_stats.transmitted.header_bytes, rtx_packet->headers_size()); + EXPECT_EQ(rtx_stats.retransmitted, rtx_stats.transmitted); + EXPECT_EQ(rtx_stats.fec, kEmptyCounter); +} + +TEST_P(RtpSenderEgressTest, SendPacketUpdatesExtensions) { + header_extensions_.RegisterByUri(kVideoTimingExtensionId, + VideoTimingExtension::kUri); + header_extensions_.RegisterByUri(kAbsoluteSendTimeExtensionId, + AbsoluteSendTime::kUri); + header_extensions_.RegisterByUri(kTransmissionOffsetExtensionId, + TransmissionOffset::kUri); + std::unique_ptr sender = CreateRtpSenderEgress(); + + std::unique_ptr packet = BuildRtpPacket(); + packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds()); + + const int32_t kDiffMs = 10; + time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs)); + + sender->SendPacket(packet.get(), PacedPacketInfo()); + + RtpPacketReceived received_packet = transport_.last_packet()->packet; + + EXPECT_EQ(received_packet.GetExtension(), kDiffMs * 90); + + EXPECT_EQ(received_packet.GetExtension(), + AbsoluteSendTime::MsTo24Bits(clock_->TimeInMilliseconds())); + + VideoSendTiming timing; + EXPECT_TRUE(received_packet.GetExtension(&timing)); + EXPECT_EQ(timing.pacer_exit_delta_ms, kDiffMs); +} + +TEST_P(RtpSenderEgressTest, SendPacketSetsPacketOptions) { + const uint16_t kPacketId = 42; + std::unique_ptr sender = CreateRtpSenderEgress(); + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr packet = BuildRtpPacket(); + packet->SetExtension(kPacketId); + EXPECT_CALL(send_packet_observer_, OnSendPacket); + sender->SendPacket(packet.get(), PacedPacketInfo()); + + PacketOptions packet_options = transport_.last_packet()->options; + + EXPECT_EQ(packet_options.packet_id, kPacketId); + EXPECT_TRUE(packet_options.included_in_allocation); + EXPECT_TRUE(packet_options.included_in_feedback); + EXPECT_FALSE(packet_options.is_retransmit); + + // Send another packet as retransmission, verify options are populated. 
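+ // (PacketOptions::is_retransmit is derived from the packet type, so marking the packet below as kRetransmission is what flips the flag the test checks.)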
+ std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->SetExtension(kPacketId + 1); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->set_retransmitted_sequence_number(packet->SequenceNumber()); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_TRUE(transport_.last_packet()->options.is_retransmit); +} + +TEST_P(RtpSenderEgressTest, SendPacketUpdatesStats) { + const size_t kPayloadSize = 1000; + StrictMock send_side_delay_observer; + + const rtc::ArrayView kNoRtpHeaderExtensionSizes; + FlexfecSender flexfec(kFlexfectPayloadType, kFlexFecSsrc, kSsrc, /*mid=*/"", + /*header_extensions=*/{}, kNoRtpHeaderExtensionSizes, + /*rtp_state=*/nullptr, time_controller_.GetClock()); + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.fec_generator = &flexfec; + config.send_side_delay_observer = &send_side_delay_observer; + auto sender = std::make_unique(config, &packet_history_); + + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + const int64_t capture_time_ms = clock_->TimeInMilliseconds(); + + std::unique_ptr video_packet = BuildRtpPacket(); + video_packet->set_packet_type(RtpPacketMediaType::kVideo); + video_packet->SetPayloadSize(kPayloadSize); + video_packet->SetExtension(1); + + std::unique_ptr rtx_packet = BuildRtpPacket(); + rtx_packet->SetSsrc(kRtxSsrc); + rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission); + rtx_packet->set_retransmitted_sequence_number(video_packet->SequenceNumber()); + rtx_packet->SetPayloadSize(kPayloadSize); + rtx_packet->SetExtension(2); + + std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->SetSsrc(kFlexFecSsrc); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetPayloadSize(kPayloadSize); + fec_packet->SetExtension(3); + + const int64_t kDiffMs = 25; + time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs)); + + EXPECT_CALL(send_side_delay_observer, + SendSideDelayUpdated(kDiffMs, kDiffMs, kDiffMs, kSsrc)); + EXPECT_CALL( + send_side_delay_observer, + SendSideDelayUpdated(kDiffMs, kDiffMs, 2 * kDiffMs, kFlexFecSsrc)); + + EXPECT_CALL(send_packet_observer_, OnSendPacket(1, capture_time_ms, kSsrc)); + + sender->SendPacket(video_packet.get(), PacedPacketInfo()); + + // Send packet observer not called for padding/retransmissions. 
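+ // (The FEC packet sent afterwards is neither padding nor a retransmission, so it still triggers OnSendPacket, as the second expectation below shows.)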
+ EXPECT_CALL(send_packet_observer_, OnSendPacket(2, _, _)).Times(0); + sender->SendPacket(rtx_packet.get(), PacedPacketInfo()); + + EXPECT_CALL(send_packet_observer_, + OnSendPacket(3, capture_time_ms, kFlexFecSsrc)); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); + + time_controller_.AdvanceTime(TimeDelta::Zero()); + StreamDataCounters rtp_stats; + StreamDataCounters rtx_stats; + sender->GetDataCounters(&rtp_stats, &rtx_stats); + EXPECT_EQ(rtp_stats.transmitted.packets, 2u); + EXPECT_EQ(rtp_stats.fec.packets, 1u); + EXPECT_EQ(rtx_stats.retransmitted.packets, 1u); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverWithRetransmission) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->SetExtension( + kTransportSequenceNumber); + uint16_t retransmitted_seq = retransmission->SequenceNumber() - 2; + retransmission->set_retransmitted_sequence_number(retransmitted_seq); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf( + Field(&RtpPacketSendInfo::media_ssrc, kSsrc), + Field(&RtpPacketSendInfo::rtp_sequence_number, retransmitted_seq), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverWithRtxRetransmission) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr rtx_retransmission = BuildRtpPacket(); + rtx_retransmission->SetSsrc(kRtxSsrc); + rtx_retransmission->SetExtension( + kTransportSequenceNumber); + rtx_retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + uint16_t rtx_retransmitted_seq = rtx_retransmission->SequenceNumber() - 2; + rtx_retransmission->set_retransmitted_sequence_number(rtx_retransmitted_seq); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf( + Field(&RtpPacketSendInfo::media_ssrc, kSsrc), + Field(&RtpPacketSendInfo::rtp_sequence_number, rtx_retransmitted_seq), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(rtx_retransmission.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverPadding) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + std::unique_ptr padding = BuildRtpPacket(); + padding->SetPadding(224); + padding->set_packet_type(RtpPacketMediaType::kPadding); + padding->SetExtension(kTransportSequenceNumber); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(padding.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverRtxPadding) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr rtx_padding = BuildRtpPacket(); + 
rtx_padding->SetPadding(224); + rtx_padding->SetSsrc(kRtxSsrc); + rtx_padding->set_packet_type(RtpPacketMediaType::kPadding); + rtx_padding->SetExtension(kTransportSequenceNumber); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(rtx_padding.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverFec) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->SetSsrc(kFlexFecSsrc); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetExtension(kTransportSequenceNumber); + + const rtc::ArrayView kNoRtpHeaderExtensionSizes; + FlexfecSender flexfec(kFlexfectPayloadType, kFlexFecSsrc, kSsrc, /*mid=*/"", + /*header_extensions=*/{}, kNoRtpHeaderExtensionSizes, + /*rtp_state=*/nullptr, time_controller_.GetClock()); + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.fec_generator = &flexfec; + auto sender = std::make_unique(config, &packet_history_); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); +} + INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, RtpSenderEgressTest, ::testing::Values(TestConfig(false), diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc index 49fc474ffc..e9be016143 100644 --- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc @@ -22,14 +22,12 @@ #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/include/rtp_packet_sender.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_format_video_generic.h" #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h" #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" -#include "modules/rtp_rtcp/source/rtp_sender_egress.h" #include "modules/rtp_rtcp/source/rtp_sender_video.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "modules/rtp_rtcp/source/video_fec_generator.h" @@ -72,8 +70,6 @@ const uint64_t kStartTime = 123456789; const size_t kMaxPaddingSize = 224u; const uint8_t kPayloadData[] = {47, 11, 32, 93, 89}; const int64_t kDefaultExpectedRetransmissionTimeMs = 125; -const char kNoRid[] = ""; -const char kNoMid[] = ""; const size_t kMaxPaddingLength = 224; // Value taken from rtp_sender.cc. const uint32_t kTimestampTicksPerMs = 90; // 90kHz clock. 
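For context on the transport-feedback changes earlier in this patch: RtpSenderEgress now fills RtpPacketSendInfo::media_ssrc as an optional and, for retransmissions, reports the original media sequence number in rtp_sequence_number. The sketch below shows how a consumer of that information might use the fields; the LoggingFeedbackObserver class and its logging are illustrative only and not part of this change, while the TransportFeedbackObserver interface and the RtpPacketSendInfo fields match the ones exercised by the tests above.

#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/logging.h"

namespace webrtc {

// Illustrative observer: shows how the optional media_ssrc separates packets
// that matter for per-stream loss accounting from padding/FEC that only
// matter for congestion control.
class LoggingFeedbackObserver : public TransportFeedbackObserver {
 public:
  void OnAddPacket(const RtpPacketSendInfo& packet_info) override {
    if (packet_info.media_ssrc.has_value()) {
      // Media packet or retransmission: rtp_sequence_number refers to the
      // original media packet, so feedback can be attributed to that packet.
      RTC_LOG(LS_VERBOSE) << "media ssrc " << *packet_info.media_ssrc
                          << ", rtp seq " << packet_info.rtp_sequence_number
                          << ", transport seq "
                          << packet_info.transport_sequence_number;
    } else {
      // Padding or FEC: media_ssrc is absl::nullopt, matching the kPadding
      // and kForwardErrorCorrection cases in the egress code above.
      RTC_LOG(LS_VERBOSE) << "non-media packet, transport seq "
                          << packet_info.transport_sequence_number;
    }
  }
  void OnTransportFeedback(const rtcp::TransportFeedback&) override {}
};

}  // namespace webrtc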
@@ -82,7 +78,7 @@ using ::testing::AllOf; using ::testing::AtLeast; using ::testing::Contains; using ::testing::Each; -using ::testing::ElementsAreArray; +using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Field; using ::testing::Gt; @@ -93,58 +89,6 @@ using ::testing::Pointee; using ::testing::Property; using ::testing::Return; using ::testing::SizeIs; -using ::testing::StrictMock; - -class LoopbackTransportTest : public webrtc::Transport { - public: - LoopbackTransportTest() : total_bytes_sent_(0) { - receivers_extensions_.Register( - kTransmissionTimeOffsetExtensionId); - receivers_extensions_.Register( - kAbsoluteSendTimeExtensionId); - receivers_extensions_.Register( - kTransportSequenceNumberExtensionId); - receivers_extensions_.Register(kVideoRotationExtensionId); - receivers_extensions_.Register(kAudioLevelExtensionId); - receivers_extensions_.Register( - kVideoTimingExtensionId); - receivers_extensions_.Register(kMidExtensionId); - receivers_extensions_.Register( - kGenericDescriptorId); - receivers_extensions_.Register(kRidExtensionId); - receivers_extensions_.Register( - kRepairedRidExtensionId); - } - - bool SendRtp(const uint8_t* data, - size_t len, - const PacketOptions& options) override { - last_options_ = options; - total_bytes_sent_ += len; - sent_packets_.push_back(RtpPacketReceived(&receivers_extensions_)); - EXPECT_TRUE(sent_packets_.back().Parse(data, len)); - return true; - } - bool SendRtcp(const uint8_t* data, size_t len) override { return false; } - const RtpPacketReceived& last_sent_packet() { return sent_packets_.back(); } - int packets_sent() { return sent_packets_.size(); } - - size_t total_bytes_sent_; - PacketOptions last_options_; - std::vector sent_packets_; - - private: - RtpHeaderExtensionMap receivers_extensions_; -}; - -MATCHER_P(SameRtcEventTypeAs, value, "") { - return value == arg->GetType(); -} - -struct TestConfig { - explicit TestConfig(bool with_overhead) : with_overhead(with_overhead) {} - bool with_overhead = false; -}; class MockRtpPacketPacer : public RtpPacketSender { public: @@ -157,133 +101,11 @@ class MockRtpPacketPacer : public RtpPacketSender { (override)); }; -class MockSendSideDelayObserver : public SendSideDelayObserver { - public: - MOCK_METHOD(void, - SendSideDelayUpdated, - (int, int, uint64_t, uint32_t), - (override)); -}; - -class MockSendPacketObserver : public SendPacketObserver { - public: - MOCK_METHOD(void, OnSendPacket, (uint16_t, int64_t, uint32_t), (override)); -}; - -class MockTransportFeedbackObserver : public TransportFeedbackObserver { - public: - MOCK_METHOD(void, OnAddPacket, (const RtpPacketSendInfo&), (override)); - MOCK_METHOD(void, - OnTransportFeedback, - (const rtcp::TransportFeedback&), - (override)); -}; - -class StreamDataTestCallback : public StreamDataCountersCallback { - public: - StreamDataTestCallback() - : StreamDataCountersCallback(), ssrc_(0), counters_() {} - ~StreamDataTestCallback() override = default; - - void DataCountersUpdated(const StreamDataCounters& counters, - uint32_t ssrc) override { - ssrc_ = ssrc; - counters_ = counters; - } - - uint32_t ssrc_; - StreamDataCounters counters_; - - void MatchPacketCounter(const RtpPacketCounter& expected, - const RtpPacketCounter& actual) { - EXPECT_EQ(expected.payload_bytes, actual.payload_bytes); - EXPECT_EQ(expected.header_bytes, actual.header_bytes); - EXPECT_EQ(expected.padding_bytes, actual.padding_bytes); - EXPECT_EQ(expected.packets, actual.packets); - } - - void Matches(uint32_t ssrc, const StreamDataCounters& counters) { - 
EXPECT_EQ(ssrc, ssrc_); - MatchPacketCounter(counters.transmitted, counters_.transmitted); - MatchPacketCounter(counters.retransmitted, counters_.retransmitted); - EXPECT_EQ(counters.fec.packets, counters_.fec.packets); - } -}; - -class TaskQueuePacketSender : public RtpPacketSender { - public: - TaskQueuePacketSender(TimeController* time_controller, - std::unique_ptr packet_sender) - : time_controller_(time_controller), - packet_sender_(std::move(packet_sender)), - queue_(time_controller_->CreateTaskQueueFactory()->CreateTaskQueue( - "PacerQueue", - TaskQueueFactory::Priority::NORMAL)) {} - - void EnqueuePackets( - std::vector> packets) override { - queue_->PostTask(ToQueuedTask([sender = packet_sender_.get(), - packets_ = std::move(packets)]() mutable { - sender->EnqueuePackets(std::move(packets_)); - })); - // Trigger task we just enqueued to be executed by updating the simulated - // time controller. - time_controller_->AdvanceTime(TimeDelta::Zero()); - } - - TaskQueueBase* task_queue() const { return queue_.get(); } - - TimeController* const time_controller_; - std::unique_ptr packet_sender_; - std::unique_ptr queue_; -}; - -// Mimics ModuleRtpRtcp::RtpSenderContext. -// TODO(sprang): Split up unit tests and test these components individually -// wherever possible. -struct RtpSenderContext : public SequenceNumberAssigner { - RtpSenderContext(const RtpRtcpInterface::Configuration& config, - TimeController* time_controller) - : time_controller_(time_controller), - packet_history_(config.clock, config.enable_rtx_padding_prioritization), - packet_sender_(config, &packet_history_), - pacer_(time_controller, - std::make_unique( - &packet_sender_, - this)), - packet_generator_(config, - &packet_history_, - config.paced_sender ? config.paced_sender : &pacer_) { - } - void AssignSequenceNumber(RtpPacketToSend* packet) override { - packet_generator_.AssignSequenceNumber(packet); - } - // Inject packet straight into RtpSenderEgress without passing through the - // pacer, but while still running on the pacer task queue. - void InjectPacket(std::unique_ptr packet, - const PacedPacketInfo& packet_info) { - pacer_.task_queue()->PostTask( - ToQueuedTask([sender_ = &packet_sender_, packet_ = std::move(packet), - packet_info]() mutable { - sender_->SendPacket(packet_.get(), packet_info); - })); - time_controller_->AdvanceTime(TimeDelta::Zero()); - } - TimeController* time_controller_; - RtpPacketHistory packet_history_; - RtpSenderEgress packet_sender_; - TaskQueuePacketSender pacer_; - RTPSender packet_generator_; -}; - class FieldTrialConfig : public WebRtcKeyValueConfig { public: - FieldTrialConfig() - : overhead_enabled_(false), - max_padding_factor_(1200) {} + FieldTrialConfig() : max_padding_factor_(1200) {} ~FieldTrialConfig() override {} - void SetOverHeadEnabled(bool enabled) { overhead_enabled_ = enabled; } void SetMaxPaddingFactor(double factor) { max_padding_factor_ = factor; } std::string Lookup(absl::string_view key) const override { @@ -292,20 +114,17 @@ class FieldTrialConfig : public WebRtcKeyValueConfig { rtc::SimpleStringBuilder ssb(string_buf); ssb << "factor:" << max_padding_factor_; return ssb.str(); - } else if (key == "WebRTC-SendSideBwe-WithOverhead") { - return overhead_enabled_ ? 
"Enabled" : "Disabled"; } return ""; } private: - bool overhead_enabled_; double max_padding_factor_; }; } // namespace -class RtpSenderTest : public ::testing::TestWithParam { +class RtpSenderTest : public ::testing::Test { protected: RtpSenderTest() : time_controller_(Timestamp::Millis(kStartTime)), @@ -320,80 +139,65 @@ class RtpSenderTest : public ::testing::TestWithParam { nullptr, clock_), kMarkerBit(true) { - field_trials_.SetOverHeadEnabled(GetParam().with_overhead); } - void SetUp() override { SetUpRtpSender(true, false, false); } + void SetUp() override { SetUpRtpSender(true, false, nullptr); } - RTPSender* rtp_sender() { - RTC_DCHECK(rtp_sender_context_); - return &rtp_sender_context_->packet_generator_; - } - - RtpSenderEgress* rtp_egress() { - RTC_DCHECK(rtp_sender_context_); - return &rtp_sender_context_->packet_sender_; - } - - void SetUpRtpSender(bool pacer, - bool populate_network2, - bool always_send_mid_and_rid) { - SetUpRtpSender(pacer, populate_network2, always_send_mid_and_rid, - &flexfec_sender_); - } - - void SetUpRtpSender(bool pacer, - bool populate_network2, + void SetUpRtpSender(bool populate_network2, bool always_send_mid_and_rid, VideoFecGenerator* fec_generator) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.fec_generator = fec_generator; + config.populate_network2_timestamp = populate_network2; + config.always_send_mid_and_rid = always_send_mid_and_rid; + CreateSender(config); + } + + RtpRtcpInterface::Configuration GetDefaultConfig() { RtpRtcpInterface::Configuration config; config.clock = clock_; - config.outgoing_transport = &transport_; config.local_media_ssrc = kSsrc; config.rtx_send_ssrc = kRtxSsrc; - config.fec_generator = fec_generator; config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; config.retransmission_rate_limiter = &retransmission_rate_limiter_; - config.paced_sender = pacer ? 
&mock_paced_sender_ : nullptr; - config.populate_network2_timestamp = populate_network2; - config.rtp_stats_callback = &rtp_stats_callback_; - config.always_send_mid_and_rid = always_send_mid_and_rid; + config.paced_sender = &mock_paced_sender_; config.field_trials = &field_trials_; + return config; + } - rtp_sender_context_ = - std::make_unique(config, &time_controller_); - rtp_sender()->SetSequenceNumber(kSeqNum); - rtp_sender()->SetTimestampOffset(0); + void CreateSender(const RtpRtcpInterface::Configuration& config) { + packet_history_ = std::make_unique( + config.clock, config.enable_rtx_padding_prioritization); + rtp_sender_ = std::make_unique(config, packet_history_.get(), + config.paced_sender); + rtp_sender_->SetSequenceNumber(kSeqNum); + rtp_sender_->SetTimestampOffset(0); } GlobalSimulatedTimeController time_controller_; Clock* const clock_; NiceMock mock_rtc_event_log_; MockRtpPacketPacer mock_paced_sender_; - StrictMock send_packet_observer_; - StrictMock feedback_observer_; RateLimiter retransmission_rate_limiter_; FlexfecSender flexfec_sender_; - std::unique_ptr rtp_sender_context_; + std::unique_ptr packet_history_; + std::unique_ptr rtp_sender_; - LoopbackTransportTest transport_; const bool kMarkerBit; FieldTrialConfig field_trials_; - StreamDataTestCallback rtp_stats_callback_; std::unique_ptr BuildRtpPacket(int payload_type, bool marker_bit, uint32_t timestamp, int64_t capture_time_ms) { - auto packet = rtp_sender()->AllocatePacket(); + auto packet = rtp_sender_->AllocatePacket(); packet->SetPayloadType(payload_type); packet->set_packet_type(RtpPacketMediaType::kVideo); packet->SetMarker(marker_bit); packet->SetTimestamp(timestamp); packet->set_capture_time_ms(capture_time_ms); - EXPECT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); + EXPECT_TRUE(rtp_sender_->AssignSequenceNumber(packet.get())); return packet; } @@ -406,8 +210,8 @@ class RtpSenderTest : public ::testing::TestWithParam { packet->set_allow_retransmission(true); // Packet should be stored in a send bucket. - EXPECT_TRUE(rtp_sender()->SendToNetwork( - std::make_unique(*packet))); + EXPECT_TRUE( + rtp_sender_->SendToNetwork(std::make_unique(*packet))); return packet; } @@ -417,16 +221,15 @@ class RtpSenderTest : public ::testing::TestWithParam { // because of lack of capacity for the media packet, or for an rtx packet // containing the media packet. return SendPacket(kCaptureTimeMs, - /*payload_length=*/rtp_sender()->MaxRtpPacketSize() - - rtp_sender()->ExpectedPerPacketOverhead()); + /*payload_length=*/rtp_sender_->MaxRtpPacketSize() - + rtp_sender_->ExpectedPerPacketOverhead()); } size_t GenerateAndSendPadding(size_t target_size_bytes) { size_t generated_bytes = 0; - for (auto& packet : - rtp_sender()->GeneratePadding(target_size_bytes, true)) { + for (auto& packet : rtp_sender_->GeneratePadding(target_size_bytes, true)) { generated_bytes += packet->payload_size() + packet->padding_size(); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); + rtp_sender_->SendToNetwork(std::move(packet)); } return generated_bytes; } @@ -439,64 +242,56 @@ class RtpSenderTest : public ::testing::TestWithParam { // RTX needs to be able to read the source packets from the packet store. // Pick a number of packets to store big enough for any unit test. 
constexpr uint16_t kNumberOfPacketsToStore = 100; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, kNumberOfPacketsToStore); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); + rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload); + rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); } // Enable sending of the MID header extension for both the primary SSRC and // the RTX SSRC. void EnableMidSending(const std::string& mid) { - rtp_sender()->RegisterRtpHeaderExtension(RtpMid::kUri, kMidExtensionId); - rtp_sender()->SetMid(mid); + rtp_sender_->RegisterRtpHeaderExtension(RtpMid::kUri, kMidExtensionId); + rtp_sender_->SetMid(mid); } // Enable sending of the RSID header extension for the primary SSRC and the // RRSID header extension for the RTX SSRC. void EnableRidSending(const std::string& rid) { - rtp_sender()->RegisterRtpHeaderExtension(RtpStreamId::kUri, - kRidExtensionId); - rtp_sender()->RegisterRtpHeaderExtension(RepairedRtpStreamId::kUri, - kRepairedRidExtensionId); - rtp_sender()->SetRid(rid); + rtp_sender_->RegisterRtpHeaderExtension(RtpStreamId::kUri, kRidExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(RepairedRtpStreamId::kUri, + kRepairedRidExtensionId); + rtp_sender_->SetRid(rid); } }; -// TODO(pbos): Move tests over from WithoutPacer to RtpSenderTest as this is our -// default code path. -class RtpSenderTestWithoutPacer : public RtpSenderTest { - public: - void SetUp() override { SetUpRtpSender(false, false, false); } -}; - -TEST_P(RtpSenderTestWithoutPacer, AllocatePacketSetCsrc) { +TEST_F(RtpSenderTest, AllocatePacketSetCsrc) { // Configure rtp_sender with csrc. std::vector csrcs; csrcs.push_back(0x23456789); - rtp_sender()->SetCsrcs(csrcs); + rtp_sender_->SetCsrcs(csrcs); - auto packet = rtp_sender()->AllocatePacket(); + auto packet = rtp_sender_->AllocatePacket(); ASSERT_TRUE(packet); - EXPECT_EQ(rtp_sender()->SSRC(), packet->Ssrc()); + EXPECT_EQ(rtp_sender_->SSRC(), packet->Ssrc()); EXPECT_EQ(csrcs, packet->Csrcs()); } -TEST_P(RtpSenderTestWithoutPacer, AllocatePacketReserveExtensions) { +TEST_F(RtpSenderTest, AllocatePacketReserveExtensions) { // Configure rtp_sender with extensions. - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension(AudioLevel::kUri, - kAudioLevelExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(AudioLevel::kUri, + kAudioLevelExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( VideoOrientation::kUri, kVideoRotationExtensionId)); - auto packet = rtp_sender()->AllocatePacket(); + auto packet = rtp_sender_->AllocatePacket(); ASSERT_TRUE(packet); // Preallocate BWE extensions RtpSender set itself. 
@@ -508,76 +303,74 @@ TEST_P(RtpSenderTestWithoutPacer, AllocatePacketReserveExtensions) { EXPECT_FALSE(packet->HasExtension()); } -TEST_P(RtpSenderTest, PaddingAlwaysAllowedOnAudio) { - MockTransport transport; - RtpRtcpInterface::Configuration config; +TEST_F(RtpSenderTest, PaddingAlwaysAllowedOnAudio) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); config.audio = true; - config.clock = clock_; - config.outgoing_transport = &transport; - config.paced_sender = &mock_paced_sender_; - config.local_media_ssrc = kSsrc; - config.event_log = &mock_rtc_event_log_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); - - rtp_sender()->SetTimestampOffset(0); - - std::unique_ptr audio_packet = - rtp_sender()->AllocatePacket(); + CreateSender(config); + + std::unique_ptr audio_packet = rtp_sender_->AllocatePacket(); // Padding on audio stream allowed regardless of marker in the last packet. audio_packet->SetMarker(false); audio_packet->SetPayloadType(kPayload); - rtp_sender()->AssignSequenceNumber(audio_packet.get()); + rtp_sender_->AssignSequenceNumber(audio_packet.get()); const size_t kPaddingSize = 59; - EXPECT_CALL(transport, SendRtp(_, kPaddingSize + kRtpHeaderSize, _)) - .WillOnce(Return(true)); + + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kPadding)), + Pointee(Property(&RtpPacketToSend::padding_size, kPaddingSize)))))); EXPECT_EQ(kPaddingSize, GenerateAndSendPadding(kPaddingSize)); // Requested padding size is too small, will send a larger one. const size_t kMinPaddingSize = 50; - EXPECT_CALL(transport, SendRtp(_, kMinPaddingSize + kRtpHeaderSize, _)) - .WillOnce(Return(true)); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre( + AllOf(Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kPadding)), + Pointee(Property(&RtpPacketToSend::padding_size, + kMinPaddingSize)))))); EXPECT_EQ(kMinPaddingSize, GenerateAndSendPadding(kMinPaddingSize - 5)); } -TEST_P(RtpSenderTest, SendToNetworkForwardsPacketsToPacer) { +TEST_F(RtpSenderTest, SendToNetworkForwardsPacketsToPacer) { auto packet = BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, 0); int64_t now_ms = clock_->TimeInMilliseconds(); EXPECT_CALL( mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)), Pointee(Property(&RtpPacketToSend::capture_time_ms, now_ms)))))); EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); + rtp_sender_->SendToNetwork(std::make_unique(*packet))); } -TEST_P(RtpSenderTest, ReSendPacketForwardsPacketsToPacer) { - rtp_sender_context_->packet_history_.SetStorePacketsStatus( +TEST_F(RtpSenderTest, ReSendPacketForwardsPacketsToPacer) { + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 10); int64_t now_ms = clock_->TimeInMilliseconds(); auto packet = BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, now_ms); uint16_t seq_no = packet->SequenceNumber(); packet->set_allow_retransmission(true); - rtp_sender_context_->packet_history_.PutRtpPacket(std::move(packet), now_ms); + packet_history_->PutRtpPacket(std::move(packet), now_ms); EXPECT_CALL(mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), 
Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)), Pointee(Property(&RtpPacketToSend::capture_time_ms, now_ms)), Pointee(Property(&RtpPacketToSend::packet_type, RtpPacketMediaType::kRetransmission)))))); - EXPECT_TRUE(rtp_sender()->ReSendPacket(seq_no)); + EXPECT_TRUE(rtp_sender_->ReSendPacket(seq_no)); } // This test sends 1 regular video packet, then 4 padding packets, and then // 1 more regular packet. -TEST_P(RtpSenderTest, SendPadding) { +TEST_F(RtpSenderTest, SendPadding) { constexpr int kNumPaddingPackets = 4; EXPECT_CALL(mock_paced_sender_, EnqueuePackets); std::unique_ptr media_packet = @@ -593,7 +386,7 @@ TEST_P(RtpSenderTest, SendPadding) { // number range. Size will be forced to full pack size and the timestamp // shall be that of the last media packet. EXPECT_CALL(mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), Pointee(Property(&RtpPacketToSend::SequenceNumber, media_packet->SequenceNumber() + i + 1)), @@ -602,15 +395,15 @@ TEST_P(RtpSenderTest, SendPadding) { Pointee(Property(&RtpPacketToSend::Timestamp, media_packet->Timestamp())))))); std::vector> padding_packets = - rtp_sender()->GeneratePadding(kPaddingTargetBytes, - /*media_has_been_sent=*/true); + rtp_sender_->GeneratePadding(kPaddingTargetBytes, + /*media_has_been_sent=*/true); ASSERT_THAT(padding_packets, SizeIs(1)); - rtp_sender()->SendToNetwork(std::move(padding_packets[0])); + rtp_sender_->SendToNetwork(std::move(padding_packets[0])); } // Send a regular video packet again. EXPECT_CALL(mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property( &RtpPacketToSend::SequenceNumber, media_packet->SequenceNumber() + kNumPaddingPackets + 1)), @@ -622,53 +415,53 @@ TEST_P(RtpSenderTest, SendPadding) { /*payload_size=*/100); } -TEST_P(RtpSenderTest, NoPaddingAsFirstPacketWithoutBweExtensions) { - EXPECT_THAT(rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/false), +TEST_F(RtpSenderTest, NoPaddingAsFirstPacketWithoutBweExtensions) { + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), IsEmpty()); // Don't send padding before media even with RTX. EnableRtx(); - EXPECT_THAT(rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/false), + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), IsEmpty()); } -TEST_P(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithTransportCc) { - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( +TEST_F(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithTransportCc) { + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); // Padding can't be sent as first packet on media SSRC since we don't know // what payload type to assign. - EXPECT_THAT(rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/false), + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), IsEmpty()); // With transportcc padding can be sent as first packet on the RTX SSRC. 
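// (Payload-less RTX padding still carries the transport sequence number extension registered at the top of this test, so it remains useful for bandwidth estimation, which is why GeneratePadding() is allowed to produce packets here.)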
EnableRtx(); - EXPECT_THAT(rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/false), + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), Not(IsEmpty())); } -TEST_P(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithAbsSendTime) { - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( +TEST_F(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithAbsSendTime) { + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); // Padding can't be sent as first packet on media SSRC since we don't know // what payload type to assign. - EXPECT_THAT(rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/false), + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), IsEmpty()); // With abs send time, padding can be sent as first packet on the RTX SSRC. EnableRtx(); - EXPECT_THAT(rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/false), + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), Not(IsEmpty())); } -TEST_P(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) { +TEST_F(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) { EnableRtx(); // Timestamps as set based on capture time in RtpSenderTest. const int64_t start_time = clock_->TimeInMilliseconds(); @@ -677,7 +470,7 @@ TEST_P(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) { // Start by sending one media packet. EXPECT_CALL( mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property(&RtpPacketToSend::padding_size, 0u)), Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)), Pointee(Property(&RtpPacketToSend::capture_time_ms, start_time)))))); @@ -690,8 +483,8 @@ TEST_P(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) { // Timestamps on padding should be offset from the sent media. EXPECT_THAT( - rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/true), + rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/true), Each(AllOf( Pointee(Property(&RtpPacketToSend::padding_size, kMaxPaddingLength)), Pointee(Property( @@ -701,8 +494,8 @@ TEST_P(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) { start_time + kTimeDiff.ms()))))); } -TEST_P(RtpSenderTest, KeepsTimestampsOnPayloadPadding) { - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( +TEST_F(RtpSenderTest, KeepsTimestampsOnPayloadPadding) { + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); EnableRtx(); // Timestamps as set based on capture time in RtpSenderTest. @@ -714,14 +507,13 @@ TEST_P(RtpSenderTest, KeepsTimestampsOnPayloadPadding) { // Start by sending one media packet and putting in the packet history. EXPECT_CALL( mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property(&RtpPacketToSend::padding_size, 0u)), Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)), Pointee(Property(&RtpPacketToSend::capture_time_ms, start_time)))))); std::unique_ptr media_packet = SendPacket(start_time, kPayloadSize); - rtp_sender_context_->packet_history_.PutRtpPacket(std::move(media_packet), - start_time); + packet_history_->PutRtpPacket(std::move(media_packet), start_time); // Advance time before sending padding. 
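// (Payload padding re-sends a stored media packet, so unlike the plain RTX padding case above, the RTP timestamp and capture time must stay those of the original packet even though the send time has advanced.)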
const TimeDelta kTimeDiff = TimeDelta::Millis(17); @@ -729,8 +521,8 @@ TEST_P(RtpSenderTest, KeepsTimestampsOnPayloadPadding) { // Timestamps on payload padding should be set to original. EXPECT_THAT( - rtp_sender()->GeneratePadding(/*target_size_bytes=*/100, - /*media_has_been_sent=*/true), + rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/true), Each(AllOf( Pointee(Property(&RtpPacketToSend::padding_size, 0u)), Pointee(Property(&RtpPacketToSend::payload_size, @@ -739,225 +531,56 @@ TEST_P(RtpSenderTest, KeepsTimestampsOnPayloadPadding) { Pointee(Property(&RtpPacketToSend::capture_time_ms, start_time))))); } -TEST_P(RtpSenderTest, SendFlexfecPackets) { - constexpr uint32_t kTimestamp = 1234; - constexpr int kMediaPayloadType = 127; - constexpr VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - constexpr int kFlexfecPayloadType = 118; - const std::vector kNoRtpExtensions; - const std::vector kNoRtpExtensionSizes; - FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, kNoMid, - kNoRtpExtensions, kNoRtpExtensionSizes, - nullptr /* rtp_state */, clock_); - - // Reset |rtp_sender_| to use FlexFEC. - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.paced_sender = &mock_paced_sender_; - config.local_media_ssrc = kSsrc; - config.fec_generator = &flexfec_sender_; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - config.field_trials = &field_trials_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); - - rtp_sender()->SetSequenceNumber(kSeqNum); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender.MaxPacketOverhead(); - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender.MaxPacketOverhead(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - - // Parameters selected to generate a single FEC packet per media packet. - FecProtectionParams params; - params.fec_rate = 15; - params.max_fec_frames = 1; - params.fec_mask_type = kFecMaskRandom; - flexfec_sender.SetProtectionParameters(params, params); - - uint16_t flexfec_seq_num; - RTPVideoHeader video_header; - - std::unique_ptr media_packet; - std::unique_ptr fec_packet; - - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - for (auto& packet : packets) { - if (packet->packet_type() == RtpPacketMediaType::kVideo) { - EXPECT_EQ(packet->Ssrc(), kSsrc); - EXPECT_EQ(packet->SequenceNumber(), kSeqNum); - media_packet = std::move(packet); - - // Simulate RtpSenderEgress adding packet to fec generator. 
- flexfec_sender.AddPacketAndGenerateFec(*media_packet); - auto fec_packets = flexfec_sender.GetFecPackets(); - EXPECT_EQ(fec_packets.size(), 1u); - fec_packet = std::move(fec_packets[0]); - EXPECT_EQ(fec_packet->packet_type(), - RtpPacketMediaType::kForwardErrorCorrection); - EXPECT_EQ(fec_packet->Ssrc(), kFlexFecSsrc); - } else { - EXPECT_EQ(packet->packet_type(), - RtpPacketMediaType::kForwardErrorCorrection); - fec_packet = std::move(packet); - EXPECT_EQ(fec_packet->Ssrc(), kFlexFecSsrc); - } - } - }); - - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kMediaPayloadType, kCodecType, kTimestamp, clock_->TimeInMilliseconds(), - kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs)); - ASSERT_TRUE(media_packet != nullptr); - ASSERT_TRUE(fec_packet != nullptr); - - flexfec_seq_num = fec_packet->SequenceNumber(); - rtp_sender_context_->InjectPacket(std::move(media_packet), PacedPacketInfo()); - rtp_sender_context_->InjectPacket(std::move(fec_packet), PacedPacketInfo()); - - ASSERT_EQ(2, transport_.packets_sent()); - const RtpPacketReceived& sent_media_packet = transport_.sent_packets_[0]; - EXPECT_EQ(kMediaPayloadType, sent_media_packet.PayloadType()); - EXPECT_EQ(kSeqNum, sent_media_packet.SequenceNumber()); - EXPECT_EQ(kSsrc, sent_media_packet.Ssrc()); - const RtpPacketReceived& sent_flexfec_packet = transport_.sent_packets_[1]; - EXPECT_EQ(kFlexfecPayloadType, sent_flexfec_packet.PayloadType()); - EXPECT_EQ(flexfec_seq_num, sent_flexfec_packet.SequenceNumber()); - EXPECT_EQ(kFlexFecSsrc, sent_flexfec_packet.Ssrc()); -} - -TEST_P(RtpSenderTestWithoutPacer, SendFlexfecPackets) { - constexpr uint32_t kTimestamp = 1234; - constexpr int kMediaPayloadType = 127; - constexpr VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - constexpr int kFlexfecPayloadType = 118; - const std::vector kNoRtpExtensions; - const std::vector kNoRtpExtensionSizes; - FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, kNoMid, - kNoRtpExtensions, kNoRtpExtensionSizes, - nullptr /* rtp_state */, clock_); - - // Reset |rtp_sender_| to use FlexFEC. - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.fec_generator = &flexfec_sender; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - config.field_trials = &field_trials_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); - - rtp_sender()->SetSequenceNumber(kSeqNum); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender_.MaxPacketOverhead(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - - // Parameters selected to generate a single FEC packet per media packet. 
- FecProtectionParams params; - params.fec_rate = 15; - params.max_fec_frames = 1; - params.fec_mask_type = kFecMaskRandom; - rtp_egress()->SetFecProtectionParameters(params, params); - - EXPECT_CALL(mock_rtc_event_log_, - LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing))) - .Times(2); - RTPVideoHeader video_header; - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kMediaPayloadType, kCodecType, kTimestamp, clock_->TimeInMilliseconds(), - kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs)); - - ASSERT_EQ(2, transport_.packets_sent()); - const RtpPacketReceived& media_packet = transport_.sent_packets_[0]; - EXPECT_EQ(kMediaPayloadType, media_packet.PayloadType()); - EXPECT_EQ(kSsrc, media_packet.Ssrc()); - const RtpPacketReceived& flexfec_packet = transport_.sent_packets_[1]; - EXPECT_EQ(kFlexfecPayloadType, flexfec_packet.PayloadType()); - EXPECT_EQ(kFlexFecSsrc, flexfec_packet.Ssrc()); -} - // Test that the MID header extension is included on sent packets when // configured. -TEST_P(RtpSenderTestWithoutPacer, MidIncludedOnSentPackets) { +TEST_F(RtpSenderTest, MidIncludedOnSentPackets) { const char kMid[] = "mid"; - EnableMidSending(kMid); - // Send a couple packets. + // Send a couple packets, expect both packets to have the MID set. + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + Property(&RtpPacketToSend::GetExtension, kMid))))) + .Times(2); SendGenericPacket(); SendGenericPacket(); - - // Expect both packets to have the MID set. - ASSERT_EQ(2u, transport_.sent_packets_.size()); - for (const RtpPacketReceived& packet : transport_.sent_packets_) { - std::string mid; - ASSERT_TRUE(packet.GetExtension(&mid)); - EXPECT_EQ(kMid, mid); - } } -TEST_P(RtpSenderTestWithoutPacer, RidIncludedOnSentPackets) { +TEST_F(RtpSenderTest, RidIncludedOnSentPackets) { const char kRid[] = "f"; - EnableRidSending(kRid); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(Property( + &RtpPacketToSend::GetExtension, kRid))))); SendGenericPacket(); - - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const RtpPacketReceived& packet = transport_.sent_packets_[0]; - std::string rid; - ASSERT_TRUE(packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); } -TEST_P(RtpSenderTestWithoutPacer, RidIncludedOnRtxSentPackets) { +TEST_F(RtpSenderTest, RidIncludedOnRtxSentPackets) { const char kRid[] = "f"; - EnableRtx(); EnableRidSending(kRid); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kRid), + Property(&RtpPacketToSend::HasExtension, + false)))))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); SendGenericPacket(); - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const RtpPacketReceived& packet = transport_.sent_packets_[0]; - std::string rid; - ASSERT_TRUE(packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); - rid = kNoRid; - EXPECT_FALSE(packet.HasExtension()); - - uint16_t packet_id = packet.SequenceNumber(); - rtp_sender()->ReSendPacket(packet_id); - ASSERT_EQ(2u, transport_.sent_packets_.size()); - const RtpPacketReceived& rtx_packet = transport_.sent_packets_[1]; - ASSERT_TRUE(rtx_packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); - EXPECT_FALSE(rtx_packet.HasExtension()); + + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kRid), + 
Property(&RtpPacketToSend::HasExtension, false)))))); + rtp_sender_->ReSendPacket(kSeqNum); } -TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnSentPacketsAfterAck) { +TEST_F(RtpSenderTest, MidAndRidNotIncludedOnSentPacketsAfterAck) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -965,53 +588,48 @@ TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnSentPacketsAfterAck) { EnableRidSending(kRid); // This first packet should include both MID and RID. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, kRid)))))); auto first_built_packet = SendGenericPacket(); - - rtp_sender()->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); // The second packet should include neither since an ack was received. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, false)))))); SendGenericPacket(); - - ASSERT_EQ(2u, transport_.sent_packets_.size()); - - const RtpPacketReceived& first_packet = transport_.sent_packets_[0]; - std::string mid, rid; - ASSERT_TRUE(first_packet.GetExtension(&mid)); - EXPECT_EQ(kMid, mid); - ASSERT_TRUE(first_packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); - - const RtpPacketReceived& second_packet = transport_.sent_packets_[1]; - EXPECT_FALSE(second_packet.HasExtension()); - EXPECT_FALSE(second_packet.HasExtension()); } -TEST_P(RtpSenderTestWithoutPacer, - MidAndRidAlwaysIncludedOnSentPacketsWhenConfigured) { - SetUpRtpSender(false, false, /*always_send_mid_and_rid=*/true); +TEST_F(RtpSenderTest, MidAndRidAlwaysIncludedOnSentPacketsWhenConfigured) { + SetUpRtpSender(false, /*always_send_mid_and_rid=*/true, nullptr); const char kMid[] = "mid"; const char kRid[] = "f"; EnableMidSending(kMid); EnableRidSending(kRid); // Send two media packets: one before and one after the ack. - auto first_packet = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(first_packet->SequenceNumber()); - SendGenericPacket(); - // Due to the configuration, both sent packets should contain MID and RID. - ASSERT_EQ(2u, transport_.sent_packets_.size()); - for (const RtpPacketReceived& packet : transport_.sent_packets_) { - EXPECT_EQ(packet.GetExtension(), kMid); - EXPECT_EQ(packet.GetExtension(), kRid); - } + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + AllOf(Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, kRid)))))) + .Times(2); + auto first_built_packet = SendGenericPacket(); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + SendGenericPacket(); } // Test that the first RTX packet includes both MID and RRID even if the packet // being retransmitted did not have MID or RID. The MID and RID are needed on // the first packets for a given SSRC, and RTX packets are sent on a separate // SSRC. -TEST_P(RtpSenderTestWithoutPacer, MidAndRidIncludedOnFirstRtxPacket) { +TEST_F(RtpSenderTest, MidAndRidIncludedOnFirstRtxPacket) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1020,30 +638,32 @@ TEST_P(RtpSenderTestWithoutPacer, MidAndRidIncludedOnFirstRtxPacket) { EnableRidSending(kRid); // This first packet will include both MID and RID. 
+ EXPECT_CALL(mock_paced_sender_, EnqueuePackets); auto first_built_packet = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); - // The second packet will include neither since an ack was received. + // The second packet will include neither since an ack was received, put + // it in the packet history for retransmission. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto second_built_packet = SendGenericPacket(); // The first RTX packet should include MID and RRID. - ASSERT_LT(0, - rtp_sender()->ReSendPacket(second_built_packet->SequenceNumber())); - - ASSERT_EQ(3u, transport_.sent_packets_.size()); - - const RtpPacketReceived& rtx_packet = transport_.sent_packets_[2]; - std::string mid, rrid; - ASSERT_TRUE(rtx_packet.GetExtension(&mid)); - EXPECT_EQ(kMid, mid); - ASSERT_TRUE(rtx_packet.GetExtension(&rrid)); - EXPECT_EQ(kRid, rrid); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, + kRid)))))); + rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber()); } // Test that the RTX packets sent after receving an ACK on the RTX SSRC does // not include either MID or RRID even if the packet being retransmitted did // had a MID or RID. -TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnRtxPacketsAfterAck) { +TEST_F(RtpSenderTest, MidAndRidNotIncludedOnRtxPacketsAfterAck) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1052,41 +672,44 @@ TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnRtxPacketsAfterAck) { EnableRidSending(kRid); // This first packet will include both MID and RID. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto first_built_packet = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); // The second packet will include neither since an ack was received. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto second_built_packet = SendGenericPacket(); // The first RTX packet will include MID and RRID. - ASSERT_LT(0, - rtp_sender()->ReSendPacket(second_built_packet->SequenceNumber())); - - ASSERT_EQ(3u, transport_.sent_packets_.size()); - const RtpPacketReceived& first_rtx_packet = transport_.sent_packets_[2]; - - rtp_sender()->OnReceivedAckOnRtxSsrc(first_rtx_packet.SequenceNumber()); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + rtp_sender_->OnReceivedAckOnRtxSsrc(packets[0]->SequenceNumber()); + packet_history_->MarkPacketAsSent( + *packets[0]->retransmitted_sequence_number()); + }); + rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber()); // The second and third RTX packets should not include MID nor RRID. 
- ASSERT_LT(0, - rtp_sender()->ReSendPacket(first_built_packet->SequenceNumber())); - ASSERT_LT(0, - rtp_sender()->ReSendPacket(second_built_packet->SequenceNumber())); - - ASSERT_EQ(5u, transport_.sent_packets_.size()); - - const RtpPacketReceived& second_rtx_packet = transport_.sent_packets_[3]; - EXPECT_FALSE(second_rtx_packet.HasExtension()); - EXPECT_FALSE(second_rtx_packet.HasExtension()); - - const RtpPacketReceived& third_rtx_packet = transport_.sent_packets_[4]; - EXPECT_FALSE(third_rtx_packet.HasExtension()); - EXPECT_FALSE(third_rtx_packet.HasExtension()); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, + false)))))) + .Times(2); + rtp_sender_->ReSendPacket(first_built_packet->SequenceNumber()); + rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber()); } -TEST_P(RtpSenderTestWithoutPacer, - MidAndRidAlwaysIncludedOnRtxPacketsWhenConfigured) { - SetUpRtpSender(false, false, /*always_send_mid_and_rid=*/true); +TEST_F(RtpSenderTest, MidAndRidAlwaysIncludedOnRtxPacketsWhenConfigured) { + SetUpRtpSender(false, /*always_send_mid_and_rid=*/true, nullptr); const char kMid[] = "mid"; const char kRid[] = "f"; EnableRtx(); @@ -1094,63 +717,68 @@ TEST_P(RtpSenderTestWithoutPacer, EnableRidSending(kRid); // Send two media packets: one before and one after the ack. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + AllOf(Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, kRid)))))) + .Times(2) + .WillRepeatedly( + [&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto media_packet1 = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(media_packet1->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(media_packet1->SequenceNumber()); auto media_packet2 = SendGenericPacket(); // Send three RTX packets with different combinations of orders w.r.t. the // media and RTX acks. - ASSERT_LT(0, rtp_sender()->ReSendPacket(media_packet2->SequenceNumber())); - ASSERT_EQ(3u, transport_.sent_packets_.size()); - rtp_sender()->OnReceivedAckOnRtxSsrc( - transport_.sent_packets_[2].SequenceNumber()); - ASSERT_LT(0, rtp_sender()->ReSendPacket(media_packet1->SequenceNumber())); - ASSERT_LT(0, rtp_sender()->ReSendPacket(media_packet2->SequenceNumber())); - // Due to the configuration, all sent packets should contain MID // and either RID (media) or RRID (RTX). 
- ASSERT_EQ(5u, transport_.sent_packets_.size()); - for (const auto& packet : transport_.sent_packets_) { - EXPECT_EQ(packet.GetExtension(), kMid); - } - for (size_t i = 0; i < 2; ++i) { - const RtpPacketReceived& packet = transport_.sent_packets_[i]; - EXPECT_EQ(packet.GetExtension(), kRid); - } - for (size_t i = 2; i < transport_.sent_packets_.size(); ++i) { - const RtpPacketReceived& packet = transport_.sent_packets_[i]; - EXPECT_EQ(packet.GetExtension(), kRid); - } + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, + kRid)))))) + .Times(3) + .WillRepeatedly( + [&](std::vector> packets) { + rtp_sender_->OnReceivedAckOnRtxSsrc(packets[0]->SequenceNumber()); + packet_history_->MarkPacketAsSent( + *packets[0]->retransmitted_sequence_number()); + }); + rtp_sender_->ReSendPacket(media_packet2->SequenceNumber()); + rtp_sender_->ReSendPacket(media_packet1->SequenceNumber()); + rtp_sender_->ReSendPacket(media_packet2->SequenceNumber()); } // Test that if the RtpState indicates an ACK has been received on that SSRC // then neither the MID nor RID header extensions will be sent. -TEST_P(RtpSenderTestWithoutPacer, - MidAndRidNotIncludedOnSentPacketsAfterRtpStateRestored) { +TEST_F(RtpSenderTest, MidAndRidNotIncludedOnSentPacketsAfterRtpStateRestored) { const char kMid[] = "mid"; const char kRid[] = "f"; EnableMidSending(kMid); EnableRidSending(kRid); - RtpState state = rtp_sender()->GetRtpState(); + RtpState state = rtp_sender_->GetRtpState(); EXPECT_FALSE(state.ssrc_has_acked); state.ssrc_has_acked = true; - rtp_sender()->SetRtpState(state); + rtp_sender_->SetRtpState(state); + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, false)))))); SendGenericPacket(); - - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const RtpPacketReceived& packet = transport_.sent_packets_[0]; - EXPECT_FALSE(packet.HasExtension()); - EXPECT_FALSE(packet.HasExtension()); } // Test that if the RTX RtpState indicates an ACK has been received on that // RTX SSRC then neither the MID nor RRID header extensions will be sent on // RTX packets. 
-TEST_P(RtpSenderTestWithoutPacer, - MidAndRridNotIncludedOnRtxPacketsAfterRtpStateRestored) { +TEST_F(RtpSenderTest, MidAndRridNotIncludedOnRtxPacketsAfterRtpStateRestored) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1158,756 +786,255 @@ TEST_P(RtpSenderTestWithoutPacer, EnableMidSending(kMid); EnableRidSending(kRid); - RtpState rtx_state = rtp_sender()->GetRtxRtpState(); + RtpState rtx_state = rtp_sender_->GetRtxRtpState(); EXPECT_FALSE(rtx_state.ssrc_has_acked); rtx_state.ssrc_has_acked = true; - rtp_sender()->SetRtxRtpState(rtx_state); + rtp_sender_->SetRtxRtpState(rtx_state); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto built_packet = SendGenericPacket(); - ASSERT_LT(0, rtp_sender()->ReSendPacket(built_packet->SequenceNumber())); - - ASSERT_EQ(2u, transport_.sent_packets_.size()); - const RtpPacketReceived& rtx_packet = transport_.sent_packets_[1]; - EXPECT_FALSE(rtx_packet.HasExtension()); - EXPECT_FALSE(rtx_packet.HasExtension()); -} - -TEST_P(RtpSenderTest, FecOverheadRate) { - constexpr uint32_t kTimestamp = 1234; - constexpr int kMediaPayloadType = 127; - constexpr VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - constexpr int kFlexfecPayloadType = 118; - const std::vector kNoRtpExtensions; - const std::vector kNoRtpExtensionSizes; - FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, kNoMid, - kNoRtpExtensions, kNoRtpExtensionSizes, - nullptr /* rtp_state */, clock_); - - // Reset |rtp_sender_| to use this FlexFEC instance. - SetUpRtpSender(false, false, false, &flexfec_sender); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender.MaxPacketOverhead(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - // Parameters selected to generate a single FEC packet per media packet. 
- FecProtectionParams params; - params.fec_rate = 15; - params.max_fec_frames = 1; - params.fec_mask_type = kFecMaskRandom; - rtp_egress()->SetFecProtectionParameters(params, params); - - constexpr size_t kNumMediaPackets = 10; - constexpr size_t kNumFecPackets = kNumMediaPackets; - constexpr int64_t kTimeBetweenPacketsMs = 10; - for (size_t i = 0; i < kNumMediaPackets; ++i) { - RTPVideoHeader video_header; - - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kMediaPayloadType, kCodecType, kTimestamp, clock_->TimeInMilliseconds(), - kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs)); - - time_controller_.AdvanceTime(TimeDelta::Millis(kTimeBetweenPacketsMs)); - } - constexpr size_t kRtpHeaderLength = 12; - constexpr size_t kFlexfecHeaderLength = 20; - constexpr size_t kGenericCodecHeaderLength = 1; - constexpr size_t kPayloadLength = sizeof(kPayloadData); - constexpr size_t kPacketLength = kRtpHeaderLength + kFlexfecHeaderLength + - kGenericCodecHeaderLength + kPayloadLength; - - EXPECT_NEAR( - kNumFecPackets * kPacketLength * 8 / - (kNumFecPackets * kTimeBetweenPacketsMs / 1000.0f), - rtp_egress() - ->GetSendRates()[RtpPacketMediaType::kForwardErrorCorrection] - .bps(), - 500); -} - -TEST_P(RtpSenderTest, BitrateCallbacks) { - class TestCallback : public BitrateStatisticsObserver { - public: - TestCallback() - : BitrateStatisticsObserver(), - num_calls_(0), - ssrc_(0), - total_bitrate_(0), - retransmit_bitrate_(0) {} - ~TestCallback() override = default; - - void Notify(uint32_t total_bitrate, - uint32_t retransmit_bitrate, - uint32_t ssrc) override { - ++num_calls_; - ssrc_ = ssrc; - total_bitrate_ = total_bitrate; - retransmit_bitrate_ = retransmit_bitrate; - } - - uint32_t num_calls_; - uint32_t ssrc_; - uint32_t total_bitrate_; - uint32_t retransmit_bitrate_; - } callback; - - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.send_bitrate_observer = &callback; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - const uint8_t kPayloadType = 127; - - // Simulate kNumPackets sent with kPacketInterval ms intervals, with the - // number of packets selected so that we fill (but don't overflow) the one - // second averaging window. - const uint32_t kWindowSizeMs = 1000; - const uint32_t kPacketInterval = 20; - const uint32_t kNumPackets = - (kWindowSizeMs - kPacketInterval) / kPacketInterval; - // Overhead = 12 bytes RTP header + 1 byte generic header. - const uint32_t kPacketOverhead = 13; - - uint8_t payload[] = {47, 11, 32, 93, 89}; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - uint32_t ssrc = rtp_sender()->SSRC(); - // Send a few frames. 
- RTPVideoHeader video_header; - for (uint32_t i = 0; i < kNumPackets; ++i) { - video_header.frame_type = VideoFrameType::kVideoFrameKey; - ASSERT_TRUE(rtp_sender_video.SendVideo( - kPayloadType, kCodecType, 1234, 4321, payload, video_header, - kDefaultExpectedRetransmissionTimeMs)); - time_controller_.AdvanceTime(TimeDelta::Millis(kPacketInterval)); - } - - // We get one call for every stats updated, thus two calls since both the - // stream stats and the retransmit stats are updated once. - EXPECT_EQ(kNumPackets, callback.num_calls_); - EXPECT_EQ(ssrc, callback.ssrc_); - const uint32_t kTotalPacketSize = kPacketOverhead + sizeof(payload); - // Bitrate measured over delta between last and first timestamp, plus one. - const uint32_t kExpectedWindowMs = (kNumPackets - 1) * kPacketInterval + 1; - const uint32_t kExpectedBitsAccumulated = kTotalPacketSize * kNumPackets * 8; - const uint32_t kExpectedRateBps = - (kExpectedBitsAccumulated * 1000 + (kExpectedWindowMs / 2)) / - kExpectedWindowMs; - EXPECT_EQ(kExpectedRateBps, callback.total_bitrate_); -} - -TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) { - const uint8_t kPayloadType = 127; - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - uint8_t payload[] = {47, 11, 32, 93, 89}; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - uint32_t ssrc = rtp_sender()->SSRC(); - - // Send a frame. - RTPVideoHeader video_header; - video_header.frame_type = VideoFrameType::kVideoFrameKey; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321, - payload, video_header, - kDefaultExpectedRetransmissionTimeMs)); - StreamDataCounters expected; - expected.transmitted.payload_bytes = 6; - expected.transmitted.header_bytes = 12; - expected.transmitted.padding_bytes = 0; - expected.transmitted.packets = 1; - expected.retransmitted.payload_bytes = 0; - expected.retransmitted.header_bytes = 0; - expected.retransmitted.padding_bytes = 0; - expected.retransmitted.packets = 0; - expected.fec.packets = 0; - rtp_stats_callback_.Matches(ssrc, expected); - - // Retransmit a frame. - uint16_t seqno = rtp_sender()->SequenceNumber() - 1; - rtp_sender()->ReSendPacket(seqno); - expected.transmitted.payload_bytes = 12; - expected.transmitted.header_bytes = 24; - expected.transmitted.packets = 2; - expected.retransmitted.payload_bytes = 6; - expected.retransmitted.header_bytes = 12; - expected.retransmitted.padding_bytes = 0; - expected.retransmitted.packets = 1; - rtp_stats_callback_.Matches(ssrc, expected); - - // Send padding. 
- GenerateAndSendPadding(kMaxPaddingSize); - expected.transmitted.payload_bytes = 12; - expected.transmitted.header_bytes = 36; - expected.transmitted.padding_bytes = kMaxPaddingSize; - expected.transmitted.packets = 3; - rtp_stats_callback_.Matches(ssrc, expected); -} - -TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacksUlpfec) { - const uint8_t kRedPayloadType = 96; - const uint8_t kUlpfecPayloadType = 97; - const uint8_t kPayloadType = 127; - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - - UlpfecGenerator ulpfec_generator(kRedPayloadType, kUlpfecPayloadType, clock_); - SetUpRtpSender(false, false, false, &ulpfec_generator); - RTPSenderVideo::Config video_config; - video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials_; - video_config.red_payload_type = kRedPayloadType; - video_config.fec_type = ulpfec_generator.GetFecType(); - video_config.fec_overhead_bytes = ulpfec_generator.MaxPacketOverhead(); - RTPSenderVideo rtp_sender_video(video_config); - uint8_t payload[] = {47, 11, 32, 93, 89}; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - uint32_t ssrc = rtp_sender()->SSRC(); - - RTPVideoHeader video_header; - StreamDataCounters expected; - - // Send ULPFEC. - FecProtectionParams fec_params; - fec_params.fec_mask_type = kFecMaskRandom; - fec_params.fec_rate = 1; - fec_params.max_fec_frames = 1; - rtp_egress()->SetFecProtectionParameters(fec_params, fec_params); - video_header.frame_type = VideoFrameType::kVideoFrameDelta; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321, - payload, video_header, - kDefaultExpectedRetransmissionTimeMs)); - expected.transmitted.payload_bytes = 28; - expected.transmitted.header_bytes = 24; - expected.transmitted.packets = 2; - expected.fec.packets = 1; - rtp_stats_callback_.Matches(ssrc, expected); -} - -TEST_P(RtpSenderTestWithoutPacer, BytesReportedCorrectly) { - const uint8_t kPayloadType = 127; - const size_t kPayloadSize = 1400; - rtp_sender()->SetRtxPayloadType(kPayloadType - 1, kPayloadType); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - - SendPacket(clock_->TimeInMilliseconds(), kPayloadSize); - // Will send 2 full-size padding packets. 
- GenerateAndSendPadding(1); - GenerateAndSendPadding(1); - - StreamDataCounters rtp_stats; - StreamDataCounters rtx_stats; - rtp_egress()->GetDataCounters(&rtp_stats, &rtx_stats); - - // Payload - EXPECT_GT(rtp_stats.first_packet_time_ms, -1); - EXPECT_EQ(rtp_stats.transmitted.payload_bytes, kPayloadSize); - EXPECT_EQ(rtp_stats.transmitted.header_bytes, 12u); - EXPECT_EQ(rtp_stats.transmitted.padding_bytes, 0u); - EXPECT_EQ(rtx_stats.transmitted.payload_bytes, 0u); - EXPECT_EQ(rtx_stats.transmitted.header_bytes, 24u); - EXPECT_EQ(rtx_stats.transmitted.padding_bytes, 2 * kMaxPaddingSize); - - EXPECT_EQ(rtp_stats.transmitted.TotalBytes(), - rtp_stats.transmitted.payload_bytes + - rtp_stats.transmitted.header_bytes + - rtp_stats.transmitted.padding_bytes); - EXPECT_EQ(rtx_stats.transmitted.TotalBytes(), - rtx_stats.transmitted.payload_bytes + - rtx_stats.transmitted.header_bytes + - rtx_stats.transmitted.padding_bytes); - - EXPECT_EQ( - transport_.total_bytes_sent_, - rtp_stats.transmitted.TotalBytes() + rtx_stats.transmitted.TotalBytes()); + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, false)))))); + ASSERT_LT(0, rtp_sender_->ReSendPacket(built_packet->SequenceNumber())); } -TEST_P(RtpSenderTestWithoutPacer, RespectsNackBitrateLimit) { +TEST_F(RtpSenderTest, RespectsNackBitrateLimit) { const int32_t kPacketSize = 1400; const int32_t kNumPackets = 30; retransmission_rate_limiter_.SetMaxRate(kPacketSize * kNumPackets * 8); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, kNumPackets); - const uint16_t kStartSequenceNumber = rtp_sender()->SequenceNumber(); + const uint16_t kStartSequenceNumber = rtp_sender_->SequenceNumber(); std::vector sequence_numbers; for (int32_t i = 0; i < kNumPackets; ++i) { sequence_numbers.push_back(kStartSequenceNumber + i); time_controller_.AdvanceTime(TimeDelta::Millis(1)); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); SendPacket(clock_->TimeInMilliseconds(), kPacketSize); } - EXPECT_EQ(kNumPackets, transport_.packets_sent()); time_controller_.AdvanceTime(TimeDelta::Millis(1000 - kNumPackets)); // Resending should work - brings the bandwidth up to the limit. // NACK bitrate is capped to the same bitrate as the encoder, since the max // protection overhead is 50% (see MediaOptimization::SetTargetRates). - rtp_sender()->OnReceivedNack(sequence_numbers, 0); - EXPECT_EQ(kNumPackets * 2, transport_.packets_sent()); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(ElementsAre(Pointee(Property( + &RtpPacketToSend::packet_type, + RtpPacketMediaType::kRetransmission))))) + .Times(kNumPackets) + .WillRepeatedly( + [&](std::vector> packets) { + for (const auto& packet : packets) { + packet_history_->MarkPacketAsSent( + *packet->retransmitted_sequence_number()); + } + }); + rtp_sender_->OnReceivedNack(sequence_numbers, 0); // Must be at least 5ms in between retransmission attempts. time_controller_.AdvanceTime(TimeDelta::Millis(5)); // Resending should not work, bandwidth exceeded. 
- rtp_sender()->OnReceivedNack(sequence_numbers, 0); - EXPECT_EQ(kNumPackets * 2, transport_.packets_sent()); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets).Times(0); + rtp_sender_->OnReceivedNack(sequence_numbers, 0); } -TEST_P(RtpSenderTest, UpdatingCsrcsUpdatedOverhead) { - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); +TEST_F(RtpSenderTest, UpdatingCsrcsUpdatedOverhead) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); // Adding two csrcs adds 2*4 bytes to the header. - rtp_sender()->SetCsrcs({1, 2}); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 20u); + rtp_sender_->SetCsrcs({1, 2}); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 20u); } -TEST_P(RtpSenderTest, OnOverheadChanged) { - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); +TEST_F(RtpSenderTest, OnOverheadChanged) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); - rtp_sender()->RegisterRtpHeaderExtension(TransmissionOffset::kUri, - kTransmissionTimeOffsetExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(TransmissionOffset::kUri, + kTransmissionTimeOffsetExtensionId); // TransmissionTimeOffset extension has a size of 3B, but with the addition // of header index and rounding to 4 byte boundary we end up with 20B total. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 20u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 20u); } -TEST_P(RtpSenderTest, CountMidOnlyUntilAcked) { - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); +TEST_F(RtpSenderTest, CountMidOnlyUntilAcked) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); - rtp_sender()->RegisterRtpHeaderExtension(RtpMid::kUri, kMidExtensionId); - rtp_sender()->RegisterRtpHeaderExtension(RtpStreamId::kUri, kRidExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(RtpMid::kUri, kMidExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(RtpStreamId::kUri, kRidExtensionId); // Counted only if set. 
- EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); - rtp_sender()->SetMid("foo"); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 36u); - rtp_sender()->SetRid("bar"); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 52u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); + rtp_sender_->SetMid("foo"); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 36u); + rtp_sender_->SetRid("bar"); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 52u); // Ack received, mid/rid no longer sent. - rtp_sender()->OnReceivedAckOnSsrc(0); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + rtp_sender_->OnReceivedAckOnSsrc(0); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); } -TEST_P(RtpSenderTest, DontCountVolatileExtensionsIntoOverhead) { - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); +TEST_F(RtpSenderTest, DontCountVolatileExtensionsIntoOverhead) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); - - rtp_sender()->RegisterRtpHeaderExtension(InbandComfortNoiseExtension::kUri, - 1); - rtp_sender()->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri, - 2); - rtp_sender()->RegisterRtpHeaderExtension(VideoOrientation::kUri, 3); - rtp_sender()->RegisterRtpHeaderExtension(PlayoutDelayLimits::kUri, 4); - rtp_sender()->RegisterRtpHeaderExtension(VideoContentTypeExtension::kUri, 5); - rtp_sender()->RegisterRtpHeaderExtension(VideoTimingExtension::kUri, 6); - rtp_sender()->RegisterRtpHeaderExtension(RepairedRtpStreamId::kUri, 7); - rtp_sender()->RegisterRtpHeaderExtension(ColorSpaceExtension::kUri, 8); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); + + rtp_sender_->RegisterRtpHeaderExtension(InbandComfortNoiseExtension::kUri, 1); + rtp_sender_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri, + 2); + rtp_sender_->RegisterRtpHeaderExtension(VideoOrientation::kUri, 3); + rtp_sender_->RegisterRtpHeaderExtension(PlayoutDelayLimits::kUri, 4); + rtp_sender_->RegisterRtpHeaderExtension(VideoContentTypeExtension::kUri, 5); + rtp_sender_->RegisterRtpHeaderExtension(VideoTimingExtension::kUri, 6); + rtp_sender_->RegisterRtpHeaderExtension(RepairedRtpStreamId::kUri, 7); + rtp_sender_->RegisterRtpHeaderExtension(ColorSpaceExtension::kUri, 8); // Still only 12B counted since can't count on above being sent. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); -} - -TEST_P(RtpSenderTest, SendPacketMatchesVideo) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kVideo); - - // Verify sent with correct SSRC. - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kVideo); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketMatchesAudio) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kAudio); - - // Verify sent with correct SSRC. 
- packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kAudio); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketMatchesRetransmissions) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - - // Verify sent with correct SSRC (non-RTX). - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); - - // RTX retransmission. - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kRtxSsrc); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 2); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); } -TEST_P(RtpSenderTest, SendPacketMatchesPadding) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kPadding); - - // Verify sent with correct SSRC (non-RTX). - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kPadding); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); - - // RTX padding. - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kRtxSsrc); - packet->set_packet_type(RtpPacketMediaType::kPadding); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 2); -} - -TEST_P(RtpSenderTest, SendPacketMatchesFlexfec) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - - // Verify sent with correct SSRC. - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kFlexFecSsrc); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketMatchesUlpfec) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - - // Verify sent with correct SSRC. - packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketHandlesRetransmissionHistory) { - rtp_sender_context_->packet_history_.SetStorePacketsStatus( +TEST_F(RtpSenderTest, SendPacketHandlesRetransmissionHistory) { + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 10); // Ignore calls to EnqueuePackets() for this test. 
   EXPECT_CALL(mock_paced_sender_, EnqueuePackets).WillRepeatedly(Return());
 
-  // Build a media packet and send it.
+  // Build a media packet and put in the packet history.
   std::unique_ptr<RtpPacketToSend> packet =
       BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
   const uint16_t media_sequence_number = packet->SequenceNumber();
-  packet->set_packet_type(RtpPacketMediaType::kVideo);
   packet->set_allow_retransmission(true);
-  rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo());
+  packet_history_->PutRtpPacket(std::move(packet),
+                                clock_->TimeInMilliseconds());
 
-  // Simulate retransmission request.
+  // Simulate successful retransmission request.
   time_controller_.AdvanceTime(TimeDelta::Millis(30));
-  EXPECT_GT(rtp_sender()->ReSendPacket(media_sequence_number), 0);
+  EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0));
 
   // Packet already pending, retransmission not allowed.
   time_controller_.AdvanceTime(TimeDelta::Millis(30));
-  EXPECT_EQ(rtp_sender()->ReSendPacket(media_sequence_number), 0);
-
-  // Packet exiting pacer, mark as not longer pending.
-  packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds());
-  EXPECT_NE(packet->SequenceNumber(), media_sequence_number);
-  packet->set_packet_type(RtpPacketMediaType::kRetransmission);
-  packet->SetSsrc(kRtxSsrc);
-  packet->set_retransmitted_sequence_number(media_sequence_number);
-  packet->set_allow_retransmission(false);
-  uint16_t seq_no = packet->SequenceNumber();
-  rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo());
+  EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Eq(0));
+
+  // Simulate packet exiting pacer, mark as not longer pending.
+  packet_history_->MarkPacketAsSent(media_sequence_number);
 
   // Retransmissions allowed again.
   time_controller_.AdvanceTime(TimeDelta::Millis(30));
-  EXPECT_GT(rtp_sender()->ReSendPacket(media_sequence_number), 0);
-
-  // Retransmission of RTX packet should not be allowed.
- EXPECT_EQ(rtp_sender()->ReSendPacket(seq_no), 0); + EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0)); } -TEST_P(RtpSenderTest, SendPacketUpdatesExtensions) { - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( - TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( - AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( - VideoTimingExtension::kUri, kVideoTimingExtensionId)); - - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds()); - - const int32_t kDiffMs = 10; - time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs)); - - packet->set_packet_type(RtpPacketMediaType::kVideo); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - - const RtpPacketReceived& received_packet = transport_.last_sent_packet(); - - EXPECT_EQ(received_packet.GetExtension(), kDiffMs * 90); - - EXPECT_EQ(received_packet.GetExtension(), - AbsoluteSendTime::MsTo24Bits(clock_->TimeInMilliseconds())); - - VideoSendTiming timing; - EXPECT_TRUE(received_packet.GetExtension(&timing)); - EXPECT_EQ(timing.pacer_exit_delta_ms, kDiffMs); -} +TEST_F(RtpSenderTest, MarksRetransmittedPackets) { + packet_history_->SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); -TEST_P(RtpSenderTest, SendPacketSetsPacketOptions) { - const uint16_t kPacketId = 42; - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( - TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); + // Build a media packet and put in the packet history. std::unique_ptr packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetExtension(kPacketId); - - packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - - EXPECT_EQ(transport_.last_options_.packet_id, kPacketId); - EXPECT_TRUE(transport_.last_options_.included_in_allocation); - EXPECT_TRUE(transport_.last_options_.included_in_feedback); - EXPECT_FALSE(transport_.last_options_.is_retransmit); - - // Send another packet as retransmission, verify options are populated. 
- packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - packet->SetExtension(kPacketId + 1); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - EXPECT_TRUE(transport_.last_options_.is_retransmit); -} - -TEST_P(RtpSenderTest, SendPacketUpdatesStats) { - const size_t kPayloadSize = 1000; - - StrictMock send_side_delay_observer; - - RtpRtcpInterface::Configuration config; - config.clock = clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.rtx_send_ssrc = kRtxSsrc; - config.fec_generator = &flexfec_sender_; - config.send_side_delay_observer = &send_side_delay_observer; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - rtp_sender_context_ = - std::make_unique(config, &time_controller_); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( - TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); - - const int64_t capture_time_ms = clock_->TimeInMilliseconds(); - - std::unique_ptr video_packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - video_packet->set_packet_type(RtpPacketMediaType::kVideo); - video_packet->SetPayloadSize(kPayloadSize); - video_packet->SetExtension(1); - - std::unique_ptr rtx_packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - rtx_packet->SetSsrc(kRtxSsrc); - rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtx_packet->SetPayloadSize(kPayloadSize); - rtx_packet->SetExtension(2); - - std::unique_ptr fec_packet = - BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); - fec_packet->SetSsrc(kFlexFecSsrc); - fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - fec_packet->SetPayloadSize(kPayloadSize); - fec_packet->SetExtension(3); - - const int64_t kDiffMs = 25; - time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs)); + const uint16_t media_sequence_number = packet->SequenceNumber(); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); - EXPECT_CALL(send_side_delay_observer, - SendSideDelayUpdated(kDiffMs, kDiffMs, kDiffMs, kSsrc)); + // Expect a retransmission packet marked with which packet it is a + // retransmit of. EXPECT_CALL( - send_side_delay_observer, - SendSideDelayUpdated(kDiffMs, kDiffMs, 2 * kDiffMs, kFlexFecSsrc)); - - EXPECT_CALL(send_packet_observer_, OnSendPacket(1, capture_time_ms, kSsrc)); - - rtp_sender_context_->InjectPacket(std::move(video_packet), PacedPacketInfo()); - - // Send packet observer not called for padding/retransmissions. 
- EXPECT_CALL(send_packet_observer_, OnSendPacket(2, _, _)).Times(0); - rtp_sender_context_->InjectPacket(std::move(rtx_packet), PacedPacketInfo()); - - EXPECT_CALL(send_packet_observer_, - OnSendPacket(3, capture_time_ms, kFlexFecSsrc)); - rtp_sender_context_->InjectPacket(std::move(fec_packet), PacedPacketInfo()); - - StreamDataCounters rtp_stats; - StreamDataCounters rtx_stats; - rtp_egress()->GetDataCounters(&rtp_stats, &rtx_stats); - EXPECT_EQ(rtp_stats.transmitted.packets, 2u); - EXPECT_EQ(rtp_stats.fec.packets, 1u); - EXPECT_EQ(rtx_stats.retransmitted.packets, 1u); + mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kRetransmission)), + Pointee(Property(&RtpPacketToSend::retransmitted_sequence_number, + Eq(media_sequence_number))))))); + EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0)); } -TEST_P(RtpSenderTest, GeneratedPaddingHasBweExtensions) { +TEST_F(RtpSenderTest, GeneratedPaddingHasBweExtensions) { // Min requested size in order to use RTX payload. const size_t kMinPaddingSize = 50; + EnableRtx(); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); - // Send a payload packet first, to enable padding and populate the packet - // history. + // Put a packet in the history, in order to facilitate payload padding. std::unique_ptr packet = BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); packet->set_allow_retransmission(true); packet->SetPayloadSize(kMinPaddingSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Generate a plain padding packet, check that extensions are registered. std::vector> generated_packets = - rtp_sender()->GeneratePadding(/*target_size_bytes=*/1, true); + rtp_sender_->GeneratePadding(/*target_size_bytes=*/1, true); ASSERT_THAT(generated_packets, SizeIs(1)); auto& plain_padding = generated_packets.front(); EXPECT_GT(plain_padding->padding_size(), 0u); EXPECT_TRUE(plain_padding->HasExtension()); EXPECT_TRUE(plain_padding->HasExtension()); EXPECT_TRUE(plain_padding->HasExtension()); - - // Verify all header extensions have been written. 
- rtp_sender_context_->InjectPacket(std::move(plain_padding), - PacedPacketInfo()); - const auto& sent_plain_padding = transport_.last_sent_packet(); - EXPECT_TRUE(sent_plain_padding.HasExtension()); - EXPECT_TRUE(sent_plain_padding.HasExtension()); - EXPECT_TRUE(sent_plain_padding.HasExtension()); - webrtc::RTPHeader rtp_header; - sent_plain_padding.GetHeader(&rtp_header); - EXPECT_TRUE(rtp_header.extension.hasAbsoluteSendTime); - EXPECT_TRUE(rtp_header.extension.hasTransmissionTimeOffset); - EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber); + EXPECT_GT(plain_padding->padding_size(), 0u); // Generate a payload padding packets, check that extensions are registered. - generated_packets = rtp_sender()->GeneratePadding(kMinPaddingSize, true); + generated_packets = rtp_sender_->GeneratePadding(kMinPaddingSize, true); ASSERT_EQ(generated_packets.size(), 1u); auto& payload_padding = generated_packets.front(); EXPECT_EQ(payload_padding->padding_size(), 0u); EXPECT_TRUE(payload_padding->HasExtension()); EXPECT_TRUE(payload_padding->HasExtension()); EXPECT_TRUE(payload_padding->HasExtension()); - - // Verify all header extensions have been written. - rtp_sender_context_->InjectPacket(std::move(payload_padding), - PacedPacketInfo()); - const auto& sent_payload_padding = transport_.last_sent_packet(); - EXPECT_TRUE(sent_payload_padding.HasExtension()); - EXPECT_TRUE(sent_payload_padding.HasExtension()); - EXPECT_TRUE(sent_payload_padding.HasExtension()); - sent_payload_padding.GetHeader(&rtp_header); - EXPECT_TRUE(rtp_header.extension.hasAbsoluteSendTime); - EXPECT_TRUE(rtp_header.extension.hasTransmissionTimeOffset); - EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber); + EXPECT_GT(payload_padding->payload_size(), 0u); } -TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { +TEST_F(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { // Min requested size in order to use RTX payload. const size_t kMinPaddingSize = 50; - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); + rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload); + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 1); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); const size_t kPayloadPacketSize = kMinPaddingSize; @@ -1916,15 +1043,13 @@ TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { packet->set_allow_retransmission(true); packet->SetPayloadSize(kPayloadPacketSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - - // Send a dummy video packet so it ends up in the packet history. - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Generated padding has large enough budget that the video packet should be // retransmitted as padding. 
std::vector> generated_packets = - rtp_sender()->GeneratePadding(kMinPaddingSize, true); + rtp_sender_->GeneratePadding(kMinPaddingSize, true); ASSERT_EQ(generated_packets.size(), 1u); auto& padding_packet = generated_packets.front(); EXPECT_EQ(padding_packet->packet_type(), RtpPacketMediaType::kPadding); @@ -1937,7 +1062,7 @@ TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { size_t padding_bytes_generated = 0; generated_packets = - rtp_sender()->GeneratePadding(kPaddingBytesRequested, true); + rtp_sender_->GeneratePadding(kPaddingBytesRequested, true); EXPECT_EQ(generated_packets.size(), 1u); for (auto& packet : generated_packets) { EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding); @@ -1950,17 +1075,17 @@ TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { EXPECT_EQ(padding_bytes_generated, kMaxPaddingSize); } -TEST_P(RtpSenderTest, LimitsPayloadPaddingSize) { +TEST_F(RtpSenderTest, LimitsPayloadPaddingSize) { // Limit RTX payload padding to 2x target size. const double kFactor = 2.0; field_trials_.SetMaxPaddingFactor(kFactor); - SetUpRtpSender(true, false, false); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + SetUpRtpSender(false, false, nullptr); + rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); + rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload); + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 1); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); // Send a dummy video packet so it ends up in the packet history. @@ -1970,8 +1095,8 @@ TEST_P(RtpSenderTest, LimitsPayloadPaddingSize) { packet->set_allow_retransmission(true); packet->SetPayloadSize(kPayloadPacketSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Smallest target size that will result in the sent packet being returned as // padding. @@ -1981,26 +1106,26 @@ TEST_P(RtpSenderTest, LimitsPayloadPaddingSize) { // Generated padding has large enough budget that the video packet should be // retransmitted as padding. EXPECT_THAT( - rtp_sender()->GeneratePadding(kMinTargerSizeForPayload, true), + rtp_sender_->GeneratePadding(kMinTargerSizeForPayload, true), AllOf(Not(IsEmpty()), Each(Pointee(Property(&RtpPacketToSend::padding_size, Eq(0u)))))); // If payload padding is > 2x requested size, plain padding is returned // instead. 
EXPECT_THAT( - rtp_sender()->GeneratePadding(kMinTargerSizeForPayload - 1, true), + rtp_sender_->GeneratePadding(kMinTargerSizeForPayload - 1, true), AllOf(Not(IsEmpty()), Each(Pointee(Property(&RtpPacketToSend::padding_size, Gt(0u)))))); } -TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { - rtp_sender_context_->packet_history_.SetStorePacketsStatus( +TEST_F(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 1); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); - ASSERT_TRUE(rtp_sender()->RegisterRtpHeaderExtension( + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); const size_t kPayloadPacketSize = 1234; @@ -2011,8 +1136,8 @@ TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { packet->set_allow_retransmission(true); packet->SetPayloadSize(kPayloadPacketSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Payload padding not available without RTX, only generate plain padding on // the media SSRC. @@ -2024,7 +1149,7 @@ TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { (kPaddingBytesRequested + kMaxPaddingSize - 1) / kMaxPaddingSize; size_t padding_bytes_generated = 0; std::vector> padding_packets = - rtp_sender()->GeneratePadding(kPaddingBytesRequested, true); + rtp_sender_->GeneratePadding(kPaddingBytesRequested, true); EXPECT_EQ(padding_packets.size(), kExpectedNumPaddingPackets); for (auto& packet : padding_packets) { EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding); @@ -2035,21 +1160,13 @@ TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { EXPECT_TRUE(packet->HasExtension()); EXPECT_TRUE(packet->HasExtension()); EXPECT_TRUE(packet->HasExtension()); - - // Verify all header extensions are received. 
- rtp_sender_context_->InjectPacket(std::move(packet), PacedPacketInfo()); - webrtc::RTPHeader rtp_header; - transport_.last_sent_packet().GetHeader(&rtp_header); - EXPECT_TRUE(rtp_header.extension.hasAbsoluteSendTime); - EXPECT_TRUE(rtp_header.extension.hasTransmissionTimeOffset); - EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber); } EXPECT_EQ(padding_bytes_generated, kExpectedNumPaddingPackets * kMaxPaddingSize); } -TEST_P(RtpSenderTest, SupportsPadding) { +TEST_F(RtpSenderTest, SupportsPadding) { bool kSendingMediaStats[] = {true, false}; bool kEnableRedundantPayloads[] = {true, false}; absl::string_view kBweExtensionUris[] = { @@ -2058,196 +1175,127 @@ TEST_P(RtpSenderTest, SupportsPadding) { const int kExtensionsId = 7; for (bool sending_media : kSendingMediaStats) { - rtp_sender()->SetSendingMediaStatus(sending_media); + rtp_sender_->SetSendingMediaStatus(sending_media); for (bool redundant_payloads : kEnableRedundantPayloads) { int rtx_mode = kRtxRetransmitted; if (redundant_payloads) { rtx_mode |= kRtxRedundantPayloads; } - rtp_sender()->SetRtxStatus(rtx_mode); + rtp_sender_->SetRtxStatus(rtx_mode); for (auto extension_uri : kBweExtensionUris) { - EXPECT_FALSE(rtp_sender()->SupportsPadding()); - rtp_sender()->RegisterRtpHeaderExtension(extension_uri, kExtensionsId); + EXPECT_FALSE(rtp_sender_->SupportsPadding()); + rtp_sender_->RegisterRtpHeaderExtension(extension_uri, kExtensionsId); if (!sending_media) { - EXPECT_FALSE(rtp_sender()->SupportsPadding()); + EXPECT_FALSE(rtp_sender_->SupportsPadding()); } else { - EXPECT_TRUE(rtp_sender()->SupportsPadding()); + EXPECT_TRUE(rtp_sender_->SupportsPadding()); if (redundant_payloads) { - EXPECT_TRUE(rtp_sender()->SupportsRtxPayloadPadding()); + EXPECT_TRUE(rtp_sender_->SupportsRtxPayloadPadding()); } else { - EXPECT_FALSE(rtp_sender()->SupportsRtxPayloadPadding()); + EXPECT_FALSE(rtp_sender_->SupportsRtxPayloadPadding()); } } - rtp_sender()->DeregisterRtpHeaderExtension(extension_uri); - EXPECT_FALSE(rtp_sender()->SupportsPadding()); + rtp_sender_->DeregisterRtpHeaderExtension(extension_uri); + EXPECT_FALSE(rtp_sender_->SupportsPadding()); } } } } -TEST_P(RtpSenderTest, SetsCaptureTimeAndPopulatesTransmissionOffset) { - rtp_sender()->RegisterRtpHeaderExtension(TransmissionOffset::kUri, - kTransmissionTimeOffsetExtensionId); - - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - - const int64_t kMissingCaptureTimeMs = 0; - const int64_t kOffsetMs = 10; - - auto packet = - BuildRtpPacket(kPayload, kMarkerBit, clock_->TimeInMilliseconds(), - kMissingCaptureTimeMs); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->ReserveExtension(); - packet->AllocatePayload(sizeof(kPayloadData)); - - std::unique_ptr packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - EXPECT_EQ(packets.size(), 1u); - EXPECT_GT(packets[0]->capture_time_ms(), 0); - packet_to_pace = std::move(packets[0]); - }); +TEST_F(RtpSenderTest, SetsCaptureTimeOnRtxRetransmissions) { + EnableRtx(); + // Put a packet in the packet history, with current time as capture time. 
+ const int64_t start_time_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, start_time_ms, + /*capture_time_ms=*/start_time_ms); packet->set_allow_retransmission(true); - EXPECT_TRUE(rtp_sender()->SendToNetwork(std::move(packet))); - - time_controller_.AdvanceTime(TimeDelta::Millis(kOffsetMs)); - - rtp_sender_context_->InjectPacket(std::move(packet_to_pace), - PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), start_time_ms); - EXPECT_EQ(1, transport_.packets_sent()); - absl::optional transmission_time_extension = - transport_.sent_packets_.back().GetExtension(); - ASSERT_TRUE(transmission_time_extension.has_value()); - EXPECT_EQ(*transmission_time_extension, kOffsetMs * kTimestampTicksPerMs); + // Advance time, request an RTX retransmission. Capture timestamp should be + // preserved. + time_controller_.AdvanceTime(TimeDelta::Millis(10)); - // Retransmit packet. The RTX packet should get the same capture time as the - // original packet, so offset is delta from original packet to now. - time_controller_.AdvanceTime(TimeDelta::Millis(kOffsetMs)); - - std::unique_ptr rtx_packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - EXPECT_GT(packets[0]->capture_time_ms(), 0); - rtx_packet_to_pace = std::move(packets[0]); - }); - - EXPECT_GT(rtp_sender()->ReSendPacket(kSeqNum), 0); - rtp_sender_context_->InjectPacket(std::move(rtx_packet_to_pace), - PacedPacketInfo()); - - EXPECT_EQ(2, transport_.packets_sent()); - transmission_time_extension = - transport_.sent_packets_.back().GetExtension(); - ASSERT_TRUE(transmission_time_extension.has_value()); - EXPECT_EQ(*transmission_time_extension, 2 * kOffsetMs * kTimestampTicksPerMs); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(Property( + &RtpPacketToSend::capture_time_ms, start_time_ms))))); + EXPECT_GT(rtp_sender_->ReSendPacket(kSeqNum), 0); } -TEST_P(RtpSenderTestWithoutPacer, ClearHistoryOnSequenceNumberCange) { - const int64_t kRtt = 10; - - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - rtp_sender_context_->packet_history_.SetRtt(kRtt); +TEST_F(RtpSenderTest, ClearHistoryOnSequenceNumberCange) { + EnableRtx(); - // Send a packet and record its sequence numbers. - SendGenericPacket(); - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const uint16_t packet_seqence_number = - transport_.sent_packets_.back().SequenceNumber(); + // Put a packet in the packet history. + const int64_t now_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, now_ms, now_ms); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), now_ms); - // Advance time and make sure it can be retransmitted, even if we try to set - // the ssrc the what it already is. - rtp_sender()->SetSequenceNumber(rtp_sender()->SequenceNumber()); - time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); - EXPECT_GT(rtp_sender()->ReSendPacket(packet_seqence_number), 0); + EXPECT_TRUE(packet_history_->GetPacketState(kSeqNum)); - // Change the sequence number, then move the time and try to retransmit again. - // The old packet should now be gone. 
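The updated pacer expectations above match a vector of unique_ptr packets element by element with ElementsAre, Pointee and Property. A self-contained gmock sketch of the same matcher shape, using a hypothetical FakePacket in place of RtpPacketToSend; build and link against gtest/gmock.

#include <cstdint>
#include <memory>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::ElementsAre;
using ::testing::Pointee;
using ::testing::Property;

// Stand-in for RtpPacketToSend exposing only the property the matcher reads.
class FakePacket {
 public:
  explicit FakePacket(int64_t capture_time_ms) : capture_time_ms_(capture_time_ms) {}
  int64_t capture_time_ms() const { return capture_time_ms_; }

 private:
  int64_t capture_time_ms_;
};

TEST(MatcherSketch, MatchesCaptureTimeThroughPointer) {
  std::vector<std::unique_ptr<FakePacket>> packets;
  packets.push_back(std::make_unique<FakePacket>(1234));
  // Same shape as the EnqueuePackets expectation in the test above.
  EXPECT_THAT(packets,
              ElementsAre(Pointee(Property(&FakePacket::capture_time_ms, 1234))));
}

int main(int argc, char** argv) {
  ::testing::InitGoogleMock(&argc, argv);
  return RUN_ALL_TESTS();
}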
- rtp_sender()->SetSequenceNumber(rtp_sender()->SequenceNumber() - 1); - time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); - EXPECT_EQ(rtp_sender()->ReSendPacket(packet_seqence_number), 0); + // Update the sequence number of the RTP module, verify packet has been + // removed. + rtp_sender_->SetSequenceNumber(rtp_sender_->SequenceNumber() - 1); + EXPECT_FALSE(packet_history_->GetPacketState(kSeqNum)); } -TEST_P(RtpSenderTest, IgnoresNackAfterDisablingMedia) { +TEST_F(RtpSenderTest, IgnoresNackAfterDisablingMedia) { const int64_t kRtt = 10; - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - rtp_sender_context_->packet_history_.SetRtt(kRtt); - - // Send a packet so it is in the packet history. - std::unique_ptr packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - packet_to_pace = std::move(packets[0]); - }); - - SendGenericPacket(); - rtp_sender_context_->InjectPacket(std::move(packet_to_pace), - PacedPacketInfo()); + EnableRtx(); + packet_history_->SetRtt(kRtt); - ASSERT_EQ(1u, transport_.sent_packets_.size()); + // Put a packet in the history. + const int64_t start_time_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, start_time_ms, + /*capture_time_ms=*/start_time_ms); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), start_time_ms); - // Disable media sending and try to retransmit the packet, it should fail. - rtp_sender()->SetSendingMediaStatus(false); - time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); - EXPECT_LT(rtp_sender()->ReSendPacket(kSeqNum), 0); + // Disable media sending and try to retransmit the packet, it should fail. + rtp_sender_->SetSendingMediaStatus(false); + time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); + EXPECT_LT(rtp_sender_->ReSendPacket(kSeqNum), 0); } -TEST_P(RtpSenderTest, DoesntFecProtectRetransmissions) { +TEST_F(RtpSenderTest, DoesntFecProtectRetransmissions) { // Set up retranmission without RTX, so that a plain copy of the old packet is // re-sent instead. const int64_t kRtt = 10; - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxOff); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + rtp_sender_->SetSendingMediaStatus(true); + rtp_sender_->SetRtxStatus(kRtxOff); + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 10); - rtp_sender_context_->packet_history_.SetRtt(kRtt); - - // Send a packet so it is in the packet history, make sure to mark it for - // FEC protection. - std::unique_ptr packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - packet_to_pace = std::move(packets[0]); - }); - - SendGenericPacket(); - packet_to_pace->set_fec_protect_packet(true); - rtp_sender_context_->InjectPacket(std::move(packet_to_pace), - PacedPacketInfo()); + packet_history_->SetRtt(kRtt); - ASSERT_EQ(1u, transport_.sent_packets_.size()); + // Put a fec protected packet in the history. 
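DoesntFecProtectRetransmissions stores an FEC-protected packet and then verifies that the re-sent copy no longer asks for FEC protection. A minimal sketch of that rule with a hypothetical StoredPacket stand-in.

#include <cstdint>
#include <iostream>

// Stand-in for the stored packet: only the fields relevant here.
struct StoredPacket {
  uint16_t sequence_number;
  bool fec_protect_packet;
};

// The retransmitted copy must not request FEC protection again.
StoredPacket MakeRetransmission(const StoredPacket& original) {
  StoredPacket copy = original;
  copy.fec_protect_packet = false;
  return copy;
}

int main() {
  StoredPacket original{/*sequence_number=*/7, /*fec_protect_packet=*/true};
  StoredPacket resend = MakeRetransmission(original);
  std::cout << std::boolalpha << resend.fec_protect_packet << "\n";  // false
}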
+ const int64_t start_time_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, start_time_ms, + /*capture_time_ms=*/start_time_ms); + packet->set_allow_retransmission(true); + packet->set_fec_protect_packet(true); + packet_history_->PutRtpPacket(std::move(packet), start_time_ms); // Re-send packet, the retransmitted packet should not have the FEC protection // flag set. EXPECT_CALL(mock_paced_sender_, - EnqueuePackets(Each(Pointee( + EnqueuePackets(ElementsAre(Pointee( Property(&RtpPacketToSend::fec_protect_packet, false))))); time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); - EXPECT_GT(rtp_sender()->ReSendPacket(kSeqNum), 0); + EXPECT_GT(rtp_sender_->ReSendPacket(kSeqNum), 0); } -TEST_P(RtpSenderTest, MarksPacketsWithKeyframeStatus) { +TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) { FieldTrialBasedConfig field_trials; RTPSenderVideo::Config video_config; video_config.clock = clock_; - video_config.rtp_sender = rtp_sender(); + video_config.rtp_sender = rtp_sender_.get(); video_config.field_trials = &field_trials; RTPSenderVideo rtp_sender_video(video_config); @@ -2290,14 +1338,4 @@ TEST_P(RtpSenderTest, MarksPacketsWithKeyframeStatus) { } } -INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, - RtpSenderTest, - ::testing::Values(TestConfig{false}, - TestConfig{true})); - -INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, - RtpSenderTestWithoutPacer, - ::testing::Values(TestConfig{false}, - TestConfig{true})); - } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc index 6e620777c6..4919e3ebf4 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video.cc @@ -361,7 +361,8 @@ void RTPSenderVideo::AddRtpHeaderExtensions( if (video_header.generic) { bool extension_is_set = false; - if (video_structure_ != nullptr) { + if (packet->IsRegistered() && + video_structure_ != nullptr) { DependencyDescriptor descriptor; descriptor.first_packet_in_frame = first_packet; descriptor.last_packet_in_frame = last_packet; @@ -407,7 +408,8 @@ void RTPSenderVideo::AddRtpHeaderExtensions( } // Do not use generic frame descriptor when dependency descriptor is stored. - if (!extension_is_set) { + if (packet->IsRegistered() && + !extension_is_set) { RtpGenericFrameDescriptor generic_descriptor; generic_descriptor.SetFirstPacketInSubFrame(first_packet); generic_descriptor.SetLastPacketInSubFrame(last_packet); @@ -437,7 +439,8 @@ void RTPSenderVideo::AddRtpHeaderExtensions( } } - if (first_packet && + if (packet->IsRegistered() && + first_packet && send_allocation_ != SendVideoLayersAllocation::kDontSend && (video_header.frame_type == VideoFrameType::kVideoFrameKey || PacketWillLikelyBeRequestedForRestransmitionIfLost(video_header))) { @@ -523,7 +526,8 @@ bool RTPSenderVideo::SendVideo( AbsoluteCaptureTimeSender::GetSource(single_packet->Ssrc(), single_packet->Csrcs()), single_packet->Timestamp(), kVideoPayloadTypeFrequency, - Int64MsToUQ32x32(single_packet->capture_time_ms() + NtpOffsetMs()), + Int64MsToUQ32x32( + clock_->ConvertTimestampToNtpTimeInMilliseconds(capture_time_ms)), /*estimated_capture_clock_offset=*/ include_capture_clock_offset_ ? 
estimated_capture_clock_offset_ms : absl::nullopt); diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h index 06f3d20014..ba8d7e8360 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video.h +++ b/modules/rtp_rtcp/source/rtp_sender_video.h @@ -89,6 +89,7 @@ class RTPSenderVideo { virtual ~RTPSenderVideo(); // expected_retransmission_time_ms.has_value() -> retransmission allowed. + // `capture_time_ms` and `clock::CurrentTime` should be using the same epoch. // Calls to this method is assumed to be externally serialized. // |estimated_capture_clock_offset_ms| is an estimated clock offset between // this sender and the original capturer, for this video packet. See diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc index 508d70f8e3..ea727828cc 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc @@ -34,7 +34,6 @@ #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" -#include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/arraysize.h" #include "rtc_base/rate_limiter.h" #include "rtc_base/task_queue_for_test.h" @@ -1054,8 +1053,10 @@ TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) { packet.GetExtension(); if (absolute_capture_time) { ++packets_with_abs_capture_time; - EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs())); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); EXPECT_FALSE( absolute_capture_time->estimated_capture_clock_offset.has_value()); } @@ -1092,8 +1093,10 @@ TEST_P(RtpSenderVideoTest, AbsoluteCaptureTimeWithCaptureClockOffset) { packet.GetExtension(); if (absolute_capture_time) { ++packets_with_abs_capture_time; - EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs())); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); EXPECT_EQ(kExpectedCaptureClockOffset, absolute_capture_time->estimated_capture_clock_offset); } diff --git a/modules/rtp_rtcp/source/rtp_util.cc b/modules/rtp_rtcp/source/rtp_util.cc new file mode 100644 index 0000000000..46c641ea2f --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_util.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/rtp_rtcp/source/rtp_util.h" + +#include +#include + +#include "api/array_view.h" + +namespace webrtc { +namespace { + +constexpr uint8_t kRtpVersion = 2; +constexpr size_t kMinRtpPacketLen = 12; +constexpr size_t kMinRtcpPacketLen = 4; + +bool HasCorrectRtpVersion(rtc::ArrayView packet) { + return packet[0] >> 6 == kRtpVersion; +} + +// For additional details, see http://tools.ietf.org/html/rfc5761#section-4 +bool PayloadTypeIsReservedForRtcp(uint8_t payload_type) { + return 64 <= payload_type && payload_type < 96; +} + +} // namespace + +bool IsRtpPacket(rtc::ArrayView packet) { + return packet.size() >= kMinRtpPacketLen && HasCorrectRtpVersion(packet) && + !PayloadTypeIsReservedForRtcp(packet[1] & 0x7F); +} + +bool IsRtcpPacket(rtc::ArrayView packet) { + return packet.size() >= kMinRtcpPacketLen && HasCorrectRtpVersion(packet) && + PayloadTypeIsReservedForRtcp(packet[1] & 0x7F); +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_util.h b/modules/rtp_rtcp/source/rtp_util.h new file mode 100644 index 0000000000..b85727bf47 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_util.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_ +#define MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_ + +#include + +#include "api/array_view.h" + +namespace webrtc { + +bool IsRtcpPacket(rtc::ArrayView packet); +bool IsRtpPacket(rtc::ArrayView packet); + +} // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_ diff --git a/modules/rtp_rtcp/source/rtp_util_unittest.cc b/modules/rtp_rtcp/source/rtp_util_unittest.cc new file mode 100644 index 0000000000..8f980ecff1 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_util_unittest.cc @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
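The new IsRtpPacket/IsRtcpPacket helpers classify a packet by the RTP version in the top two bits of the first byte and by the RFC 5761 rule that payload types 64-95 collide with RTCP packet types. A standalone sketch mirroring the same checks on raw buffers.

#include <cstddef>
#include <cstdint>
#include <iostream>

namespace {

constexpr uint8_t kRtpVersion = 2;
constexpr size_t kMinRtpPacketLen = 12;
constexpr size_t kMinRtcpPacketLen = 4;

bool HasCorrectRtpVersion(const uint8_t* packet) {
  return (packet[0] >> 6) == kRtpVersion;
}

// RFC 5761, section 4: payload types 64-95 are indistinguishable from RTCP.
bool PayloadTypeIsReservedForRtcp(uint8_t payload_type) {
  return 64 <= payload_type && payload_type < 96;
}

bool IsRtpPacket(const uint8_t* packet, size_t size) {
  return size >= kMinRtpPacketLen && HasCorrectRtpVersion(packet) &&
         !PayloadTypeIsReservedForRtcp(packet[1] & 0x7F);
}

bool IsRtcpPacket(const uint8_t* packet, size_t size) {
  return size >= kMinRtcpPacketLen && HasCorrectRtpVersion(packet) &&
         PayloadTypeIsReservedForRtcp(packet[1] & 0x7F);
}

}  // namespace

int main() {
  const uint8_t rtp[12] = {0x80, 97};   // version 2, payload type 97
  const uint8_t rtcp[4] = {0x80, 200};  // version 2, packet type 200 (SR)
  std::cout << IsRtpPacket(rtp, sizeof(rtp)) << IsRtcpPacket(rtp, sizeof(rtp)) << "\n";
  std::cout << IsRtpPacket(rtcp, sizeof(rtcp)) << IsRtcpPacket(rtcp, sizeof(rtcp)) << "\n";
}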
+ */ + +#include "modules/rtp_rtcp/source/rtp_util.h" + +#include "test/gmock.h" + +namespace webrtc { +namespace { + +TEST(RtpUtil, IsRtpPacket) { + constexpr uint8_t kMinimalisticRtpPacket[] = {0x80, 97, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0, 0}; + EXPECT_TRUE(IsRtpPacket(kMinimalisticRtpPacket)); + + constexpr uint8_t kWrongRtpVersion[] = {0xc0, 97, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0, 0}; + EXPECT_FALSE(IsRtpPacket(kWrongRtpVersion)); + + constexpr uint8_t kPacketWithPayloadForRtcp[] = {0x80, 200, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0, 0}; + EXPECT_FALSE(IsRtpPacket(kPacketWithPayloadForRtcp)); + + constexpr uint8_t kTooSmallRtpPacket[] = {0x80, 97, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0}; + EXPECT_FALSE(IsRtpPacket(kTooSmallRtpPacket)); + + EXPECT_FALSE(IsRtpPacket({})); +} + +TEST(RtpUtil, IsRtcpPacket) { + constexpr uint8_t kMinimalisticRtcpPacket[] = {0x80, 202, 0, 0}; + EXPECT_TRUE(IsRtcpPacket(kMinimalisticRtcpPacket)); + + constexpr uint8_t kWrongRtpVersion[] = {0xc0, 202, 0, 0}; + EXPECT_FALSE(IsRtcpPacket(kWrongRtpVersion)); + + constexpr uint8_t kPacketWithPayloadForRtp[] = {0x80, 225, 0, 0}; + EXPECT_FALSE(IsRtcpPacket(kPacketWithPayloadForRtp)); + + constexpr uint8_t kTooSmallRtcpPacket[] = {0x80, 202, 0}; + EXPECT_FALSE(IsRtcpPacket(kTooSmallRtcpPacket)); + + EXPECT_FALSE(IsRtcpPacket({})); +} + +} // namespace +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_utility.cc b/modules/rtp_rtcp/source/rtp_utility.cc index a22785faca..9b68f0dead 100644 --- a/modules/rtp_rtcp/source/rtp_utility.cc +++ b/modules/rtp_rtcp/source/rtp_utility.cc @@ -131,7 +131,7 @@ bool RtpHeaderParser::RTCP() const { } bool RtpHeaderParser::ParseRtcp(RTPHeader* header) const { - assert(header != NULL); + RTC_DCHECK(header); const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin; if (length < kRtcpMinParseLength) { @@ -364,6 +364,10 @@ void RtpHeaderParser::ParseOneByteExtensionHeader( header->extension.hasTransmissionTimeOffset = true; break; } + case kRtpExtensionCsrcAudioLevel: { + RTC_LOG(LS_WARNING) << "Csrc audio level extension not supported"; + return; + } case kRtpExtensionAudioLevel: { if (len != 0) { RTC_LOG(LS_WARNING) << "Incorrect audio level len: " << len; diff --git a/modules/rtp_rtcp/source/rtp_utility.h b/modules/rtp_rtcp/source/rtp_utility.h index cdda9ef119..cdfff4072f 100644 --- a/modules/rtp_rtcp/source/rtp_utility.h +++ b/modules/rtp_rtcp/source/rtp_utility.h @@ -15,6 +15,7 @@ #include +#include "absl/base/attributes.h" #include "absl/strings/string_view.h" #include "api/rtp_headers.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" @@ -34,6 +35,7 @@ class RtpHeaderParser { RtpHeaderParser(const uint8_t* rtpData, size_t rtpDataLength); ~RtpHeaderParser(); + ABSL_DEPRECATED("Use IsRtpPacket or IsRtcpPacket") bool RTCP() const; bool ParseRtcp(RTPHeader* header) const; bool Parse(RTPHeader* parsedPacket, diff --git a/modules/rtp_rtcp/source/time_util.cc b/modules/rtp_rtcp/source/time_util.cc index b5b4f8bd98..fe0cfea11f 100644 --- a/modules/rtp_rtcp/source/time_util.cc +++ b/modules/rtp_rtcp/source/time_util.cc @@ -17,48 +17,6 @@ #include "rtc_base/time_utils.h" namespace webrtc { -namespace { - -int64_t NtpOffsetMsCalledOnce() { - constexpr int64_t kNtpJan1970Sec = 2208988800; - int64_t clock_time = rtc::TimeMillis(); - int64_t utc_time = rtc::TimeUTCMillis(); - return utc_time - clock_time + kNtpJan1970Sec * rtc::kNumMillisecsPerSec; -} - -} // namespace - -int64_t NtpOffsetMs() { - // Calculate the offset once. 
- static int64_t ntp_offset_ms = NtpOffsetMsCalledOnce(); - return ntp_offset_ms; -} - -NtpTime TimeMicrosToNtp(int64_t time_us) { - // Since this doesn't return a wallclock time, but only NTP representation - // of rtc::TimeMillis() clock, the exact offset doesn't matter. - // To simplify conversions between NTP and RTP time, this offset is - // limited to milliseconds in resolution. - int64_t time_ntp_us = time_us + NtpOffsetMs() * 1000; - RTC_DCHECK_GE(time_ntp_us, 0); // Time before year 1900 is unsupported. - - // TODO(danilchap): Convert both seconds and fraction together using int128 - // when that type is easily available. - // Currently conversion is done separetly for seconds and fraction of a second - // to avoid overflow. - - // Convert seconds to uint32 through uint64 for well-defined cast. - // Wrap around (will happen in 2036) is expected for ntp time. - uint32_t ntp_seconds = - static_cast(time_ntp_us / rtc::kNumMicrosecsPerSec); - - // Scale fractions of the second to ntp resolution. - constexpr int64_t kNtpInSecond = 1LL << 32; - int64_t us_fractions = time_ntp_us % rtc::kNumMicrosecsPerSec; - uint32_t ntp_fractions = - us_fractions * kNtpInSecond / rtc::kNumMicrosecsPerSec; - return NtpTime(ntp_seconds, ntp_fractions); -} uint32_t SaturatedUsToCompactNtp(int64_t us) { constexpr uint32_t kMaxCompactNtp = 0xFFFFFFFF; diff --git a/modules/rtp_rtcp/source/time_util.h b/modules/rtp_rtcp/source/time_util.h index 94b914310c..c883e5ca38 100644 --- a/modules/rtp_rtcp/source/time_util.h +++ b/modules/rtp_rtcp/source/time_util.h @@ -17,20 +17,6 @@ namespace webrtc { -// Converts time obtained using rtc::TimeMicros to ntp format. -// TimeMicrosToNtp guarantees difference of the returned values matches -// difference of the passed values. -// As a result TimeMicrosToNtp(rtc::TimeMicros()) doesn't guarantee to match -// system time. -// However, TimeMicrosToNtp Guarantees that returned NtpTime will be offsetted -// from rtc::TimeMicros() by integral number of milliseconds. -// Use NtpOffsetMs() to get that offset value. -NtpTime TimeMicrosToNtp(int64_t time_us); - -// Difference between Ntp time and local relative time returned by -// rtc::TimeMicros() -int64_t NtpOffsetMs(); - // Helper function for compact ntp representation: // RFC 3550, Section 4. Time Format. // Wallclock time is represented using the timestamp format of diff --git a/modules/rtp_rtcp/source/time_util_unittest.cc b/modules/rtp_rtcp/source/time_util_unittest.cc index 4b469bb956..6ff55dda55 100644 --- a/modules/rtp_rtcp/source/time_util_unittest.cc +++ b/modules/rtp_rtcp/source/time_util_unittest.cc @@ -9,34 +9,10 @@ */ #include "modules/rtp_rtcp/source/time_util.h" -#include "rtc_base/fake_clock.h" -#include "rtc_base/time_utils.h" -#include "system_wrappers/include/clock.h" #include "test/gtest.h" namespace webrtc { -TEST(TimeUtilTest, TimeMicrosToNtpDoesntChangeBetweenRuns) { - rtc::ScopedFakeClock clock; - // TimeMicrosToNtp is not pure: it behave differently between different - // execution of the program, but should behave same during same execution. 
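The deleted TimeMicrosToNtp/NtpOffsetMs helpers pinned the monotonic clock to the NTP epoch by adding the 2208988800-second offset between 1900 and 1970 and expressing sub-second precision as a 1/2^32 fraction; callers now go through Clock::ConvertTimestampToNtpTimeInMilliseconds instead. A standalone sketch of the same conversion math, assuming a Unix-epoch input in microseconds.

#include <cstdint>
#include <cstdio>

// Seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01).
constexpr int64_t kNtpJan1970Sec = 2208988800;
constexpr int64_t kMicrosPerSec = 1000000;

struct NtpTimestamp {
  uint32_t seconds;    // seconds since 1900, wraps around in 2036
  uint32_t fractions;  // units of 1/2^32 of a second
};

NtpTimestamp UnixMicrosToNtp(int64_t unix_time_us) {
  const int64_t ntp_us = unix_time_us + kNtpJan1970Sec * kMicrosPerSec;
  NtpTimestamp ntp;
  ntp.seconds = static_cast<uint32_t>(ntp_us / kMicrosPerSec);
  // Scale the microsecond remainder to 1/2^32-second units.
  const int64_t us_fraction = ntp_us % kMicrosPerSec;
  ntp.fractions = static_cast<uint32_t>((us_fraction << 32) / kMicrosPerSec);
  return ntp;
}

int main() {
  NtpTimestamp ntp = UnixMicrosToNtp(1500000000LL * kMicrosPerSec + 250000);
  std::printf("%u.%u\n", ntp.seconds, ntp.fractions);
}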
- const int64_t time_us = 12345; - clock.SetTime(Timestamp::Micros(2)); - NtpTime time_ntp = TimeMicrosToNtp(time_us); - clock.SetTime(Timestamp::Micros(time_us)); - EXPECT_EQ(TimeMicrosToNtp(time_us), time_ntp); - clock.SetTime(Timestamp::Micros(1000000)); - EXPECT_EQ(TimeMicrosToNtp(time_us), time_ntp); -} - -TEST(TimeUtilTest, TimeMicrosToNtpKeepsIntervals) { - rtc::ScopedFakeClock clock; - NtpTime time_ntp1 = TimeMicrosToNtp(rtc::TimeMicros()); - clock.AdvanceTime(TimeDelta::Millis(20)); - NtpTime time_ntp2 = TimeMicrosToNtp(rtc::TimeMicros()); - EXPECT_EQ(time_ntp2.ToMs() - time_ntp1.ToMs(), 20); -} - TEST(TimeUtilTest, CompactNtp) { const uint32_t kNtpSec = 0x12345678; const uint32_t kNtpFrac = 0x23456789; diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h index a7573993f7..3d7cb3291d 100644 --- a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h @@ -25,7 +25,7 @@ class VideoRtpDepacketizerVp8 : public VideoRtpDepacketizer { public: VideoRtpDepacketizerVp8() = default; VideoRtpDepacketizerVp8(const VideoRtpDepacketizerVp8&) = delete; - VideoRtpDepacketizerVp8& operator=(VideoRtpDepacketizerVp8&) = delete; + VideoRtpDepacketizerVp8& operator=(const VideoRtpDepacketizerVp8&) = delete; ~VideoRtpDepacketizerVp8() override = default; // Parses vp8 rtp payload descriptor. diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h index c622cbc75e..4bb358a15f 100644 --- a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h @@ -25,7 +25,7 @@ class VideoRtpDepacketizerVp9 : public VideoRtpDepacketizer { public: VideoRtpDepacketizerVp9() = default; VideoRtpDepacketizerVp9(const VideoRtpDepacketizerVp9&) = delete; - VideoRtpDepacketizerVp9& operator=(VideoRtpDepacketizerVp9&) = delete; + VideoRtpDepacketizerVp9& operator=(const VideoRtpDepacketizerVp9&) = delete; ~VideoRtpDepacketizerVp9() override = default; // Parses vp9 rtp payload descriptor. diff --git a/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc b/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc index 44597b85bb..dffdf2ebf6 100644 --- a/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc +++ b/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc @@ -225,7 +225,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } } // Check that we can only recover 1 packet. - assert(check_num_recovered == 1); + RTC_DCHECK_EQ(check_num_recovered, 1); // Update the state with the newly recovered media packet. state_tmp[jsel] = 0; } @@ -260,7 +260,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } } } else { // Gilbert-Elliot model for burst model. - assert(loss_model_[k].loss_type == kBurstyLossModel); + RTC_DCHECK_EQ(loss_model_[k].loss_type, kBurstyLossModel); // Transition probabilities: from previous to current state. // Prob. of previous = lost --> current = received. double prob10 = 1.0 / burst_length; @@ -425,8 +425,8 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } } } // Done with loop over total number of packets. 
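The bursty-loss branch above is a two-state Gilbert-Elliott model in which the probability of leaving the lost state is 1/burst_length. A standalone simulator of that model; the received-to-lost probability below is derived here to hit a target average loss rate and is not a value taken from the test.

#include <cstdio>
#include <random>

// Two-state Gilbert-Elliott packet loss simulator. State "lost" vs "received".
// prob10: P(received now | lost before) = 1 / burst_length, as in the test.
// prob01: P(lost now | received before), chosen to match the average loss rate.
class GilbertElliottLoss {
 public:
  GilbertElliottLoss(double average_loss, double burst_length)
      : prob10_(1.0 / burst_length),
        prob01_(prob10_ * average_loss / (1.0 - average_loss)),
        rng_(/*seed=*/42) {}

  bool NextPacketLost() {
    const double u = uniform_(rng_);
    lost_ = lost_ ? (u >= prob10_) : (u < prob01_);
    return lost_;
  }

 private:
  const double prob10_;
  const double prob01_;
  bool lost_ = false;
  std::mt19937 rng_;
  std::uniform_real_distribution<double> uniform_{0.0, 1.0};
};

int main() {
  GilbertElliottLoss model(/*average_loss=*/0.05, /*burst_length=*/2.5);
  int lost = 0;
  for (int i = 0; i < 100000; ++i) lost += model.NextPacketLost();
  std::printf("simulated loss rate: %.3f\n", lost / 100000.0);
}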
- assert(num_media_packets_lost <= num_media_packets); - assert(num_packets_lost <= tot_num_packets && num_packets_lost > 0); + RTC_DCHECK_LE(num_media_packets_lost, num_media_packets); + RTC_DCHECK_LE(num_packets_lost, tot_num_packets && num_packets_lost > 0); double residual_loss = 0.0; // Only need to compute residual loss (number of recovered packets) for // configurations that have at least one media packet lost. @@ -445,7 +445,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { num_recovered_packets = num_media_packets_lost; } } - assert(num_recovered_packets <= num_media_packets); + RTC_DCHECK_LE(num_recovered_packets, num_media_packets); // Compute the residual loss. We only care about recovering media/source // packets, so residual loss is based on lost/recovered media packets. residual_loss = @@ -464,9 +464,9 @@ class FecPacketMaskMetricsTest : public ::testing::Test { // Update the distribution statistics. // Compute the gap of the loss (the "consecutiveness" of the loss). int gap_loss = GapLoss(tot_num_packets, state.get()); - assert(gap_loss < kMaxGapSize); + RTC_DCHECK_LT(gap_loss, kMaxGapSize); int index = gap_loss * (2 * kMaxMediaPacketsTest) + num_packets_lost; - assert(index < kNumStatesDistribution); + RTC_DCHECK_LT(index, kNumStatesDistribution); metrics_code.residual_loss_per_loss_gap[index] += residual_loss; if (code_type == xor_random_code) { // The configuration density is only a function of the code length and @@ -492,8 +492,8 @@ class FecPacketMaskMetricsTest : public ::testing::Test { metrics_code.variance_residual_loss[k] - (metrics_code.average_residual_loss[k] * metrics_code.average_residual_loss[k]); - assert(metrics_code.variance_residual_loss[k] >= 0.0); - assert(metrics_code.average_residual_loss[k] > 0.0); + RTC_DCHECK_GE(metrics_code.variance_residual_loss[k], 0.0); + RTC_DCHECK_GT(metrics_code.average_residual_loss[k], 0.0); metrics_code.variance_residual_loss[k] = std::sqrt(metrics_code.variance_residual_loss[k]) / metrics_code.average_residual_loss[k]; @@ -509,7 +509,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } else if (code_type == xor_bursty_code) { CopyMetrics(&kMetricsXorBursty[code_index], metrics_code); } else { - assert(false); + RTC_NOTREACHED(); } } @@ -588,7 +588,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { num_loss_models++; } } - assert(num_loss_models == kNumLossModels); + RTC_DCHECK_EQ(num_loss_models, kNumLossModels); } void SetCodeParams() { @@ -738,7 +738,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { code_index++; } } - assert(code_index == kNumberCodes); + RTC_DCHECK_EQ(code_index, kNumberCodes); return 0; } diff --git a/modules/utility/source/process_thread_impl.cc b/modules/utility/source/process_thread_impl.cc index cdc2fa1005..73fc23400b 100644 --- a/modules/utility/source/process_thread_impl.cc +++ b/modules/utility/source/process_thread_impl.cc @@ -180,6 +180,7 @@ void ProcessThreadImpl::PostDelayedTask(std::unique_ptr task, void ProcessThreadImpl::RegisterModule(Module* module, const rtc::Location& from) { + TRACE_EVENT0("webrtc", "ProcessThreadImpl::RegisterModule"); RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(module) << from.ToString(); diff --git a/modules/utility/source/process_thread_impl.h b/modules/utility/source/process_thread_impl.h index b667bfc68a..5d22e37ca1 100644 --- a/modules/utility/source/process_thread_impl.h +++ b/modules/utility/source/process_thread_impl.h @@ -21,7 +21,6 @@ #include "api/task_queue/queued_task.h" #include 
"modules/include/module.h" #include "modules/utility/include/process_thread.h" -#include "rtc_base/deprecated/recursive_critical_section.h" #include "rtc_base/event.h" #include "rtc_base/location.h" #include "rtc_base/platform_thread.h" diff --git a/modules/video_capture/device_info_impl.cc b/modules/video_capture/device_info_impl.cc index 846977e68f..d5abb29407 100644 --- a/modules/video_capture/device_info_impl.cc +++ b/modules/video_capture/device_info_impl.cc @@ -52,7 +52,7 @@ int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) { int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8, const uint32_t deviceCapabilityNumber, VideoCaptureCapability& capability) { - assert(deviceUniqueIdUTF8 != NULL); + RTC_DCHECK(deviceUniqueIdUTF8); MutexLock lock(&_apiLock); diff --git a/modules/video_capture/linux/device_info_linux.cc b/modules/video_capture/linux/device_info_linux.cc index b3c9766029..cde3b86d5c 100644 --- a/modules/video_capture/linux/device_info_linux.cc +++ b/modules/video_capture/linux/device_info_linux.cc @@ -116,7 +116,7 @@ int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber, memset(deviceNameUTF8, 0, deviceNameLength); memcpy(cameraName, cap.card, sizeof(cap.card)); - if (deviceNameLength >= strlen(cameraName)) { + if (deviceNameLength > strlen(cameraName)) { memcpy(deviceNameUTF8, cameraName, strlen(cameraName)); } else { RTC_LOG(LS_INFO) << "buffer passed is too small"; @@ -126,7 +126,7 @@ int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber, if (cap.bus_info[0] != 0) // may not available in all drivers { // copy device id - if (deviceUniqueIdUTF8Length >= strlen((const char*)cap.bus_info)) { + if (deviceUniqueIdUTF8Length > strlen((const char*)cap.bus_info)) { memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length); memcpy(deviceUniqueIdUTF8, cap.bus_info, strlen((const char*)cap.bus_info)); @@ -146,7 +146,7 @@ int32_t DeviceInfoLinux::CreateCapabilityMap(const char* deviceUniqueIdUTF8) { const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen((char*)deviceUniqueIdUTF8); - if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) { + if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) { RTC_LOG(LS_INFO) << "Device name too long"; return -1; } diff --git a/modules/video_capture/test/video_capture_unittest.cc b/modules/video_capture/test/video_capture_unittest.cc index 1a0cf2d5da..e74a456cee 100644 --- a/modules/video_capture/test/video_capture_unittest.cc +++ b/modules/video_capture/test/video_capture_unittest.cc @@ -152,7 +152,7 @@ class VideoCaptureTest : public ::testing::Test { void SetUp() override { device_info_.reset(VideoCaptureFactory::CreateDeviceInfo()); - assert(device_info_.get()); + RTC_DCHECK(device_info_.get()); number_of_devices_ = device_info_->NumberOfDevices(); ASSERT_GT(number_of_devices_, 0u); } diff --git a/modules/video_capture/windows/device_info_ds.cc b/modules/video_capture/windows/device_info_ds.cc index 97f61f7845..3731dce8bc 100644 --- a/modules/video_capture/windows/device_info_ds.cc +++ b/modules/video_capture/windows/device_info_ds.cc @@ -213,7 +213,7 @@ IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8, uint32_t productUniqueIdUTF8Length) { const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen( (char*)deviceUniqueIdUTF8); // UTF8 is also NULL terminated - if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) { + if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) { RTC_LOG(LS_INFO) << "Device name too long"; return NULL; } @@ 
-306,7 +306,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen((char*)deviceUniqueIdUTF8); - if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) { + if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) { RTC_LOG(LS_INFO) << "Device name too long"; return -1; } @@ -380,7 +380,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) supportFORMAT_VideoInfo2 = true; VIDEOINFOHEADER2* h = reinterpret_cast(pmt->pbFormat); - assert(h); + RTC_DCHECK(h); foundInterlacedFormat |= h->dwInterlaceFlags & (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly); @@ -418,7 +418,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) if (pmt->formattype == FORMAT_VideoInfo) { VIDEOINFOHEADER* h = reinterpret_cast(pmt->pbFormat); - assert(h); + RTC_DCHECK(h); capability.directShowCapabilityIndex = tmp; capability.width = h->bmiHeader.biWidth; capability.height = h->bmiHeader.biHeight; @@ -427,7 +427,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) if (pmt->formattype == FORMAT_VideoInfo2) { VIDEOINFOHEADER2* h = reinterpret_cast(pmt->pbFormat); - assert(h); + RTC_DCHECK(h); capability.directShowCapabilityIndex = tmp; capability.width = h->bmiHeader.biWidth; capability.height = h->bmiHeader.biHeight; @@ -568,7 +568,7 @@ void DeviceInfoDS::GetProductId(const char* devicePath, // Find the second occurrence. pos = strchr(pos + 1, '&'); uint32_t bytesToCopy = (uint32_t)(pos - startPos); - if (pos && (bytesToCopy <= productUniqueIdUTF8Length) && + if (pos && (bytesToCopy < productUniqueIdUTF8Length) && bytesToCopy <= kVideoCaptureProductIdLength) { strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, (char*)startPos, bytesToCopy); diff --git a/modules/video_capture/windows/sink_filter_ds.cc b/modules/video_capture/windows/sink_filter_ds.cc index 9019b127cf..e4be7aa14f 100644 --- a/modules/video_capture/windows/sink_filter_ds.cc +++ b/modules/video_capture/windows/sink_filter_ds.cc @@ -58,7 +58,7 @@ class EnumPins : public IEnumPins { } STDMETHOD(Clone)(IEnumPins** pins) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -83,7 +83,7 @@ class EnumPins : public IEnumPins { } STDMETHOD(Skip)(ULONG count) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -274,7 +274,7 @@ class MediaTypesEnum : public IEnumMediaTypes { // IEnumMediaTypes STDMETHOD(Clone)(IEnumMediaTypes** pins) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -364,7 +364,7 @@ class MediaTypesEnum : public IEnumMediaTypes { } STDMETHOD(Skip)(ULONG count) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -538,7 +538,7 @@ STDMETHODIMP CaptureInputPin::Connect(IPin* receive_pin, return VFW_E_NOT_STOPPED; if (receive_pin_) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return VFW_E_ALREADY_CONNECTED; } @@ -564,7 +564,7 @@ STDMETHODIMP CaptureInputPin::ReceiveConnection( RTC_DCHECK(Filter()->IsStopped()); if (receive_pin_) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return VFW_E_ALREADY_CONNECTED; } diff --git a/modules/video_capture/windows/video_capture_ds.cc b/modules/video_capture/windows/video_capture_ds.cc index 6dca74750c..1a1e51934d 100644 --- a/modules/video_capture/windows/video_capture_ds.cc +++ b/modules/video_capture/windows/video_capture_ds.cc @@ -57,7 +57,7 @@ VideoCaptureDS::~VideoCaptureDS() { int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) { const int32_t nameLength = 
(int32_t)strlen((char*)deviceUniqueIdUTF8); - if (nameLength > kVideoCaptureUniqueNameLength) + if (nameLength >= kVideoCaptureUniqueNameLength) return -1; // Store the device name diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn index b28517e8be..50f2e8d836 100644 --- a/modules/video_coding/BUILD.gn +++ b/modules/video_coding/BUILD.gn @@ -985,6 +985,7 @@ if (rtc_include_tests) { "utility/qp_parser_unittest.cc", "utility/quality_scaler_unittest.cc", "utility/simulcast_rate_allocator_unittest.cc", + "utility/vp9_uncompressed_header_parser_unittest.cc", "video_codec_initializer_unittest.cc", "video_receiver_unittest.cc", ] diff --git a/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/modules/video_coding/codecs/av1/libaom_av1_encoder.cc index 8c82476b7a..034709a989 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_encoder.cc +++ b/modules/video_coding/codecs/av1/libaom_av1_encoder.cc @@ -214,6 +214,11 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings, cfg_.kf_mode = AOM_KF_DISABLED; cfg_.rc_min_quantizer = kQpMin; cfg_.rc_max_quantizer = encoder_settings_.qpMax; + cfg_.rc_undershoot_pct = 50; + cfg_.rc_overshoot_pct = 50; + cfg_.rc_buf_initial_sz = 600; + cfg_.rc_buf_optimal_sz = 600; + cfg_.rc_buf_sz = 1000; cfg_.g_usage = kUsageProfile; cfg_.g_error_resilient = 0; // Low-latency settings. @@ -588,12 +593,26 @@ int32_t LibaomAv1Encoder::Encode( // kNative. As a workaround to this, we perform ToI420() a second time. // TODO(https://crbug.com/webrtc/12602): When Android buffers have a correct // ToI420() implementaion, remove his workaround. + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + converted_buffer->type()) + << " image to I420. Can't encode frame."; + return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE; + } if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 && converted_buffer->type() != VideoFrameBuffer::Type::kI420A) { converted_buffer = converted_buffer->ToI420(); RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 || converted_buffer->type() == VideoFrameBuffer::Type::kI420A); } + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + converted_buffer->type()) + << " image to I420. Can't encode frame."; + return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE; + } prepped_input_frame = VideoFrame(converted_buffer, frame.timestamp(), frame.render_time_ms(), frame.rotation()); } @@ -671,8 +690,15 @@ int32_t LibaomAv1Encoder::Encode( encoded_image.content_type_ = VideoContentType::UNSPECIFIED; // If encoded image width/height info are added to aom_codec_cx_pkt_t, // use those values in lieu of the values in frame. 
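The new rc_ settings above bound how far the AV1 rate controller may drift from the target bitrate and size its virtual buffer in milliseconds. A sketch of setting the same fields through libaom's public encoder-config API, assuming the defaults from aom_codec_enc_config_default are otherwise acceptable.

#include <aom/aom_encoder.h>
#include <aom/aomcx.h>
#include <cstdio>

int main() {
  aom_codec_enc_cfg_t cfg;
  // Third argument selects the usage profile; 0 keeps the library default.
  if (aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg, 0) != AOM_CODEC_OK) {
    std::fprintf(stderr, "aom_codec_enc_config_default failed\n");
    return 1;
  }
  // Allowed drift below/above the target bitrate, in percent.
  cfg.rc_undershoot_pct = 50;
  cfg.rc_overshoot_pct = 50;
  // Virtual buffer model, expressed in milliseconds at the target bitrate.
  cfg.rc_buf_initial_sz = 600;
  cfg.rc_buf_optimal_sz = 600;
  cfg.rc_buf_sz = 1000;
  std::printf("buffer: %u/%u/%u ms\n", cfg.rc_buf_initial_sz, cfg.rc_buf_optimal_sz,
              cfg.rc_buf_sz);
  return 0;
}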
- encoded_image._encodedHeight = frame.height(); - encoded_image._encodedWidth = frame.width(); + if (svc_params_) { + int n = svc_params_->scaling_factor_num[layer_frame.SpatialId()]; + int d = svc_params_->scaling_factor_den[layer_frame.SpatialId()]; + encoded_image._encodedWidth = cfg_.g_w * n / d; + encoded_image._encodedHeight = cfg_.g_h * n / d; + } else { + encoded_image._encodedWidth = cfg_.g_w; + encoded_image._encodedHeight = cfg_.g_h; + } encoded_image.timing_.flags = VideoSendTiming::kInvalid; int qp = -1; ret = aom_codec_control(&ctx_, AOME_GET_LAST_QUANTIZER, &qp); diff --git a/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc b/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc index ea77e091af..96057a0ce2 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc +++ b/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc @@ -25,6 +25,7 @@ namespace webrtc { namespace { using ::testing::ElementsAre; +using ::testing::Field; using ::testing::IsEmpty; using ::testing::SizeIs; @@ -135,5 +136,36 @@ TEST(LibaomAv1EncoderTest, EncoderInfoProvidesFpsAllocation) { EXPECT_THAT(encoder_info.fps_allocation[3], IsEmpty()); } +TEST(LibaomAv1EncoderTest, PopulatesEncodedFrameSize) { + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + ASSERT_GT(codec_settings.width, 4); + // Configure encoder with 3 spatial layers. + codec_settings.SetScalabilityMode("L3T1"); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + + using Frame = EncodedVideoFrameProducer::EncodedFrame; + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode(); + EXPECT_THAT( + encoded_frames, + ElementsAre( + Field(&Frame::encoded_image, + AllOf(Field(&EncodedImage::_encodedWidth, + codec_settings.width / 4), + Field(&EncodedImage::_encodedHeight, + codec_settings.height / 4))), + Field(&Frame::encoded_image, + AllOf(Field(&EncodedImage::_encodedWidth, + codec_settings.width / 2), + Field(&EncodedImage::_encodedHeight, + codec_settings.height / 2))), + Field(&Frame::encoded_image, + AllOf(Field(&EncodedImage::_encodedWidth, codec_settings.width), + Field(&EncodedImage::_encodedHeight, + codec_settings.height))))); +} + } // namespace } // namespace webrtc diff --git a/modules/video_coding/codecs/h264/h264.cc b/modules/video_coding/codecs/h264/h264.cc index 016d0aa538..14e1691153 100644 --- a/modules/video_coding/codecs/h264/h264.cc +++ b/modules/video_coding/codecs/h264/h264.cc @@ -17,6 +17,7 @@ #include "absl/types/optional.h" #include "api/video_codecs/sdp_video_format.h" #include "media/base/media_constants.h" +#include "rtc_base/trace_event.h" #if defined(WEBRTC_USE_H264) #include "modules/video_coding/codecs/h264/h264_decoder_impl.h" @@ -65,6 +66,7 @@ void DisableRtcUseH264() { } std::vector SupportedH264Codecs() { + TRACE_EVENT0("webrtc", __func__); if (!IsH264CodecSupported()) return std::vector(); // We only support encoding Constrained Baseline Profile (CBP), but the diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc index 6f37b52fd8..83f9a77614 100644 --- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc +++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc @@ -54,6 +54,16 @@ enum H264DecoderImplEvent { kH264DecoderEventMax = 16, }; +struct ScopedPtrAVFreePacket { + void operator()(AVPacket* packet) { av_packet_free(&packet); } +}; 
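With the L3T1 configuration used by PopulatesEncodedFrameSize, each spatial layer's encoded dimensions come from the configured frame size scaled by that layer's numerator/denominator pair, which is why the test expects width/4, width/2 and the full width. A small sketch of that arithmetic; the 1/4, 1/2, 1/1 factors are the values implied by the test, not queried from the encoder.

#include <cstdio>

struct LayerScaling {
  int num;
  int den;
};

// Encoded size for one spatial layer, mirroring encoded_width = g_w * n / d above.
void EncodedSize(int frame_width, int frame_height, LayerScaling s,
                 int* encoded_width, int* encoded_height) {
  *encoded_width = frame_width * s.num / s.den;
  *encoded_height = frame_height * s.num / s.den;
}

int main() {
  // Three spatial layers as in the L3T1 test: quarter, half and full resolution.
  const LayerScaling layers[] = {{1, 4}, {1, 2}, {1, 1}};
  for (const LayerScaling& layer : layers) {
    int w = 0;
    int h = 0;
    EncodedSize(/*frame_width=*/1280, /*frame_height=*/720, layer, &w, &h);
    std::printf("%dx%d\n", w, h);
  }
}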
+typedef std::unique_ptr ScopedAVPacket; + +ScopedAVPacket MakeScopedAVPacket() { + ScopedAVPacket packet(av_packet_alloc()); + return packet; +} + } // namespace int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context, @@ -202,7 +212,7 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings, // a pointer |this|. av_context_->opaque = this; - AVCodec* codec = avcodec_find_decoder(av_context_->codec_id); + const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id); if (!codec) { // This is an indication that FFmpeg has not been initialized or it has not // been compiled/initialized with the correct set of codecs. @@ -261,21 +271,25 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image, return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; } - AVPacket packet; - av_init_packet(&packet); + ScopedAVPacket packet = MakeScopedAVPacket(); + if (!packet) { + ReportError(); + return WEBRTC_VIDEO_CODEC_ERROR; + } // packet.data has a non-const type, but isn't modified by // avcodec_send_packet. - packet.data = const_cast(input_image.data()); + packet->data = const_cast(input_image.data()); if (input_image.size() > static_cast(std::numeric_limits::max())) { ReportError(); return WEBRTC_VIDEO_CODEC_ERROR; } - packet.size = static_cast(input_image.size()); + packet->size = static_cast(input_image.size()); int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000; // ms -> μs av_context_->reordered_opaque = frame_timestamp_us; - int result = avcodec_send_packet(av_context_.get(), &packet); + int result = avcodec_send_packet(av_context_.get(), packet.get()); + if (result < 0) { RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result; ReportError(); diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc index 949c51bafa..af0393976e 100644 --- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc +++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc @@ -16,6 +16,7 @@ #include "modules/video_coding/codecs/h264/h264_encoder_impl.h" +#include #include #include @@ -241,7 +242,8 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst, configurations_[i].frame_dropping_on = codec_.H264()->frameDroppingOn; configurations_[i].key_frame_interval = codec_.H264()->keyFrameInterval; configurations_[i].num_temporal_layers = - codec_.simulcastStream[idx].numberOfTemporalLayers; + std::max(codec_.H264()->numberOfTemporalLayers, + codec_.simulcastStream[idx].numberOfTemporalLayers); // Create downscaled image buffers. if (i > 0) { @@ -445,7 +447,7 @@ int32_t H264EncoderImpl::Encode( pictures_[i].iStride[0], pictures_[i].pData[1], pictures_[i].iStride[1], pictures_[i].pData[2], pictures_[i].iStride[2], configurations_[i].width, - configurations_[i].height, libyuv::kFilterBilinear); + configurations_[i].height, libyuv::kFilterBox); } if (!configurations_[i].sending) { @@ -578,7 +580,13 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const { encoder_params.iMaxBitrate; encoder_params.iTemporalLayerNum = configurations_[i].num_temporal_layers; if (encoder_params.iTemporalLayerNum > 1) { - encoder_params.iNumRefFrame = 1; + // iNumRefFrame specifies total number of reference buffers to allocate. + // For N temporal layers we need at least (N - 1) buffers to store last + // encoded frames of all reference temporal layers. + // Note that there is no API in OpenH264 encoder to specify exact set of + // references to be used to prediction of a given frame. 
Encoder can + // theoretically use all available reference buffers. + encoder_params.iNumRefFrame = encoder_params.iTemporalLayerNum - 1; } RTC_LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "." << OPENH264_MINOR; diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc index 4c11f6ab03..dee5b1b939 100644 --- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc +++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc @@ -128,6 +128,8 @@ std::string CodecSpecificToString(const VideoCodec& codec) { case kVideoCodecH264: ss << "frame_dropping: " << codec.H264().frameDroppingOn; ss << "\nkey_frame_interval: " << codec.H264().keyFrameInterval; + ss << "\nnum_temporal_layers: " + << static_cast(codec.H264().numberOfTemporalLayers); break; default: break; @@ -214,6 +216,8 @@ void VideoCodecTestFixtureImpl::Config::SetCodecSettings( case kVideoCodecH264: codec_settings.H264()->frameDroppingOn = frame_dropper_on; codec_settings.H264()->keyFrameInterval = kBaseKeyFrameInterval; + codec_settings.H264()->numberOfTemporalLayers = + static_cast(num_temporal_layers); break; default: break; @@ -236,6 +240,8 @@ size_t VideoCodecTestFixtureImpl::Config::NumberOfTemporalLayers() const { return codec_settings.VP8().numberOfTemporalLayers; } else if (codec_settings.codecType == kVideoCodecVP9) { return codec_settings.VP9().numberOfTemporalLayers; + } else if (codec_settings.codecType == kVideoCodecH264) { + return codec_settings.H264().numberOfTemporalLayers; } else { return 1; } diff --git a/modules/video_coding/codecs/test/videocodec_test_libvpx.cc b/modules/video_coding/codecs/test/videocodec_test_libvpx.cc index fa768927b0..0eb0d5a284 100644 --- a/modules/video_coding/codecs/test/videocodec_test_libvpx.cc +++ b/modules/video_coding/codecs/test/videocodec_test_libvpx.cc @@ -222,21 +222,6 @@ TEST(VideoCodecTestLibvpx, HighBitrateVP8) { fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); } -// The tests below are currently disabled for Android. For ARM, the encoder -// uses |cpu_speed| = 12, as opposed to default |cpu_speed| <= 6 for x86, -// which leads to significantly different quality. The quality and rate control -// settings in the tests below are defined for encoder speed setting -// |cpu_speed| <= ~6. A number of settings would need to be significantly -// modified for the |cpu_speed| = 12 case. For now, keep the tests below -// disabled on Android. Some quality parameter in the above test has been -// adjusted to also pass for |cpu_speed| <= 12. 
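The OpenH264 changes above take the larger of the codec-level and per-simulcast-stream temporal-layer counts and then reserve N - 1 reference buffers for N temporal layers. A small sketch of that arithmetic with hypothetical stand-in structs.

#include <algorithm>
#include <cstdio>

// Hypothetical stand-ins for the two sources of the temporal-layer count.
struct CodecH264Settings { int number_of_temporal_layers; };
struct SimulcastStreamSettings { int number_of_temporal_layers; };

int ResolveTemporalLayers(const CodecH264Settings& codec,
                          const SimulcastStreamSettings& stream) {
  return std::max(codec.number_of_temporal_layers, stream.number_of_temporal_layers);
}

// For N temporal layers, at least N - 1 buffers are needed to keep the last
// encoded frame of every reference layer; otherwise keep the encoder default.
int RequiredRefFrames(int temporal_layers, int default_ref_frames) {
  return temporal_layers > 1 ? temporal_layers - 1 : default_ref_frames;
}

int main() {
  const int layers =
      ResolveTemporalLayers(CodecH264Settings{3}, SimulcastStreamSettings{1});
  std::printf("layers=%d ref_frames=%d\n", layers,
              RequiredRefFrames(layers, /*default_ref_frames=*/1));
}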
- -// TODO(webrtc:9267): Fails on iOS -#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) -#define MAYBE_ChangeBitrateVP8 DISABLED_ChangeBitrateVP8 -#else -#define MAYBE_ChangeBitrateVP8 ChangeBitrateVP8 -#endif TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) { auto config = CreateConfig(); config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false, @@ -265,12 +250,6 @@ TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) { fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); } -// TODO(webrtc:9267): Fails on iOS -#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) -#define MAYBE_ChangeFramerateVP8 DISABLED_ChangeFramerateVP8 -#else -#define MAYBE_ChangeFramerateVP8 ChangeFramerateVP8 -#endif TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) { auto config = CreateConfig(); config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false, @@ -286,7 +265,7 @@ TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) { #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) std::vector rc_thresholds = { - {10, 2, 60, 1, 0.3, 0.3, 0, 1}, + {10, 2.42, 60, 1, 0.3, 0.3, 0, 1}, {10, 2, 30, 1, 0.3, 0.3, 0, 0}, {10, 2, 10, 1, 0.3, 0.2, 0, 0}}; #else @@ -298,7 +277,7 @@ TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) { #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) std::vector quality_thresholds = { - {31, 30, 0.85, 0.84}, {31.5, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}}; + {31, 30, 0.85, 0.84}, {31.4, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}}; #else std::vector quality_thresholds = { {31, 30, 0.87, 0.85}, {32, 31, 0.88, 0.85}, {32, 30, 0.87, 0.82}}; diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc index 6a8a90169f..a994193031 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc @@ -1037,7 +1037,7 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame, // would like to use the duration of the previous frame. Unfortunately the // rate control seems to be off with that setup. Using the average input // frame rate to calculate an average duration for now. - assert(codec_.maxFramerate > 0); + RTC_DCHECK_GT(codec_.maxFramerate, 0); uint32_t duration = kRtpTicksPerSecond / codec_.maxFramerate; int error = WEBRTC_VIDEO_CODEC_OK; @@ -1074,7 +1074,7 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, int stream_idx, int encoder_idx, uint32_t timestamp) { - assert(codec_specific != NULL); + RTC_DCHECK(codec_specific); codec_specific->codecType = kVideoCodecVP8; codec_specific->codecSpecific.VP8.keyIdx = kNoKeyIdx; // TODO(hlundin) populate this @@ -1336,6 +1336,13 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr buffer) { if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 && converted_buffer->type() != VideoFrameBuffer::Type::kI420A) { converted_buffer = converted_buffer->ToI420(); + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + converted_buffer->type()) + << " image to I420. 
Can't encode frame."; + return {}; + } RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 || converted_buffer->type() == VideoFrameBuffer::Type::kI420A); } @@ -1388,8 +1395,6 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr buffer) { } scaled_buffer = mapped_scaled_buffer; } - RTC_DCHECK_EQ(scaled_buffer->type(), mapped_buffer->type()) - << "Scaled frames must have the same type as the mapped frame."; if (!IsCompatibleVideoFrameBufferType(scaled_buffer->type(), mapped_buffer->type())) { RTC_LOG(LS_ERROR) << "When scaling " @@ -1399,6 +1404,10 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr buffer) { << " instead of " << VideoFrameBufferTypeToString(mapped_buffer->type()) << ". Can't encode frame."; + RTC_NOTREACHED() << "Scaled buffer type " + << VideoFrameBufferTypeToString(scaled_buffer->type()) + << " is not compatible with mapped buffer type " + << VideoFrameBufferTypeToString(mapped_buffer->type()); return {}; } SetRawImagePlanes(&raw_images_[i], scaled_buffer); diff --git a/modules/video_coding/codecs/vp9/include/vp9_globals.h b/modules/video_coding/codecs/vp9/include/vp9_globals.h index 6f9d09933f..34aa0bc6cf 100644 --- a/modules/video_coding/codecs/vp9/include/vp9_globals.h +++ b/modules/video_coding/codecs/vp9/include/vp9_globals.h @@ -18,6 +18,7 @@ #include #include "modules/video_coding/codecs/interface/common_constants.h" +#include "rtc_base/checks.h" namespace webrtc { @@ -131,7 +132,7 @@ struct GofInfoVP9 { pid_diff[7][1] = 2; break; default: - assert(false); + RTC_NOTREACHED(); } } diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc index 2a7b125d36..511e6df585 100644 --- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc @@ -1068,8 +1068,15 @@ int LibvpxVp9Encoder::Encode(const VideoFrame& input_image, break; } default: { - i010_copy = - I010Buffer::Copy(*input_image.video_frame_buffer()->ToI420()); + auto i420_buffer = input_image.video_frame_buffer()->ToI420(); + if (!i420_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + input_image.video_frame_buffer()->type()) + << " image to I420. Can't encode frame."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + i010_copy = I010Buffer::Copy(*i420_buffer); i010_buffer = i010_copy.get(); } } @@ -1908,6 +1915,12 @@ rtc::scoped_refptr LibvpxVp9Encoder::PrepareBufferForProfile0( if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 && converted_buffer->type() != VideoFrameBuffer::Type::kI420A) { converted_buffer = converted_buffer->ToI420(); + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString(buffer->type()) + << " image to I420. 
Can't encode frame."; + return {}; + } RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 || converted_buffer->type() == VideoFrameBuffer::Type::kI420A); } diff --git a/modules/video_coding/decoding_state.cc b/modules/video_coding/decoding_state.cc index a951358992..5e405cbd05 100644 --- a/modules/video_coding/decoding_state.cc +++ b/modules/video_coding/decoding_state.cc @@ -55,21 +55,22 @@ uint16_t VCMDecodingState::sequence_num() const { } bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const { - assert(frame != NULL); + RTC_DCHECK(frame); if (in_initial_state_) return false; return !IsNewerTimestamp(frame->Timestamp(), time_stamp_); } bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const { - assert(packet != NULL); + RTC_DCHECK(packet); if (in_initial_state_) return false; return !IsNewerTimestamp(packet->timestamp, time_stamp_); } void VCMDecodingState::SetState(const VCMFrameBuffer* frame) { - assert(frame != NULL && frame->GetHighSeqNum() >= 0); + RTC_DCHECK(frame); + RTC_CHECK_GE(frame->GetHighSeqNum(), 0); if (!UsingFlexibleMode(frame)) UpdateSyncState(frame); sequence_num_ = static_cast(frame->GetHighSeqNum()); @@ -150,7 +151,7 @@ bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) { } void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) { - assert(packet != NULL); + RTC_DCHECK(packet); if (packet->timestamp == time_stamp_) { // Late packet belonging to the last decoded frame - make sure we update the // last decoded sequence number. @@ -204,7 +205,7 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const { // - Sequence numbers. // Return true when in initial state. // Note that when a method is not applicable it will return false. - assert(frame != NULL); + RTC_DCHECK(frame); // A key frame is always considered continuous as it doesn't refer to any // frames and therefore won't introduce any errors even if prior frames are // missing. diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc index 0f64ab1449..8f73e73bad 100644 --- a/modules/video_coding/frame_buffer.cc +++ b/modules/video_coding/frame_buffer.cc @@ -75,7 +75,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs, const FrameData& frame_data) { TRACE_EVENT0("webrtc", "VCMFrameBuffer::InsertPacket"); - assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0)); + RTC_DCHECK(!(NULL == packet.dataPtr && packet.sizeBytes > 0)); if (packet.dataPtr != NULL) { _payloadType = packet.payloadType; } @@ -230,19 +230,19 @@ void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) { switch (state) { case kStateIncomplete: // we can go to this state from state kStateEmpty - assert(_state == kStateEmpty); + RTC_DCHECK_EQ(_state, kStateEmpty); // Do nothing, we received a packet break; case kStateComplete: - assert(_state == kStateEmpty || _state == kStateIncomplete); + RTC_DCHECK(_state == kStateEmpty || _state == kStateIncomplete); break; case kStateEmpty: // Should only be set to empty through Reset(). 
- assert(false); + RTC_NOTREACHED(); break; } _state = state; diff --git a/modules/video_coding/frame_buffer2.cc b/modules/video_coding/frame_buffer2.cc index 903b9fb623..80f9eb1814 100644 --- a/modules/video_coding/frame_buffer2.cc +++ b/modules/video_coding/frame_buffer2.cc @@ -63,7 +63,11 @@ FrameBuffer::FrameBuffer(Clock* clock, last_log_non_decoded_ms_(-kLogNonDecodedIntervalMs), add_rtt_to_playout_delay_( webrtc::field_trial::IsEnabled("WebRTC-AddRttToPlayoutDelay")), - rtt_mult_settings_(RttMultExperiment::GetRttMultValue()) { + rtt_mult_settings_(RttMultExperiment::GetRttMultValue()), + zero_playout_delay_max_decode_queue_size_("max_decode_queue_size", + kMaxFramesBuffered) { + ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_}, + field_trial::FindFullName("WebRTC-ZeroPlayoutDelay")); callback_checker_.Detach(); } @@ -110,6 +114,8 @@ void FrameBuffer::StartWaitForNextFrameOnQueue() { if (!frames_to_decode_.empty()) { // We have frames, deliver! frame = absl::WrapUnique(GetNextFrame()); + timing_->SetLastDecodeScheduledTimestamp( + clock_->TimeInMilliseconds()); } else if (clock_->TimeInMilliseconds() < latest_return_time_ms_) { // If there's no frames to decode and there is still time left, it // means that the frame buffer was cleared between creation and @@ -210,7 +216,11 @@ int64_t FrameBuffer::FindNextFrame(int64_t now_ms) { if (frame->RenderTime() == -1) { frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms)); } - wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms); + bool too_many_frames_queued = + frames_.size() > zero_playout_delay_max_decode_queue_size_ ? true + : false; + wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms, + too_many_frames_queued); // This will cause the frame buffer to prefer high framerate rather // than high resolution in the case of the decoder not decoding fast diff --git a/modules/video_coding/frame_buffer2.h b/modules/video_coding/frame_buffer2.h index 721668a123..c7d8fcd403 100644 --- a/modules/video_coding/frame_buffer2.h +++ b/modules/video_coding/frame_buffer2.h @@ -25,6 +25,7 @@ #include "modules/video_coding/jitter_estimator.h" #include "modules/video_coding/utility/decoded_frames_history.h" #include "rtc_base/event.h" +#include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/experiments/rtt_mult_experiment.h" #include "rtc_base/numerics/sequence_number_util.h" #include "rtc_base/synchronization/mutex.h" @@ -188,6 +189,13 @@ class FrameBuffer { // rtt_mult experiment settings. const absl::optional rtt_mult_settings_; + + // Maximum number of frames in the decode queue to allow pacing. If the + // queue grows beyond the max limit, pacing will be disabled and frames will + // be pushed to the decoder as soon as possible. This only has an effect + // when the low-latency rendering path is active, which is indicated by + // the frame's render time == 0. 
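// Editor's note: illustrative sketch, not part of this patch. The member
// declared just below (zero_playout_delay_max_decode_queue_size_) is filled in
// from the "WebRTC-ZeroPlayoutDelay" field trial string (see the
// frame_buffer2.cc constructor change above). The hand-rolled parser here is a
// stand-in for webrtc::FieldTrialParameter/ParseFieldTrial and only shows the
// "key:value/key:value/" shape of such trial strings; the default value 600 is
// arbitrary for this sketch (the real code falls back to kMaxFramesBuffered).
#include <cstddef>
#include <iostream>
#include <string>

size_t ParseSizeParam(const std::string& trial, const std::string& key,
                      size_t default_value) {
  size_t pos = trial.find(key + ":");
  if (pos == std::string::npos) return default_value;
  pos += key.size() + 1;
  size_t end = trial.find('/', pos);
  return static_cast<size_t>(std::stoul(trial.substr(pos, end - pos)));
}

int main() {
  const std::string kTrial = "max_decode_queue_size:8/min_pacing:3ms/";
  std::cout << ParseSizeParam(kTrial, "max_decode_queue_size", 600) << "\n";  // 8
  std::cout << ParseSizeParam("", "max_decode_queue_size", 600) << "\n";      // 600
}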
+ FieldTrialParameter zero_playout_delay_max_decode_queue_size_; }; } // namespace video_coding diff --git a/modules/video_coding/frame_buffer2_unittest.cc b/modules/video_coding/frame_buffer2_unittest.cc index 68acf813ae..f2a0589411 100644 --- a/modules/video_coding/frame_buffer2_unittest.cc +++ b/modules/video_coding/frame_buffer2_unittest.cc @@ -56,7 +56,8 @@ class VCMTimingFake : public VCMTiming { } int64_t MaxWaitingTime(int64_t render_time_ms, - int64_t now_ms) const override { + int64_t now_ms, + bool too_many_frames_queued) const override { return render_time_ms - now_ms - kDecodeTime; } diff --git a/modules/video_coding/generic_decoder.cc b/modules/video_coding/generic_decoder.cc index 621fd73972..acb4307f3f 100644 --- a/modules/video_coding/generic_decoder.cc +++ b/modules/video_coding/generic_decoder.cc @@ -113,7 +113,8 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage, if (!frameInfo) { RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping " - "this one."; + "frame with timestamp " + << decodedImage.timestamp(); return; } diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc index 772098a738..75142e93ee 100644 --- a/modules/video_coding/jitter_buffer.cc +++ b/modules/video_coding/jitter_buffer.cc @@ -347,7 +347,7 @@ VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet, int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame, bool* retransmitted) const { - assert(retransmitted); + RTC_DCHECK(retransmitted); MutexLock lock(&mutex_); const VCMFrameBuffer* frame_buffer = static_cast(frame); @@ -498,7 +498,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, RecycleFrameBuffer(frame); return kFlushIndicator; default: - assert(false); + RTC_NOTREACHED(); } return buffer_state; } @@ -580,8 +580,8 @@ void VCMJitterBuffer::SetNackSettings(size_t max_nack_list_size, int max_packet_age_to_nack, int max_incomplete_time_ms) { MutexLock lock(&mutex_); - assert(max_packet_age_to_nack >= 0); - assert(max_incomplete_time_ms_ >= 0); + RTC_DCHECK_GE(max_packet_age_to_nack, 0); + RTC_DCHECK_GE(max_incomplete_time_ms_, 0); max_nack_list_size_ = max_nack_list_size; max_packet_age_to_nack_ = max_packet_age_to_nack; max_incomplete_time_ms_ = max_incomplete_time_ms; @@ -600,7 +600,7 @@ int VCMJitterBuffer::NonContinuousOrIncompleteDuration() { uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber( const VCMFrameBuffer& frame) const { - assert(frame.GetLowSeqNum() >= 0); + RTC_DCHECK_GE(frame.GetLowSeqNum(), 0); if (frame.HaveFirstPacket()) return frame.GetLowSeqNum(); diff --git a/modules/video_coding/jitter_estimator.cc b/modules/video_coding/jitter_estimator.cc index 44e2a9811e..92a298c259 100644 --- a/modules/video_coding/jitter_estimator.cc +++ b/modules/video_coding/jitter_estimator.cc @@ -247,7 +247,7 @@ void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS, hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma; if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0)) { - assert(false); + RTC_NOTREACHED(); return; } kalmanGain[0] = Mh[0] / hMh_sigma; @@ -276,11 +276,11 @@ void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS, kalmanGain[1] * deltaFSBytes * t01; // Covariance matrix, must be positive semi-definite. 
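// Editor's note: illustrative sketch, not part of this patch. The DCHECK just
// below asserts that the 2x2 Kalman covariance stays positive semi-definite.
// For a symmetric 2x2 matrix this is equivalent to trace >= 0 and
// determinant >= 0 (both eigenvalues nonnegative); the extra [0][0] >= 0 term
// in the production check is then implied, but cheap to verify.
#include <iostream>

bool IsPositiveSemiDefinite2x2(const double m[2][2]) {
  const double trace = m[0][0] + m[1][1];
  const double det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
  return trace >= 0.0 && det >= 0.0 && m[0][0] >= 0.0;
}

int main() {
  const double ok[2][2] = {{2.0, 1.0}, {1.0, 2.0}};   // eigenvalues 1 and 3
  const double bad[2][2] = {{1.0, 2.0}, {2.0, 1.0}};  // eigenvalues -1 and 3
  std::cout << IsPositiveSemiDefinite2x2(ok) << " "
            << IsPositiveSemiDefinite2x2(bad) << "\n";  // prints "1 0"
}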
- assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 && - _thetaCov[0][0] * _thetaCov[1][1] - - _thetaCov[0][1] * _thetaCov[1][0] >= - 0 && - _thetaCov[0][0] >= 0); + RTC_DCHECK(_thetaCov[0][0] + _thetaCov[1][1] >= 0 && + _thetaCov[0][0] * _thetaCov[1][1] - + _thetaCov[0][1] * _thetaCov[1][0] >= + 0 && + _thetaCov[0][0] >= 0); } // Calculate difference in delay between a sample and the expected delay @@ -302,7 +302,7 @@ void VCMJitterEstimator::EstimateRandomJitter(double d_dT, _lastUpdateT = now; if (_alphaCount == 0) { - assert(false); + RTC_NOTREACHED(); return; } double alpha = @@ -428,7 +428,7 @@ double VCMJitterEstimator::GetFrameRate() const { double fps = 1000000.0 / fps_counter_.ComputeMean(); // Sanity check. - assert(fps >= 0.0); + RTC_DCHECK_GE(fps, 0.0); if (fps > kMaxFramerateEstimate) { fps = kMaxFramerateEstimate; } diff --git a/modules/video_coding/media_opt_util.cc b/modules/video_coding/media_opt_util.cc index b47eeb55d3..0136ae8ec9 100644 --- a/modules/video_coding/media_opt_util.cc +++ b/modules/video_coding/media_opt_util.cc @@ -87,10 +87,10 @@ VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs, _lowRttNackMs(lowRttNackThresholdMs), _highRttNackMs(highRttNackThresholdMs), _maxFramesFec(1) { - assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1); - assert(highRttNackThresholdMs == -1 || - lowRttNackThresholdMs <= highRttNackThresholdMs); - assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1); + RTC_DCHECK(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1); + RTC_DCHECK(highRttNackThresholdMs == -1 || + lowRttNackThresholdMs <= highRttNackThresholdMs); + RTC_DCHECK(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1); _type = kNackFec; } @@ -384,7 +384,7 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) { indexTableKey = VCM_MIN(indexTableKey, kFecRateTableSize); // Check on table index - assert(indexTableKey < kFecRateTableSize); + RTC_DCHECK_LT(indexTableKey, kFecRateTableSize); // Protection factor for I frame codeRateKey = kFecRateTable[indexTableKey]; diff --git a/modules/video_coding/receiver.cc b/modules/video_coding/receiver.cc index 6b942fbe57..8e8f0e1ee2 100644 --- a/modules/video_coding/receiver.cc +++ b/modules/video_coding/receiver.cc @@ -141,7 +141,8 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms, uint16_t new_max_wait_time = static_cast(VCM_MAX(available_wait_time, 0)); uint32_t wait_time_ms = rtc::saturated_cast( - timing_->MaxWaitingTime(render_time_ms, clock_->TimeInMilliseconds())); + timing_->MaxWaitingTime(render_time_ms, clock_->TimeInMilliseconds(), + /*too_many_frames_queued=*/false)); if (new_max_wait_time < wait_time_ms) { // We're not allowed to wait until the frame is supposed to be rendered, // waiting as long as we're allowed to avoid busy looping, and then return diff --git a/modules/video_coding/rtp_frame_reference_finder.cc b/modules/video_coding/rtp_frame_reference_finder.cc index a060f84777..a44b76bf15 100644 --- a/modules/video_coding/rtp_frame_reference_finder.cc +++ b/modules/video_coding/rtp_frame_reference_finder.cc @@ -142,31 +142,34 @@ T& RtpFrameReferenceFinderImpl::GetRefFinderAs() { } // namespace internal -RtpFrameReferenceFinder::RtpFrameReferenceFinder( - OnCompleteFrameCallback* frame_callback) - : RtpFrameReferenceFinder(frame_callback, 0) {} +RtpFrameReferenceFinder::RtpFrameReferenceFinder() + : RtpFrameReferenceFinder(0) {} RtpFrameReferenceFinder::RtpFrameReferenceFinder( - OnCompleteFrameCallback* 
frame_callback, int64_t picture_id_offset) : picture_id_offset_(picture_id_offset), - frame_callback_(frame_callback), impl_(std::make_unique()) {} RtpFrameReferenceFinder::~RtpFrameReferenceFinder() = default; -void RtpFrameReferenceFinder::ManageFrame( +RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::ManageFrame( std::unique_ptr frame) { // If we have cleared past this frame, drop it. if (cleared_to_seq_num_ != -1 && AheadOf(cleared_to_seq_num_, frame->first_seq_num())) { - return; + return {}; } - HandOffFrames(impl_->ManageFrame(std::move(frame))); + + auto frames = impl_->ManageFrame(std::move(frame)); + AddPictureIdOffset(frames); + return frames; } -void RtpFrameReferenceFinder::PaddingReceived(uint16_t seq_num) { - HandOffFrames(impl_->PaddingReceived(seq_num)); +RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::PaddingReceived( + uint16_t seq_num) { + auto frames = impl_->PaddingReceived(seq_num); + AddPictureIdOffset(frames); + return frames; } void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) { @@ -174,14 +177,12 @@ void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) { impl_->ClearTo(seq_num); } -void RtpFrameReferenceFinder::HandOffFrames(ReturnVector frames) { +void RtpFrameReferenceFinder::AddPictureIdOffset(ReturnVector& frames) { for (auto& frame : frames) { frame->SetId(frame->Id() + picture_id_offset_); for (size_t i = 0; i < frame->num_references; ++i) { frame->references[i] += picture_id_offset_; } - - frame_callback_->OnCompleteFrame(std::move(frame)); } } diff --git a/modules/video_coding/rtp_frame_reference_finder.h b/modules/video_coding/rtp_frame_reference_finder.h index 3577ea8285..d2447773a3 100644 --- a/modules/video_coding/rtp_frame_reference_finder.h +++ b/modules/video_coding/rtp_frame_reference_finder.h @@ -20,47 +20,38 @@ namespace internal { class RtpFrameReferenceFinderImpl; } // namespace internal -// A complete frame is a frame which has received all its packets and all its -// references are known. -class OnCompleteFrameCallback { - public: - virtual ~OnCompleteFrameCallback() {} - virtual void OnCompleteFrame(std::unique_ptr frame) = 0; -}; - class RtpFrameReferenceFinder { public: using ReturnVector = absl::InlinedVector, 3>; - explicit RtpFrameReferenceFinder(OnCompleteFrameCallback* frame_callback); - explicit RtpFrameReferenceFinder(OnCompleteFrameCallback* frame_callback, - int64_t picture_id_offset); + RtpFrameReferenceFinder(); + explicit RtpFrameReferenceFinder(int64_t picture_id_offset); ~RtpFrameReferenceFinder(); - // Manage this frame until: - // - We have all information needed to determine its references, after - // which |frame_callback_| is called with the completed frame, or - // - We have too many stashed frames (determined by |kMaxStashedFrames|) - // so we drop this frame, or - // - It gets cleared by ClearTo, which also means we drop it. - void ManageFrame(std::unique_ptr frame); + // The RtpFrameReferenceFinder will hold onto the frame until: + // - the required information to determine its references has been received, + // in which case it (and possibly other) frames are returned, or + // - There are too many stashed frames (determined by |kMaxStashedFrames|), + // in which case it gets dropped, or + // - It gets cleared by ClearTo, in which case its dropped. + // - The frame is old, in which case it also gets dropped. 
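// Editor's note: illustrative sketch, not part of this patch. With this change
// ManageFrame()/PaddingReceived() return the completed frames to the caller
// instead of invoking the removed OnCompleteFrameCallback (see the
// declarations just below and the updated unit tests later in this patch). A
// caller-side migration looks roughly like the snippet here; "Frame",
// "FinderLike" and "HandleCompleteFrame" are stand-ins invented for this
// example, not the real WebRTC types.
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct Frame {
  int64_t id;
};

struct FinderLike {
  // New-style API: completed frames are returned to the caller.
  std::vector<std::unique_ptr<Frame>> ManageFrame(std::unique_ptr<Frame> f) {
    std::vector<std::unique_ptr<Frame>> out;
    out.push_back(std::move(f));  // pretend the frame completed immediately
    return out;
  }
};

void HandleCompleteFrame(std::unique_ptr<Frame> frame) {
  std::cout << "complete frame " << frame->id << "\n";
}

int main() {
  FinderLike finder;
  // Before: the finder called a registered callback for each completed frame.
  // After: the caller drains the returned vector itself.
  for (auto& frame : finder.ManageFrame(std::make_unique<Frame>(Frame{7}))) {
    HandleCompleteFrame(std::move(frame));
  }
}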
+ ReturnVector ManageFrame(std::unique_ptr frame); // Notifies that padding has been received, which the reference finder // might need to calculate the references of a frame. - void PaddingReceived(uint16_t seq_num); + ReturnVector PaddingReceived(uint16_t seq_num); // Clear all stashed frames that include packets older than |seq_num|. void ClearTo(uint16_t seq_num); private: - void HandOffFrames(ReturnVector frames); + void AddPictureIdOffset(ReturnVector& frames); // How far frames have been cleared out of the buffer by RTP sequence number. // A frame will be cleared if it contains a packet with a sequence number // older than |cleared_to_seq_num_|. int cleared_to_seq_num_ = -1; const int64_t picture_id_offset_; - OnCompleteFrameCallback* frame_callback_; std::unique_ptr impl_; }; diff --git a/modules/video_coding/rtp_frame_reference_finder_unittest.cc b/modules/video_coding/rtp_frame_reference_finder_unittest.cc index 5141b496c0..a5b0fc49ce 100644 --- a/modules/video_coding/rtp_frame_reference_finder_unittest.cc +++ b/modules/video_coding/rtp_frame_reference_finder_unittest.cc @@ -60,28 +60,29 @@ std::unique_ptr CreateFrame( } } // namespace -class TestRtpFrameReferenceFinder : public ::testing::Test, - public OnCompleteFrameCallback { +class TestRtpFrameReferenceFinder : public ::testing::Test { protected: TestRtpFrameReferenceFinder() : rand_(0x8739211), - reference_finder_(new RtpFrameReferenceFinder(this)), + reference_finder_(std::make_unique()), frames_from_callback_(FrameComp()) {} uint16_t Rand() { return rand_.Rand(); } - void OnCompleteFrame(std::unique_ptr frame) override { - int64_t pid = frame->Id(); - uint16_t sidx = *frame->SpatialIndex(); - auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx)); - if (frame_it != frames_from_callback_.end()) { - ADD_FAILURE() << "Already received frame with (pid:sidx): (" << pid << ":" - << sidx << ")"; - return; + void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames) { + for (auto& frame : frames) { + int64_t pid = frame->Id(); + uint16_t sidx = *frame->SpatialIndex(); + auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx)); + if (frame_it != frames_from_callback_.end()) { + ADD_FAILURE() << "Already received frame with (pid:sidx): (" << pid + << ":" << sidx << ")"; + return; + } + + frames_from_callback_.insert( + std::make_pair(std::make_pair(pid, sidx), std::move(frame))); } - - frames_from_callback_.insert( - std::make_pair(std::make_pair(pid, sidx), std::move(frame))); } void InsertGeneric(uint16_t seq_num_start, @@ -91,14 +92,18 @@ class TestRtpFrameReferenceFinder : public ::testing::Test, CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecGeneric, RTPVideoTypeHeader()); - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void InsertH264(uint16_t seq_num_start, uint16_t seq_num_end, bool keyframe) { std::unique_ptr frame = CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264, RTPVideoTypeHeader()); - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); + } + + void InsertPadding(uint16_t seq_num) { + OnCompleteFrames(reference_finder_->PaddingReceived(seq_num)); } // Check if a frame with picture id |pid| and spatial index |sidx| has been @@ -165,7 +170,7 @@ TEST_F(TestRtpFrameReferenceFinder, PaddingPackets) { InsertGeneric(sn, sn, true); InsertGeneric(sn + 2, sn + 2, false); EXPECT_EQ(1UL, frames_from_callback_.size()); - 
reference_finder_->PaddingReceived(sn + 1); + InsertPadding(sn + 1); EXPECT_EQ(2UL, frames_from_callback_.size()); } @@ -173,8 +178,8 @@ TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReordered) { uint16_t sn = Rand(); InsertGeneric(sn, sn, true); - reference_finder_->PaddingReceived(sn + 1); - reference_finder_->PaddingReceived(sn + 4); + InsertPadding(sn + 1); + InsertPadding(sn + 4); InsertGeneric(sn + 2, sn + 3, false); EXPECT_EQ(2UL, frames_from_callback_.size()); @@ -186,12 +191,12 @@ TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReorderedMultipleKeyframes) { uint16_t sn = Rand(); InsertGeneric(sn, sn, true); - reference_finder_->PaddingReceived(sn + 1); - reference_finder_->PaddingReceived(sn + 4); + InsertPadding(sn + 1); + InsertPadding(sn + 4); InsertGeneric(sn + 2, sn + 3, false); InsertGeneric(sn + 5, sn + 5, true); - reference_finder_->PaddingReceived(sn + 6); - reference_finder_->PaddingReceived(sn + 9); + InsertPadding(sn + 6); + InsertPadding(sn + 9); InsertGeneric(sn + 7, sn + 8, false); EXPECT_EQ(4UL, frames_from_callback_.size()); @@ -308,7 +313,7 @@ TEST_F(TestRtpFrameReferenceFinder, Av1FrameNoDependencyDescriptor) { CreateFrame(/*seq_num_start=*/sn, /*seq_num_end=*/sn, /*keyframe=*/true, kVideoCodecAV1, RTPVideoTypeHeader()); - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); ASSERT_EQ(1UL, frames_from_callback_.size()); CheckReferencesGeneric(sn); diff --git a/modules/video_coding/session_info.cc b/modules/video_coding/session_info.cc index 07b9a9d6b5..477bbbe209 100644 --- a/modules/video_coding/session_info.cc +++ b/modules/video_coding/session_info.cc @@ -49,7 +49,7 @@ void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr, const uint8_t* new_base_ptr) { for (PacketIterator it = packets_.begin(); it != packets_.end(); ++it) if ((*it).dataPtr != NULL) { - assert(old_base_ptr != NULL && new_base_ptr != NULL); + RTC_DCHECK(old_base_ptr != NULL && new_base_ptr != NULL); (*it).dataPtr = new_base_ptr + ((*it).dataPtr - old_base_ptr); } } @@ -348,7 +348,7 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning( VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd( PacketIterator it) const { - assert((*it).codec() == kVideoCodecVP8); + RTC_DCHECK_EQ((*it).codec(), kVideoCodecVP8); PacketIterator prev_it = it; const int partition_id = absl::get((*it).video_header.video_type_header) diff --git a/modules/video_coding/timing.cc b/modules/video_coding/timing.cc index eddac4f5de..ea1b59cad7 100644 --- a/modules/video_coding/timing.cc +++ b/modules/video_coding/timing.cc @@ -34,9 +34,13 @@ VCMTiming::VCMTiming(Clock* clock) prev_frame_timestamp_(0), timing_frame_info_(), num_decoded_frames_(0), - low_latency_renderer_enabled_("enabled", true) { + low_latency_renderer_enabled_("enabled", true), + zero_playout_delay_min_pacing_("min_pacing", TimeDelta::Millis(0)), + last_decode_scheduled_ts_(0) { ParseFieldTrial({&low_latency_renderer_enabled_}, field_trial::FindFullName("WebRTC-LowLatencyRenderer")); + ParseFieldTrial({&zero_playout_delay_min_pacing_}, + field_trial::FindFullName("WebRTC-ZeroPlayoutDelay")); } void VCMTiming::Reset() { @@ -153,7 +157,7 @@ void VCMTiming::StopDecodeTimer(uint32_t /*time_stamp*/, void VCMTiming::StopDecodeTimer(int32_t decode_time_ms, int64_t now_ms) { MutexLock lock(&mutex_); codec_timer_->AddTiming(decode_time_ms, now_ms); - assert(decode_time_ms >= 0); + RTC_DCHECK_GE(decode_time_ms, 0); ++num_decoded_frames_; } @@ -168,6 
+172,12 @@ int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, return RenderTimeMsInternal(frame_timestamp, now_ms); } +void VCMTiming::SetLastDecodeScheduledTimestamp( + int64_t last_decode_scheduled_ts) { + MutexLock lock(&mutex_); + last_decode_scheduled_ts_ = last_decode_scheduled_ts; +} + int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const { constexpr int kLowLatencyRendererMaxPlayoutDelayMs = 500; @@ -195,18 +205,33 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp, int VCMTiming::RequiredDecodeTimeMs() const { const int decode_time_ms = codec_timer_->RequiredDecodeTimeMs(); - assert(decode_time_ms >= 0); + RTC_DCHECK_GE(decode_time_ms, 0); return decode_time_ms; } int64_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, - int64_t now_ms) const { + int64_t now_ms, + bool too_many_frames_queued) const { MutexLock lock(&mutex_); - const int64_t max_wait_time_ms = - render_time_ms - now_ms - RequiredDecodeTimeMs() - render_delay_ms_; - - return max_wait_time_ms; + if (render_time_ms == 0 && zero_playout_delay_min_pacing_->us() > 0 && + min_playout_delay_ms_ == 0 && max_playout_delay_ms_ > 0) { + // |render_time_ms| == 0 indicates that the frame should be decoded and + // rendered as soon as possible. However, the decoder can be choked if too + // many frames are sent at once. Therefore, limit the interframe delay to + // |zero_playout_delay_min_pacing_| unless too many frames are queued in + // which case the frames are sent to the decoder at once. + if (too_many_frames_queued) { + return 0; + } + int64_t earliest_next_decode_start_time = + last_decode_scheduled_ts_ + zero_playout_delay_min_pacing_->ms(); + int64_t max_wait_time_ms = now_ms >= earliest_next_decode_start_time + ? 0 + : earliest_next_decode_start_time - now_ms; + return max_wait_time_ms; + } + return render_time_ms - now_ms - RequiredDecodeTimeMs() - render_delay_ms_; } int VCMTiming::TargetVideoDelay() const { diff --git a/modules/video_coding/timing.h b/modules/video_coding/timing.h index 736b5e9ae4..7f891e4b9b 100644 --- a/modules/video_coding/timing.h +++ b/modules/video_coding/timing.h @@ -14,6 +14,7 @@ #include #include "absl/types/optional.h" +#include "api/units/time_delta.h" #include "api/video/video_timing.h" #include "modules/video_coding/codec_timer.h" #include "rtc_base/experiments/field_trial_parser.h" @@ -81,8 +82,15 @@ class VCMTiming { virtual int64_t RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const; // Returns the maximum time in ms that we can wait for a frame to become - // complete before we must pass it to the decoder. - virtual int64_t MaxWaitingTime(int64_t render_time_ms, int64_t now_ms) const; + // complete before we must pass it to the decoder. render_time_ms==0 indicates + // that the frames should be processed as quickly as possible, with possibly + // only a small delay added to make sure that the decoder is not overloaded. + // In this case, the parameter too_many_frames_queued is used to signal that + // the decode queue is full and that the frame should be decoded as soon as + // possible. + virtual int64_t MaxWaitingTime(int64_t render_time_ms, + int64_t now_ms, + bool too_many_frames_queued) const; // Returns the current target delay which is required delay + decode time + // render delay. @@ -104,6 +112,9 @@ class VCMTiming { absl::optional max_composition_delay_in_frames); absl::optional MaxCompositionDelayInFrames() const; + // Updates the last time a frame was scheduled for decoding. 
+ void SetLastDecodeScheduledTimestamp(int64_t last_decode_scheduled_ts); + enum { kDefaultRenderDelayMs = 10 }; enum { kDelayMaxChangeMsPerS = 100 }; @@ -139,6 +150,15 @@ class VCMTiming { FieldTrialParameter low_latency_renderer_enabled_ RTC_GUARDED_BY(mutex_); absl::optional max_composition_delay_in_frames_ RTC_GUARDED_BY(mutex_); + // Set by the field trial WebRTC-ZeroPlayoutDelay. The parameter min_pacing + // determines the minimum delay between frames scheduled for decoding that is + // used when min playout delay=0 and max playout delay>=0. + FieldTrialParameter zero_playout_delay_min_pacing_ + RTC_GUARDED_BY(mutex_); + // Timestamp at which the last frame was scheduled to be sent to the decoder. + // Used only when the RTP header extension playout delay is set to min=0 ms + // which is indicated by a render time set to 0. + int64_t last_decode_scheduled_ts_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/modules/video_coding/timing_unittest.cc b/modules/video_coding/timing_unittest.cc index ee86605fb6..cc87a3b4e0 100644 --- a/modules/video_coding/timing_unittest.cc +++ b/modules/video_coding/timing_unittest.cc @@ -11,6 +11,7 @@ #include "modules/video_coding/timing.h" #include "system_wrappers/include/clock.h" +#include "test/field_trial.h" #include "test/gtest.h" namespace webrtc { @@ -18,7 +19,7 @@ namespace { const int kFps = 25; } // namespace -TEST(ReceiverTiming, Tests) { +TEST(ReceiverTimingTest, JitterDelay) { SimulatedClock clock(0); VCMTiming timing(&clock); timing.Reset(); @@ -35,7 +36,7 @@ TEST(ReceiverTiming, Tests) { timing.set_render_delay(0); uint32_t wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); // First update initializes the render time. Since we have no decode delay // we get wait_time_ms = renderTime - now - renderDelay = jitter. EXPECT_EQ(jitter_delay_ms, wait_time_ms); @@ -47,7 +48,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); // Since we gradually increase the delay we only get 100 ms every second. EXPECT_EQ(jitter_delay_ms - 10, wait_time_ms); @@ -56,7 +57,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); EXPECT_EQ(jitter_delay_ms, wait_time_ms); // Insert frames without jitter, verify that this gives the exact wait time. @@ -69,7 +70,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); EXPECT_EQ(jitter_delay_ms, wait_time_ms); // Add decode time estimates for 1 second. 
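// Editor's note: illustrative sketch, not part of this patch. This is a
// standalone rendition of the MaxWaitingTime() rule added in timing.cc above,
// which the surrounding tests exercise through the WebRTC-ZeroPlayoutDelay
// field trial: with render_time_ms == 0 (low-latency path) and min_pacing set,
// frames are spaced min_pacing apart unless the decode queue is already
// overfull. State is passed as plain parameters here instead of living in
// VCMTiming, and the real code additionally requires min playout delay == 0
// and max playout delay > 0 before taking the pacing branch.
#include <algorithm>
#include <cstdint>
#include <iostream>

int64_t MaxWaitingTimeSketch(int64_t render_time_ms, int64_t now_ms,
                             int64_t required_decode_time_ms,
                             int64_t render_delay_ms, int64_t min_pacing_ms,
                             int64_t last_decode_scheduled_ms,
                             bool too_many_frames_queued) {
  if (render_time_ms == 0 && min_pacing_ms > 0) {
    if (too_many_frames_queued) return 0;  // flush: decode immediately
    const int64_t earliest_next = last_decode_scheduled_ms + min_pacing_ms;
    return std::max<int64_t>(0, earliest_next - now_ms);
  }
  // Normal path: wait until render time minus decode and render margins.
  return render_time_ms - now_ms - required_decode_time_ms - render_delay_ms;
}

int main() {
  // Low-latency frame 1 ms after the previous one was scheduled, 3 ms pacing:
  std::cout << MaxWaitingTimeSketch(0, 101, 10, 10, 3, 100, false) << "\n";  // 2
  // Same situation, but the decode queue is overfull:
  std::cout << MaxWaitingTimeSketch(0, 101, 10, 10, 3, 100, true) << "\n";   // 0
  // Regular frame with a render time 40 ms out: 40 - 10 - 10 = 20.
  std::cout << MaxWaitingTimeSketch(141, 101, 10, 10, 3, 100, false) << "\n";
}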
@@ -84,7 +85,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); EXPECT_EQ(jitter_delay_ms, wait_time_ms); const int kMinTotalDelayMs = 200; @@ -96,7 +97,7 @@ TEST(ReceiverTiming, Tests) { timing.set_render_delay(kRenderDelayMs); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); // We should at least have kMinTotalDelayMs - decodeTime (10) - renderTime // (10) to wait. EXPECT_EQ(kMinTotalDelayMs - kDecodeTimeMs - kRenderDelayMs, wait_time_ms); @@ -110,7 +111,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); } -TEST(ReceiverTiming, WrapAround) { +TEST(ReceiverTimingTest, TimestampWrapAround) { SimulatedClock clock(0); VCMTiming timing(&clock); // Provoke a wrap-around. The fifth frame will have wrapped at 25 fps. @@ -127,4 +128,155 @@ TEST(ReceiverTiming, WrapAround) { } } +TEST(ReceiverTimingTest, MaxWaitingTimeIsZeroForZeroRenderTime) { + // This is the default path when the RTP playout delay header extension is set + // to min==0. + constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us. + constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0; + constexpr int64_t kZeroRenderTimeMs = 0; + SimulatedClock clock(kStartTimeUs); + VCMTiming timing(&clock); + timing.Reset(); + for (int i = 0; i < 10; ++i) { + clock.AdvanceTimeMilliseconds(kTimeDeltaMs); + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + } + // Another frame submitted at the same time also returns a negative max + // waiting time. + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + // MaxWaitingTime should be less than zero even if there's a burst of frames. + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); +} + +TEST(ReceiverTimingTest, MaxWaitingTimeZeroDelayPacingExperiment) { + // The minimum pacing is enabled by a field trial and active if the RTP + // playout delay header extension is set to min==0. + constexpr int64_t kMinPacingMs = 3; + test::ScopedFieldTrials override_field_trials( + "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/"); + constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us. + constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0; + constexpr int64_t kZeroRenderTimeMs = 0; + SimulatedClock clock(kStartTimeUs); + VCMTiming timing(&clock); + timing.Reset(); + // MaxWaitingTime() returns zero for evenly spaced video frames. + for (int i = 0; i < 10; ++i) { + clock.AdvanceTimeMilliseconds(kTimeDeltaMs); + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + timing.SetLastDecodeScheduledTimestamp(now_ms); + } + // Another frame submitted at the same time is paced according to the field + // trial setting. 
+ int64_t now_ms = clock.TimeInMilliseconds();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms,
+ /*too_many_frames_queued=*/false),
+ kMinPacingMs);
+ // If there's a burst of frames, the wait time is calculated based on next
+ // decode time.
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms,
+ /*too_many_frames_queued=*/false),
+ kMinPacingMs);
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms,
+ /*too_many_frames_queued=*/false),
+ kMinPacingMs);
+ // Allow a few ms to pass; this should be subtracted from the MaxWaitingTime.
+ constexpr int64_t kTwoMs = 2;
+ clock.AdvanceTimeMilliseconds(kTwoMs);
+ now_ms = clock.TimeInMilliseconds();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms,
+ /*too_many_frames_queued=*/false),
+ kMinPacingMs - kTwoMs);
+ // A frame is decoded at the current time; the wait time should be restored
+ // to the pacing delay.
+ timing.SetLastDecodeScheduledTimestamp(now_ms);
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms,
+ /*too_many_frames_queued=*/false),
+ kMinPacingMs);
+}
+
+TEST(ReceiverTimingTest, DefaultMaxWaitingTimeUnaffectedByPacingExperiment) {
+ // The minimum pacing is enabled by a field trial but should not have any
+ // effect if render_time_ms is greater than 0.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0;
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock);
+ timing.Reset();
+ clock.AdvanceTimeMilliseconds(kTimeDeltaMs);
+ int64_t now_ms = clock.TimeInMilliseconds();
+ int64_t render_time_ms = now_ms + 30;
+ // Estimate the internal processing delay from the first frame.
+ int64_t estimated_processing_delay =
+ (render_time_ms - now_ms) -
+ timing.MaxWaitingTime(render_time_ms, now_ms,
+ /*too_many_frames_queued=*/false);
+ EXPECT_GT(estimated_processing_delay, 0);
+
+ // Any other frame submitted at the same time should be scheduled according to
+ // its render time.
+ for (int i = 0; i < 5; ++i) {
+ render_time_ms += kTimeDeltaMs;
+ EXPECT_EQ(timing.MaxWaitingTime(render_time_ms, now_ms,
+ /*too_many_frames_queued=*/false),
+ render_time_ms - now_ms - estimated_processing_delay);
+ }
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeReturnsZeroIfTooManyFramesQueuedIsTrue) {
+ // The minimum pacing is enabled by a field trial and active if the RTP
+ // playout delay header extension is set to min==0.
+ constexpr int64_t kMinPacingMs = 3;
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0;
+ constexpr int64_t kZeroRenderTimeMs = 0;
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock);
+ timing.Reset();
+ // MaxWaitingTime() returns zero for evenly spaced video frames.
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTimeMilliseconds(kTimeDeltaMs);
+ int64_t now_ms = clock.TimeInMilliseconds();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms,
+ /*too_many_frames_queued=*/false),
+ 0);
+ timing.SetLastDecodeScheduledTimestamp(now_ms);
+ }
+ // Another frame submitted at the same time is paced according to the field
+ // trial setting.
+ int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs); + // MaxWaitingTime returns 0 even if there's a burst of frames if + // too_many_frames_queued is set to true. + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/true), + 0); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/true), + 0); +} + } // namespace webrtc diff --git a/modules/video_coding/utility/frame_dropper.h b/modules/video_coding/utility/frame_dropper.h index 50a8d58e66..014b5dd7aa 100644 --- a/modules/video_coding/utility/frame_dropper.h +++ b/modules/video_coding/utility/frame_dropper.h @@ -44,7 +44,7 @@ class FrameDropper { // Input: // - framesize_bytes : The size of the latest frame returned // from the encoder. - // - delta_frame : True if the encoder returned a key frame. + // - delta_frame : True if the encoder returned a delta frame. void Fill(size_t framesize_bytes, bool delta_frame); void Leak(uint32_t input_framerate); diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc index a9af643446..6d3195c32b 100644 --- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc +++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc @@ -190,7 +190,7 @@ void ConfigureStream(int width, float max_framerate, SpatialLayer* stream, int num_temporal_layers) { - assert(stream); + RTC_DCHECK(stream); stream->width = width; stream->height = height; stream->maxBitrate = max_bitrate; @@ -590,6 +590,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) { settings_.VP8()->numberOfTemporalLayers = 1; temporal_layer_profile = kDefaultTemporalLayerProfile; } else { + settings_.H264()->numberOfTemporalLayers = 1; temporal_layer_profile = kNoTemporalLayerProfile; } settings_.maxBitrate = 100; diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser.cc b/modules/video_coding/utility/vp9_uncompressed_header_parser.cc index b33e29695f..07ba3255c6 100644 --- a/modules/video_coding/utility/vp9_uncompressed_header_parser.cc +++ b/modules/video_coding/utility/vp9_uncompressed_header_parser.cc @@ -305,7 +305,7 @@ bool Vp9ReadSegmentationParams(BitstreamReader* br) { constexpr int kSegmentationFeatureBits[kVp9SegLvlMax] = {8, 6, 2, 0}; constexpr bool kSegmentationFeatureSigned[kVp9SegLvlMax] = {1, 1, 0, 0}; - return br->IfNextBoolean([&] { // segmentation_enabled + RETURN_IF_FALSE(br->IfNextBoolean([&] { // segmentation_enabled return br->IfNextBoolean([&] { // update_map // Consume probs. for (int i = 0; i < 7; ++i) { @@ -321,7 +321,7 @@ bool Vp9ReadSegmentationParams(BitstreamReader* br) { return true; }); }); - }); + })); return br->IfNextBoolean([&] { RETURN_IF_FALSE(br->ConsumeBits(1)); // abs_or_delta diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc b/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc new file mode 100644 index 0000000000..b69b45d5c4 --- /dev/null +++ b/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" + +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace vp9 { + +TEST(Vp9UncompressedHeaderParserTest, FrameWithSegmentation) { + // Uncompressed header from a frame generated with libvpx. + // Encoded QVGA frame (SL0 of a VGA frame) that includes a segmentation. + const uint8_t kHeader[] = { + 0x87, 0x01, 0x00, 0x00, 0x02, 0x7e, 0x01, 0xdf, 0x02, 0x7f, 0x01, 0xdf, + 0xc6, 0x87, 0x04, 0x83, 0x83, 0x2e, 0x46, 0x60, 0x20, 0x38, 0x0c, 0x06, + 0x03, 0xcd, 0x80, 0xc0, 0x60, 0x9f, 0xc5, 0x46, 0x00, 0x00, 0x00, 0x00, + 0x2e, 0x73, 0xb7, 0xee, 0x22, 0x06, 0x81, 0x82, 0xd4, 0xef, 0xc3, 0x58, + 0x1f, 0x12, 0xd2, 0x7b, 0x28, 0x1f, 0x80, 0xfc, 0x07, 0xe0, 0x00, 0x00}; + + absl::optional frame_info = + ParseIntraFrameInfo(kHeader, sizeof(kHeader)); + // Segmentation info is not actually populated in FrameInfo struct, but it + // needs to be parsed otherwise we end up on the wrong offset. The check for + // segmentation is thus that we have a valid return value. + ASSERT_TRUE(frame_info.has_value()); + + EXPECT_EQ(frame_info->is_keyframe, false); + EXPECT_EQ(frame_info->error_resilient, true); + EXPECT_EQ(frame_info->show_frame, true); + EXPECT_EQ(frame_info->base_qp, 185); + EXPECT_EQ(frame_info->frame_width, 320); + EXPECT_EQ(frame_info->frame_height, 240); + EXPECT_EQ(frame_info->render_width, 640); + EXPECT_EQ(frame_info->render_height, 480); +} + +} // namespace vp9 +} // namespace webrtc diff --git a/modules/video_coding/video_codec_initializer.cc b/modules/video_coding/video_codec_initializer.cc index 90a02e0c2d..17ea66acb1 100644 --- a/modules/video_coding/video_codec_initializer.cc +++ b/modules/video_coding/video_codec_initializer.cc @@ -262,7 +262,11 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( break; } case kVideoCodecAV1: - if (!SetAv1SvcConfig(video_codec)) { + if (SetAv1SvcConfig(video_codec)) { + for (size_t i = 0; i < config.spatial_layers.size(); ++i) { + video_codec.spatialLayers[i].active = config.spatial_layers[i].active; + } + } else { RTC_LOG(LS_WARNING) << "Failed to configure svc bitrates for av1."; } break; diff --git a/modules/video_coding/video_codec_initializer_unittest.cc b/modules/video_coding/video_codec_initializer_unittest.cc index da3d80d91b..6c1c2e7a38 100644 --- a/modules/video_coding/video_codec_initializer_unittest.cc +++ b/modules/video_coding/video_codec_initializer_unittest.cc @@ -461,4 +461,34 @@ TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersBitratesAreConsistent) { codec.spatialLayers[1].maxBitrate); } +TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersActiveByDefault) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecAV1; + std::vector streams = {DefaultStream()}; + streams[0].scalability_mode = "L2T2"; + config.spatial_layers = {}; + + VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_TRUE(codec.spatialLayers[0].active); + EXPECT_TRUE(codec.spatialLayers[1].active); +} + +TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersOneDeactivated) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecAV1; + std::vector streams = {DefaultStream()}; + streams[0].scalability_mode = "L2T2"; + config.spatial_layers.resize(2); + config.spatial_layers[0].active = true; + config.spatial_layers[1].active = false; + + 
VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_TRUE(codec.spatialLayers[0].active); + EXPECT_FALSE(codec.spatialLayers[1].active); +} + } // namespace webrtc diff --git a/net/dcsctp/fuzzers/dcsctp_fuzzers.h b/net/dcsctp/fuzzers/dcsctp_fuzzers.h index 0a69bf0f89..f3de0722f4 100644 --- a/net/dcsctp/fuzzers/dcsctp_fuzzers.h +++ b/net/dcsctp/fuzzers/dcsctp_fuzzers.h @@ -77,7 +77,6 @@ class FuzzerCallbacks : public DcSctpSocketCallbacks { rtc::ArrayView outgoing_streams) override {} void OnIncomingStreamsReset( rtc::ArrayView incoming_streams) override {} - void NotifyOutgoingMessageBufferEmpty() override {} std::vector ConsumeSentPacket() { if (sent_packets_.empty()) { diff --git a/net/dcsctp/packet/bounded_byte_reader.h b/net/dcsctp/packet/bounded_byte_reader.h index b87648886e..603ed6ac33 100644 --- a/net/dcsctp/packet/bounded_byte_reader.h +++ b/net/dcsctp/packet/bounded_byte_reader.h @@ -52,7 +52,7 @@ template class BoundedByteReader { public: explicit BoundedByteReader(rtc::ArrayView data) : data_(data) { - RTC_DCHECK(data.size() >= FixedSize); + RTC_CHECK(data.size() >= FixedSize); } template @@ -77,7 +77,7 @@ class BoundedByteReader { template BoundedByteReader sub_reader(size_t variable_offset) const { - RTC_DCHECK(FixedSize + variable_offset + SubSize <= data_.size()); + RTC_CHECK(FixedSize + variable_offset + SubSize <= data_.size()); rtc::ArrayView sub_span = data_.subview(FixedSize + variable_offset, SubSize); diff --git a/net/dcsctp/packet/bounded_byte_writer.h b/net/dcsctp/packet/bounded_byte_writer.h index 4e547b0528..467f26800b 100644 --- a/net/dcsctp/packet/bounded_byte_writer.h +++ b/net/dcsctp/packet/bounded_byte_writer.h @@ -56,7 +56,7 @@ template class BoundedByteWriter { public: explicit BoundedByteWriter(rtc::ArrayView data) : data_(data) { - RTC_DCHECK(data.size() >= FixedSize); + RTC_CHECK(data.size() >= FixedSize); } template @@ -81,7 +81,7 @@ class BoundedByteWriter { template BoundedByteWriter sub_writer(size_t variable_offset) { - RTC_DCHECK(FixedSize + variable_offset + SubSize <= data_.size()); + RTC_CHECK(FixedSize + variable_offset + SubSize <= data_.size()); return BoundedByteWriter( data_.subview(FixedSize + variable_offset, SubSize)); diff --git a/net/dcsctp/packet/chunk/sack_chunk.cc b/net/dcsctp/packet/chunk/sack_chunk.cc index a9f17d79dd..d80e430082 100644 --- a/net/dcsctp/packet/chunk/sack_chunk.cc +++ b/net/dcsctp/packet/chunk/sack_chunk.cc @@ -88,13 +88,12 @@ absl::optional SackChunk::Parse(rtc::ArrayView data) { offset += kGapAckBlockSize; } - std::vector duplicate_tsns; - duplicate_tsns.reserve(nbr_of_gap_blocks); + std::set duplicate_tsns; for (int i = 0; i < nbr_of_dup_tsns; ++i) { BoundedByteReader sub_reader = reader->sub_reader(offset); - duplicate_tsns.push_back(TSN(sub_reader.Load32<0>())); + duplicate_tsns.insert(TSN(sub_reader.Load32<0>())); offset += kDupTsnBlockSize; } RTC_DCHECK(offset == reader->variable_data_size()); @@ -124,11 +123,11 @@ void SackChunk::SerializeTo(std::vector& out) const { offset += kGapAckBlockSize; } - for (int i = 0; i < nbr_of_dup_tsns; ++i) { + for (TSN tsn : duplicate_tsns_) { BoundedByteWriter sub_writer = writer.sub_writer(offset); - sub_writer.Store32<0>(*duplicate_tsns_[i]); + sub_writer.Store32<0>(*tsn); offset += kDupTsnBlockSize; } diff --git a/net/dcsctp/packet/chunk/sack_chunk.h b/net/dcsctp/packet/chunk/sack_chunk.h index 0b464fb359..e6758fa332 100644 --- a/net/dcsctp/packet/chunk/sack_chunk.h +++ b/net/dcsctp/packet/chunk/sack_chunk.h @@ -12,6 
+12,7 @@ #include #include +#include #include #include #include @@ -48,7 +49,7 @@ class SackChunk : public Chunk, public TLVTrait { SackChunk(TSN cumulative_tsn_ack, uint32_t a_rwnd, std::vector gap_ack_blocks, - std::vector duplicate_tsns) + std::set duplicate_tsns) : cumulative_tsn_ack_(cumulative_tsn_ack), a_rwnd_(a_rwnd), gap_ack_blocks_(std::move(gap_ack_blocks)), @@ -63,7 +64,7 @@ class SackChunk : public Chunk, public TLVTrait { rtc::ArrayView gap_ack_blocks() const { return gap_ack_blocks_; } - rtc::ArrayView duplicate_tsns() const { return duplicate_tsns_; } + const std::set& duplicate_tsns() const { return duplicate_tsns_; } private: static constexpr size_t kGapAckBlockSize = 4; @@ -72,7 +73,7 @@ class SackChunk : public Chunk, public TLVTrait { const TSN cumulative_tsn_ack_; const uint32_t a_rwnd_; std::vector gap_ack_blocks_; - std::vector duplicate_tsns_; + std::set duplicate_tsns_; }; } // namespace dcsctp diff --git a/net/dcsctp/packet/chunk_validators.cc b/net/dcsctp/packet/chunk_validators.cc index b3467037c7..48d351827e 100644 --- a/net/dcsctp/packet/chunk_validators.cc +++ b/net/dcsctp/packet/chunk_validators.cc @@ -38,9 +38,7 @@ SackChunk ChunkValidators::Clean(SackChunk&& sack) { // Not more than at most one remaining? Exit early. if (gap_ack_blocks.size() <= 1) { return SackChunk(sack.cumulative_tsn_ack(), sack.a_rwnd(), - std::move(gap_ack_blocks), - std::vector(sack.duplicate_tsns().begin(), - sack.duplicate_tsns().end())); + std::move(gap_ack_blocks), sack.duplicate_tsns()); } // Sort the intervals by their start value, to aid in the merging below. @@ -63,8 +61,7 @@ SackChunk ChunkValidators::Clean(SackChunk&& sack) { } return SackChunk(sack.cumulative_tsn_ack(), sack.a_rwnd(), std::move(merged), - std::vector(sack.duplicate_tsns().begin(), - sack.duplicate_tsns().end())); + sack.duplicate_tsns()); } bool ChunkValidators::Validate(const SackChunk& sack) { diff --git a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc index c4e2961bdd..b89f86e43e 100644 --- a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc +++ b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc @@ -48,14 +48,14 @@ MissingMandatoryParameterCause::Parse(rtc::ArrayView data) { } uint32_t count = reader->Load32<4>(); - if (reader->variable_data_size() != count * kMissingParameterSize) { + if (reader->variable_data_size() / kMissingParameterSize != count) { RTC_DLOG(LS_WARNING) << "Invalid number of missing parameters"; return absl::nullopt; } std::vector missing_parameter_types; missing_parameter_types.reserve(count); - for (size_t i = 0; i < count; ++i) { + for (uint32_t i = 0; i < count; ++i) { BoundedByteReader sub_reader = reader->sub_reader(i * kMissingParameterSize); diff --git a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc index 8c0434050d..1c526ff0e2 100644 --- a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc +++ b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc @@ -22,6 +22,7 @@ namespace dcsctp { namespace { using ::testing::ElementsAre; +using ::testing::IsEmpty; TEST(MissingMandatoryParameterCauseTest, SerializeAndDeserialize) { uint16_t parameter_types[] = {1, 2, 3}; @@ -37,5 +38,22 @@ TEST(MissingMandatoryParameterCauseTest, SerializeAndDeserialize) { EXPECT_THAT(deserialized.missing_parameter_types(), ElementsAre(1, 2, 
3)); } +TEST(MissingMandatoryParameterCauseTest, HandlesDeserializeZeroParameters) { + uint8_t serialized[] = {0, 2, 0, 8, 0, 0, 0, 0}; + + ASSERT_HAS_VALUE_AND_ASSIGN( + MissingMandatoryParameterCause deserialized, + MissingMandatoryParameterCause::Parse(serialized)); + + EXPECT_THAT(deserialized.missing_parameter_types(), IsEmpty()); +} + +TEST(MissingMandatoryParameterCauseTest, HandlesOverflowParameterCount) { + // 0x80000004 * 2 = 2**32 + 8 -> if overflow, would validate correctly. + uint8_t serialized[] = {0, 2, 0, 8, 0x80, 0x00, 0x00, 0x04}; + + EXPECT_FALSE(MissingMandatoryParameterCause::Parse(serialized).has_value()); +} + } // namespace } // namespace dcsctp diff --git a/net/dcsctp/packet/sctp_packet.cc b/net/dcsctp/packet/sctp_packet.cc index da06ccf867..3e419c5978 100644 --- a/net/dcsctp/packet/sctp_packet.cc +++ b/net/dcsctp/packet/sctp_packet.cc @@ -82,8 +82,8 @@ size_t SctpPacket::Builder::bytes_remaining() const { // The packet header (CommonHeader) hasn't been written yet: return max_packet_size_ - kHeaderSize; } else if (out_.size() > max_packet_size_) { - RTC_DCHECK(false) << "Exceeded max size, data=" << out_.size() - << ", max_size=" << max_packet_size_; + RTC_NOTREACHED() << "Exceeded max size, data=" << out_.size() + << ", max_size=" << max_packet_size_; return 0; } return max_packet_size_ - out_.size(); diff --git a/net/dcsctp/public/BUILD.gn b/net/dcsctp/public/BUILD.gn index 85cf529c2f..ced94de151 100644 --- a/net/dcsctp/public/BUILD.gn +++ b/net/dcsctp/public/BUILD.gn @@ -43,11 +43,50 @@ rtc_source_set("socket") { ] } +rtc_source_set("factory") { + deps = [ + ":socket", + ":types", + "../socket:dcsctp_socket", + ] + sources = [ + "dcsctp_socket_factory.cc", + "dcsctp_socket_factory.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_source_set("mocks") { + testonly = true + sources = [ "mock_dcsctp_socket.h" ] + deps = [ + ":socket", + "../../../test:test_support", + ] +} + +rtc_source_set("utils") { + deps = [ + ":socket", + ":types", + "../../../api:array_view", + "../../../rtc_base:logging", + "../../../rtc_base:stringutils", + "../socket:dcsctp_socket", + ] + sources = [ + "text_pcap_packet_observer.cc", + "text_pcap_packet_observer.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + if (rtc_include_tests) { rtc_library("dcsctp_public_unittests") { testonly = true deps = [ + ":mocks", ":strong_alias", ":types", "../../../rtc_base:checks", @@ -56,6 +95,7 @@ if (rtc_include_tests) { "../../../test:test_support", ] sources = [ + "mock_dcsctp_socket_test.cc", "strong_alias_test.cc", "types_test.cc", ] diff --git a/net/dcsctp/public/dcsctp_options.h b/net/dcsctp/public/dcsctp_options.h index 4f5b50cf53..caefcff4f5 100644 --- a/net/dcsctp/public/dcsctp_options.h +++ b/net/dcsctp/public/dcsctp_options.h @@ -81,7 +81,11 @@ struct DcSctpOptions { // Maximum send buffer size. It will not be possible to queue more data than // this before sending it. - size_t max_send_buffer_size = 2 * 1024 * 1024; + size_t max_send_buffer_size = 2'000'000; + + // A threshold that, when the amount of data in the send buffer goes below + // this value, will trigger `DcSctpCallbacks::OnTotalBufferedAmountLow`. + size_t total_buffered_amount_low_threshold = 1'800'000; // Max allowed RTT value. When the RTT is measured and it's found to be larger // than this value, it will be discarded and not used for e.g. any RTO @@ -108,7 +112,7 @@ struct DcSctpOptions { // T2-shutdown timeout. 
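// Editor's note: illustrative sketch, not part of this patch. It restates the
// arithmetic from the HandlesOverflowParameterCount test added to
// missing_mandatory_parameter_cause_test.cc above: validating a parse-time
// count by multiplying can wrap in 32-bit arithmetic, so a bogus, huge count
// could pass a "size == count * element_size" check, while dividing the
// available size by the element size cannot overflow. The 2-byte element size
// matches the uint16_t parameter types; the exact integer widths used by the
// real parser may differ.
#include <cstdint>
#include <iostream>

bool ValidatedByMultiplication32(uint32_t data_size, uint32_t count) {
  const uint32_t kElementSize = 2;
  return data_size == count * kElementSize;  // wraps for large |count|
}

bool ValidatedByDivision(uint32_t data_size, uint32_t count) {
  const uint32_t kElementSize = 2;
  return data_size / kElementSize == count;  // no overflow possible
}

int main() {
  const uint32_t kBogusCount = 0x80000004;  // 0x80000004 * 2 wraps to 8
  const uint32_t kDataSize = 8;
  std::cout << ValidatedByMultiplication32(kDataSize, kBogusCount) << " "  // 1 (accepted, bad)
            << ValidatedByDivision(kDataSize, kBogusCount) << "\n";        // 0 (rejected, good)
}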
DurationMs t2_shutdown_timeout = DurationMs(1000); - // Hearbeat interval (on idle connections only). + // Hearbeat interval (on idle connections only). Set to zero to disable. DurationMs heartbeat_interval = DurationMs(30000); // The maximum time when a SACK will be sent from the arrival of an diff --git a/net/dcsctp/public/dcsctp_socket.h b/net/dcsctp/public/dcsctp_socket.h index 1c2fb97f7a..f07f54e044 100644 --- a/net/dcsctp/public/dcsctp_socket.h +++ b/net/dcsctp/public/dcsctp_socket.h @@ -197,12 +197,11 @@ class DcSctpSocketCallbacks { // Triggered when the outgoing message buffer is empty, meaning that there are // no more queued messages, but there can still be packets in-flight or to be // retransmitted. (in contrast to SCTP_SENDER_DRY_EVENT). - // TODO(boivie): This is currently only used in benchmarks to have a steady - // flow of packets to send // // Note that it's NOT ALLOWED to call into this library from within this // callback. - virtual void NotifyOutgoingMessageBufferEmpty() = 0; + ABSL_DEPRECATED("Use OnTotalBufferedAmountLow instead") + virtual void NotifyOutgoingMessageBufferEmpty() {} // Called when the library has received an SCTP message in full and delivers // it to the upper layer. @@ -263,6 +262,17 @@ class DcSctpSocketCallbacks { // It is allowed to call into this library from within this callback. virtual void OnIncomingStreamsReset( rtc::ArrayView incoming_streams) = 0; + + // Will be called when the amount of data buffered to be sent falls to or + // below the threshold set when calling `SetBufferedAmountLowThreshold`. + // + // It is allowed to call into this library from within this callback. + virtual void OnBufferedAmountLow(StreamID stream_id) {} + + // Will be called when the total amount of data buffered (in the entire send + // buffer, for all streams) falls to or below the threshold specified in + // `DcSctpOptions::total_buffered_amount_low_threshold`. + virtual void OnTotalBufferedAmountLow() {} }; // The DcSctpSocket implementation implements the following interface. @@ -326,6 +336,20 @@ class DcSctpSocketInterface { // or streams that don't support resetting will not perform any operation. virtual ResetStreamsStatus ResetStreams( rtc::ArrayView outgoing_streams) = 0; + + // Returns the number of bytes of data currently queued to be sent on a given + // stream. + virtual size_t buffered_amount(StreamID stream_id) const = 0; + + // Returns the number of buffered outgoing bytes that is considered "low" for + // a given stream. See `SetBufferedAmountLowThreshold`. + virtual size_t buffered_amount_low_threshold(StreamID stream_id) const = 0; + + // Used to specify the number of bytes of buffered outgoing data that is + // considered "low" for a given stream, which will trigger an + // OnBufferedAmountLow event. The default value is zero (0). + virtual void SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) = 0; }; } // namespace dcsctp diff --git a/net/dcsctp/public/dcsctp_socket_factory.cc b/net/dcsctp/public/dcsctp_socket_factory.cc new file mode 100644 index 0000000000..338d143424 --- /dev/null +++ b/net/dcsctp/public/dcsctp_socket_factory.cc @@ -0,0 +1,31 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "net/dcsctp/public/dcsctp_socket_factory.h" + +#include +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/socket/dcsctp_socket.h" + +namespace dcsctp { +std::unique_ptr DcSctpSocketFactory::Create( + absl::string_view log_prefix, + DcSctpSocketCallbacks& callbacks, + std::unique_ptr packet_observer, + const DcSctpOptions& options) { + return std::make_unique(log_prefix, callbacks, + std::move(packet_observer), options); +} +} // namespace dcsctp diff --git a/net/dcsctp/public/dcsctp_socket_factory.h b/net/dcsctp/public/dcsctp_socket_factory.h new file mode 100644 index 0000000000..dcc68d9b54 --- /dev/null +++ b/net/dcsctp/public/dcsctp_socket_factory.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_FACTORY_H_ +#define NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_FACTORY_H_ + +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/packet_observer.h" + +namespace dcsctp { +class DcSctpSocketFactory { + public: + std::unique_ptr Create( + absl::string_view log_prefix, + DcSctpSocketCallbacks& callbacks, + std::unique_ptr packet_observer, + const DcSctpOptions& options); +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_FACTORY_H_ diff --git a/net/dcsctp/public/mock_dcsctp_socket.h b/net/dcsctp/public/mock_dcsctp_socket.h new file mode 100644 index 0000000000..18140642b7 --- /dev/null +++ b/net/dcsctp/public/mock_dcsctp_socket.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PUBLIC_MOCK_DCSCTP_SOCKET_H_ +#define NET_DCSCTP_PUBLIC_MOCK_DCSCTP_SOCKET_H_ + +#include "net/dcsctp/public/dcsctp_socket.h" +#include "test/gmock.h" + +namespace dcsctp { + +class MockDcSctpSocket : public DcSctpSocketInterface { + public: + MOCK_METHOD(void, + ReceivePacket, + (rtc::ArrayView data), + (override)); + + MOCK_METHOD(void, HandleTimeout, (TimeoutID timeout_id), (override)); + + MOCK_METHOD(void, Connect, (), (override)); + + MOCK_METHOD(void, Shutdown, (), (override)); + + MOCK_METHOD(void, Close, (), (override)); + + MOCK_METHOD(SocketState, state, (), (const, override)); + + MOCK_METHOD(const DcSctpOptions&, options, (), (const, override)); + + MOCK_METHOD(void, SetMaxMessageSize, (size_t max_message_size), (override)); + + MOCK_METHOD(SendStatus, + Send, + (DcSctpMessage message, const SendOptions& send_options), + (override)); + + MOCK_METHOD(ResetStreamsStatus, + ResetStreams, + (rtc::ArrayView outgoing_streams), + (override)); + + MOCK_METHOD(size_t, buffered_amount, (StreamID stream_id), (const, override)); + + MOCK_METHOD(size_t, + buffered_amount_low_threshold, + (StreamID stream_id), + (const, override)); + + MOCK_METHOD(void, + SetBufferedAmountLowThreshold, + (StreamID stream_id, size_t bytes), + (override)); +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_MOCK_DCSCTP_SOCKET_H_ diff --git a/net/dcsctp/public/mock_dcsctp_socket_test.cc b/net/dcsctp/public/mock_dcsctp_socket_test.cc new file mode 100644 index 0000000000..57013e4ce2 --- /dev/null +++ b/net/dcsctp/public/mock_dcsctp_socket_test.cc @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/public/mock_dcsctp_socket.h" + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { + +// This test exists to ensure that all methods are mocked correctly, and to +// generate compiler errors if they are not. +TEST(MockDcSctpSocketTest, CanInstantiateAndConnect) { + testing::StrictMock socket; + + EXPECT_CALL(socket, Connect); + + socket.Connect(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/public/text_pcap_packet_observer.cc b/net/dcsctp/public/text_pcap_packet_observer.cc new file mode 100644 index 0000000000..2b13060190 --- /dev/null +++ b/net/dcsctp/public/text_pcap_packet_observer.cc @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/public/text_pcap_packet_observer.h" + +#include "api/array_view.h" +#include "net/dcsctp/public/types.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +void TextPcapPacketObserver::OnSentPacket( + dcsctp::TimeMs now, + rtc::ArrayView payload) { + PrintPacket("O ", name_, now, payload); +} + +void TextPcapPacketObserver::OnReceivedPacket( + dcsctp::TimeMs now, + rtc::ArrayView payload) { + PrintPacket("I ", name_, now, payload); +} + +void TextPcapPacketObserver::PrintPacket( + absl::string_view prefix, + absl::string_view socket_name, + dcsctp::TimeMs now, + rtc::ArrayView payload) { + rtc::StringBuilder s; + s << "\n" << prefix; + int64_t remaining = *now % (24 * 60 * 60 * 1000); + int hours = remaining / (60 * 60 * 1000); + remaining = remaining % (60 * 60 * 1000); + int minutes = remaining / (60 * 1000); + remaining = remaining % (60 * 1000); + int seconds = remaining / 1000; + int ms = remaining % 1000; + s.AppendFormat("%02d:%02d:%02d.%03d", hours, minutes, seconds, ms); + s << " 0000"; + for (uint8_t byte : payload) { + s.AppendFormat(" %02x", byte); + } + s << " # SCTP_PACKET " << socket_name; + RTC_LOG(LS_VERBOSE) << s.str(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/public/text_pcap_packet_observer.h b/net/dcsctp/public/text_pcap_packet_observer.h new file mode 100644 index 0000000000..0685771ccf --- /dev/null +++ b/net/dcsctp/public/text_pcap_packet_observer.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_TEXT_PCAP_PACKET_OBSERVER_H_ +#define NET_DCSCTP_PUBLIC_TEXT_PCAP_PACKET_OBSERVER_H_ + +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// Print outs all sent and received packets to the logs, at LS_VERBOSE severity. +class TextPcapPacketObserver : public dcsctp::PacketObserver { + public: + explicit TextPcapPacketObserver(absl::string_view name) : name_(name) {} + + // Implementation of `dcsctp::PacketObserver`. + void OnSentPacket(dcsctp::TimeMs now, + rtc::ArrayView payload) override; + + void OnReceivedPacket(dcsctp::TimeMs now, + rtc::ArrayView payload) override; + + // Prints a packet to the log. Exposed to allow it to be used in compatibility + // tests suites that don't use PacketObserver. 
+ static void PrintPacket(absl::string_view prefix, + absl::string_view socket_name, + dcsctp::TimeMs now, + rtc::ArrayView payload); + + private: + const std::string name_; +}; + +} // namespace dcsctp +#endif // NET_DCSCTP_PUBLIC_TEXT_PCAP_PACKET_OBSERVER_H_ diff --git a/net/dcsctp/rx/BUILD.gn b/net/dcsctp/rx/BUILD.gn index 7b5406b753..fb92513158 100644 --- a/net/dcsctp/rx/BUILD.gn +++ b/net/dcsctp/rx/BUILD.gn @@ -24,6 +24,7 @@ rtc_library("data_tracker") { "data_tracker.h", ] absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/net/dcsctp/rx/data_tracker.cc b/net/dcsctp/rx/data_tracker.cc index 3e03dfece2..5b563a8463 100644 --- a/net/dcsctp/rx/data_tracker.cc +++ b/net/dcsctp/rx/data_tracker.cc @@ -9,6 +9,7 @@ */ #include "net/dcsctp/rx/data_tracker.h" +#include #include #include #include @@ -16,6 +17,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "net/dcsctp/common/sequence_numbers.h" @@ -26,6 +28,86 @@ namespace dcsctp { +constexpr size_t DataTracker::kMaxDuplicateTsnReported; +constexpr size_t DataTracker::kMaxGapAckBlocksReported; + +bool DataTracker::AdditionalTsnBlocks::Add(UnwrappedTSN tsn) { + // Find any block to expand. It will look for any block that includes (also + // when expanded) the provided `tsn`. It will return the block that is greater + // than, or equal to `tsn`. + auto it = absl::c_lower_bound( + blocks_, tsn, [&](const TsnRange& elem, const UnwrappedTSN& t) { + return elem.last.next_value() < t; + }); + + if (it == blocks_.end()) { + // No matching block found. There is no greater than, or equal block - which + // means that this TSN is greater than any block. It can then be inserted at + // the end. + blocks_.emplace_back(tsn, tsn); + return true; + } + + if (tsn >= it->first && tsn <= it->last) { + // It's already in this block. + return false; + } + + if (it->last.next_value() == tsn) { + // This block can be expanded to the right, or merged with the next. + auto next_it = it + 1; + if (next_it != blocks_.end() && tsn.next_value() == next_it->first) { + // Expanding it would make it adjacent to next block - merge those. + it->last = next_it->last; + blocks_.erase(next_it); + return true; + } + + // Expand to the right + it->last = tsn; + return true; + } + + if (it->first == tsn.next_value()) { + // This block can be expanded to the left. Merging to the left would've been + // covered by the above "merge to the right". Both blocks (expand a + // right-most block to the left and expand a left-most block to the right) + // would match, but the left-most would be returned by std::lower_bound. + RTC_DCHECK(it == blocks_.begin() || (it - 1)->last.next_value() != tsn); + + // Expand to the left. + it->first = tsn; + return true; + } + + // Need to create a new block in the middle. + blocks_.emplace(it, tsn, tsn); + return true; +} + +void DataTracker::AdditionalTsnBlocks::EraseTo(UnwrappedTSN tsn) { + // Find the block that is greater than or equals `tsn`. + auto it = absl::c_lower_bound( + blocks_, tsn, [&](const TsnRange& elem, const UnwrappedTSN& t) { + return elem.last < t; + }); + + // The block that is found is greater or equal (or possibly ::end, when no + // block is greater or equal). All blocks before this block can be safely + // removed. the TSN might be within this block, so possibly truncate it. 
+ bool tsn_is_within_block = it != blocks_.end() && tsn >= it->first; + blocks_.erase(blocks_.begin(), it); + + if (tsn_is_within_block) { + blocks_.front().first = tsn.next_value(); + } +} + +void DataTracker::AdditionalTsnBlocks::PopFront() { + RTC_DCHECK(!blocks_.empty()); + blocks_.erase(blocks_.begin()); +} + bool DataTracker::IsTSNValid(TSN tsn) const { UnwrappedTSN unwrapped_tsn = tsn_unwrapper_.PeekUnwrap(tsn); @@ -52,21 +134,41 @@ void DataTracker::Observe(TSN tsn, // Old chunk already seen before? if (unwrapped_tsn <= last_cumulative_acked_tsn_) { - // TODO(boivie) Set duplicate TSN, even if it's not used in SCTP yet. - return; - } - - if (unwrapped_tsn == last_cumulative_acked_tsn_.next_value()) { - last_cumulative_acked_tsn_ = unwrapped_tsn; - // The cumulative acked tsn may be moved even further, if a gap was filled. - while (!additional_tsns_.empty() && - *additional_tsns_.begin() == - last_cumulative_acked_tsn_.next_value()) { - last_cumulative_acked_tsn_.Increment(); - additional_tsns_.erase(additional_tsns_.begin()); + if (duplicate_tsns_.size() < kMaxDuplicateTsnReported) { + duplicate_tsns_.insert(unwrapped_tsn.Wrap()); } + // https://datatracker.ietf.org/doc/html/rfc4960#section-6.2 + // "When a packet arrives with duplicate DATA chunk(s) and with no new DATA + // chunk(s), the endpoint MUST immediately send a SACK with no delay. If a + // packet arrives with duplicate DATA chunk(s) bundled with new DATA chunks, + // the endpoint MAY immediately send a SACK." + UpdateAckState(AckState::kImmediate, "duplicate data"); } else { - additional_tsns_.insert(unwrapped_tsn); + if (unwrapped_tsn == last_cumulative_acked_tsn_.next_value()) { + last_cumulative_acked_tsn_ = unwrapped_tsn; + // The cumulative acked tsn may be moved even further, if a gap was + // filled. + if (!additional_tsn_blocks_.empty() && + additional_tsn_blocks_.front().first == + last_cumulative_acked_tsn_.next_value()) { + last_cumulative_acked_tsn_ = additional_tsn_blocks_.front().last; + additional_tsn_blocks_.PopFront(); + } + } else { + bool inserted = additional_tsn_blocks_.Add(unwrapped_tsn); + if (!inserted) { + // Already seen before. + if (duplicate_tsns_.size() < kMaxDuplicateTsnReported) { + duplicate_tsns_.insert(unwrapped_tsn.Wrap()); + } + // https://datatracker.ietf.org/doc/html/rfc4960#section-6.2 + // "When a packet arrives with duplicate DATA chunk(s) and with no new + // DATA chunk(s), the endpoint MUST immediately send a SACK with no + // delay. If a packet arrives with duplicate DATA chunk(s) bundled with + // new DATA chunks, the endpoint MAY immediately send a SACK." + // No need to do this. SACKs are sent immediately on packet loss below. + } + } } // https://tools.ietf.org/html/rfc4960#section-6.7 @@ -75,7 +177,7 @@ void DataTracker::Observe(TSN tsn, // the received DATA chunk sequence, it SHOULD send a SACK with Gap Ack // Blocks immediately. The data receiver continues sending a SACK after // receipt of each SCTP packet that doesn't fill the gap." - if (!additional_tsns_.empty()) { + if (!additional_tsn_blocks_.empty()) { UpdateAckState(AckState::kImmediate, "packet loss"); } @@ -139,24 +241,20 @@ void DataTracker::HandleForwardTsn(TSN new_cumulative_ack) { // `last_cumulative_acked_tsn_`, and if there have been prior "gaps" that are // now overlapping with the new value, remove them. 
last_cumulative_acked_tsn_ = unwrapped_tsn; - int erased_additional_tsns = std::distance( - additional_tsns_.begin(), additional_tsns_.upper_bound(unwrapped_tsn)); - additional_tsns_.erase(additional_tsns_.begin(), - additional_tsns_.upper_bound(unwrapped_tsn)); + additional_tsn_blocks_.EraseTo(unwrapped_tsn); // See if the `last_cumulative_acked_tsn_` can be moved even further: - while (!additional_tsns_.empty() && - *additional_tsns_.begin() == last_cumulative_acked_tsn_.next_value()) { - last_cumulative_acked_tsn_.Increment(); - additional_tsns_.erase(additional_tsns_.begin()); - ++erased_additional_tsns; + if (!additional_tsn_blocks_.empty() && + additional_tsn_blocks_.front().first == + last_cumulative_acked_tsn_.next_value()) { + last_cumulative_acked_tsn_ = additional_tsn_blocks_.front().last; + additional_tsn_blocks_.PopFront(); } RTC_DLOG(LS_VERBOSE) << log_prefix_ << "FORWARD_TSN, cum_ack_tsn=" << *prev_last_cum_ack_tsn.Wrap() << "->" << *new_cumulative_ack << "->" - << *last_cumulative_acked_tsn_.Wrap() << ", removed " - << erased_additional_tsns << " additional TSNs"; + << *last_cumulative_acked_tsn_.Wrap(); // https://tools.ietf.org/html/rfc3758#section-3.6 // "Any time a FORWARD TSN chunk arrives, for the purposes of sending a @@ -178,51 +276,26 @@ SackChunk DataTracker::CreateSelectiveAck(size_t a_rwnd) { // that. So this SACK produced is more like a NR-SACK as explained in // https://ieeexplore.ieee.org/document/4697037 and which there is an RFC // draft at https://tools.ietf.org/html/draft-tuexen-tsvwg-sctp-multipath-17. - std::vector duplicate_tsns; - duplicate_tsns.reserve(duplicates_.size()); - for (UnwrappedTSN tsn : duplicates_) { - duplicate_tsns.push_back(tsn.Wrap()); - } - duplicates_.clear(); + std::set duplicate_tsns; + duplicate_tsns_.swap(duplicate_tsns); return SackChunk(last_cumulative_acked_tsn_.Wrap(), a_rwnd, - CreateGapAckBlocks(), duplicate_tsns); + CreateGapAckBlocks(), std::move(duplicate_tsns)); } std::vector DataTracker::CreateGapAckBlocks() const { - // This method will calculate the gaps between blocks of contiguous values in - // `additional_tsns_`, in the same format as the SACK chunk expects it; - // offsets from the "cumulative ack TSN value". + const auto& blocks = additional_tsn_blocks_.blocks(); std::vector gap_ack_blocks; - - absl::optional first_tsn_in_block = absl::nullopt; - absl::optional last_tsn_in_block = absl::nullopt; - - auto flush = [&]() { - if (first_tsn_in_block.has_value()) { - auto start_diff = UnwrappedTSN::Difference(*first_tsn_in_block, - last_cumulative_acked_tsn_); - auto end_diff = UnwrappedTSN::Difference(*last_tsn_in_block, - last_cumulative_acked_tsn_); - gap_ack_blocks.emplace_back(static_cast(start_diff), - static_cast(end_diff)); - first_tsn_in_block = absl::nullopt; - last_tsn_in_block = absl::nullopt; - } - }; - for (UnwrappedTSN tsn : additional_tsns_) { - if (last_tsn_in_block.has_value() && - last_tsn_in_block->next_value() == tsn) { - // Continuing the same block. - last_tsn_in_block = tsn; - } else { - // New block, or a gap from the old block's last value. 
- flush(); - first_tsn_in_block = tsn; - last_tsn_in_block = tsn; - } + gap_ack_blocks.reserve(std::min(blocks.size(), kMaxGapAckBlocksReported)); + for (size_t i = 0; i < blocks.size() && i < kMaxGapAckBlocksReported; ++i) { + auto start_diff = + UnwrappedTSN::Difference(blocks[i].first, last_cumulative_acked_tsn_); + auto end_diff = + UnwrappedTSN::Difference(blocks[i].last, last_cumulative_acked_tsn_); + gap_ack_blocks.emplace_back(static_cast(start_diff), + static_cast(end_diff)); } - flush(); + return gap_ack_blocks; } diff --git a/net/dcsctp/rx/data_tracker.h b/net/dcsctp/rx/data_tracker.h index 6146d2a839..167f5a04e7 100644 --- a/net/dcsctp/rx/data_tracker.h +++ b/net/dcsctp/rx/data_tracker.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include "absl/strings/string_view.h" @@ -38,12 +39,17 @@ namespace dcsctp { // 200ms, whatever is smallest). class DataTracker { public: + // The maximum number of duplicate TSNs that will be reported in a SACK. + static constexpr size_t kMaxDuplicateTsnReported = 20; + // The maximum number of gap-ack-blocks that will be reported in a SACK. + static constexpr size_t kMaxGapAckBlocksReported = 20; + // The maximum number of accepted in-flight DATA chunks. This indicates the // maximum difference from this buffer's last cumulative ack TSN, and any // received data. Data received beyond this limit will be dropped, which will // force the transmitter to send data that actually increases the last // cumulative acked TSN. - static constexpr uint32_t kMaxAcceptedOutstandingFragments = 256; + static constexpr uint32_t kMaxAcceptedOutstandingFragments = 100000; explicit DataTracker(absl::string_view log_prefix, Timer* delayed_ack_timer, @@ -111,6 +117,49 @@ class DataTracker { // Send a SACK immediately after handling this packet. kImmediate, }; + + // Represents ranges of TSNs that have been received that are not directly + // following the last cumulative acked TSN. This information is returned to + // the sender in the "gap ack blocks" in the SACK chunk. The blocks are always + // non-overlapping and non-adjacent. + class AdditionalTsnBlocks { + public: + // Represents an inclusive range of received TSNs, i.e. [first, last]. + struct TsnRange { + TsnRange(UnwrappedTSN first, UnwrappedTSN last) + : first(first), last(last) {} + UnwrappedTSN first; + UnwrappedTSN last; + }; + + // Adds a TSN to the set. This will try to expand any existing block and + // might merge blocks to ensure that all blocks are non-adjacent. If a + // current block can't be expanded, a new block is created. + // + // The return value indicates if `tsn` was added. If false is returned, the + // `tsn` was already represented in one of the blocks. + bool Add(UnwrappedTSN tsn); + + // Erases all TSNs up to, and including `tsn`. This will remove all blocks + // that are completely below `tsn` and may truncate a block where `tsn` is + // within that block. In that case, the frontmost block's start TSN will be + // the next following tsn after `tsn`. + void EraseTo(UnwrappedTSN tsn); + + // Removes the first block. Must not be called on an empty set. + void PopFront(); + + const std::vector& blocks() const { return blocks_; } + + bool empty() const { return blocks_.empty(); } + + const TsnRange& front() const { return blocks_.front(); } + + private: + // A sorted vector of non-overlapping and non-adjacent blocks. 
+ std::vector blocks_; + }; + std::vector CreateGapAckBlocks() const; void UpdateAckState(AckState new_state, absl::string_view reason); static absl::string_view ToString(AckState ack_state); @@ -125,8 +174,8 @@ class DataTracker { // All TSNs up until (and including) this value have been seen. UnwrappedTSN last_cumulative_acked_tsn_; // Received TSNs that are not directly following `last_cumulative_acked_tsn_`. - std::set additional_tsns_; - std::set duplicates_; + AdditionalTsnBlocks additional_tsn_blocks_; + std::set duplicate_tsns_; }; } // namespace dcsctp diff --git a/net/dcsctp/rx/data_tracker_test.cc b/net/dcsctp/rx/data_tracker_test.cc index d714b0ba9e..5c2e56fb2b 100644 --- a/net/dcsctp/rx/data_tracker_test.cc +++ b/net/dcsctp/rx/data_tracker_test.cc @@ -25,6 +25,8 @@ namespace dcsctp { namespace { using ::testing::ElementsAre; using ::testing::IsEmpty; +using ::testing::SizeIs; +using ::testing::UnorderedElementsAre; constexpr size_t kArwnd = 10000; constexpr TSN kInitialTSN(11); @@ -224,11 +226,411 @@ TEST_F(DataTrackerTest, WillNotAcceptInvalidTSNs) { size_t limit = DataTracker::kMaxAcceptedOutstandingFragments; EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn + limit + 1))); EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn - (limit + 1)))); - EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn + 65536))); - EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn - 65536))); EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn + 0x8000000))); EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn - 0x8000000))); } +TEST_F(DataTrackerTest, ReportSingleDuplicateTsns) { + Observer({11, 12, 11}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(12)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), UnorderedElementsAre(TSN(11))); +} + +TEST_F(DataTrackerTest, ReportMultipleDuplicateTsns) { + Observer({11, 12, 13, 14, 12, 13, 12, 13, 15, 16}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(16)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), UnorderedElementsAre(TSN(12), TSN(13))); +} + +TEST_F(DataTrackerTest, ReportDuplicateTsnsInGapAckBlocks) { + Observer({11, /*12,*/ 13, 14, 13, 14, 15, 16}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 5))); + EXPECT_THAT(sack.duplicate_tsns(), UnorderedElementsAre(TSN(13), TSN(14))); +} + +TEST_F(DataTrackerTest, ClearsDuplicateTsnsAfterCreatingSack) { + Observer({11, 12, 13, 14, 12, 13, 12, 13, 15, 16}); + SackChunk sack1 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack1.cumulative_tsn_ack(), TSN(16)); + EXPECT_THAT(sack1.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack1.duplicate_tsns(), UnorderedElementsAre(TSN(12), TSN(13))); + + Observer({17}); + SackChunk sack2 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack2.cumulative_tsn_ack(), TSN(17)); + EXPECT_THAT(sack2.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack2.duplicate_tsns(), IsEmpty()); +} + +TEST_F(DataTrackerTest, LimitsNumberOfDuplicatesReported) { + for (size_t i = 0; i < DataTracker::kMaxDuplicateTsnReported + 10; ++i) { + TSN tsn(11 + i); + buf_.Observe(tsn, AnyDataChunk::ImmediateAckFlag(false)); + buf_.Observe(tsn, AnyDataChunk::ImmediateAckFlag(false)); + } + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), + 
SizeIs(DataTracker::kMaxDuplicateTsnReported)); +} + +TEST_F(DataTrackerTest, LimitsNumberOfGapAckBlocksReported) { + for (size_t i = 0; i < DataTracker::kMaxGapAckBlocksReported + 10; ++i) { + TSN tsn(11 + i * 2); + buf_.Observe(tsn, AnyDataChunk::ImmediateAckFlag(false)); + } + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack.gap_ack_blocks(), + SizeIs(DataTracker::kMaxGapAckBlocksReported)); +} + +TEST_F(DataTrackerTest, SendsSackForFirstPacketObserved) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, SendsSackEverySecondPacketWhenThereIsNoPacketLoss) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + Observer({13}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({14}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + Observer({15}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, SendsSackEveryPacketOnPacketLoss) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({13}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({14}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({15}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({16}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + // Fill the hole. 
+ Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + // Goes back to every second packet + Observer({17}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({18}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, SendsSackOnDuplicateDataChunks) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + // Goes back to every second packet + Observer({13}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + // Duplicate again + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, GapAckBlockAddSingleBlock) { + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); +} + +TEST_F(DataTrackerTest, GapAckBlockAddsAnother) { + Observer({12}); + Observer({14}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2), + SackChunk::GapAckBlock(4, 4))); +} + +TEST_F(DataTrackerTest, GapAckBlockAddsDuplicate) { + Observer({12}); + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); + EXPECT_THAT(sack.duplicate_tsns(), ElementsAre(TSN(12))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToRight) { + Observer({12}); + Observer({13}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 3))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToRightWithOther) { + Observer({12}); + Observer({20}); + Observer({30}); + Observer({21}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 2), // + SackChunk::GapAckBlock(10, 11), // + SackChunk::GapAckBlock(20, 20))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToLeft) { + Observer({13}); + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 3))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToLeftWithOther) { + Observer({12}); + Observer({21}); + Observer({30}); + Observer({20}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 2), // + SackChunk::GapAckBlock(10, 11), // + SackChunk::GapAckBlock(20, 20))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToLRightAndMerges) { + Observer({12}); + Observer({20}); + Observer({22}); + Observer({30}); + Observer({21}); + SackChunk sack = 
buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 2), // + SackChunk::GapAckBlock(10, 12), // + SackChunk::GapAckBlock(20, 20))); +} + +TEST_F(DataTrackerTest, GapAckBlockMergesManyBlocksIntoOne) { + Observer({22}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12))); + Observer({30}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(20, 20))); + Observer({24}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(20, 20))); + Observer({28}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(18, 18), // + SackChunk::GapAckBlock(20, 20))); + Observer({26}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(16, 16), // + SackChunk::GapAckBlock(18, 18), // + SackChunk::GapAckBlock(20, 20))); + Observer({29}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(16, 16), // + SackChunk::GapAckBlock(18, 20))); + Observer({23}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 14), // + SackChunk::GapAckBlock(16, 16), // + SackChunk::GapAckBlock(18, 20))); + Observer({27}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 14), // + SackChunk::GapAckBlock(16, 20))); + + Observer({25}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 20))); + Observer({20}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 10), // + SackChunk::GapAckBlock(12, 20))); + Observer({32}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 10), // + SackChunk::GapAckBlock(12, 20), // + SackChunk::GapAckBlock(22, 22))); + Observer({21}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 20), // + SackChunk::GapAckBlock(22, 22))); + Observer({31}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 22))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveBeforeCumAckTsn) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(8)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 4), // + SackChunk::GapAckBlock(10, 12), + SackChunk::GapAckBlock(20, 21))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveBeforeFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(11)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, 
GapAckBlockRemoveAtBeginningOfFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(12)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveAtMiddleOfFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + buf_.HandleForwardTsn(TSN(13)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveAtEndOfFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + buf_.HandleForwardTsn(TSN(14)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAfterFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(18)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(18)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 4), // + SackChunk::GapAckBlock(12, 13))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightBeforeSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(19)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAtStartOfSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(20)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAtMiddleOfSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(21)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAtEndOfSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(22)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveeFarAfterAllBlocks) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(40)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(40)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), IsEmpty()); +} + } // namespace } // namespace dcsctp diff --git a/net/dcsctp/rx/reassembly_queue.h b/net/dcsctp/rx/reassembly_queue.h index b752e53acc..25cda70c58 100644 --- a/net/dcsctp/rx/reassembly_queue.h +++ b/net/dcsctp/rx/reassembly_queue.h @@ -104,8 +104,8 @@ class ReassemblyQueue { // data. size_t queued_bytes() const { return queued_bytes_; } - // The remaining bytes until the queue is full. 
- size_t remaining_bytes() const { return max_size_bytes_ - queued_bytes_; } + // The remaining bytes until the queue has reached the watermark limit. + size_t remaining_bytes() const { return watermark_bytes_ - queued_bytes_; } // Indicates if the queue is full. Data should not be added to the queue when // it's full. diff --git a/net/dcsctp/socket/BUILD.gn b/net/dcsctp/socket/BUILD.gn index 2fb05abdc9..72ac139acb 100644 --- a/net/dcsctp/socket/BUILD.gn +++ b/net/dcsctp/socket/BUILD.gn @@ -133,10 +133,10 @@ rtc_library("dcsctp_socket") { "../rx:data_tracker", "../rx:reassembly_queue", "../timer", - "../tx:fcfs_send_queue", "../tx:retransmission_error_counter", "../tx:retransmission_queue", "../tx:retransmission_timeout", + "../tx:rr_send_queue", "../tx:send_queue", ] sources = [ @@ -211,6 +211,7 @@ if (rtc_include_tests) { "../packet:tlv_trait", "../public:socket", "../public:types", + "../public:utils", "../rx:data_tracker", "../rx:reassembly_queue", "../testing:data_generator", @@ -220,6 +221,7 @@ if (rtc_include_tests) { "../tx:retransmission_queue", ] absl_deps = [ + "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", diff --git a/net/dcsctp/socket/callback_deferrer.h b/net/dcsctp/socket/callback_deferrer.h index 79f3f36d15..197cf434af 100644 --- a/net/dcsctp/socket/callback_deferrer.h +++ b/net/dcsctp/socket/callback_deferrer.h @@ -79,11 +79,6 @@ class CallbackDeferrer : public DcSctpSocketCallbacks { return underlying_.GetRandomInt(low, high); } - void NotifyOutgoingMessageBufferEmpty() override { - // Will not be deferred - call directly. - underlying_.NotifyOutgoingMessageBufferEmpty(); - } - void OnMessageReceived(DcSctpMessage message) override { deferred_.emplace_back( [deliverer = MessageDeliverer(std::move(message))]( @@ -145,6 +140,17 @@ class CallbackDeferrer : public DcSctpSocketCallbacks { DcSctpSocketCallbacks& cb) { cb.OnIncomingStreamsReset(streams); }); } + void OnBufferedAmountLow(StreamID stream_id) override { + deferred_.emplace_back([stream_id](DcSctpSocketCallbacks& cb) { + cb.OnBufferedAmountLow(stream_id); + }); + } + + void OnTotalBufferedAmountLow() override { + deferred_.emplace_back( + [](DcSctpSocketCallbacks& cb) { cb.OnTotalBufferedAmountLow(); }); + } + private: // A wrapper around the move-only DcSctpMessage, to let it be captured in a // lambda. 
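The flow-control surface added above (buffered_amount, SetBufferedAmountLowThreshold, and the deferred OnBufferedAmountLow / OnTotalBufferedAmountLow callbacks) replaces the now-deprecated NotifyOutgoingMessageBufferEmpty hook. The following is a minimal illustrative sketch, not part of this change, of how an application might drive it; the MaybeSend helper, the 100 kB watermark, and the PPID value are assumptions made for the example only, and the actual resumption of sending would happen in the application's OnBufferedAmountLow override.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

#include "net/dcsctp/public/dcsctp_socket.h"

// Illustrative watermark; any value works, the library default threshold is 0.
constexpr size_t kSendBufferLowWatermark = 100 * 1024;

// Configures the per-stream threshold and enqueues a message only while the
// stream's send buffer is below the watermark. When the buffered amount later
// drops back to or below the threshold, OnBufferedAmountLow(stream_id) fires
// (deferred via CallbackDeferrer) and the application can resume sending.
bool MaybeSend(dcsctp::DcSctpSocketInterface& socket,
               dcsctp::StreamID stream_id,
               std::vector<uint8_t> payload) {
  socket.SetBufferedAmountLowThreshold(stream_id, kSendBufferLowWatermark);
  if (socket.buffered_amount(stream_id) > kSendBufferLowWatermark) {
    return false;  // Back off until OnBufferedAmountLow(stream_id) fires.
  }
  return socket.Send(dcsctp::DcSctpMessage(stream_id, dcsctp::PPID(53),
                                           std::move(payload)),
                     dcsctp::SendOptions()) == dcsctp::SendStatus::kSuccess;
}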
diff --git a/net/dcsctp/socket/dcsctp_socket.cc b/net/dcsctp/socket/dcsctp_socket.cc index 174288eeb3..71bc98c70d 100644 --- a/net/dcsctp/socket/dcsctp_socket.cc +++ b/net/dcsctp/socket/dcsctp_socket.cc @@ -167,7 +167,14 @@ DcSctpSocket::DcSctpSocket(absl::string_view log_prefix, TimerOptions(options.t2_shutdown_timeout, TimerBackoffAlgorithm::kExponential, options.max_retransmissions))), - send_queue_(log_prefix_, options_.max_send_buffer_size) {} + send_queue_( + log_prefix_, + options_.max_send_buffer_size, + [this](StreamID stream_id) { + callbacks_.OnBufferedAmountLow(stream_id); + }, + options_.total_buffered_amount_low_threshold, + [this]() { callbacks_.OnTotalBufferedAmountLow(); }) {} std::string DcSctpSocket::log_prefix() const { return log_prefix_ + "[" + std::string(ToString(state_)) + "] "; @@ -184,7 +191,7 @@ bool DcSctpSocket::IsConsistent() const { case State::kCookieEchoed: return (tcb_ != nullptr && !t1_init_->is_running() && t1_cookie_->is_running() && !t2_shutdown_->is_running() && - cookie_echo_chunk_.has_value()); + tcb_->has_cookie_echo_chunk()); case State::kEstablished: return (tcb_ != nullptr && !t1_init_->is_running() && !t1_cookie_->is_running() && !t2_shutdown_->is_running()); @@ -332,7 +339,6 @@ void DcSctpSocket::InternalClose(ErrorKind error, absl::string_view message) { t1_cookie_->Stop(); t2_shutdown_->Stop(); tcb_ = nullptr; - cookie_echo_chunk_ = absl::nullopt; if (error == ErrorKind::kNoError) { callbacks_.OnClosed(); @@ -437,6 +443,19 @@ void DcSctpSocket::SetMaxMessageSize(size_t max_message_size) { options_.max_message_size = max_message_size; } +size_t DcSctpSocket::buffered_amount(StreamID stream_id) const { + return send_queue_.buffered_amount(stream_id); +} + +size_t DcSctpSocket::buffered_amount_low_threshold(StreamID stream_id) const { + return send_queue_.buffered_amount_low_threshold(stream_id); +} + +void DcSctpSocket::SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) { + send_queue_.SetBufferedAmountLowThreshold(stream_id, bytes); +} + void DcSctpSocket::MaybeSendShutdownOnPacketReceived(const SctpPacket& packet) { if (state_ == State::kShutdownSent) { bool has_data_chunk = @@ -756,7 +775,7 @@ absl::optional DcSctpSocket::OnCookieTimerExpiry() { RTC_DCHECK(state_ == State::kCookieEchoed); if (t1_cookie_->is_running()) { - SendCookieEcho(); + tcb_->SendBufferedPackets(callbacks_.TimeMillis()); } else { InternalClose(ErrorKind::kTooManyRetries, "No COOKIE_ACK received"); } @@ -1028,19 +1047,6 @@ void DcSctpSocket::HandleInit(const CommonHeader& header, SendPacket(b); } -void DcSctpSocket::SendCookieEcho() { - RTC_DCHECK(tcb_ != nullptr); - TimeMs now = callbacks_.TimeMillis(); - SctpPacket::Builder b = tcb_->PacketBuilder(); - b.Add(*cookie_echo_chunk_); - - // https://tools.ietf.org/html/rfc4960#section-5.1 - // "The COOKIE ECHO chunk can be bundled with any pending outbound DATA - // chunks, but it MUST be the first chunk in the packet and until the COOKIE - // ACK is returned the sender MUST NOT send any other packets to the peer." - tcb_->SendBufferedPackets(b, now, /*only_one_packet=*/true); -} - void DcSctpSocket::HandleInitAck( const CommonHeader& header, const SctpPacket::ChunkDescriptor& descriptor) { @@ -1086,8 +1092,8 @@ void DcSctpSocket::HandleInitAck( SetState(State::kCookieEchoed, "INIT_ACK received"); // The connection isn't fully established just yet. 
- cookie_echo_chunk_ = CookieEchoChunk(cookie->data()); - SendCookieEcho(); + tcb_->SetCookieEchoChunk(CookieEchoChunk(cookie->data())); + tcb_->SendBufferedPackets(callbacks_.TimeMillis()); t1_cookie_->Start(); } @@ -1127,7 +1133,9 @@ void DcSctpSocket::HandleCookieEcho( t1_init_->Stop(); t1_cookie_->Stop(); if (state_ != State::kEstablished) { - cookie_echo_chunk_ = absl::nullopt; + if (tcb_ != nullptr) { + tcb_->ClearCookieEchoChunk(); + } SetState(State::kEstablished, "COOKIE_ECHO received"); callbacks_.OnConnected(); } @@ -1250,7 +1258,7 @@ void DcSctpSocket::HandleCookieAck( // RFC 4960, Errata ID: 4400 t1_cookie_->Stop(); - cookie_echo_chunk_ = absl::nullopt; + tcb_->ClearCookieEchoChunk(); SetState(State::kEstablished, "COOKIE_ACK received"); tcb_->SendBufferedPackets(callbacks_.TimeMillis()); callbacks_.OnConnected(); diff --git a/net/dcsctp/socket/dcsctp_socket.h b/net/dcsctp/socket/dcsctp_socket.h index 24c0437b41..32e89b50d1 100644 --- a/net/dcsctp/socket/dcsctp_socket.h +++ b/net/dcsctp/socket/dcsctp_socket.h @@ -49,10 +49,10 @@ #include "net/dcsctp/socket/state_cookie.h" #include "net/dcsctp/socket/transmission_control_block.h" #include "net/dcsctp/timer/timer.h" -#include "net/dcsctp/tx/fcfs_send_queue.h" #include "net/dcsctp/tx/retransmission_error_counter.h" #include "net/dcsctp/tx/retransmission_queue.h" #include "net/dcsctp/tx/retransmission_timeout.h" +#include "net/dcsctp/tx/rr_send_queue.h" namespace dcsctp { @@ -93,7 +93,9 @@ class DcSctpSocket : public DcSctpSocketInterface { SocketState state() const override; const DcSctpOptions& options() const override { return options_; } void SetMaxMessageSize(size_t max_message_size) override; - + size_t buffered_amount(StreamID stream_id) const override; + size_t buffered_amount_low_threshold(StreamID stream_id) const override; + void SetBufferedAmountLowThreshold(StreamID stream_id, size_t bytes) override; // Returns this socket's verification tag, or zero if not yet connected. VerificationTag verification_tag() const { return tcb_ != nullptr ? tcb_->my_verification_tag() : VerificationTag(0); @@ -146,8 +148,6 @@ class DcSctpSocket : public DcSctpSocketInterface { void MaybeSendShutdownOnPacketReceived(const SctpPacket& packet); // Sends a INIT chunk. void SendInit(); - // Sends a CookieEcho chunk. - void SendCookieEcho(); // Sends a SHUTDOWN chunk. void SendShutdown(); // Sends a SHUTDOWN-ACK chunk. @@ -257,11 +257,7 @@ class DcSctpSocket : public DcSctpSocketInterface { // The actual SendQueue implementation. As data can be sent on a socket before // the connection is established, this component is not in the TCB. - FCFSSendQueue send_queue_; - - // Only valid when state == State::kCookieEchoed - // A cached Cookie Echo Chunk, to be re-sent on timer expiry. - absl::optional cookie_echo_chunk_ = absl::nullopt; + RRSendQueue send_queue_; // Contains verification tag and initial TSN between having sent the INIT // until the connection is established (there is no TCB at this point). 
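Taken together, the new public pieces in this change (DcSctpSocketFactory, TextPcapPacketObserver, and the zero-disables heartbeat option) can be wired up roughly as follows. This is a sketch under stated assumptions, not code from this change: CreateLoggingSocket and the socket name "A" are invented for illustration, the callbacks argument is expected to be an application-provided DcSctpSocketCallbacks implementation, and the factory's Create is assumed to return std::unique_ptr<DcSctpSocketInterface> as declared in dcsctp_socket_factory.h above.

#include <memory>

#include "net/dcsctp/public/dcsctp_options.h"
#include "net/dcsctp/public/dcsctp_socket.h"
#include "net/dcsctp/public/dcsctp_socket_factory.h"
#include "net/dcsctp/public/text_pcap_packet_observer.h"

std::unique_ptr<dcsctp::DcSctpSocketInterface> CreateLoggingSocket(
    dcsctp::DcSctpSocketCallbacks& callbacks) {
  dcsctp::DcSctpOptions options;
  // Uses the new semantics from this change: a zero heartbeat interval
  // disables heartbeating entirely.
  options.heartbeat_interval = dcsctp::DurationMs(0);
  // options.total_buffered_amount_low_threshold controls when
  // OnTotalBufferedAmountLow() is invoked; the default is left untouched here.

  dcsctp::DcSctpSocketFactory factory;
  // TextPcapPacketObserver logs every sent and received packet at LS_VERBOSE
  // in a text2pcap-friendly format, tagged "# SCTP_PACKET A".
  return factory.Create("A", callbacks,
                        std::make_unique<dcsctp::TextPcapPacketObserver>("A"),
                        options);
}

// The caller would then invoke Connect() on the returned socket and feed it
// incoming packets and expired timeouts via ReceivePacket() and
// HandleTimeout().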
diff --git a/net/dcsctp/socket/dcsctp_socket_test.cc b/net/dcsctp/socket/dcsctp_socket_test.cc index a3ddc7f85e..7ca3d9b399 100644 --- a/net/dcsctp/socket/dcsctp_socket_test.cc +++ b/net/dcsctp/socket/dcsctp_socket_test.cc @@ -16,6 +16,7 @@ #include #include +#include "absl/flags/flag.h" #include "absl/memory/memory.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -40,6 +41,7 @@ #include "net/dcsctp/public/dcsctp_message.h" #include "net/dcsctp/public/dcsctp_options.h" #include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/text_pcap_packet_observer.h" #include "net/dcsctp/public/types.h" #include "net/dcsctp/rx/reassembly_queue.h" #include "net/dcsctp/socket/mock_dcsctp_socket_callbacks.h" @@ -47,6 +49,8 @@ #include "rtc_base/gunit.h" #include "test/gmock.h" +ABSL_FLAG(bool, dcsctp_capture_packets, false, "Print packet capture."); + namespace dcsctp { namespace { using ::testing::_; @@ -58,6 +62,61 @@ using ::testing::SizeIs; constexpr SendOptions kSendOptions; constexpr size_t kLargeMessageSize = DcSctpOptions::kMaxSafeMTUSize * 20; +static constexpr size_t kSmallMessageSize = 10; + +MATCHER_P(HasDataChunkWithStreamId, stream_id, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != DataChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional dc = + DataChunk::Parse(packet->descriptors()[0].data); + if (!dc.has_value()) { + *result_listener << "The first chunk didn't parse as a data chunk"; + return false; + } + + if (dc->stream_id() != stream_id) { + *result_listener << "the stream_id is " << *dc->stream_id(); + return false; + } + + return true; +} + +MATCHER_P(HasDataChunkWithPPID, ppid, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != DataChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional dc = + DataChunk::Parse(packet->descriptors()[0].data); + if (!dc.has_value()) { + *result_listener << "The first chunk didn't parse as a data chunk"; + return false; + } + + if (dc->ppid() != ppid) { + *result_listener << "the ppid is " << *dc->ppid(); + return false; + } + + return true; +} MATCHER_P(HasDataChunkWithSsn, ssn, "") { absl::optional packet = SctpPacket::Parse(arg); @@ -179,14 +238,21 @@ DcSctpOptions MakeOptionsForTest(bool enable_message_interleaving) { return options; } +std::unique_ptr GetPacketObserver(absl::string_view name) { + if (absl::GetFlag(FLAGS_dcsctp_capture_packets)) { + return std::make_unique(name); + } + return nullptr; +} + class DcSctpSocketTest : public testing::Test { protected: explicit DcSctpSocketTest(bool enable_message_interleaving = false) : options_(MakeOptionsForTest(enable_message_interleaving)), cb_a_("A"), cb_z_("Z"), - sock_a_("A", cb_a_, nullptr, options_), - sock_z_("Z", cb_z_, nullptr, options_) {} + sock_a_("A", cb_a_, GetPacketObserver("A"), options_), + sock_z_("Z", cb_z_, GetPacketObserver("Z"), options_) {} void AdvanceTime(DurationMs duration) { cb_a_.AdvanceTime(duration); @@ -493,6 +559,65 @@ TEST_F(DcSctpSocketTest, ResendingCookieEchoTooManyTimesAborts) { EXPECT_EQ(sock_a_.state(), SocketState::kClosed); } +TEST_F(DcSctpSocketTest, 
DoesntSendMorePacketsUntilCookieAckHasBeenReceived) { + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions); + sock_a_.Connect(); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // COOKIE_ECHO is never received by Z. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket cookie_echo_packet1, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_THAT(cookie_echo_packet1.descriptors(), SizeIs(2)); + EXPECT_EQ(cookie_echo_packet1.descriptors()[0].type, CookieEchoChunk::kType); + EXPECT_EQ(cookie_echo_packet1.descriptors()[1].type, DataChunk::kType); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // There are DATA chunks in the sent packet (that was lost), which means that + // the T3-RTX timer is running, but as the socket is in kCookieEcho state, it + // will be T1-COOKIE that drives retransmissions, so when the T3-RTX expires, + // nothing should be retransmitted. + ASSERT_TRUE(options_.rto_initial < options_.t1_cookie_timeout); + AdvanceTime(options_.rto_initial); + RunTimers(); + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // When T1-COOKIE expires, both the COOKIE-ECHO and DATA should be present. + AdvanceTime(options_.t1_cookie_timeout - options_.rto_initial); + RunTimers(); + + // And this COOKIE-ECHO and DATA is also lost - never received by Z. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket cookie_echo_packet2, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_THAT(cookie_echo_packet2.descriptors(), SizeIs(2)); + EXPECT_EQ(cookie_echo_packet2.descriptors()[0].type, CookieEchoChunk::kType); + EXPECT_EQ(cookie_echo_packet2.descriptors()[1].type, DataChunk::kType); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // COOKIE_ECHO has exponential backoff. + AdvanceTime(options_.t1_cookie_timeout * 2); + RunTimers(); + + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads COOKIE_ACK. 
+ sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + EXPECT_THAT(cb_z_.ConsumeReceivedMessage()->payload(), + SizeIs(kLargeMessageSize)); +} + TEST_F(DcSctpSocketTest, ShutdownConnection) { ConnectSockets(); @@ -828,6 +953,84 @@ TEST_F(DcSctpSocketTest, ResetStreamWillMakeChunksStartAtZeroSsn) { sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); } +TEST_F(DcSctpSocketTest, ResetStreamWillOnlyResetTheRequestedStreams) { + ConnectSockets(); + + std::vector payload(options_.mtu - 100); + + // Send two ordered messages on SID 1 + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {}); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {}); + + auto packet1 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet1, HasDataChunkWithStreamId(StreamID(1))); + EXPECT_THAT(packet1, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(packet1); + + auto packet2 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet1, HasDataChunkWithStreamId(StreamID(1))); + EXPECT_THAT(packet2, HasDataChunkWithSsn(SSN(1))); + sock_z_.ReceivePacket(packet2); + + // Handle SACK + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Do the same, for SID 3 + sock_a_.Send(DcSctpMessage(StreamID(3), PPID(53), payload), {}); + sock_a_.Send(DcSctpMessage(StreamID(3), PPID(53), payload), {}); + auto packet3 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet3, HasDataChunkWithStreamId(StreamID(3))); + EXPECT_THAT(packet3, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(packet3); + auto packet4 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet4, HasDataChunkWithStreamId(StreamID(3))); + EXPECT_THAT(packet4, HasDataChunkWithSsn(SSN(1))); + sock_z_.ReceivePacket(packet4); + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Receive all messages. + absl::optional msg1 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg1.has_value()); + EXPECT_EQ(msg1->stream_id(), StreamID(1)); + + absl::optional msg2 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg2.has_value()); + EXPECT_EQ(msg2->stream_id(), StreamID(1)); + + absl::optional msg3 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg3.has_value()); + EXPECT_EQ(msg3->stream_id(), StreamID(3)); + + absl::optional msg4 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg4.has_value()); + EXPECT_EQ(msg4->stream_id(), StreamID(3)); + + // Reset SID 1. This will directly send a RE-CONFIG. + sock_a_.ResetStreams(std::vector({StreamID(3)})); + // RE-CONFIG, req + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // RE-CONFIG, resp + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Send a message on SID 1 and 3 - SID 1 should not be reset, but 3 should. + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {}); + + sock_a_.Send(DcSctpMessage(StreamID(3), PPID(53), payload), {}); + + auto packet5 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet5, HasDataChunkWithStreamId(StreamID(1))); + EXPECT_THAT(packet5, HasDataChunkWithSsn(SSN(2))); // Unchanged. + sock_z_.ReceivePacket(packet5); + + auto packet6 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet6, HasDataChunkWithStreamId(StreamID(3))); + EXPECT_THAT(packet6, HasDataChunkWithSsn(SSN(0))); // Reset. 
+ sock_z_.ReceivePacket(packet6); + + // Handle SACK + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); +} + TEST_F(DcSctpSocketTest, OnePeerReconnects) { ConnectSockets(); @@ -873,7 +1076,14 @@ TEST_F(DcSctpSocketTest, SendMessageWithLimitedRtx) { // Third DATA sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); - // Handle SACK + // Handle SACK for first DATA + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Handle delayed SACK for third DATA + AdvanceTime(options_.delayed_ack_max_timeout); + RunTimers(); + + // Handle SACK for second DATA sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); // Now the missing data chunk will be marked as nacked, but it might still be @@ -889,11 +1099,7 @@ TEST_F(DcSctpSocketTest, SendMessageWithLimitedRtx) { // FORWARD-TSN (third) sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); - // The receiver might have moved into delayed ack mode. - AdvanceTime(options_.rto_initial); - RunTimers(); - - // Handle SACK + // Which will trigger a SACK sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); absl::optional msg1 = cb_z_.ConsumeReceivedMessage(); @@ -908,6 +1114,78 @@ TEST_F(DcSctpSocketTest, SendMessageWithLimitedRtx) { EXPECT_FALSE(msg3.has_value()); } +TEST_F(DcSctpSocketTest, SendManyFragmentedMessagesWithLimitedRtx) { + ConnectSockets(); + + SendOptions send_options; + send_options.unordered = IsUnordered(true); + send_options.max_retransmissions = 0; + std::vector payload(options_.mtu * 2 - 100 /* margin */); + // Sending first message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(51), payload), send_options); + // Sending second message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(52), payload), send_options); + // Sending third message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), send_options); + // Sending fourth message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(54), payload), send_options); + + // First DATA, first fragment + std::vector packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(51))); + sock_z_.ReceivePacket(std::move(packet)); + + // First DATA, second fragment (lost) + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(51))); + + // Second DATA, first fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(52))); + sock_z_.ReceivePacket(std::move(packet)); + + // Second DATA, second fragment (lost) + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(52))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + + // Third DATA, first fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(53))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(std::move(packet)); + + // Third DATA, second fragment (lost) + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(53))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + + // Fourth DATA, first fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(54))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(std::move(packet)); + + // Fourth DATA, second fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(54))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(std::move(packet)); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + // Let the RTX timer expire, and exchange FORWARD-TSN/SACKs + 
AdvanceTime(options_.rto_initial); + RunTimers(); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + absl::optional msg1 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg1.has_value()); + EXPECT_EQ(msg1->ppid(), PPID(54)); + + ASSERT_FALSE(cb_z_.ConsumeReceivedMessage().has_value()); +} + struct FakeChunkConfig : ChunkConfig { static constexpr int kType = 0x49; static constexpr size_t kHeaderSize = 4; @@ -1040,11 +1318,12 @@ TEST_F(DcSctpSocketTest, PassingHighWatermarkWillOnlyAcceptCumAckTsn) { AllOf(HasSackWithCumAckTsn(AddTo(tsn, 1)), HasSackWithNoGapAckBlocks())); // This DATA should be accepted, and it fills the reassembly queue. - sock_z2.ReceivePacket(SctpPacket::Builder(sock_z2.verification_tag(), options) - .Add(DataChunk(AddTo(tsn, 2), StreamID(1), SSN(0), - PPID(53), std::vector(10), - /*options=*/{})) - .Build()); + sock_z2.ReceivePacket( + SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 2), StreamID(1), SSN(0), PPID(53), + std::vector(kSmallMessageSize), + /*options=*/{})) + .Build()); // The receiver might have moved into delayed ack mode. cb_z2.AdvanceTime(options.rto_initial); @@ -1058,11 +1337,12 @@ TEST_F(DcSctpSocketTest, PassingHighWatermarkWillOnlyAcceptCumAckTsn) { EXPECT_CALL(cb_z2, OnClosed).Times(0); // This DATA will make the connection close. It's too full now. - sock_z2.ReceivePacket(SctpPacket::Builder(sock_z2.verification_tag(), options) - .Add(DataChunk(AddTo(tsn, 3), StreamID(1), SSN(0), - PPID(53), std::vector(10), - /*options=*/{})) - .Build()); + sock_z2.ReceivePacket( + SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 3), StreamID(1), SSN(0), PPID(53), + std::vector(kSmallMessageSize), + /*options=*/{})) + .Build()); } TEST_F(DcSctpSocketTest, SetMaxMessageSize) { @@ -1162,5 +1442,171 @@ TEST_F(DcSctpSocketTest, DiscardsMessagesWithLowLifetimeIfMustBuffer) { EXPECT_FALSE(cb_z_.ConsumeReceivedMessage().has_value()); } +TEST_F(DcSctpSocketTest, HasReasonableBufferedAmountValues) { + ConnectSockets(); + + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), 0u); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kSmallMessageSize)), + kSendOptions); + // Sending a small message will directly send it as a single packet, so + // nothing is left in the queue. + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), 0u); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions); + + // Sending a message will directly start sending a few packets, so the + // buffered amount is not the full message size. 
+ EXPECT_GT(sock_a_.buffered_amount(StreamID(1)), 0u); + EXPECT_LT(sock_a_.buffered_amount(StreamID(1)), kLargeMessageSize); +} + +TEST_F(DcSctpSocketTest, HasDefaultOnBufferedAmountLowValueZero) { + EXPECT_EQ(sock_a_.buffered_amount_low_threshold(StreamID(1)), 0u); +} + +TEST_F(DcSctpSocketTest, TriggersOnBufferedAmountLowWithDefaultValueZero) { + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kSmallMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, DoesntTriggerOnBufferedAmountLowIfBelowThreshold) { + static constexpr size_t kMessageSize = 1000; + static constexpr size_t kBufferedAmountLowThreshold = kMessageSize * 10; + + sock_a_.SetBufferedAmountLowThreshold(StreamID(1), + kBufferedAmountLowThreshold); + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))).Times(0); + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, TriggersOnBufferedAmountMultipleTimes) { + static constexpr size_t kMessageSize = 1000; + static constexpr size_t kBufferedAmountLowThreshold = kMessageSize / 2; + + sock_a_.SetBufferedAmountLowThreshold(StreamID(1), + kBufferedAmountLowThreshold); + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))).Times(3); + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(2))).Times(2); + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(2), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(2), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, TriggersOnBufferedAmountLowOnlyWhenCrossingThreshold) { + static constexpr size_t kMessageSize = 1000; + static constexpr size_t kBufferedAmountLowThreshold = kMessageSize * 1.5; + + sock_a_.SetBufferedAmountLowThreshold(StreamID(1), + kBufferedAmountLowThreshold); + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + + // Add a few messages to fill up the congestion window. When that is full, + // messages will start to be fully buffered. 
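+ // With the threshold at 1.5 * kMessageSize, the buffered amount first rises
+ // to 2 * kMessageSize (above the threshold) and then drains back below it
+ // exactly once, so OnBufferedAmountLow is expected to fire exactly once.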
+ while (sock_a_.buffered_amount(StreamID(1)) == 0) { + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kMessageSize)), + kSendOptions); + } + size_t initial_buffered = sock_a_.buffered_amount(StreamID(1)); + ASSERT_GE(initial_buffered, 0u); + ASSERT_LT(initial_buffered, kMessageSize); + + // Up to kMessageSize (which is below the threshold) + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), + std::vector(kMessageSize - initial_buffered)), + kSendOptions); + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), kMessageSize); + + // Up to 2*kMessageSize (which is above the threshold) + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), 2 * kMessageSize); + + // Start ACKing packets, which will empty the send queue, and trigger the + // callback. + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))).Times(1); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, DoesntTriggerOnTotalBufferAmountLowWhenBelow) { + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnTotalBufferedAmountLow).Times(0); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, TriggersOnTotalBufferAmountLowWhenCrossingThreshold) { + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnTotalBufferedAmountLow).Times(0); + + // Fill up the send queue completely. + for (;;) { + if (sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions) == SendStatus::kErrorResourceExhaustion) { + break; + } + } + + EXPECT_CALL(cb_a_, OnTotalBufferedAmountLow).Times(1); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + } // namespace } // namespace dcsctp diff --git a/net/dcsctp/socket/heartbeat_handler.cc b/net/dcsctp/socket/heartbeat_handler.cc index 30a0001c68..78616d1033 100644 --- a/net/dcsctp/socket/heartbeat_handler.cc +++ b/net/dcsctp/socket/heartbeat_handler.cc @@ -104,10 +104,15 @@ HeartbeatHandler::HeartbeatHandler(absl::string_view log_prefix, TimerBackoffAlgorithm::kExponential, /*max_restarts=*/0))) { // The interval timer must always be running as long as the association is up. - interval_timer_->Start(); + RestartTimer(); } void HeartbeatHandler::RestartTimer() { + if (interval_duration_ == DurationMs(0)) { + // Heartbeating has been disabled. + return; + } + if (interval_duration_should_include_rtt_) { // The RTT should be used, but it's not easy accessible. The RTO will // suffice. 
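// With this change, a heartbeat_interval of zero disables heartbeating
// altogether: the constructor now goes through RestartTimer(), which returns
// before starting the interval timer. A minimal configuration sketch, assuming
// the options are passed to the socket as usual (socket construction elided):
//
//   DcSctpOptions options;
//   options.heartbeat_interval = DurationMs(0);  // 0 == no HEARTBEAT probing.
//   options.heartbeat_interval_include_rtt = false;
//   // ... construct the DcSctpSocket with `options`.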
diff --git a/net/dcsctp/socket/heartbeat_handler_test.cc b/net/dcsctp/socket/heartbeat_handler_test.cc index 20c1d465db..2c5df9fd92 100644 --- a/net/dcsctp/socket/heartbeat_handler_test.cc +++ b/net/dcsctp/socket/heartbeat_handler_test.cc @@ -30,17 +30,19 @@ using ::testing::NiceMock; using ::testing::Return; using ::testing::SizeIs; -DcSctpOptions MakeOptions() { +constexpr DurationMs kHeartbeatInterval = DurationMs(30'000); + +DcSctpOptions MakeOptions(DurationMs heartbeat_interval) { DcSctpOptions options; options.heartbeat_interval_include_rtt = false; - options.heartbeat_interval = DurationMs(30'000); + options.heartbeat_interval = heartbeat_interval; return options; } -class HeartbeatHandlerTest : public testing::Test { +class HeartbeatHandlerTestBase : public testing::Test { protected: - HeartbeatHandlerTest() - : options_(MakeOptions()), + explicit HeartbeatHandlerTestBase(DurationMs heartbeat_interval) + : options_(MakeOptions(heartbeat_interval)), context_(&callbacks_), timer_manager_([this]() { return callbacks_.CreateTimeout(); }), handler_("log: ", options_, &context_, &timer_manager_) {} @@ -63,6 +65,31 @@ class HeartbeatHandlerTest : public testing::Test { HeartbeatHandler handler_; }; +class HeartbeatHandlerTest : public HeartbeatHandlerTestBase { + protected: + HeartbeatHandlerTest() : HeartbeatHandlerTestBase(kHeartbeatInterval) {} +}; + +class DisabledHeartbeatHandlerTest : public HeartbeatHandlerTestBase { + protected: + DisabledHeartbeatHandlerTest() : HeartbeatHandlerTestBase(DurationMs(0)) {} +}; + +TEST_F(HeartbeatHandlerTest, HasRunningHeartbeatIntervalTimer) { + AdvanceTime(options_.heartbeat_interval); + + // Validate that a heartbeat request was sent. + std::vector payload = callbacks_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(payload)); + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatRequestChunk request, + HeartbeatRequestChunk::Parse(packet.descriptors()[0].data)); + + EXPECT_TRUE(request.info().has_value()); +} + TEST_F(HeartbeatHandlerTest, RepliesToHeartbeatRequests) { uint8_t info_data[] = {1, 2, 3, 4, 5}; HeartbeatRequestChunk request( @@ -120,5 +147,12 @@ TEST_F(HeartbeatHandlerTest, IncreasesErrorIfNotAckedInTime) { AdvanceTime(rto); } +TEST_F(DisabledHeartbeatHandlerTest, IsReallyDisabled) { + AdvanceTime(options_.heartbeat_interval); + + // Validate that a request was NOT sent. 
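+ // With the interval configured as DurationMs(0), RestartTimer() returns
+ // before starting the interval timer, so advancing time never produces a
+ // HEARTBEAT request.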
+ EXPECT_THAT(callbacks_.ConsumeSentPacket(), IsEmpty()); +} + } // namespace } // namespace dcsctp diff --git a/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h b/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h index 799f85c274..bcf1bde5b8 100644 --- a/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h +++ b/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h @@ -93,7 +93,6 @@ class MockDcSctpSocketCallbacks : public DcSctpSocketCallbacks { uint32_t GetRandomInt(uint32_t low, uint32_t high) override { return random_.Rand(low, high); } - MOCK_METHOD(void, NotifyOutgoingMessageBufferEmpty, (), (override)); MOCK_METHOD(void, OnMessageReceived, (DcSctpMessage message), (override)); MOCK_METHOD(void, @@ -120,6 +119,8 @@ class MockDcSctpSocketCallbacks : public DcSctpSocketCallbacks { OnIncomingStreamsReset, (rtc::ArrayView incoming_streams), (override)); + MOCK_METHOD(void, OnBufferedAmountLow, (StreamID stream_id), (override)); + MOCK_METHOD(void, OnTotalBufferedAmountLow, (), (override)); bool HasPacket() const { return !sent_packets_.empty(); } diff --git a/net/dcsctp/socket/stream_reset_handler_test.cc b/net/dcsctp/socket/stream_reset_handler_test.cc index 6168f16312..a8e96fbf20 100644 --- a/net/dcsctp/socket/stream_reset_handler_test.cc +++ b/net/dcsctp/socket/stream_reset_handler_test.cc @@ -105,7 +105,6 @@ class StreamResetHandlerTest : public testing::Test { producer_, [](DurationMs rtt_ms) {}, []() {}, - []() {}, *t3_rtx_timer_, /*options=*/{}), handler_("log: ", diff --git a/net/dcsctp/socket/transmission_control_block.cc b/net/dcsctp/socket/transmission_control_block.cc index 6e0be6a316..4fde40cee9 100644 --- a/net/dcsctp/socket/transmission_control_block.cc +++ b/net/dcsctp/socket/transmission_control_block.cc @@ -54,9 +54,15 @@ absl::optional TransmissionControlBlock::OnRtxTimerExpiry() { TimeMs now = callbacks_.TimeMillis(); RTC_DLOG(LS_INFO) << log_prefix_ << "Timer " << t3_rtx_->name() << " has expired"; - if (IncrementTxErrorCounter("t3-rtx expired")) { - retransmission_queue_.HandleT3RtxTimerExpiry(); - SendBufferedPackets(now); + if (cookie_echo_chunk_.has_value()) { + // In the COOKIE_ECHO state, let the T1-COOKIE timer trigger + // retransmissions, to avoid having two timers doing that. + RTC_DLOG(LS_VERBOSE) << "Not retransmitting as T1-cookie is active."; + } else { + if (IncrementTxErrorCounter("t3-rtx expired")) { + retransmission_queue_.HandleT3RtxTimerExpiry(); + SendBufferedPackets(now); + } } return absl::nullopt; } @@ -77,12 +83,19 @@ void TransmissionControlBlock::MaybeSendSack() { } void TransmissionControlBlock::SendBufferedPackets(SctpPacket::Builder& builder, - TimeMs now, - bool only_one_packet) { + TimeMs now) { for (int packet_idx = 0;; ++packet_idx) { // Only add control chunks to the first packet that is sent, if sending // multiple packets in one go (as allowed by the congestion window). if (packet_idx == 0) { + if (cookie_echo_chunk_.has_value()) { + // https://tools.ietf.org/html/rfc4960#section-5.1 + // "The COOKIE ECHO chunk can be bundled with any pending outbound DATA + // chunks, but it MUST be the first chunk in the packet..." 
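+ // Prepend the COOKIE ECHO to the first packet of this burst; the check at
+ // the end of the loop then stops after that single packet until COOKIE ACK
+ // clears `cookie_echo_chunk_`.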
+ RTC_DCHECK(builder.empty()); + builder.Add(*cookie_echo_chunk_); + } + // https://tools.ietf.org/html/rfc4960#section-6 // "Before an endpoint transmits a DATA chunk, if any received DATA // chunks have not been acknowledged (e.g., due to delayed ack), the @@ -122,7 +135,11 @@ void TransmissionControlBlock::SendBufferedPackets(SctpPacket::Builder& builder, break; } Send(builder); - if (only_one_packet) { + + if (cookie_echo_chunk_.has_value()) { + // https://tools.ietf.org/html/rfc4960#section-5.1 + // "... until the COOKIE ACK is returned the sender MUST NOT send any + // other packets to the peer." break; } } diff --git a/net/dcsctp/socket/transmission_control_block.h b/net/dcsctp/socket/transmission_control_block.h index 2f1c9ada6c..172f7c0c08 100644 --- a/net/dcsctp/socket/transmission_control_block.h +++ b/net/dcsctp/socket/transmission_control_block.h @@ -19,6 +19,7 @@ #include "absl/strings/string_view.h" #include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" #include "net/dcsctp/packet/sctp_packet.h" #include "net/dcsctp/public/dcsctp_options.h" #include "net/dcsctp/public/dcsctp_socket.h" @@ -89,7 +90,6 @@ class TransmissionControlBlock : public Context { a_rwnd, send_queue, [this](DurationMs rtt) { return ObserveRTT(rtt); }, - [this]() { callbacks_.NotifyOutgoingMessageBufferEmpty(); }, [this]() { tx_error_counter_.Clear(); }, *t3_rtx_, options, @@ -145,20 +145,31 @@ class TransmissionControlBlock : public Context { // Sends a SACK, if there is a need to. void MaybeSendSack(); + // Will be set while the socket is in kCookieEcho state. In this state, there + // can only be a single packet outstanding, and it must contain the COOKIE + // ECHO chunk as the first chunk in that packet, until the COOKIE ACK has been + // received, which will make the socket call `ClearCookieEchoChunk`. + void SetCookieEchoChunk(CookieEchoChunk chunk) { + cookie_echo_chunk_ = std::move(chunk); + } + + // Called when the COOKIE ACK chunk has been received, to allow further + // packets to be sent. + void ClearCookieEchoChunk() { cookie_echo_chunk_ = absl::nullopt; } + + bool has_cookie_echo_chunk() const { return cookie_echo_chunk_.has_value(); } + // Fills `builder` (which may already be filled with control chunks) with - // with other control and data chunks, and sends packets as much as can be - // allowed by the congestion control algorithm. If `only_one_packet` is true, - // only a single packet will be sent. Otherwise, zero, one or multiple may be - // sent. - void SendBufferedPackets(SctpPacket::Builder& builder, - TimeMs now, - bool only_one_packet = false); - - // As above, but without passing in a builder and allowing sending many - // packets. + // other control and data chunks, and sends packets as much as can be + // allowed by the congestion control algorithm. + void SendBufferedPackets(SctpPacket::Builder& builder, TimeMs now); + + // As above, but without passing in a builder. If `cookie_echo_chunk_` is + // present, then only one packet will be sent, with this chunk as the first + // chunk. void SendBufferedPackets(TimeMs now) { SctpPacket::Builder builder(peer_verification_tag_, options_); - SendBufferedPackets(builder, now, /*only_one_packet=*/false); + SendBufferedPackets(builder, now); } // Returns a textual representation of this object, for logging. 
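// A sketch of the intended call pattern from the socket side, under the
// assumption that the socket state machine drives these transitions (the
// socket code is not part of this hunk, so the variable names here are
// illustrative only):
//
//   // Entering kCookieEchoed: remember the chunk; only one packet, starting
//   // with COOKIE ECHO, may be sent until COOKIE ACK arrives.
//   tcb_->SetCookieEchoChunk(std::move(cookie_echo));
//   tcb_->SendBufferedPackets(callbacks_.TimeMillis());
//
//   // On COOKIE ACK: resume normal multi-packet sending.
//   tcb_->ClearCookieEchoChunk();
//   tcb_->SendBufferedPackets(callbacks_.TimeMillis());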
@@ -196,6 +207,13 @@ class TransmissionControlBlock : public Context { RetransmissionQueue retransmission_queue_; StreamResetHandler stream_reset_handler_; HeartbeatHandler heartbeat_handler_; + + // Only valid when the socket state == State::kCookieEchoed. In this state, + // the socket must wait for COOKIE ACK to continue sending any packets (not + // including a COOKIE ECHO). So if `cookie_echo_chunk_` is present, the + // SendBufferedChunks will always only just send one packet, with this chunk + // as the first chunk in the packet. + absl::optional cookie_echo_chunk_ = absl::nullopt; }; } // namespace dcsctp diff --git a/net/dcsctp/tx/BUILD.gn b/net/dcsctp/tx/BUILD.gn index 924a194f85..2f0b27afc6 100644 --- a/net/dcsctp/tx/BUILD.gn +++ b/net/dcsctp/tx/BUILD.gn @@ -20,7 +20,7 @@ rtc_source_set("send_queue") { absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } -rtc_library("fcfs_send_queue") { +rtc_library("rr_send_queue") { deps = [ ":send_queue", "../../../api:array_view", @@ -32,8 +32,8 @@ rtc_library("fcfs_send_queue") { "../public:types", ] sources = [ - "fcfs_send_queue.cc", - "fcfs_send_queue.h", + "rr_send_queue.cc", + "rr_send_queue.h", ] absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", @@ -111,11 +111,11 @@ if (rtc_include_tests) { testonly = true deps = [ - ":fcfs_send_queue", ":mock_send_queue", ":retransmission_error_counter", ":retransmission_queue", ":retransmission_timeout", + ":rr_send_queue", ":send_queue", "../../../api:array_view", "../../../rtc_base:checks", @@ -127,14 +127,15 @@ if (rtc_include_tests) { "../public:socket", "../public:types", "../testing:data_generator", + "../testing:testing_macros", "../timer", ] absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] sources = [ - "fcfs_send_queue_test.cc", "retransmission_error_counter_test.cc", "retransmission_queue_test.cc", "retransmission_timeout_test.cc", + "rr_send_queue_test.cc", ] } } diff --git a/net/dcsctp/tx/fcfs_send_queue.cc b/net/dcsctp/tx/fcfs_send_queue.cc deleted file mode 100644 index f2dc5e40f8..0000000000 --- a/net/dcsctp/tx/fcfs_send_queue.cc +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#include "net/dcsctp/tx/fcfs_send_queue.h" - -#include -#include -#include -#include -#include -#include - -#include "absl/algorithm/container.h" -#include "absl/types/optional.h" -#include "api/array_view.h" -#include "net/dcsctp/packet/data.h" -#include "net/dcsctp/public/dcsctp_message.h" -#include "net/dcsctp/public/dcsctp_socket.h" -#include "net/dcsctp/tx/send_queue.h" -#include "rtc_base/logging.h" - -namespace dcsctp { -void FCFSSendQueue::Add(TimeMs now, - DcSctpMessage message, - const SendOptions& send_options) { - RTC_DCHECK(!message.payload().empty()); - std::deque& queue = - IsPaused(message.stream_id()) ? paused_items_ : items_; - // Any limited lifetime should start counting from now - when the message - // has been added to the queue. - absl::optional expires_at = absl::nullopt; - if (send_options.lifetime.has_value()) { - // `expires_at` is the time when it expires. 
Which is slightly larger than - // the message's lifetime, as the message is alive during its entire - // lifetime (which may be zero). - expires_at = now + *send_options.lifetime + DurationMs(1); - } - queue.emplace_back(std::move(message), expires_at, send_options); -} - -size_t FCFSSendQueue::total_bytes() const { - // TODO(boivie): Have the current size as a member variable, so that's it not - // calculated for every operation. - return absl::c_accumulate(items_, 0, - [](size_t size, const Item& item) { - return size + item.remaining_size; - }) + - absl::c_accumulate(paused_items_, 0, - [](size_t size, const Item& item) { - return size + item.remaining_size; - }); -} - -bool FCFSSendQueue::IsFull() const { - return total_bytes() >= buffer_size_; -} - -bool FCFSSendQueue::IsEmpty() const { - return items_.empty(); -} - -FCFSSendQueue::Item* FCFSSendQueue::GetFirstNonExpiredMessage(TimeMs now) { - while (!items_.empty()) { - FCFSSendQueue::Item& item = items_.front(); - // An entire item can be discarded iff: - // 1) It hasn't been partially sent (has been allocated a message_id). - // 2) It has a non-negative expiry time. - // 3) And that expiry time has passed. - if (!item.message_id.has_value() && item.expires_at.has_value() && - *item.expires_at <= now) { - // TODO(boivie): This should be reported to the client. - RTC_DLOG(LS_VERBOSE) - << log_prefix_ - << "Message is expired before even partially sent - discarding"; - items_.pop_front(); - continue; - } - - return &item; - } - return nullptr; -} - -absl::optional FCFSSendQueue::Produce(TimeMs now, - size_t max_size) { - Item* item = GetFirstNonExpiredMessage(now); - if (item == nullptr) { - return absl::nullopt; - } - - DcSctpMessage& message = item->message; - - // Don't make too small fragments as that can result in increased risk of - // failure to assemble a message if a small fragment is missing. - if (item->remaining_size > max_size && max_size < kMinimumFragmentedPayload) { - RTC_DLOG(LS_VERBOSE) << log_prefix_ << "tx-msg: Will not fragment " - << item->remaining_size << " bytes into buffer of " - << max_size << " bytes"; - return absl::nullopt; - } - - // Allocate Message ID and SSN when the first fragment is sent. - if (!item->message_id.has_value()) { - MID& mid = - mid_by_stream_id_[{item->send_options.unordered, message.stream_id()}]; - item->message_id = mid; - mid = MID(*mid + 1); - } - if (!item->send_options.unordered && !item->ssn.has_value()) { - SSN& ssn = ssn_by_stream_id_[message.stream_id()]; - item->ssn = ssn; - ssn = SSN(*ssn + 1); - } - - // Grab the next `max_size` fragment from this message and calculate flags. - rtc::ArrayView chunk_payload = - item->message.payload().subview(item->remaining_offset, max_size); - rtc::ArrayView message_payload = message.payload(); - Data::IsBeginning is_beginning(chunk_payload.data() == - message_payload.data()); - Data::IsEnd is_end((chunk_payload.data() + chunk_payload.size()) == - (message_payload.data() + message_payload.size())); - - StreamID stream_id = message.stream_id(); - PPID ppid = message.ppid(); - - // Zero-copy the payload if the message fits in a single chunk. - std::vector payload = - is_beginning && is_end - ? 
std::move(message).ReleasePayload() - : std::vector(chunk_payload.begin(), chunk_payload.end()); - - FSN fsn(item->current_fsn); - item->current_fsn = FSN(*item->current_fsn + 1); - - SendQueue::DataToSend chunk(Data(stream_id, item->ssn.value_or(SSN(0)), - item->message_id.value(), fsn, ppid, - std::move(payload), is_beginning, is_end, - item->send_options.unordered)); - chunk.max_retransmissions = item->send_options.max_retransmissions; - chunk.expires_at = item->expires_at; - - if (is_end) { - // The entire message has been sent, and its last data copied to `chunk`, so - // it can safely be discarded. - items_.pop_front(); - } else { - item->remaining_offset += chunk_payload.size(); - item->remaining_size -= chunk_payload.size(); - RTC_DCHECK(item->remaining_offset + item->remaining_size == - item->message.payload().size()); - RTC_DCHECK(item->remaining_size > 0); - } - RTC_DLOG(LS_VERBOSE) << log_prefix_ << "tx-msg: Producing chunk of " - << chunk.data.size() << " bytes (max: " << max_size - << ")"; - return chunk; -} - -void FCFSSendQueue::Discard(IsUnordered unordered, - StreamID stream_id, - MID message_id) { - // As this method will only discard partially sent messages, and as the queue - // is a FIFO queue, the only partially sent message would be the topmost - // message. - if (!items_.empty()) { - Item& item = items_.front(); - if (item.send_options.unordered == unordered && - item.message.stream_id() == stream_id && item.message_id.has_value() && - *item.message_id == message_id) { - items_.pop_front(); - } - } -} - -void FCFSSendQueue::PrepareResetStreams( - rtc::ArrayView streams) { - for (StreamID stream_id : streams) { - paused_streams_.insert(stream_id); - } - - // Will not discard partially sent messages - only whole messages. Partially - // delivered messages (at the time of receiving a Stream Reset command) will - // always deliver all the fragments before actually resetting the stream. - for (auto it = items_.begin(); it != items_.end();) { - if (IsPaused(it->message.stream_id()) && it->remaining_offset == 0) { - it = items_.erase(it); - } else { - ++it; - } - } -} - -bool FCFSSendQueue::CanResetStreams() const { - for (auto& item : items_) { - if (IsPaused(item.message.stream_id())) { - return false; - } - } - return true; -} - -void FCFSSendQueue::CommitResetStreams() { - for (StreamID stream_id : paused_streams_) { - ssn_by_stream_id_[stream_id] = SSN(0); - // https://tools.ietf.org/html/rfc8260#section-2.3.2 - // "When an association resets the SSN using the SCTP extension defined - // in [RFC6525], the two counters (one for the ordered messages, one for - // the unordered messages) used for the MIDs MUST be reset to 0." - mid_by_stream_id_[{IsUnordered(false), stream_id}] = MID(0); - mid_by_stream_id_[{IsUnordered(true), stream_id}] = MID(0); - } - RollbackResetStreams(); -} - -void FCFSSendQueue::RollbackResetStreams() { - while (!paused_items_.empty()) { - items_.push_back(std::move(paused_items_.front())); - paused_items_.pop_front(); - } - paused_streams_.clear(); -} - -void FCFSSendQueue::Reset() { - if (!items_.empty()) { - // If this message has been partially sent, reset it so that it will be - // re-sent. 
- auto& item = items_.front(); - item.remaining_offset = 0; - item.remaining_size = item.message.payload().size(); - item.message_id = absl::nullopt; - item.ssn = absl::nullopt; - item.current_fsn = FSN(0); - } - RollbackResetStreams(); - mid_by_stream_id_.clear(); - ssn_by_stream_id_.clear(); -} - -bool FCFSSendQueue::IsPaused(StreamID stream_id) const { - return paused_streams_.find(stream_id) != paused_streams_.end(); -} - -} // namespace dcsctp diff --git a/net/dcsctp/tx/fcfs_send_queue.h b/net/dcsctp/tx/fcfs_send_queue.h deleted file mode 100644 index 63e7eab49a..0000000000 --- a/net/dcsctp/tx/fcfs_send_queue.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef NET_DCSCTP_TX_FCFS_SEND_QUEUE_H_ -#define NET_DCSCTP_TX_FCFS_SEND_QUEUE_H_ - -#include -#include -#include -#include -#include -#include - -#include "absl/algorithm/container.h" -#include "absl/strings/string_view.h" -#include "absl/types/optional.h" -#include "api/array_view.h" -#include "net/dcsctp/common/pair_hash.h" -#include "net/dcsctp/public/dcsctp_message.h" -#include "net/dcsctp/public/dcsctp_socket.h" -#include "net/dcsctp/public/types.h" -#include "net/dcsctp/tx/send_queue.h" - -namespace dcsctp { - -// The FCFSSendQueue (First-Come, First-Served Send Queue) holds all messages -// that the client wants to send, but that haven't yet been split into chunks -// and sent on the wire. -// -// First-Come, First Served means that it passes the data in the exact same -// order as they were delivered by the calling application, and is defined in -// https://tools.ietf.org/html/rfc8260#section-3.1. It's a FIFO queue, but that -// term isn't used in this RFC. -// -// As messages can be (requested to be) sent before -// the connection is properly established, this send queue is always present - -// even for closed connections. -class FCFSSendQueue : public SendQueue { - public: - // How small a data chunk's payload may be, if having to fragment a message. - static constexpr size_t kMinimumFragmentedPayload = 10; - - FCFSSendQueue(absl::string_view log_prefix, size_t buffer_size) - : log_prefix_(std::string(log_prefix) + "fcfs: "), - buffer_size_(buffer_size) {} - - // Indicates if the buffer is full. Note that it's up to the caller to ensure - // that the buffer is not full prior to adding new items to it. - bool IsFull() const; - // Indicates if the buffer is empty. - bool IsEmpty() const; - - // Adds the message to be sent using the `send_options` provided. The current - // time should be in `now`. Note that it's the responsibility of the caller to - // ensure that the buffer is not full (by calling `IsFull`) before adding - // messages to it. - void Add(TimeMs now, - DcSctpMessage message, - const SendOptions& send_options = {}); - - // Implementation of `SendQueue`. 
- absl::optional Produce(TimeMs now, size_t max_size) override; - void Discard(IsUnordered unordered, - StreamID stream_id, - MID message_id) override; - void PrepareResetStreams(rtc::ArrayView streams) override; - bool CanResetStreams() const override; - void CommitResetStreams() override; - void RollbackResetStreams() override; - void Reset() override; - - // The size of the buffer, in "payload bytes". - size_t total_bytes() const; - - private: - // An enqueued message and metadata. - struct Item { - explicit Item(DcSctpMessage msg, - absl::optional expires_at, - const SendOptions& send_options) - : message(std::move(msg)), - expires_at(expires_at), - send_options(send_options), - remaining_offset(0), - remaining_size(message.payload().size()) {} - DcSctpMessage message; - absl::optional expires_at; - SendOptions send_options; - // The remaining payload (offset and size) to be sent, when it has been - // fragmented. - size_t remaining_offset; - size_t remaining_size; - // If set, an allocated Message ID and SSN. Will be allocated when the first - // fragment is sent. - absl::optional message_id = absl::nullopt; - absl::optional ssn = absl::nullopt; - // The current Fragment Sequence Number, incremented for each fragment. - FSN current_fsn = FSN(0); - }; - - Item* GetFirstNonExpiredMessage(TimeMs now); - bool IsPaused(StreamID stream_id) const; - - const std::string log_prefix_; - const size_t buffer_size_; - std::deque items_; - - std::unordered_set paused_streams_; - std::deque paused_items_; - - std::unordered_map, MID, UnorderedStreamHash> - mid_by_stream_id_; - std::unordered_map ssn_by_stream_id_; -}; -} // namespace dcsctp - -#endif // NET_DCSCTP_TX_FCFS_SEND_QUEUE_H_ diff --git a/net/dcsctp/tx/fcfs_send_queue_test.cc b/net/dcsctp/tx/fcfs_send_queue_test.cc deleted file mode 100644 index a67a0a1a9c..0000000000 --- a/net/dcsctp/tx/fcfs_send_queue_test.cc +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ -#include "net/dcsctp/tx/fcfs_send_queue.h" - -#include -#include -#include - -#include "net/dcsctp/packet/data.h" -#include "net/dcsctp/public/dcsctp_message.h" -#include "net/dcsctp/public/dcsctp_options.h" -#include "net/dcsctp/public/dcsctp_socket.h" -#include "net/dcsctp/public/types.h" -#include "net/dcsctp/tx/send_queue.h" -#include "rtc_base/gunit.h" -#include "test/gmock.h" - -namespace dcsctp { -namespace { - -constexpr TimeMs kNow = TimeMs(0); -constexpr StreamID kStreamID(1); -constexpr PPID kPPID(53); - -class FCFSSendQueueTest : public testing::Test { - protected: - FCFSSendQueueTest() : buf_("log: ", 100) {} - - const DcSctpOptions options_; - FCFSSendQueue buf_; -}; - -TEST_F(FCFSSendQueueTest, EmptyBuffer) { - EXPECT_TRUE(buf_.IsEmpty()); - EXPECT_FALSE(buf_.Produce(kNow, 100).has_value()); - EXPECT_FALSE(buf_.IsFull()); -} - -TEST_F(FCFSSendQueueTest, AddAndGetSingleChunk) { - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, {1, 2, 4, 5, 6})); - - EXPECT_FALSE(buf_.IsEmpty()); - EXPECT_FALSE(buf_.IsFull()); - absl::optional chunk_opt = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_opt.has_value()); - EXPECT_TRUE(chunk_opt->data.is_beginning); - EXPECT_TRUE(chunk_opt->data.is_end); -} - -TEST_F(FCFSSendQueueTest, CarveOutBeginningMiddleAndEnd) { - std::vector payload(60); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - absl::optional chunk_beg = - buf_.Produce(kNow, /*max_size=*/20); - ASSERT_TRUE(chunk_beg.has_value()); - EXPECT_TRUE(chunk_beg->data.is_beginning); - EXPECT_FALSE(chunk_beg->data.is_end); - - absl::optional chunk_mid = - buf_.Produce(kNow, /*max_size=*/20); - ASSERT_TRUE(chunk_mid.has_value()); - EXPECT_FALSE(chunk_mid->data.is_beginning); - EXPECT_FALSE(chunk_mid->data.is_end); - - absl::optional chunk_end = - buf_.Produce(kNow, /*max_size=*/20); - ASSERT_TRUE(chunk_end.has_value()); - EXPECT_FALSE(chunk_end->data.is_beginning); - EXPECT_TRUE(chunk_end->data.is_end); - - EXPECT_FALSE(buf_.Produce(kNow, 100).has_value()); -} - -TEST_F(FCFSSendQueueTest, GetChunksFromTwoMessages) { - std::vector payload(60); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - buf_.Add(kNow, DcSctpMessage(StreamID(3), PPID(54), payload)); - - absl::optional chunk_one = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.stream_id, kStreamID); - EXPECT_EQ(chunk_one->data.ppid, kPPID); - EXPECT_TRUE(chunk_one->data.is_beginning); - EXPECT_TRUE(chunk_one->data.is_end); - - absl::optional chunk_two = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); - EXPECT_EQ(chunk_two->data.ppid, PPID(54)); - EXPECT_TRUE(chunk_two->data.is_beginning); - EXPECT_TRUE(chunk_two->data.is_end); -} - -TEST_F(FCFSSendQueueTest, BufferBecomesFullAndEmptied) { - std::vector payload(60); - EXPECT_FALSE(buf_.IsFull()); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - EXPECT_FALSE(buf_.IsFull()); - buf_.Add(kNow, DcSctpMessage(StreamID(3), PPID(54), payload)); - EXPECT_TRUE(buf_.IsFull()); - // However, it's still possible to add messages. It's a soft limit, and it - // might be necessary to forcefully add messages due to e.g. external - // fragmentation. 
- buf_.Add(kNow, DcSctpMessage(StreamID(5), PPID(55), payload)); - EXPECT_TRUE(buf_.IsFull()); - - absl::optional chunk_one = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.stream_id, kStreamID); - EXPECT_EQ(chunk_one->data.ppid, kPPID); - - EXPECT_TRUE(buf_.IsFull()); - - absl::optional chunk_two = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); - EXPECT_EQ(chunk_two->data.ppid, PPID(54)); - - EXPECT_FALSE(buf_.IsFull()); - EXPECT_FALSE(buf_.IsEmpty()); - - absl::optional chunk_three = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_three.has_value()); - EXPECT_EQ(chunk_three->data.stream_id, StreamID(5)); - EXPECT_EQ(chunk_three->data.ppid, PPID(55)); - - EXPECT_FALSE(buf_.IsFull()); - EXPECT_TRUE(buf_.IsEmpty()); -} - -TEST_F(FCFSSendQueueTest, WillNotSendTooSmallPacket) { - std::vector payload(FCFSSendQueue::kMinimumFragmentedPayload + 1); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - // Wouldn't fit enough payload (wouldn't want to fragment) - EXPECT_FALSE( - buf_.Produce(kNow, - /*max_size=*/FCFSSendQueue::kMinimumFragmentedPayload - 1) - .has_value()); - - // Minimum fragment - absl::optional chunk_one = - buf_.Produce(kNow, - /*max_size=*/FCFSSendQueue::kMinimumFragmentedPayload); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.stream_id, kStreamID); - EXPECT_EQ(chunk_one->data.ppid, kPPID); - - // There is only one byte remaining - it can be fetched as it doesn't require - // additional fragmentation. - absl::optional chunk_two = - buf_.Produce(kNow, /*max_size=*/1); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_EQ(chunk_two->data.stream_id, kStreamID); - EXPECT_EQ(chunk_two->data.ppid, kPPID); - - EXPECT_TRUE(buf_.IsEmpty()); -} - -TEST_F(FCFSSendQueueTest, DefaultsToOrderedSend) { - std::vector payload(20); - - // Default is ordered - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - absl::optional chunk_one = - buf_.Produce(kNow, /*max_size=*/100); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_FALSE(chunk_one->data.is_unordered); - - // Explicitly unordered. - SendOptions opts; - opts.unordered = IsUnordered(true); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload), opts); - absl::optional chunk_two = - buf_.Produce(kNow, /*max_size=*/100); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_TRUE(chunk_two->data.is_unordered); -} - -TEST_F(FCFSSendQueueTest, ProduceWithLifetimeExpiry) { - std::vector payload(20); - - // Default is no expiry - TimeMs now = kNow; - buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload)); - now += DurationMs(1000000); - ASSERT_TRUE(buf_.Produce(now, 100)); - - SendOptions expires_2_seconds; - expires_2_seconds.lifetime = DurationMs(2000); - - // Add and consume within lifetime - buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); - now += DurationMs(2000); - ASSERT_TRUE(buf_.Produce(now, 100)); - - // Add and consume just outside lifetime - buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); - now += DurationMs(2001); - ASSERT_FALSE(buf_.Produce(now, 100)); - - // A long time after expiry - buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); - now += DurationMs(1000000); - ASSERT_FALSE(buf_.Produce(now, 100)); - - // Expire one message, but produce the second that is not expired. 
- buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); - - SendOptions expires_4_seconds; - expires_4_seconds.lifetime = DurationMs(4000); - - buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_4_seconds); - now += DurationMs(2001); - - ASSERT_TRUE(buf_.Produce(now, 100)); - ASSERT_FALSE(buf_.Produce(now, 100)); -} - -TEST_F(FCFSSendQueueTest, DiscardPartialPackets) { - std::vector payload(120); - - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - buf_.Add(kNow, DcSctpMessage(StreamID(2), PPID(54), payload)); - - absl::optional chunk_one = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_FALSE(chunk_one->data.is_end); - EXPECT_EQ(chunk_one->data.stream_id, kStreamID); - buf_.Discard(IsUnordered(false), chunk_one->data.stream_id, - chunk_one->data.message_id); - - absl::optional chunk_two = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_FALSE(chunk_two->data.is_end); - EXPECT_EQ(chunk_two->data.stream_id, StreamID(2)); - - absl::optional chunk_three = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_three.has_value()); - EXPECT_TRUE(chunk_three->data.is_end); - EXPECT_EQ(chunk_three->data.stream_id, StreamID(2)); - ASSERT_FALSE(buf_.Produce(kNow, 100)); - - // Calling it again shouldn't cause issues. - buf_.Discard(IsUnordered(false), chunk_one->data.stream_id, - chunk_one->data.message_id); - ASSERT_FALSE(buf_.Produce(kNow, 100)); -} - -TEST_F(FCFSSendQueueTest, PrepareResetStreamsDiscardsStream) { - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, {1, 2, 3})); - buf_.Add(kNow, DcSctpMessage(StreamID(2), PPID(54), {1, 2, 3, 4, 5})); - EXPECT_EQ(buf_.total_bytes(), 8u); - - buf_.PrepareResetStreams(std::vector({StreamID(1)})); - EXPECT_EQ(buf_.total_bytes(), 5u); - buf_.CommitResetStreams(); - buf_.PrepareResetStreams(std::vector({StreamID(2)})); - EXPECT_EQ(buf_.total_bytes(), 0u); -} - -TEST_F(FCFSSendQueueTest, PrepareResetStreamsNotPartialPackets) { - std::vector payload(120); - - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - absl::optional chunk_one = buf_.Produce(kNow, 50); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.stream_id, kStreamID); - EXPECT_EQ(buf_.total_bytes(), 2 * payload.size() - 50); - - StreamID stream_ids[] = {StreamID(1)}; - buf_.PrepareResetStreams(stream_ids); - EXPECT_EQ(buf_.total_bytes(), payload.size() - 50); -} - -TEST_F(FCFSSendQueueTest, EnqueuedItemsArePausedDuringStreamReset) { - std::vector payload(50); - - buf_.PrepareResetStreams(std::vector({StreamID(1)})); - EXPECT_EQ(buf_.total_bytes(), 0u); - - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - EXPECT_EQ(buf_.total_bytes(), payload.size()); - - EXPECT_FALSE(buf_.Produce(kNow, 100).has_value()); - buf_.CommitResetStreams(); - EXPECT_EQ(buf_.total_bytes(), payload.size()); - - absl::optional chunk_one = buf_.Produce(kNow, 50); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.stream_id, kStreamID); - EXPECT_EQ(buf_.total_bytes(), 0u); -} - -TEST_F(FCFSSendQueueTest, CommittingResetsSSN) { - std::vector payload(50); - - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - absl::optional chunk_one = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.ssn, SSN(0)); - - absl::optional chunk_two = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_EQ(chunk_two->data.ssn, 
SSN(1)); - - StreamID stream_ids[] = {StreamID(1)}; - buf_.PrepareResetStreams(stream_ids); - - // Buffered - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - EXPECT_TRUE(buf_.CanResetStreams()); - buf_.CommitResetStreams(); - - absl::optional chunk_three = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_three.has_value()); - EXPECT_EQ(chunk_three->data.ssn, SSN(0)); -} - -TEST_F(FCFSSendQueueTest, RollBackResumesSSN) { - std::vector payload(50); - - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - absl::optional chunk_one = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_one.has_value()); - EXPECT_EQ(chunk_one->data.ssn, SSN(0)); - - absl::optional chunk_two = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_two.has_value()); - EXPECT_EQ(chunk_two->data.ssn, SSN(1)); - - buf_.PrepareResetStreams(std::vector({StreamID(1)})); - - // Buffered - buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); - - EXPECT_TRUE(buf_.CanResetStreams()); - buf_.RollbackResetStreams(); - - absl::optional chunk_three = buf_.Produce(kNow, 100); - ASSERT_TRUE(chunk_three.has_value()); - EXPECT_EQ(chunk_three->data.ssn, SSN(2)); -} - -} // namespace -} // namespace dcsctp diff --git a/net/dcsctp/tx/mock_send_queue.h b/net/dcsctp/tx/mock_send_queue.h index 54f5fd275d..0cf64583ae 100644 --- a/net/dcsctp/tx/mock_send_queue.h +++ b/net/dcsctp/tx/mock_send_queue.h @@ -31,7 +31,7 @@ class MockSendQueue : public SendQueue { Produce, (TimeMs now, size_t max_size), (override)); - MOCK_METHOD(void, + MOCK_METHOD(bool, Discard, (IsUnordered unordered, StreamID stream_id, MID message_id), (override)); @@ -43,6 +43,16 @@ class MockSendQueue : public SendQueue { MOCK_METHOD(void, CommitResetStreams, (), (override)); MOCK_METHOD(void, RollbackResetStreams, (), (override)); MOCK_METHOD(void, Reset, (), (override)); + MOCK_METHOD(size_t, buffered_amount, (StreamID stream_id), (const, override)); + MOCK_METHOD(size_t, total_buffered_amount, (), (const, override)); + MOCK_METHOD(size_t, + buffered_amount_low_threshold, + (StreamID stream_id), + (const, override)); + MOCK_METHOD(void, + SetBufferedAmountLowThreshold, + (StreamID stream_id, size_t bytes), + (override)); }; } // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_queue.cc b/net/dcsctp/tx/retransmission_queue.cc index 704e6ab16b..51bb65a30c 100644 --- a/net/dcsctp/tx/retransmission_queue.cc +++ b/net/dcsctp/tx/retransmission_queue.cc @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #include "net/dcsctp/public/types.h" #include "net/dcsctp/timer/timer.h" #include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" @@ -55,7 +57,6 @@ RetransmissionQueue::RetransmissionQueue( size_t a_rwnd, SendQueue& send_queue, std::function on_new_rtt, - std::function on_send_queue_empty, std::function on_clear_retransmission_counter, Timer& t3_rtx, const DcSctpOptions& options, @@ -68,7 +69,6 @@ RetransmissionQueue::RetransmissionQueue( ? 
IDataChunk::kHeaderSize : DataChunk::kHeaderSize), on_new_rtt_(std::move(on_new_rtt)), - on_send_queue_empty_(std::move(on_send_queue_empty)), on_clear_retransmission_counter_( std::move(on_clear_retransmission_counter)), t3_rtx_(t3_rtx), @@ -83,6 +83,24 @@ RetransmissionQueue::RetransmissionQueue( last_cumulative_tsn_ack_(tsn_unwrapper_.Unwrap(TSN(*initial_tsn - 1))), send_queue_(send_queue) {} +bool RetransmissionQueue::IsConsistent() const { + size_t actual_outstanding_bytes = 0; + + std::set actual_to_be_retransmitted; + for (const auto& elem : outstanding_data_) { + if (elem.second.is_outstanding()) { + actual_outstanding_bytes += GetSerializedChunkSize(elem.second.data()); + } + + if (elem.second.should_be_retransmitted()) { + actual_to_be_retransmitted.insert(elem.first); + } + } + + return actual_outstanding_bytes == outstanding_bytes_ && + actual_to_be_retransmitted == to_be_retransmitted_; +} + // Returns how large a chunk will be, serialized, carrying the data size_t RetransmissionQueue::GetSerializedChunkSize(const Data& data) const { return RoundUpTo4(data_chunk_header_size_ + data.size()); @@ -95,6 +113,11 @@ void RetransmissionQueue::RemoveAcked(UnwrappedTSN cumulative_tsn_ack, for (auto it = outstanding_data_.begin(); it != first_unacked; ++it) { ack_info.bytes_acked_by_cumulative_tsn_ack += it->second.data().size(); ack_info.acked_tsns.push_back(it->first.Wrap()); + if (it->second.is_outstanding()) { + outstanding_bytes_ -= GetSerializedChunkSize(it->second.data()); + } else if (it->second.should_be_retransmitted()) { + to_be_retransmitted_.erase(it->first); + } } outstanding_data_.erase(outstanding_data_.begin(), first_unacked); @@ -115,10 +138,16 @@ void RetransmissionQueue::AckGapBlocks( auto end = outstanding_data_.upper_bound( UnwrappedTSN::AddTo(cumulative_tsn_ack, block.end)); for (auto iter = start; iter != end; ++iter) { - if (iter->second.state() != State::kAcked) { + if (!iter->second.is_acked()) { ack_info.bytes_acked_by_new_gap_ack_blocks += iter->second.data().size(); - iter->second.SetState(State::kAcked); + if (iter->second.is_outstanding()) { + outstanding_bytes_ -= GetSerializedChunkSize(iter->second.data()); + } + if (iter->second.should_be_retransmitted()) { + to_be_retransmitted_.erase(iter->first); + } + iter->second.Ack(); ack_info.highest_tsn_acked = std::max(ack_info.highest_tsn_acked, iter->first); ack_info.acked_tsns.push_back(iter->first.Wrap()); @@ -159,13 +188,8 @@ void RetransmissionQueue::NackBetweenAckBlocks( for (auto iter = outstanding_data_.upper_bound(prev_block_last_acked); iter != outstanding_data_.lower_bound(cur_block_first_acked); ++iter) { if (iter->first <= max_tsn_to_nack) { - iter->second.Nack(); - - if (iter->second.state() == State::kToBeRetransmitted) { - ack_info.has_packet_loss = true; - RTC_DLOG(LS_VERBOSE) << log_prefix_ << *iter->first.Wrap() - << " marked for retransmission"; - } + ack_info.has_packet_loss = + NackItem(iter->first, iter->second, /*retransmit_now=*/false); } } prev_block_last_acked = UnwrappedTSN::AddTo(cumulative_tsn_ack, block.end); @@ -367,7 +391,6 @@ bool RetransmissionQueue::HandleSack(TimeMs now, const SackChunk& sack) { // NACK and possibly mark for retransmit chunks that weren't acked. NackBetweenAckBlocks(cumulative_tsn_ack, sack.gap_ack_blocks(), ack_info); - RecalculateOutstandingBytes(); // Update of outstanding_data_ is now done. Congestion control remains. 
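  // `outstanding_bytes_` and `to_be_retransmitted_` are now maintained
  // incrementally in RemoveAcked/AckGapBlocks/NackItem rather than recomputed;
  // the RTC_DCHECK(IsConsistent()) at the end of HandleSack verifies that they
  // still match the per-chunk state.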
UpdateReceiverWindow(sack.a_rwnd()); @@ -413,6 +436,7 @@ bool RetransmissionQueue::HandleSack(TimeMs now, const SackChunk& sack) { last_cumulative_tsn_ack_ = cumulative_tsn_ack; StartT3RtxTimerIfOutstandingData(); + RTC_DCHECK(IsConsistent()); return true; } @@ -440,19 +464,6 @@ void RetransmissionQueue::UpdateRTT(TimeMs now, } } -void RetransmissionQueue::RecalculateOutstandingBytes() { - outstanding_bytes_ = absl::c_accumulate( - outstanding_data_, 0, - [&](size_t r, const std::pair& d) { - // Packets that have been ACKED or NACKED are not outstanding, as they - // are received. And packets that are marked for retransmission or - // abandoned are lost, and not outstanding. - return r + (d.second.state() == State::kInFlight - ? GetSerializedChunkSize(d.second.data()) - : 0); - }); -} - void RetransmissionQueue::HandleT3RtxTimerExpiry() { size_t old_cwnd = cwnd_; size_t old_outstanding_bytes = outstanding_bytes_; @@ -480,21 +491,14 @@ void RetransmissionQueue::HandleT3RtxTimerExpiry() { // T3-rtx timer expired but did not fit in one MTU (rule E3 above) should be // marked for retransmission and sent as soon as cwnd allows (normally, when a // SACK arrives)." - int count = 0; for (auto& elem : outstanding_data_) { UnwrappedTSN tsn = elem.first; TxData& item = elem.second; - if (item.state() == State::kInFlight || item.state() == State::kNacked) { - RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Chunk " << *tsn.Wrap() - << " will be retransmitted due to T3-RTX"; - item.SetState(State::kToBeRetransmitted); - ++count; + if (!item.is_acked()) { + NackItem(tsn, item, /*retransmit_now=*/true); } } - // Marking some packets as retransmitted changes outstanding bytes. - RecalculateOutstandingBytes(); - // https://tools.ietf.org/html/rfc4960#section-6.3.3 // "Start the retransmission timer T3-rtx on the destination address // to which the retransmission is sent, if rule R1 above indicates to do so." @@ -503,34 +507,64 @@ void RetransmissionQueue::HandleT3RtxTimerExpiry() { RTC_DLOG(LS_INFO) << log_prefix_ << "t3-rtx expired. 
new cwnd=" << cwnd_ << " (" << old_cwnd << "), ssthresh=" << ssthresh_ - << ", rtx-packets=" << count << ", outstanding_bytes " - << outstanding_bytes_ << " (" << old_outstanding_bytes - << ")"; + << ", outstanding_bytes " << outstanding_bytes_ << " (" + << old_outstanding_bytes << ")"; + RTC_DCHECK(IsConsistent()); +} + +bool RetransmissionQueue::NackItem(UnwrappedTSN tsn, + TxData& item, + bool retransmit_now) { + if (item.is_outstanding()) { + outstanding_bytes_ -= GetSerializedChunkSize(item.data()); + } + + switch (item.Nack(retransmit_now)) { + case TxData::NackAction::kNothing: + return false; + case TxData::NackAction::kRetransmit: + to_be_retransmitted_.insert(tsn); + RTC_DLOG(LS_VERBOSE) << log_prefix_ << *tsn.Wrap() + << " marked for retransmission"; + break; + case TxData::NackAction::kAbandon: + AbandonAllFor(item); + break; + } + return true; } std::vector> RetransmissionQueue::GetChunksToBeRetransmitted(size_t max_size) { std::vector> result; - for (auto& elem : outstanding_data_) { - UnwrappedTSN tsn = elem.first; - TxData& item = elem.second; + + for (auto it = to_be_retransmitted_.begin(); + it != to_be_retransmitted_.end();) { + UnwrappedTSN tsn = *it; + auto elem = outstanding_data_.find(tsn); + RTC_DCHECK(elem != outstanding_data_.end()); + TxData& item = elem->second; + RTC_DCHECK(item.should_be_retransmitted()); + RTC_DCHECK(!item.is_outstanding()); + RTC_DCHECK(!item.is_abandoned()); + RTC_DCHECK(!item.is_acked()); size_t serialized_size = GetSerializedChunkSize(item.data()); - if (item.state() == State::kToBeRetransmitted && - serialized_size <= max_size) { + if (serialized_size <= max_size) { item.Retransmit(); result.emplace_back(tsn.Wrap(), item.data().Clone()); max_size -= serialized_size; + outstanding_bytes_ += serialized_size; + it = to_be_retransmitted_.erase(it); + } else { + ++it; } // No point in continuing if the packet is full. if (max_size <= data_chunk_header_size_) { break; } } - // As some chunks may have switched state, that needs to be reflected here. - if (!result.empty()) { - RecalculateOutstandingBytes(); - } + return result; } @@ -580,23 +614,40 @@ std::vector> RetransmissionQueue::GetChunksToSend( absl::optional chunk_opt = send_queue_.Produce(now, max_bytes - data_chunk_header_size_); if (!chunk_opt.has_value()) { - on_send_queue_empty_(); break; } UnwrappedTSN tsn = next_tsn_; next_tsn_.Increment(); - to_be_sent.emplace_back(tsn.Wrap(), chunk_opt->data.Clone()); // All chunks are always padded to be even divisible by 4. size_t chunk_size = GetSerializedChunkSize(chunk_opt->data); max_bytes -= chunk_size; outstanding_bytes_ += chunk_size; rwnd_ -= chunk_size; - outstanding_data_.emplace( - tsn, RetransmissionQueue::TxData(std::move(chunk_opt->data), - chunk_opt->max_retransmissions, now, - chunk_opt->expires_at)); + auto item_it = + outstanding_data_ + .emplace(tsn, + RetransmissionQueue::TxData( + chunk_opt->data.Clone(), + partial_reliability_ ? chunk_opt->max_retransmissions + : absl::nullopt, + now, + partial_reliability_ ? chunk_opt->expires_at + : absl::nullopt)) + .first; + + if (item_it->second.has_expired(now)) { + // No need to send it - it was expired when it was in the send + // queue. 
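+ // The chunk still consumes a TSN and is abandoned right away, so the
+ // FORWARD-TSN machinery can tell the peer to skip past it.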
+ RTC_DLOG(LS_VERBOSE) + << log_prefix_ << "Marking freshly produced chunk " + << *item_it->first.Wrap() << " and message " + << *item_it->second.data().message_id << " as expired"; + AbandonAllFor(item_it->second); + } else { + to_be_sent.emplace_back(tsn.Wrap(), std::move(chunk_opt->data)); + } } } @@ -624,6 +675,7 @@ std::vector> RetransmissionQueue::GetChunksToSend( << " (" << old_outstanding_bytes << "), cwnd=" << cwnd_ << ", rwnd=" << rwnd_ << " (" << old_rwnd << ")"; } + RTC_DCHECK(IsConsistent()); return to_be_sent; } @@ -632,7 +684,20 @@ RetransmissionQueue::GetChunkStatesForTesting() const { std::vector> states; states.emplace_back(last_cumulative_tsn_ack_.Wrap(), State::kAcked); for (const auto& elem : outstanding_data_) { - states.emplace_back(elem.first.Wrap(), elem.second.state()); + State state; + if (elem.second.is_abandoned()) { + state = State::kAbandoned; + } else if (elem.second.should_be_retransmitted()) { + state = State::kToBeRetransmitted; + } else if (elem.second.is_acked()) { + state = State::kAcked; + } else if (elem.second.is_outstanding()) { + state = State::kInFlight; + } else { + state = State::kNacked; + } + + states.emplace_back(elem.first.Wrap(), state); } return states; } @@ -641,76 +706,124 @@ bool RetransmissionQueue::ShouldSendForwardTsn(TimeMs now) { if (!partial_reliability_) { return false; } - ExpireChunks(now); + ExpireOutstandingChunks(now); if (!outstanding_data_.empty()) { auto it = outstanding_data_.begin(); return it->first == last_cumulative_tsn_ack_.next_value() && - it->second.state() == State::kAbandoned; + it->second.is_abandoned(); } + RTC_DCHECK(IsConsistent()); return false; } -void RetransmissionQueue::TxData::Nack() { +void RetransmissionQueue::TxData::Ack() { + ack_state_ = AckState::kAcked; + should_be_retransmitted_ = false; +} + +RetransmissionQueue::TxData::NackAction RetransmissionQueue::TxData::Nack( + bool retransmit_now) { + ack_state_ = AckState::kNacked; ++nack_count_; - if (nack_count_ >= kNumberOfNacksForRetransmission) { - state_ = State::kToBeRetransmitted; - } else { - state_ = State::kNacked; + if ((retransmit_now || nack_count_ >= kNumberOfNacksForRetransmission) && + !is_abandoned_) { + // Nacked enough times - it's considered lost. + if (!max_retransmissions_.has_value() || + num_retransmissions_ < max_retransmissions_) { + should_be_retransmitted_ = true; + return NackAction::kRetransmit; + } + Abandon(); + return NackAction::kAbandon; } + return NackAction::kNothing; } void RetransmissionQueue::TxData::Retransmit() { - state_ = State::kInFlight; + ack_state_ = AckState::kUnacked; + should_be_retransmitted_ = false; + nack_count_ = 0; ++num_retransmissions_; } +void RetransmissionQueue::TxData::Abandon() { + is_abandoned_ = true; + should_be_retransmitted_ = false; +} + bool RetransmissionQueue::TxData::has_expired(TimeMs now) const { - if (state_ != State::kAcked && state_ != State::kAbandoned) { - if (max_retransmissions_.has_value() && - num_retransmissions_ >= *max_retransmissions_) { - return true; - } else if (expires_at_.has_value() && *expires_at_ <= now) { - return true; - } - } - return false; + return expires_at_.has_value() && *expires_at_ <= now; } -void RetransmissionQueue::ExpireChunks(TimeMs now) { +void RetransmissionQueue::ExpireOutstandingChunks(TimeMs now) { for (const auto& elem : outstanding_data_) { UnwrappedTSN tsn = elem.first; const TxData& item = elem.second; - // Chunks that are in-flight (possibly lost?), nacked or to be retransmitted - // can be expired easily. 
There is always a risk that a message is expired - // that was already received by the peer, but for which there haven't been - // a SACK received. But that's acceptable, and handled. - if (item.has_expired(now)) { - RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Marking chunk " << *tsn.Wrap() - << " and message " << *item.data().message_id - << " as expired"; - ExpireAllFor(item); + // Chunks that are nacked can be expired. Care should be taken not to expire + // unacked (in-flight) chunks as they might have been received, but the SACK + // is either delayed or in-flight and may be received later. + if (item.is_abandoned()) { + // Already abandoned. + } else if (item.is_nacked() && item.has_expired(now)) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Marking nacked chunk " + << *tsn.Wrap() << " and message " + << *item.data().message_id << " as expired"; + AbandonAllFor(item); + } else { + // A non-expired chunk. No need to iterate any further. + break; } } } -void RetransmissionQueue::ExpireAllFor( +void RetransmissionQueue::AbandonAllFor( const RetransmissionQueue::TxData& item) { // Erase all remaining chunks from the producer, if any. - send_queue_.Discard(item.data().is_unordered, item.data().stream_id, - item.data().message_id); + if (send_queue_.Discard(item.data().is_unordered, item.data().stream_id, + item.data().message_id)) { + // There were remaining chunks to be produced for this message. Since the + // receiver may have already received all chunks (up till now) for this + // message, we can't just FORWARD-TSN to the last fragment in this + // (abandoned) message and start sending a new message, as the receiver will + // then see a new message before the end of the previous one was seen (or + // skipped over). So create a new fragment, representing the end, that the + // received will never see as it is abandoned immediately and used as cum + // TSN in the sent FORWARD-TSN. + UnwrappedTSN tsn = next_tsn_; + next_tsn_.Increment(); + Data message_end(item.data().stream_id, item.data().ssn, + item.data().message_id, item.data().fsn, item.data().ppid, + std::vector(), Data::IsBeginning(false), + Data::IsEnd(true), item.data().is_unordered); + TxData& added_item = + outstanding_data_ + .emplace(tsn, RetransmissionQueue::TxData(std::move(message_end), + absl::nullopt, TimeMs(0), + absl::nullopt)) + .first->second; + // The added chunk shouldn't be included in `outstanding_bytes`, so set it + // as acked. 
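+ // The loop below will then mark it as abandoned together with the rest of
+ // the message; since it is never counted as outstanding, IsConsistent()
+ // still holds.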
+ added_item.Ack(); + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "Adding unsent end placeholder for message at tsn=" + << *tsn.Wrap(); + } for (auto& elem : outstanding_data_) { UnwrappedTSN tsn = elem.first; TxData& other = elem.second; - if (other.state() != State::kAbandoned && + if (!other.is_abandoned() && other.data().stream_id == item.data().stream_id && other.data().is_unordered == item.data().is_unordered && other.data().message_id == item.data().message_id) { RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Marking chunk " << *tsn.Wrap() << " as abandoned"; - other.SetState(State::kAbandoned); + if (other.should_be_retransmitted()) { + to_be_retransmitted_.erase(tsn); + } + other.Abandon(); } } } @@ -724,8 +837,7 @@ ForwardTsnChunk RetransmissionQueue::CreateForwardTsn() const { UnwrappedTSN tsn = elem.first; const TxData& item = elem.second; - if ((tsn != new_cumulative_ack.next_value()) || - item.state() != State::kAbandoned) { + if ((tsn != new_cumulative_ack.next_value()) || !item.is_abandoned()) { break; } new_cumulative_ack = tsn; @@ -752,8 +864,7 @@ IForwardTsnChunk RetransmissionQueue::CreateIForwardTsn() const { UnwrappedTSN tsn = elem.first; const TxData& item = elem.second; - if ((tsn != new_cumulative_ack.next_value()) || - item.state() != State::kAbandoned) { + if ((tsn != new_cumulative_ack.next_value()) || !item.is_abandoned()) { break; } new_cumulative_ack = tsn; diff --git a/net/dcsctp/tx/retransmission_queue.h b/net/dcsctp/tx/retransmission_queue.h index c2599a438d..c5a6a04db8 100644 --- a/net/dcsctp/tx/retransmission_queue.h +++ b/net/dcsctp/tx/retransmission_queue.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -43,7 +44,7 @@ namespace dcsctp { class RetransmissionQueue { public: static constexpr size_t kMinimumFragmentedPayload = 10; - // State for DATA chunks (message fragments) in the queue. + // State for DATA chunks (message fragments) in the queue - used in tests. enum class State { // The chunk has been sent but not received yet (from the sender's point of // view, as no SACK has been received yet that reference this chunk). @@ -72,7 +73,6 @@ class RetransmissionQueue { size_t a_rwnd, SendQueue& send_queue, std::function on_new_rtt, - std::function on_send_queue_empty, std::function on_clear_retransmission_counter, Timer& t3_rtx, const DcSctpOptions& options, @@ -143,6 +143,12 @@ class RetransmissionQueue { // its associated metadata. class TxData { public: + enum class NackAction { + kNothing, + kRetransmit, + kAbandon, + }; + explicit TxData(Data data, absl::optional max_retransmissions, TimeMs time_sent, @@ -154,24 +160,52 @@ class RetransmissionQueue { TimeMs time_sent() const { return time_sent_; } - State state() const { return state_; } - void SetState(State state) { state_ = state; } - const Data& data() const { return data_; } - // Nacks an item. If it has been nacked enough times, it will be marked for - // retransmission. - void Nack(); + // Acks an item. + void Ack(); + + // Nacks an item. If it has been nacked enough times, or if `retransmit_now` + // is set, it might be marked for retransmission. If the item has reached + // its max retransmission value, it will instead be abandoned. The action + // performed is indicated as return value. + NackAction Nack(bool retransmit_now = false); + + // Prepares the item to be retransmitted. Sets it as outstanding and + // clears all nack counters. void Retransmit(); - bool has_been_retransmitted() { return num_retransmissions_ > 0; } + // Marks this item as abandoned. 
+ void Abandon(); + + bool is_outstanding() const { return ack_state_ == AckState::kUnacked; } + bool is_acked() const { return ack_state_ == AckState::kAcked; } + bool is_nacked() const { return ack_state_ == AckState::kNacked; } + bool is_abandoned() const { return is_abandoned_; } + + // Indicates if this chunk should be retransmitted. + bool should_be_retransmitted() const { return should_be_retransmitted_; } + // Indicates if this chunk has ever been retransmitted. + bool has_been_retransmitted() const { return num_retransmissions_ > 0; } // Given the current time, and the current state of this DATA chunk, it will // indicate if it has expired (SCTP Partial Reliability Extension). bool has_expired(TimeMs now) const; private: - State state_ = State::kInFlight; + enum class AckState { + kUnacked, + kAcked, + kNacked, + }; + // Indicates the state of this chunk: whether it is in flight (Unacked), has + // been received (Acked) or is presumed lost (Nacked). + AckState ack_state_ = AckState::kUnacked; + // Indicates if this chunk has been abandoned, which is a terminal state. + bool is_abandoned_ = false; + // Indicates if this chunk should be retransmitted. + bool should_be_retransmitted_ = false; + + // The number of times the DATA chunk has been nacked (by having received a // SACK which doesn't include it). Will be cleared on retransmissions. size_t nack_count_ = 0; @@ -214,6 +248,8 @@ class RetransmissionQueue { UnwrappedTSN highest_tsn_acked; }; + bool IsConsistent() const; + // Returns how large a chunk will be, serialized, carrying the data size_t GetSerializedChunkSize(const Data& data) const; @@ -236,6 +272,14 @@ class RetransmissionQueue { // by setting `bytes_acked_by_cumulative_tsn_ack` and `acked_tsns`. void RemoveAcked(UnwrappedTSN cumulative_tsn_ack, AckInfo& ack_info); + // Helper method to nack an item and perform the correct operations given the + // action indicated when nacking an item (e.g. retransmitting or abandoning). + // The return value indicates if an action was performed, meaning that packet + // loss was detected and acted upon. + bool NackItem(UnwrappedTSN cumulative_tsn_ack, + TxData& item, + bool retransmit_now); + + // Will mark the chunks covered by the `gap_ack_blocks` from an incoming SACK // as "acked" and update `ack_info` by adding new TSNs to `added_tsns`. void AckGapBlocks(UnwrappedTSN cumulative_tsn_ack, @@ -270,8 +314,6 @@ class RetransmissionQueue { // Update the congestion control algorithm, given as packet loss has been // detected, as reported in an incoming SACK chunk. void HandlePacketLoss(UnwrappedTSN highest_tsn_acked); - // Recalculate the number of in-flight payload bytes. - void RecalculateOutstandingBytes(); // Update the view of the receiver window size. void UpdateReceiverWindow(uint32_t a_rwnd); // Given `max_size` of space left in a packet, which chunks can be added to @@ -281,13 +323,13 @@ class RetransmissionQueue { // is running. void StartT3RtxTimerIfOutstandingData(); - // Given the current time `now_ms`, expire chunks that have a limited - // lifetime. - void ExpireChunks(TimeMs now); - // Given that a message fragment, `item` has expired, expire all other - // fragments that share the same message - even never-before-sent fragments - // that are still in the SendQueue. - void ExpireAllFor(const RetransmissionQueue::TxData& item); + // Given the current time `now`, expire and abandon outstanding (sent at + // least once) chunks that have a limited lifetime.
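The new per-chunk bookkeeping above splits the old single State into an ack state (unacked, acked, nacked) plus separate abandoned and should-be-retransmitted flags. A minimal, self-contained sketch of the resulting nack/retransmit/abandon decision is shown below; it is not part of the patch, it only condenses the TxData::Nack() logic added earlier in this change, with the constant 3 standing in for kNumberOfNacksForRetransmission and std::optional standing in for absl::optional.

#include <cstddef>
#include <optional>

enum class NackAction { kNothing, kRetransmit, kAbandon };

struct ChunkSketch {
  size_t nack_count = 0;
  size_t num_retransmissions = 0;
  std::optional<size_t> max_retransmissions;  // e.g. 0 means "never retransmit"
  bool abandoned = false;

  // Three SACKs that miss this chunk (or an explicit request, as on T3-rtx
  // expiry) make it eligible for retransmission, unless its retransmission
  // budget is exhausted, in which case the whole message is abandoned.
  NackAction Nack(bool retransmit_now) {
    ++nack_count;
    if ((retransmit_now || nack_count >= 3) && !abandoned) {
      if (!max_retransmissions.has_value() ||
          num_retransmissions < *max_retransmissions) {
        return NackAction::kRetransmit;
      }
      abandoned = true;
      return NackAction::kAbandon;
    }
    return NackAction::kNothing;
  }
};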
+ void ExpireOutstandingChunks(TimeMs now); + // Given that a message fragment, `item` has been abandoned, abandon all other + // fragments that share the same message - both never-before-sent fragments + // that are still in the SendQueue and outstanding chunks. + void AbandonAllFor(const RetransmissionQueue::TxData& item); // Returns the current congestion control algorithm phase. CongestionAlgorithmPhase phase() const { @@ -304,8 +346,6 @@ class RetransmissionQueue { const size_t data_chunk_header_size_; // Called when a new RTT measurement has been done const std::function on_new_rtt_; - // Called when the send queue is empty. - const std::function on_send_queue_empty_; // Called when a SACK has been seen that cleared the retransmission counter. const std::function on_clear_retransmission_counter_; // The retransmission counter. @@ -337,7 +377,9 @@ class RetransmissionQueue { // cumulative acked. Note that it also contains chunks that have been acked in // gap ack blocks. std::map outstanding_data_; - // The sum of the message bytes of the send_queue_ + // Data chunks that are to be retransmitted. + std::set to_be_retransmitted_; + // The number of bytes that are in-flight (sent but not yet acked or nacked). size_t outstanding_bytes_ = 0; }; } // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_queue_test.cc b/net/dcsctp/tx/retransmission_queue_test.cc index f36d91eb7b..4aa76d66e5 100644 --- a/net/dcsctp/tx/retransmission_queue_test.cc +++ b/net/dcsctp/tx/retransmission_queue_test.cc @@ -42,6 +42,7 @@ using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::NiceMock; using ::testing::Pair; +using ::testing::Return; using ::testing::SizeIs; using ::testing::UnorderedElementsAre; @@ -79,7 +80,6 @@ class RetransmissionQueueTest : public testing::Test { options.mtu = kMaxMtu; return RetransmissionQueue( "", TSN(10), kArwnd, producer_, on_rtt_.AsStdFunction(), - on_outgoing_message_buffer_empty_.AsStdFunction(), on_clear_retransmission_counter_.AsStdFunction(), *timer_, options, supports_partial_reliability, use_message_interleaving); } @@ -89,7 +89,6 @@ class RetransmissionQueueTest : public testing::Test { FakeTimeoutManager timeout_manager_; TimerManager timer_manager_; NiceMock> on_rtt_; - NiceMock> on_outgoing_message_buffer_empty_; NiceMock> on_clear_retransmission_counter_; NiceMock producer_; std::unique_ptr timer_; @@ -379,14 +378,14 @@ TEST_F(RetransmissionQueueTest, LimitsRetransmissionsAsUdp) { Pair(TSN(10), State::kInFlight))); // Will force chunks to be retransmitted + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(1); + queue.HandleT3RtxTimerExpiry(); EXPECT_THAT(queue.GetChunkStatesForTesting(), ElementsAre(Pair(TSN(9), State::kAcked), // - Pair(TSN(10), State::kToBeRetransmitted))); - - EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) - .Times(1); + Pair(TSN(10), State::kAbandoned))); EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); @@ -439,9 +438,9 @@ TEST_F(RetransmissionQueueTest, LimitsRetransmissionsToThreeSends) { EXPECT_THAT(queue.GetChunksToSend(now_, 1000), SizeIs(1)); // Retransmission 4 - not allowed. 
- queue.HandleT3RtxTimerExpiry(); EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) .Times(1); + queue.HandleT3RtxTimerExpiry(); EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); EXPECT_THAT(queue.GetChunksToSend(now_, 1000), IsEmpty()); @@ -522,22 +521,74 @@ TEST_F(RetransmissionQueueTest, ProducesValidForwardTsn) { // Chunk 10 is acked, but the remaining are lost queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(true)); + queue.HandleT3RtxTimerExpiry(); + // NOTE: The TSN=13 represents the end fragment. EXPECT_THAT(queue.GetChunkStatesForTesting(), - ElementsAre(Pair(TSN(10), State::kAcked), // - Pair(TSN(11), State::kToBeRetransmitted), // - Pair(TSN(12), State::kToBeRetransmitted))); + ElementsAre(Pair(TSN(10), State::kAcked), // + Pair(TSN(11), State::kAbandoned), // + Pair(TSN(12), State::kAbandoned), // + Pair(TSN(13), State::kAbandoned))); - EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) - .Times(1); EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + ForwardTsnChunk forward_tsn = queue.CreateForwardTsn(); + EXPECT_EQ(forward_tsn.new_cumulative_tsn(), TSN(13)); + EXPECT_THAT(forward_tsn.skipped_streams(), + UnorderedElementsAre( + ForwardTsnChunk::SkippedStream(StreamID(1), SSN(42)))); +} + +TEST_F(RetransmissionQueueTest, ProducesValidForwardTsnWhenFullySent) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({9, 10, 11, 12}, "E")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + // Send and ack first chunk (TSN 10) + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight))); + + // Chunk 10 is acked, but the remaining are lost + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(false)); + + queue.HandleT3RtxTimerExpiry(); + EXPECT_THAT(queue.GetChunkStatesForTesting(), ElementsAre(Pair(TSN(10), State::kAcked), // Pair(TSN(11), State::kAbandoned), // Pair(TSN(12), State::kAbandoned))); + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + ForwardTsnChunk forward_tsn = queue.CreateForwardTsn(); EXPECT_EQ(forward_tsn.new_cumulative_tsn(), TSN(12)); EXPECT_THAT(forward_tsn.skipped_streams(), @@ -599,34 +650,61 @@ TEST_F(RetransmissionQueueTest, ProducesValidIForwardTsn) { Pair(TSN(12), State::kNacked), // Pair(TSN(13), State::kAcked))); - queue.HandleT3RtxTimerExpiry(); - - EXPECT_THAT(queue.GetChunkStatesForTesting(), - ElementsAre(Pair(TSN(9), State::kAcked), // - Pair(TSN(10), State::kToBeRetransmitted), // - Pair(TSN(11), State::kToBeRetransmitted), // - Pair(TSN(12), State::kToBeRetransmitted), // - Pair(TSN(13), State::kAcked))); - 
EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) - .Times(1); + .WillOnce(Return(true)); EXPECT_CALL(producer_, Discard(IsUnordered(true), StreamID(2), MID(42))) - .Times(1); + .WillOnce(Return(true)); EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(3), MID(42))) - .Times(1); - EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + .WillOnce(Return(true)); + + queue.HandleT3RtxTimerExpiry(); EXPECT_THAT(queue.GetChunkStatesForTesting(), ElementsAre(Pair(TSN(9), State::kAcked), // Pair(TSN(10), State::kAbandoned), // Pair(TSN(11), State::kAbandoned), // Pair(TSN(12), State::kAbandoned), // - Pair(TSN(13), State::kAcked))); + Pair(TSN(13), State::kAcked), + // Representing end fragments of stream 1-3 + Pair(TSN(14), State::kAbandoned), // + Pair(TSN(15), State::kAbandoned), // + Pair(TSN(16), State::kAbandoned))); - IForwardTsnChunk forward_tsn = queue.CreateIForwardTsn(); - EXPECT_EQ(forward_tsn.new_cumulative_tsn(), TSN(12)); + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + IForwardTsnChunk forward_tsn1 = queue.CreateIForwardTsn(); + EXPECT_EQ(forward_tsn1.new_cumulative_tsn(), TSN(12)); EXPECT_THAT( - forward_tsn.skipped_streams(), + forward_tsn1.skipped_streams(), + UnorderedElementsAre(IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(1), MID(42)), + IForwardTsnChunk::SkippedStream( + IsUnordered(true), StreamID(2), MID(42)), + IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(3), MID(42)))); + + // When TSN 13 is acked, the placeholder "end fragments" must be skipped as + // well. + + // A receiver is more likely to ack TSN 13, but do it incrementally. + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, {}, {})); + + EXPECT_CALL(producer_, Discard).Times(0); + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + queue.HandleSack(now_, SackChunk(TSN(13), kArwnd, {}, {})); + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAbandoned), // + Pair(TSN(15), State::kAbandoned), // + Pair(TSN(16), State::kAbandoned))); + + IForwardTsnChunk forward_tsn2 = queue.CreateIForwardTsn(); + EXPECT_EQ(forward_tsn2.new_cumulative_tsn(), TSN(16)); + EXPECT_THAT( + forward_tsn2.skipped_streams(), UnorderedElementsAre(IForwardTsnChunk::SkippedStream( IsUnordered(false), StreamID(1), MID(42)), IForwardTsnChunk::SkippedStream( @@ -800,5 +878,305 @@ TEST_F(RetransmissionQueueTest, StaysWithinAvailableSize) { EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _))); } +TEST_F(RetransmissionQueueTest, AccountsNackedAbandonedChunksAsNotOutstanding) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({9, 10, 11, 12}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + // Send and ack first chunk (TSN 10) + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), 
State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight))); + EXPECT_EQ(queue.outstanding_bytes(), (16 + 4) * 3u); + + // Mark the message as lost. + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(1); + queue.HandleT3RtxTimerExpiry(); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAbandoned), // + Pair(TSN(12), State::kAbandoned))); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + // Now ACK those, one at a time. + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + queue.HandleSack(now_, SackChunk(TSN(11), kArwnd, {}, {})); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, {}, {})); + EXPECT_EQ(queue.outstanding_bytes(), 0u); +} + +TEST_F(RetransmissionQueueTest, ExpireFromSendQueueWhenPartiallySent) { + RetransmissionQueue queue = CreateQueue(); + DataGeneratorOptions options; + options.stream_id = StreamID(17); + options.message_id = MID(42); + TimeMs test_start = now_; + EXPECT_CALL(producer_, Produce) + .WillOnce([&](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B", options)); + dts.expires_at = TimeMs(test_start + DurationMs(10)); + return dts; + }) + .WillOnce([&](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "", options)); + dts.expires_at = TimeMs(test_start + DurationMs(10)); + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 24); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(17), MID(42))) + .WillOnce(Return(true)); + now_ += DurationMs(100); + + EXPECT_THAT(queue.GetChunksToSend(now_, 24), IsEmpty()); + + EXPECT_THAT( + queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // Initial TSN + Pair(TSN(10), State::kAbandoned), // Produced + Pair(TSN(11), State::kAbandoned), // Produced and expired + Pair(TSN(12), State::kAbandoned))); // Placeholder end +} + +TEST_F(RetransmissionQueueTest, LimitsRetransmissionsOnlyWhenNackedThreeTimes) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _), Pair(TSN(13), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(0); + + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 2)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), 
+ ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 3)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(false)); + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 4)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); +} + +TEST_F(RetransmissionQueueTest, AbandonsRtxLimit2WhenNackedNineTimes) { + // This is a fairly long test. + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 2; + return dts; + }) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, + ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), Pair(TSN(12), _), + Pair(TSN(13), _), Pair(TSN(14), _), Pair(TSN(15), _), + Pair(TSN(16), _), Pair(TSN(17), _), Pair(TSN(18), _), + Pair(TSN(19), _))); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight), // + Pair(TSN(14), State::kInFlight), // + Pair(TSN(15), State::kInFlight), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kInFlight), // + Pair(TSN(18), State::kInFlight), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(0); + + // Ack TSN [11 to 13] - three nacks for TSN(10), which will retransmit it. + for (int tsn = 11; tsn <= 13; ++tsn) { + queue.HandleSack( + now_, + SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, (tsn - 9))}, {})); + } + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kInFlight), // + Pair(TSN(15), State::kInFlight), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kInFlight), // + Pair(TSN(18), State::kInFlight), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), ElementsAre(Pair(TSN(10), _))); + + // Ack TSN [14 to 16] - three more nacks - second and last retransmission. 
+ for (int tsn = 14; tsn <= 16; ++tsn) { + queue.HandleSack( + now_, + SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, (tsn - 9))}, {})); + } + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kAcked), // + Pair(TSN(17), State::kInFlight), // + Pair(TSN(18), State::kInFlight), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), ElementsAre(Pair(TSN(10), _))); + + // Ack TSN [17 to 18] + for (int tsn = 17; tsn <= 18; ++tsn) { + queue.HandleSack( + now_, + SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, (tsn - 9))}, {})); + } + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kAcked), // + Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + // Ack TSN 19 - three more nacks for TSN 10, no more retransmissions. + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(false)); + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 10)}, {})); + + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), IsEmpty()); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kAcked), // + Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked), // + Pair(TSN(19), State::kAcked))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); +} // namespace + } // namespace } // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_timeout.cc b/net/dcsctp/tx/retransmission_timeout.cc index f38b94d32c..7d545a07d0 100644 --- a/net/dcsctp/tx/retransmission_timeout.cc +++ b/net/dcsctp/tx/retransmission_timeout.cc @@ -58,6 +58,11 @@ void RetransmissionTimeout::ObserveRTT(DurationMs measured_rtt) { rto_ = srtt_ + 4 * rttvar_; } + // If the RTO becomes smaller or equal to RTT, expiration timers will be + // scheduled at the same time as packets are expected. Only happens in + // extremely stable RTTs, i.e. in simulations. + rto_ = std::fmax(rto_, rtt + 1); + // Clamp RTO between min and max. 
rto_ = std::fmin(std::fmax(rto_, min_rto_), max_rto_); } diff --git a/net/dcsctp/tx/retransmission_timeout_test.cc b/net/dcsctp/tx/retransmission_timeout_test.cc index eb5e72e7ba..3b2e3399fe 100644 --- a/net/dcsctp/tx/retransmission_timeout_test.cc +++ b/net/dcsctp/tx/retransmission_timeout_test.cc @@ -80,29 +80,29 @@ TEST(RetransmissionTimeoutTest, WillNeverGoAboveMaximumRto) { TEST(RetransmissionTimeoutTest, CalculatesRtoForStableRtt) { RetransmissionTimeout rto_(MakeOptions()); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 372); + EXPECT_EQ(*rto_.rto(), 372); rto_.ObserveRTT(DurationMs(128)); - EXPECT_THAT(*rto_.rto(), 314); + EXPECT_EQ(*rto_.rto(), 314); rto_.ObserveRTT(DurationMs(123)); - EXPECT_THAT(*rto_.rto(), 268); + EXPECT_EQ(*rto_.rto(), 268); rto_.ObserveRTT(DurationMs(125)); - EXPECT_THAT(*rto_.rto(), 233); + EXPECT_EQ(*rto_.rto(), 233); rto_.ObserveRTT(DurationMs(127)); - EXPECT_THAT(*rto_.rto(), 208); + EXPECT_EQ(*rto_.rto(), 208); } TEST(RetransmissionTimeoutTest, CalculatesRtoForUnstableRtt) { RetransmissionTimeout rto_(MakeOptions()); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 372); + EXPECT_EQ(*rto_.rto(), 372); rto_.ObserveRTT(DurationMs(402)); - EXPECT_THAT(*rto_.rto(), 622); + EXPECT_EQ(*rto_.rto(), 622); rto_.ObserveRTT(DurationMs(728)); - EXPECT_THAT(*rto_.rto(), 800); + EXPECT_EQ(*rto_.rto(), 800); rto_.ObserveRTT(DurationMs(89)); - EXPECT_THAT(*rto_.rto(), 800); + EXPECT_EQ(*rto_.rto(), 800); rto_.ObserveRTT(DurationMs(126)); - EXPECT_THAT(*rto_.rto(), 800); + EXPECT_EQ(*rto_.rto(), 800); } TEST(RetransmissionTimeoutTest, WillStabilizeAfterAWhile) { @@ -112,25 +112,40 @@ TEST(RetransmissionTimeoutTest, WillStabilizeAfterAWhile) { rto_.ObserveRTT(DurationMs(728)); rto_.ObserveRTT(DurationMs(89)); rto_.ObserveRTT(DurationMs(126)); - EXPECT_THAT(*rto_.rto(), 800); + EXPECT_EQ(*rto_.rto(), 800); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 800); + EXPECT_EQ(*rto_.rto(), 800); rto_.ObserveRTT(DurationMs(122)); - EXPECT_THAT(*rto_.rto(), 709); + EXPECT_EQ(*rto_.rto(), 709); rto_.ObserveRTT(DurationMs(123)); - EXPECT_THAT(*rto_.rto(), 630); + EXPECT_EQ(*rto_.rto(), 630); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 561); + EXPECT_EQ(*rto_.rto(), 561); rto_.ObserveRTT(DurationMs(122)); - EXPECT_THAT(*rto_.rto(), 504); + EXPECT_EQ(*rto_.rto(), 504); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 453); + EXPECT_EQ(*rto_.rto(), 453); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 409); + EXPECT_EQ(*rto_.rto(), 409); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 372); + EXPECT_EQ(*rto_.rto(), 372); rto_.ObserveRTT(DurationMs(124)); - EXPECT_THAT(*rto_.rto(), 339); + EXPECT_EQ(*rto_.rto(), 339); } + +TEST(RetransmissionTimeoutTest, WillAlwaysStayAboveRTT) { + // In simulations, it's quite common to have a very stable RTT, and having an + // RTO at the same value will cause issues as expiry timers will be scheduled + // to expire exactly when a packet is supposed to arrive. The RTO must be + // larger than the RTT. In non-simulated environments, this is a non-issue as + // any jitter will increase the RTO.
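The new floor in ObserveRTT() exists because plain RFC 4960 smoothing converges to the measured RTT when the RTT never varies. Below is a small, self-contained model of that arithmetic (not part of the patch), using the textbook coefficients alpha = 1/8 and beta = 1/4 and hypothetical min/max bounds; it reproduces the first two expectations above (372, then 314 after integer truncation) and shows why, without the added `rtt + 1` floor, a perfectly stable 124 ms RTT would eventually drag the RTO down to the RTT itself. The production RetransmissionTimeout keeps integer state and uses the configured rto_min/rto_max, so intermediate values may differ slightly.

#include <cmath>
#include <cstdio>

struct RtoSketch {
  double srtt = 0;
  double rttvar = 0;
  double rto = 0;
  bool first = true;

  void ObserveRTT(double rtt, double min_rto, double max_rto) {
    if (first) {
      // First measurement per RFC 4960: SRTT = R, RTTVAR = R/2.
      srtt = rtt;
      rttvar = rtt / 2;
      first = false;
    } else {
      rttvar = 0.75 * rttvar + 0.25 * std::fabs(srtt - rtt);
      srtt = 0.875 * srtt + 0.125 * rtt;
    }
    rto = srtt + 4 * rttvar;
    // The floor added by this patch: keep the RTO strictly above the measured
    // RTT so the T3-rtx timer cannot fire exactly when the SACK is due.
    rto = std::fmax(rto, rtt + 1);
    rto = std::fmin(std::fmax(rto, min_rto), max_rto);
  }
};

int main() {
  RtoSketch sketch;
  for (int i = 0; i < 100; ++i) {
    sketch.ObserveRTT(124, /*min_rto=*/100, /*max_rto=*/800);  // hypothetical bounds
  }
  std::printf("rto=%.1f\n", sketch.rto);  // settles at 125, i.e. above the 124 ms RTT
}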
+ RetransmissionTimeout rto_(MakeOptions()); + + for (int i = 0; i < 100; ++i) { + rto_.ObserveRTT(DurationMs(124)); + } + EXPECT_GT(*rto_.rto(), 124); +} + } // namespace } // namespace dcsctp diff --git a/net/dcsctp/tx/rr_send_queue.cc b/net/dcsctp/tx/rr_send_queue.cc new file mode 100644 index 0000000000..254214e554 --- /dev/null +++ b/net/dcsctp/tx/rr_send_queue.cc @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/tx/rr_send_queue.h" + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/logging.h" + +namespace dcsctp { + +bool RRSendQueue::OutgoingStream::HasDataToSend(TimeMs now) { + while (!items_.empty()) { + RRSendQueue::OutgoingStream::Item& item = items_.front(); + if (item.message_id.has_value()) { + // Already partially sent messages can always continue to be sent. + return true; + } + + // Message has expired. Remove it and inspect the next one. + if (item.expires_at.has_value() && *item.expires_at <= now) { + buffered_amount_.Decrease(item.remaining_size); + total_buffered_amount_.Decrease(item.remaining_size); + items_.pop_front(); + RTC_DCHECK(IsConsistent()); + continue; + } + + if (is_paused_) { + // The stream has paused (and there is no partially sent message). + return false; + } + return true; + } + return false; +} + +bool RRSendQueue::IsConsistent() const { + size_t total_buffered_amount = 0; + for (const auto& stream_entry : streams_) { + total_buffered_amount += stream_entry.second.buffered_amount().value(); + } + + if (previous_message_has_ended_) { + auto it = streams_.find(current_stream_id_); + if (it != streams_.end() && it->second.has_partially_sent_message()) { + RTC_DLOG(LS_ERROR) + << "Previous message has ended, but still partial message in stream"; + return false; + } + } else { + auto it = streams_.find(current_stream_id_); + if (it == streams_.end() || !it->second.has_partially_sent_message()) { + RTC_DLOG(LS_ERROR) + << "Previous message has NOT ended, but there is no partial message"; + return false; + } + } + + return total_buffered_amount == total_buffered_amount_.value(); +} + +bool RRSendQueue::OutgoingStream::IsConsistent() const { + size_t bytes = 0; + for (const auto& item : items_) { + bytes += item.remaining_size; + } + return bytes == buffered_amount_.value(); +} + +void RRSendQueue::ThresholdWatcher::Decrease(size_t bytes) { + RTC_DCHECK(bytes <= value_); + size_t old_value = value_; + value_ -= bytes; + + if (old_value > low_threshold_ && value_ <= low_threshold_) { + on_threshold_reached_(); + } +} + +void RRSendQueue::ThresholdWatcher::SetLowThreshold(size_t low_threshold) { + // Betting on https://github.com/w3c/webrtc-pc/issues/2654 being accepted. 
+ if (low_threshold_ < value_ && low_threshold >= value_) { + on_threshold_reached_(); + } + low_threshold_ = low_threshold; +} + +void RRSendQueue::OutgoingStream::Add(DcSctpMessage message, + absl::optional expires_at, + const SendOptions& send_options) { + buffered_amount_.Increase(message.payload().size()); + total_buffered_amount_.Increase(message.payload().size()); + items_.emplace_back(std::move(message), expires_at, send_options); + + RTC_DCHECK(IsConsistent()); +} + +absl::optional RRSendQueue::OutgoingStream::Produce( + TimeMs now, + size_t max_size) { + RTC_DCHECK(!items_.empty()); + + Item* item = &items_.front(); + DcSctpMessage& message = item->message; + + if (item->remaining_size > max_size && max_size < kMinimumFragmentedPayload) { + RTC_DCHECK(IsConsistent()); + return absl::nullopt; + } + + // Allocate Message ID and SSN when the first fragment is sent. + if (!item->message_id.has_value()) { + MID& mid = + item->send_options.unordered ? next_unordered_mid_ : next_ordered_mid_; + item->message_id = mid; + mid = MID(*mid + 1); + } + if (!item->send_options.unordered && !item->ssn.has_value()) { + item->ssn = next_ssn_; + next_ssn_ = SSN(*next_ssn_ + 1); + } + + // Grab the next `max_size` fragment from this message and calculate flags. + rtc::ArrayView chunk_payload = + item->message.payload().subview(item->remaining_offset, max_size); + rtc::ArrayView message_payload = message.payload(); + Data::IsBeginning is_beginning(chunk_payload.data() == + message_payload.data()); + Data::IsEnd is_end((chunk_payload.data() + chunk_payload.size()) == + (message_payload.data() + message_payload.size())); + + StreamID stream_id = message.stream_id(); + PPID ppid = message.ppid(); + + // Zero-copy the payload if the message fits in a single chunk. + std::vector payload = + is_beginning && is_end + ? std::move(message).ReleasePayload() + : std::vector(chunk_payload.begin(), chunk_payload.end()); + + FSN fsn(item->current_fsn); + item->current_fsn = FSN(*item->current_fsn + 1); + buffered_amount_.Decrease(payload.size()); + total_buffered_amount_.Decrease(payload.size()); + + SendQueue::DataToSend chunk(Data(stream_id, item->ssn.value_or(SSN(0)), + item->message_id.value(), fsn, ppid, + std::move(payload), is_beginning, is_end, + item->send_options.unordered)); + chunk.max_retransmissions = item->send_options.max_retransmissions; + chunk.expires_at = item->expires_at; + + if (is_end) { + // The entire message has been sent, and its last data copied to `chunk`, so + // it can safely be discarded. + items_.pop_front(); + } else { + item->remaining_offset += chunk_payload.size(); + item->remaining_size -= chunk_payload.size(); + RTC_DCHECK(item->remaining_offset + item->remaining_size == + item->message.payload().size()); + RTC_DCHECK(item->remaining_size > 0); + } + RTC_DCHECK(IsConsistent()); + return chunk; +} + +bool RRSendQueue::OutgoingStream::Discard(IsUnordered unordered, + MID message_id) { + bool result = false; + if (!items_.empty()) { + Item& item = items_.front(); + if (item.send_options.unordered == unordered && + item.message_id.has_value() && *item.message_id == message_id) { + buffered_amount_.Decrease(item.remaining_size); + total_buffered_amount_.Decrease(item.remaining_size); + items_.pop_front(); + // As the item still existed, it had unsent data. + result = true; + } + } + RTC_DCHECK(IsConsistent()); + return result; +} + +void RRSendQueue::OutgoingStream::Pause() { + is_paused_ = true; + + // A stream is paused when it's about to be reset. 
In this implementation, + // it will throw away all non-partially sent messages. This is subject to + // change. It will however not discard any partially sent messages - only + // whole messages. Partially delivered messages (at the time of receiving a + // Stream Reset command) will always deliver all the fragments before + // actually resetting the stream. + for (auto it = items_.begin(); it != items_.end();) { + if (it->remaining_offset == 0) { + buffered_amount_.Decrease(it->remaining_size); + total_buffered_amount_.Decrease(it->remaining_size); + it = items_.erase(it); + } else { + ++it; + } + } + RTC_DCHECK(IsConsistent()); +} + +void RRSendQueue::OutgoingStream::Reset() { + if (!items_.empty()) { + // If this message has been partially sent, reset it so that it will be + // re-sent. + auto& item = items_.front(); + buffered_amount_.Increase(item.message.payload().size() - + item.remaining_size); + total_buffered_amount_.Increase(item.message.payload().size() - + item.remaining_size); + item.remaining_offset = 0; + item.remaining_size = item.message.payload().size(); + item.message_id = absl::nullopt; + item.ssn = absl::nullopt; + item.current_fsn = FSN(0); + } + is_paused_ = false; + next_ordered_mid_ = MID(0); + next_unordered_mid_ = MID(0); + next_ssn_ = SSN(0); + RTC_DCHECK(IsConsistent()); +} + +bool RRSendQueue::OutgoingStream::has_partially_sent_message() const { + if (items_.empty()) { + return false; + } + return items_.front().message_id.has_value(); +} + +void RRSendQueue::Add(TimeMs now, + DcSctpMessage message, + const SendOptions& send_options) { + RTC_DCHECK(!message.payload().empty()); + // Any limited lifetime should start counting from now - when the message + // has been added to the queue. + absl::optional expires_at = absl::nullopt; + if (send_options.lifetime.has_value()) { + // `expires_at` is the time when it expires, which is slightly larger than + // the message's lifetime, as the message is alive during its entire + // lifetime (which may be zero). + expires_at = now + *send_options.lifetime + DurationMs(1); + } + GetOrCreateStreamInfo(message.stream_id()) + .Add(std::move(message), expires_at, send_options); + RTC_DCHECK(IsConsistent()); +} + +bool RRSendQueue::IsFull() const { + return total_buffered_amount() >= buffer_size_; +} + +bool RRSendQueue::IsEmpty() const { + return total_buffered_amount() == 0; +} + +std::map::iterator +RRSendQueue::GetNextStream(TimeMs now) { + auto start_it = streams_.lower_bound(StreamID(*current_stream_id_ + 1)); + + for (auto it = start_it; it != streams_.end(); ++it) { + if (it->second.HasDataToSend(now)) { + current_stream_id_ = it->first; + return it; + } + } + + for (auto it = streams_.begin(); it != start_it; ++it) { + if (it->second.HasDataToSend(now)) { + current_stream_id_ = it->first; + return it; + } + } + return streams_.end(); +} + +absl::optional RRSendQueue::Produce(TimeMs now, + size_t max_size) { + std::map::iterator stream_it; + + if (previous_message_has_ended_) { + // Previous message has ended. Round-robin to a different stream, if there + // even is one with data to send. + stream_it = GetNextStream(now); + if (stream_it == streams_.end()) { + RTC_DLOG(LS_VERBOSE) + << log_prefix_ + << "There is no stream with data; Can't produce any data."; + return absl::nullopt; + } + } else { + // The previous message has not ended; Continue from the current stream.
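GetNextStream() above implements the wrap-around, round-robin stream selection: start looking at the stream id just after the one that last finished a message, and wrap back to the beginning if nothing later has data. A self-contained illustration (not part of the patch) with plain int keys and a bool "has data" flag standing in for StreamID and OutgoingStream:

#include <map>

// Returns the key of the next stream with data to send, starting just after
// `current` and wrapping around; -1 if no stream has data.
int NextStreamRoundRobin(const std::map<int, bool>& streams, int current) {
  auto start_it = streams.lower_bound(current + 1);
  // First pass: streams strictly after the current one, in key order.
  for (auto it = start_it; it != streams.end(); ++it) {
    if (it->second) {
      return it->first;
    }
  }
  // Second pass: wrap around to the beginning, up to where the first pass started.
  for (auto it = streams.begin(); it != start_it; ++it) {
    if (it->second) {
      return it->first;
    }
  }
  return -1;
}

// Example: with streams {1: false, 2: true, 3: true} and current == 2, the
// first loop picks 3; with current == 3 the second loop wraps and picks 2.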
+ stream_it = streams_.find(current_stream_id_); + RTC_DCHECK(stream_it != streams_.end()); + } + + absl::optional data = stream_it->second.Produce(now, max_size); + if (data.has_value()) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Producing DATA, type=" + << (data->data.is_unordered ? "unordered" : "ordered") + << "::" + << (*data->data.is_beginning && *data->data.is_end + ? "complete" + : *data->data.is_beginning + ? "first" + : *data->data.is_end ? "last" : "middle") + << ", stream_id=" << *stream_it->first + << ", ppid=" << *data->data.ppid + << ", length=" << data->data.payload.size(); + + previous_message_has_ended_ = *data->data.is_end; + } + + RTC_DCHECK(IsConsistent()); + return data; +} + +bool RRSendQueue::Discard(IsUnordered unordered, + StreamID stream_id, + MID message_id) { + bool has_discarded = + GetOrCreateStreamInfo(stream_id).Discard(unordered, message_id); + if (has_discarded) { + // Only partially sent messages are discarded, so if a message was + // discarded, then it was the currently sent message. + previous_message_has_ended_ = true; + } + + return has_discarded; +} + +void RRSendQueue::PrepareResetStreams(rtc::ArrayView streams) { + for (StreamID stream_id : streams) { + GetOrCreateStreamInfo(stream_id).Pause(); + } + RTC_DCHECK(IsConsistent()); +} + +bool RRSendQueue::CanResetStreams() const { + // Streams can be reset if those streams that are paused don't have any + // messages that are partially sent. + for (auto& stream : streams_) { + if (stream.second.is_paused() && + stream.second.has_partially_sent_message()) { + return false; + } + } + return true; +} + +void RRSendQueue::CommitResetStreams() { + for (auto& stream_entry : streams_) { + if (stream_entry.second.is_paused()) { + stream_entry.second.Reset(); + } + } + RTC_DCHECK(IsConsistent()); +} + +void RRSendQueue::RollbackResetStreams() { + for (auto& stream_entry : streams_) { + stream_entry.second.Resume(); + } + RTC_DCHECK(IsConsistent()); +} + +void RRSendQueue::Reset() { + // Recalculate buffered amount, as partially sent messages may have been put + // fully back in the queue. + for (auto& stream_entry : streams_) { + OutgoingStream& stream = stream_entry.second; + stream.Reset(); + } + previous_message_has_ended_ = true; +} + +size_t RRSendQueue::buffered_amount(StreamID stream_id) const { + auto it = streams_.find(stream_id); + if (it == streams_.end()) { + return 0; + } + return it->second.buffered_amount().value(); +} + +size_t RRSendQueue::buffered_amount_low_threshold(StreamID stream_id) const { + auto it = streams_.find(stream_id); + if (it == streams_.end()) { + return 0; + } + return it->second.buffered_amount().low_threshold(); +} + +void RRSendQueue::SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) { + GetOrCreateStreamInfo(stream_id).buffered_amount().SetLowThreshold(bytes); +} + +RRSendQueue::OutgoingStream& RRSendQueue::GetOrCreateStreamInfo( + StreamID stream_id) { + auto it = streams_.find(stream_id); + if (it != streams_.end()) { + return it->second; + } + + return streams_ + .emplace(stream_id, + OutgoingStream( + [this, stream_id]() { on_buffered_amount_low_(stream_id); }, + total_buffered_amount_)) + .first->second; +} +} // namespace dcsctp diff --git a/net/dcsctp/tx/rr_send_queue.h b/net/dcsctp/tx/rr_send_queue.h new file mode 100644 index 0000000000..3ec45af17d --- /dev/null +++ b/net/dcsctp/tx/rr_send_queue.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TX_RR_SEND_QUEUE_H_ +#define NET_DCSCTP_TX_RR_SEND_QUEUE_H_ + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/pair_hash.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/tx/send_queue.h" + +namespace dcsctp { + +// The Round Robin SendQueue holds all messages that the client wants to send, +// but that haven't yet been split into chunks and fully sent on the wire. +// +// As defined in https://datatracker.ietf.org/doc/html/rfc8260#section-3.2, +// it will cycle to send messages from different streams. It will send all +// fragments from one message before continuing with a different message on +// possibly a different stream, until support for message interleaving has been +// implemented. +// +// As messages can be (requested to be) sent before the connection is properly +// established, this send queue is always present - even for closed connections. +class RRSendQueue : public SendQueue { + public: + // How small a data chunk's payload may be, if having to fragment a message. + static constexpr size_t kMinimumFragmentedPayload = 10; + + RRSendQueue(absl::string_view log_prefix, + size_t buffer_size, + std::function on_buffered_amount_low, + size_t total_buffered_amount_low_threshold, + std::function on_total_buffered_amount_low) + : log_prefix_(std::string(log_prefix) + "fcfs: "), + buffer_size_(buffer_size), + on_buffered_amount_low_(std::move(on_buffered_amount_low)), + total_buffered_amount_(std::move(on_total_buffered_amount_low)) { + total_buffered_amount_.SetLowThreshold(total_buffered_amount_low_threshold); + } + + // Indicates if the buffer is full. Note that it's up to the caller to ensure + // that the buffer is not full prior to adding new items to it. + bool IsFull() const; + // Indicates if the buffer is empty. + bool IsEmpty() const; + + // Adds the message to be sent using the `send_options` provided. The current + // time should be in `now`. Note that it's the responsibility of the caller to + // ensure that the buffer is not full (by calling `IsFull`) before adding + // messages to it. + void Add(TimeMs now, + DcSctpMessage message, + const SendOptions& send_options = {}); + + // Implementation of `SendQueue`. 
+ absl::optional Produce(TimeMs now, size_t max_size) override; + bool Discard(IsUnordered unordered, + StreamID stream_id, + MID message_id) override; + void PrepareResetStreams(rtc::ArrayView streams) override; + bool CanResetStreams() const override; + void CommitResetStreams() override; + void RollbackResetStreams() override; + void Reset() override; + size_t buffered_amount(StreamID stream_id) const override; + size_t total_buffered_amount() const override { + return total_buffered_amount_.value(); + } + size_t buffered_amount_low_threshold(StreamID stream_id) const override; + void SetBufferedAmountLowThreshold(StreamID stream_id, size_t bytes) override; + + private: + // Represents a value and a "low threshold" that when the value reaches or + // goes under the "low threshold", will trigger `on_threshold_reached` + // callback. + class ThresholdWatcher { + public: + explicit ThresholdWatcher(std::function on_threshold_reached) + : on_threshold_reached_(std::move(on_threshold_reached)) {} + // Increases the value. + void Increase(size_t bytes) { value_ += bytes; } + // Decreases the value and triggers `on_threshold_reached` if it's at or + // below `low_threshold()`. + void Decrease(size_t bytes); + + size_t value() const { return value_; } + size_t low_threshold() const { return low_threshold_; } + void SetLowThreshold(size_t low_threshold); + + private: + const std::function on_threshold_reached_; + size_t value_ = 0; + size_t low_threshold_ = 0; + }; + + // Per-stream information. + class OutgoingStream { + public: + explicit OutgoingStream(std::function on_buffered_amount_low, + ThresholdWatcher& total_buffered_amount) + : buffered_amount_(std::move(on_buffered_amount_low)), + total_buffered_amount_(total_buffered_amount) {} + + // Enqueues a message to this stream. + void Add(DcSctpMessage message, + absl::optional expires_at, + const SendOptions& send_options); + + // Possibly produces a data chunk to send. + absl::optional Produce(TimeMs now, size_t max_size); + + const ThresholdWatcher& buffered_amount() const { return buffered_amount_; } + ThresholdWatcher& buffered_amount() { return buffered_amount_; } + + // Discards a partially sent message, see `SendQueue::Discard`. + bool Discard(IsUnordered unordered, MID message_id); + + // Pauses this stream, which is used before resetting it. + void Pause(); + + // Resumes a paused stream. + void Resume() { is_paused_ = false; } + + bool is_paused() const { return is_paused_; } + + // Resets this stream, meaning MIDs and SSNs are set to zero. + void Reset(); + + // Indicates if this stream has a partially sent message in it. + bool has_partially_sent_message() const; + + // Indicates if the stream has data to send. It will also try to remove any + // expired non-partially sent message. + bool HasDataToSend(TimeMs now); + + private: + // An enqueued message and metadata. + struct Item { + explicit Item(DcSctpMessage msg, + absl::optional expires_at, + const SendOptions& send_options) + : message(std::move(msg)), + expires_at(expires_at), + send_options(send_options), + remaining_offset(0), + remaining_size(message.payload().size()) {} + DcSctpMessage message; + absl::optional expires_at; + SendOptions send_options; + // The remaining payload (offset and size) to be sent, when it has been + // fragmented. + size_t remaining_offset; + size_t remaining_size; + // If set, an allocated Message ID and SSN. Will be allocated when the + // first fragment is sent. 
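The ThresholdWatcher declared in this hunk (and implemented earlier in rr_send_queue.cc) backs both the per-stream buffered amount and the total buffered amount. Its contract, shown in miniature below (an illustrative sketch, not part of the patch), is that the callback fires whenever the tracked value moves from above the low threshold to at or below it, either because the value decreased or because the threshold was raised past the current value.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <utility>

class WatcherSketch {
 public:
  explicit WatcherSketch(std::function<void()> on_low)
      : on_low_(std::move(on_low)) {}

  void Increase(size_t bytes) { value_ += bytes; }

  // Fires the callback when the value crosses the threshold from above.
  void Decrease(size_t bytes) {
    size_t old_value = value_;
    value_ -= bytes;
    if (old_value > low_threshold_ && value_ <= low_threshold_) {
      on_low_();
    }
  }

  // Raising the threshold to (or above) the current value also fires it.
  void SetLowThreshold(size_t low_threshold) {
    if (low_threshold_ < value_ && low_threshold >= value_) {
      on_low_();
    }
    low_threshold_ = low_threshold;
  }

 private:
  const std::function<void()> on_low_;
  size_t value_ = 0;
  size_t low_threshold_ = 0;
};

int main() {
  WatcherSketch watcher([] { std::puts("buffered amount low"); });
  watcher.SetLowThreshold(500);
  watcher.Increase(800);  // 800 buffered, above the threshold: no callback.
  watcher.Decrease(400);  // Now 400 <= 500: the callback fires once.
}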
+ absl::optional message_id = absl::nullopt; + absl::optional ssn = absl::nullopt; + // The current Fragment Sequence Number, incremented for each fragment. + FSN current_fsn = FSN(0); + }; + + bool IsConsistent() const; + + // Streams are paused when they are about to be reset. + bool is_paused_ = false; + // MIDs are different for unordered and ordered messages sent on a stream. + MID next_unordered_mid_ = MID(0); + MID next_ordered_mid_ = MID(0); + + SSN next_ssn_ = SSN(0); + // Enqueued messages, and metadata. + std::deque items_; + + // The current amount of buffered data. + ThresholdWatcher buffered_amount_; + + // Reference to the total buffered amount, which is updated directly by each + // stream. + ThresholdWatcher& total_buffered_amount_; + }; + + bool IsConsistent() const; + OutgoingStream& GetOrCreateStreamInfo(StreamID stream_id); + absl::optional Produce( + std::map::iterator it, + TimeMs now, + size_t max_size); + + // Returns the next stream, in round-robin fashion. + std::map::iterator GetNextStream(TimeMs now); + + const std::string log_prefix_; + const size_t buffer_size_; + + // Called when the buffered amount is below what has been set using + // `SetBufferedAmountLowThreshold`. + const std::function on_buffered_amount_low_; + + // Called when the total buffered amount is below what has been set using + // `SetTotalBufferedAmountLowThreshold`. + const std::function on_total_buffered_amount_low_; + + // The total amount of buffered data, for all streams. + ThresholdWatcher total_buffered_amount_; + + // Indicates if the previous fragment sent was the end of a message. For + // non-interleaved sending, this means that the next message may come from a + // different stream. If not true, the next fragment must be produced from the + // same stream as last time. + bool previous_message_has_ended_ = true; + + // The current stream to send chunks from. Modified by `GetNextStream`. + StreamID current_stream_id_ = StreamID(0); + + // All streams, and messages added to those. + std::map streams_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_RR_SEND_QUEUE_H_ diff --git a/net/dcsctp/tx/rr_send_queue_test.cc b/net/dcsctp/tx/rr_send_queue_test.cc new file mode 100644 index 0000000000..425027762d --- /dev/null +++ b/net/dcsctp/tx/rr_send_queue_test.cc @@ -0,0 +1,783 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree.
+ */ +#include "net/dcsctp/tx/rr_send_queue.h" + +#include +#include +#include + +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::SizeIs; + +constexpr TimeMs kNow = TimeMs(0); +constexpr StreamID kStreamID(1); +constexpr PPID kPPID(53); +constexpr size_t kMaxQueueSize = 1000; +constexpr size_t kBufferedAmountLowThreshold = 500; +constexpr size_t kOneFragmentPacketSize = 100; +constexpr size_t kTwoFragmentPacketSize = 101; + +class RRSendQueueTest : public testing::Test { + protected: + RRSendQueueTest() + : buf_("log: ", + kMaxQueueSize, + on_buffered_amount_low_.AsStdFunction(), + kBufferedAmountLowThreshold, + on_total_buffered_amount_low_.AsStdFunction()) {} + + const DcSctpOptions options_; + testing::NiceMock> + on_buffered_amount_low_; + testing::NiceMock> + on_total_buffered_amount_low_; + RRSendQueue buf_; +}; + +TEST_F(RRSendQueueTest, EmptyBuffer) { + EXPECT_TRUE(buf_.IsEmpty()); + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); + EXPECT_FALSE(buf_.IsFull()); +} + +TEST_F(RRSendQueueTest, AddAndGetSingleChunk) { + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, {1, 2, 4, 5, 6})); + + EXPECT_FALSE(buf_.IsEmpty()); + EXPECT_FALSE(buf_.IsFull()); + absl::optional chunk_opt = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_opt.has_value()); + EXPECT_TRUE(chunk_opt->data.is_beginning); + EXPECT_TRUE(chunk_opt->data.is_end); +} + +TEST_F(RRSendQueueTest, CarveOutBeginningMiddleAndEnd) { + std::vector payload(60); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_beg = + buf_.Produce(kNow, /*max_size=*/20); + ASSERT_TRUE(chunk_beg.has_value()); + EXPECT_TRUE(chunk_beg->data.is_beginning); + EXPECT_FALSE(chunk_beg->data.is_end); + + absl::optional chunk_mid = + buf_.Produce(kNow, /*max_size=*/20); + ASSERT_TRUE(chunk_mid.has_value()); + EXPECT_FALSE(chunk_mid->data.is_beginning); + EXPECT_FALSE(chunk_mid->data.is_end); + + absl::optional chunk_end = + buf_.Produce(kNow, /*max_size=*/20); + ASSERT_TRUE(chunk_end.has_value()); + EXPECT_FALSE(chunk_end->data.is_beginning); + EXPECT_TRUE(chunk_end->data.is_end); + + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); +} + +TEST_F(RRSendQueueTest, GetChunksFromTwoMessages) { + std::vector payload(60); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(3), PPID(54), payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(chunk_one->data.ppid, kPPID); + EXPECT_TRUE(chunk_one->data.is_beginning); + EXPECT_TRUE(chunk_one->data.is_end); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_two->data.ppid, PPID(54)); + EXPECT_TRUE(chunk_two->data.is_beginning); + EXPECT_TRUE(chunk_two->data.is_end); +} + +TEST_F(RRSendQueueTest, BufferBecomesFullAndEmptied) { + std::vector payload(600); + EXPECT_FALSE(buf_.IsFull()); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + 
EXPECT_FALSE(buf_.IsFull()); + buf_.Add(kNow, DcSctpMessage(StreamID(3), PPID(54), payload)); + EXPECT_TRUE(buf_.IsFull()); + // However, it's still possible to add messages. It's a soft limit, and it + // might be necessary to forcefully add messages due to e.g. external + // fragmentation. + buf_.Add(kNow, DcSctpMessage(StreamID(5), PPID(55), payload)); + EXPECT_TRUE(buf_.IsFull()); + + absl::optional chunk_one = buf_.Produce(kNow, 1000); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(chunk_one->data.ppid, kPPID); + + EXPECT_TRUE(buf_.IsFull()); + + absl::optional chunk_two = buf_.Produce(kNow, 1000); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_two->data.ppid, PPID(54)); + + EXPECT_FALSE(buf_.IsFull()); + EXPECT_FALSE(buf_.IsEmpty()); + + absl::optional chunk_three = buf_.Produce(kNow, 1000); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.stream_id, StreamID(5)); + EXPECT_EQ(chunk_three->data.ppid, PPID(55)); + + EXPECT_FALSE(buf_.IsFull()); + EXPECT_TRUE(buf_.IsEmpty()); +} + +TEST_F(RRSendQueueTest, WillNotSendTooSmallPacket) { + std::vector payload(RRSendQueue::kMinimumFragmentedPayload + 1); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + // Wouldn't fit enough payload (wouldn't want to fragment) + EXPECT_FALSE( + buf_.Produce(kNow, + /*max_size=*/RRSendQueue::kMinimumFragmentedPayload - 1) + .has_value()); + + // Minimum fragment + absl::optional chunk_one = + buf_.Produce(kNow, + /*max_size=*/RRSendQueue::kMinimumFragmentedPayload); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(chunk_one->data.ppid, kPPID); + + // There is only one byte remaining - it can be fetched as it doesn't require + // additional fragmentation. + absl::optional chunk_two = + buf_.Produce(kNow, /*max_size=*/1); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, kStreamID); + EXPECT_EQ(chunk_two->data.ppid, kPPID); + + EXPECT_TRUE(buf_.IsEmpty()); +} + +TEST_F(RRSendQueueTest, DefaultsToOrderedSend) { + std::vector payload(20); + + // Default is ordered + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_FALSE(chunk_one->data.is_unordered); + + // Explicitly unordered. 
+ SendOptions opts; + opts.unordered = IsUnordered(true); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload), opts); + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_TRUE(chunk_two->data.is_unordered); +} + +TEST_F(RRSendQueueTest, ProduceWithLifetimeExpiry) { + std::vector payload(20); + + // Default is no expiry + TimeMs now = kNow; + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload)); + now += DurationMs(1000000); + ASSERT_TRUE(buf_.Produce(now, kOneFragmentPacketSize)); + + SendOptions expires_2_seconds; + expires_2_seconds.lifetime = DurationMs(2000); + + // Add and consume within lifetime + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + now += DurationMs(2000); + ASSERT_TRUE(buf_.Produce(now, kOneFragmentPacketSize)); + + // Add and consume just outside lifetime + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + now += DurationMs(2001); + ASSERT_FALSE(buf_.Produce(now, kOneFragmentPacketSize)); + + // A long time after expiry + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + now += DurationMs(1000000); + ASSERT_FALSE(buf_.Produce(now, kOneFragmentPacketSize)); + + // Expire one message, but produce the second that is not expired. + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + + SendOptions expires_4_seconds; + expires_4_seconds.lifetime = DurationMs(4000); + + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_4_seconds); + now += DurationMs(2001); + + ASSERT_TRUE(buf_.Produce(now, kOneFragmentPacketSize)); + ASSERT_FALSE(buf_.Produce(now, kOneFragmentPacketSize)); +} + +TEST_F(RRSendQueueTest, DiscardPartialPackets) { + std::vector payload(120); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(2), PPID(54), payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_FALSE(chunk_one->data.is_end); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + buf_.Discard(IsUnordered(false), chunk_one->data.stream_id, + chunk_one->data.message_id); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_FALSE(chunk_two->data.is_end); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(2)); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_TRUE(chunk_three->data.is_end); + EXPECT_EQ(chunk_three->data.stream_id, StreamID(2)); + ASSERT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize)); + + // Calling it again shouldn't cause issues. 
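Unordered delivery and a per-message lifetime are per-Add() options, as the two tests above pin down. A sketch, assuming the SendOptions type from net/dcsctp/public/dcsctp_socket.h; the stream, PPID and lifetime values are placeholders:

    // Queues one unordered message that expires if still unsent after 2 s.
    void EnqueueExpiring(dcsctp::RRSendQueue& queue, dcsctp::TimeMs now,
                         std::vector<uint8_t> payload) {
      dcsctp::SendOptions options;
      options.unordered = dcsctp::IsUnordered(true);
      options.lifetime = dcsctp::DurationMs(2000);
      queue.Add(now,
                dcsctp::DcSctpMessage(dcsctp::StreamID(1), dcsctp::PPID(53),
                                      std::move(payload)),
                options);
    }
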
+ buf_.Discard(IsUnordered(false), chunk_one->data.stream_id, + chunk_one->data.message_id); + ASSERT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize)); +} + +TEST_F(RRSendQueueTest, PrepareResetStreamsDiscardsStream) { + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, {1, 2, 3})); + buf_.Add(kNow, DcSctpMessage(StreamID(2), PPID(54), {1, 2, 3, 4, 5})); + EXPECT_EQ(buf_.total_buffered_amount(), 8u); + + buf_.PrepareResetStreams(std::vector({StreamID(1)})); + EXPECT_EQ(buf_.total_buffered_amount(), 5u); + buf_.CommitResetStreams(); + buf_.PrepareResetStreams(std::vector({StreamID(2)})); + EXPECT_EQ(buf_.total_buffered_amount(), 0u); +} + +TEST_F(RRSendQueueTest, PrepareResetStreamsNotPartialPackets) { + std::vector payload(120); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_one = buf_.Produce(kNow, 50); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(buf_.total_buffered_amount(), 2 * payload.size() - 50); + + StreamID stream_ids[] = {StreamID(1)}; + buf_.PrepareResetStreams(stream_ids); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size() - 50); +} + +TEST_F(RRSendQueueTest, EnqueuedItemsArePausedDuringStreamReset) { + std::vector payload(50); + + buf_.PrepareResetStreams(std::vector({StreamID(1)})); + EXPECT_EQ(buf_.total_buffered_amount(), 0u); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); + buf_.CommitResetStreams(); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + absl::optional chunk_one = buf_.Produce(kNow, 50); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(buf_.total_buffered_amount(), 0u); +} + +TEST_F(RRSendQueueTest, CommittingResetsSSN) { + std::vector payload(50); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.ssn, SSN(0)); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.ssn, SSN(1)); + + StreamID stream_ids[] = {StreamID(1)}; + buf_.PrepareResetStreams(stream_ids); + + // Buffered + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + EXPECT_TRUE(buf_.CanResetStreams()); + buf_.CommitResetStreams(); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.ssn, SSN(0)); +} + +TEST_F(RRSendQueueTest, CommittingResetsSSNForPausedStreamsOnly) { + std::vector payload(50); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, StreamID(1)); + EXPECT_EQ(chunk_one->data.ssn, SSN(0)); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_two->data.ssn, SSN(0)); + + StreamID stream_ids[] = {StreamID(3)}; + buf_.PrepareResetStreams(stream_ids); + + // Send two more 
messages - SID 3 will buffer, SID 1 will send. + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, payload)); + + EXPECT_TRUE(buf_.CanResetStreams()); + buf_.CommitResetStreams(); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.stream_id, StreamID(1)); + EXPECT_EQ(chunk_three->data.ssn, SSN(1)); + + absl::optional chunk_four = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_four.has_value()); + EXPECT_EQ(chunk_four->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_four->data.ssn, SSN(0)); +} + +TEST_F(RRSendQueueTest, RollBackResumesSSN) { + std::vector payload(50); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.ssn, SSN(0)); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.ssn, SSN(1)); + + buf_.PrepareResetStreams(std::vector({StreamID(1)})); + + // Buffered + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + EXPECT_TRUE(buf_.CanResetStreams()); + buf_.RollbackResetStreams(); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.ssn, SSN(2)); +} + +TEST_F(RRSendQueueTest, ReturnsFragmentsForOneMessageBeforeMovingToNext) { + std::vector payload(200); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, payload)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(2)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(2)); +} + +TEST_F(RRSendQueueTest, ReturnsAlsoSmallFragmentsBeforeMovingToNext) { + std::vector payload(kTwoFragmentPacketSize); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, payload)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(kOneFragmentPacketSize)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, + SizeIs(kTwoFragmentPacketSize - kOneFragmentPacketSize)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(2)); + 
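The SSN tests above fix the reset contract: PrepareResetStreams() pauses the named streams and drops what they had queued, CommitResetStreams() restarts their SSNs at zero, and RollbackResetStreams() resumes with the old SSN sequence. A hedged sketch of the driving side; the outcome flag and function name are illustrative, and the RE-CONFIG signalling itself is out of scope here:

    // Reacts to the peer's answer to an outgoing stream-reset request.
    void OnStreamResetOutcome(dcsctp::RRSendQueue& queue, bool peer_accepted) {
      if (peer_accepted) {
        queue.CommitResetStreams();    // paused streams restart at SSN 0
      } else {
        queue.RollbackResetStreams();  // paused messages keep the old SSNs
      }
    }
    // Before sending the request:
    //   queue.PrepareResetStreams(... /* e.g. {dcsctp::StreamID(1)} */);
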
EXPECT_THAT(chunk4.data.payload, + SizeIs(kTwoFragmentPacketSize - kOneFragmentPacketSize)); +} + +TEST_F(RRSendQueueTest, WillCycleInRoundRobinFashionBetweenStreams) { + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(2))); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, std::vector(3))); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, std::vector(4))); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, std::vector(5))); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, std::vector(6))); + buf_.Add(kNow, DcSctpMessage(StreamID(4), kPPID, std::vector(7))); + buf_.Add(kNow, DcSctpMessage(StreamID(4), kPPID, std::vector(8))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk2.data.payload, SizeIs(3)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(3)); + EXPECT_THAT(chunk3.data.payload, SizeIs(5)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(4)); + EXPECT_THAT(chunk4.data.payload, SizeIs(7)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk5, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk5.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk5.data.payload, SizeIs(2)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk6, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk6.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk6.data.payload, SizeIs(4)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk7, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk7.data.stream_id, StreamID(3)); + EXPECT_THAT(chunk7.data.payload, SizeIs(6)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk8, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk8.data.stream_id, StreamID(4)); + EXPECT_THAT(chunk8.data.payload, SizeIs(8)); +} + +TEST_F(RRSendQueueTest, DoesntTriggerOnBufferedAmountLowWhenSetToZero) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 0u); +} + +TEST_F(RRSendQueueTest, TriggersOnBufferedAmountAtZeroLowWhenSent) { + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 1u); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 0u); +} + +TEST_F(RRSendQueueTest, WillRetriggerOnBufferedAmountLowIfAddingMore) { + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.Add(kNow, 
DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 1u); + + // Should now trigger again, as buffer_amount went above the threshold. + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(1)); +} + +TEST_F(RRSendQueueTest, OnlyTriggersWhenTransitioningFromAboveToBelowOrEqual) { + buf_.SetBufferedAmountLowThreshold(StreamID(1), 1000); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(10))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 10u); + + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(10)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 0u); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(20))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 20u); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(20)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 0u); +} + +TEST_F(RRSendQueueTest, WillTriggerOnBufferedAmountLowSetAboveZero) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.SetBufferedAmountLowThreshold(StreamID(1), 700); + + std::vector payload(1000); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 900u); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 800u); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 700u); + + // Doesn't trigger when reducing even further. 
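The per-stream threshold semantics tested here mirror RTCDataChannel.bufferedAmountLowThreshold: the callback registered at queue construction fires only when buffered_amount(stream) moves from above the threshold to at or below it. Arming it is a single call; sketch, assuming the RRSendQueue API above:

    // Request a buffered-amount-low callback once stream 1 drains to <= 700 bytes.
    void ArmBufferedAmountLow(dcsctp::RRSendQueue& queue) {
      queue.SetBufferedAmountLowThreshold(dcsctp::StreamID(1), 700);
    }
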
+ EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 600u); +} + +TEST_F(RRSendQueueTest, WillRetriggerOnBufferedAmountLowSetAboveZero) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.SetBufferedAmountLowThreshold(StreamID(1), 700); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1000))); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, 400)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(400)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 600u); + + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(200))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 800u); + + // Will trigger again, as it went above the limit. + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, 200)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(200)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 600u); +} + +TEST_F(RRSendQueueTest, TriggersOnBufferedAmountLowOnThresholdChanged) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(100))); + + // Modifying the threshold, still under buffered_amount, should not trigger. + buf_.SetBufferedAmountLowThreshold(StreamID(1), 50); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 99); + + // When the threshold reaches buffered_amount, it will trigger. + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 100); + + // But not when it's set low again. + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 50); + + // But it will trigger when it overshoots. + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 150); + + // But not when it's set low again. + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 0); +} + +TEST_F(RRSendQueueTest, + OnTotalBufferedAmountLowDoesNotTriggerOnBufferFillingUp) { + EXPECT_CALL(on_total_buffered_amount_low_, Call).Times(0); + std::vector payload(kBufferedAmountLowThreshold - 1); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + // Will not trigger if going above but never below. + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, + std::vector(kOneFragmentPacketSize))); +} + +TEST_F(RRSendQueueTest, TriggersOnTotalBufferedAmountLowWhenCrossing) { + EXPECT_CALL(on_total_buffered_amount_low_, Call).Times(0); + std::vector payload(kBufferedAmountLowThreshold); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + // Reaches it. + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, std::vector(1))); + + // Drain it a bit - will trigger. 
+ EXPECT_CALL(on_total_buffered_amount_low_, Call).Times(1); + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); +} + +TEST_F(RRSendQueueTest, WillStayInAStreamAsLongAsThatMessageIsSending) { + buf_.Add(kNow, DcSctpMessage(StreamID(5), kPPID, std::vector(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(5)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + + // Next, it should pick a different stream. + + buf_.Add(kNow, + DcSctpMessage(StreamID(1), kPPID, + std::vector(kOneFragmentPacketSize * 2))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(kOneFragmentPacketSize)); + + // It should still stay on the Stream1 now, even if might be tempted to switch + // to this stream, as it's the stream following 5. + buf_.Add(kNow, DcSctpMessage(StreamID(6), kPPID, std::vector(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + + // After stream id 1 is complete, it's time to do stream 6. + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(6)); + EXPECT_THAT(chunk4.data.payload, SizeIs(1)); + + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); +} + +TEST_F(RRSendQueueTest, WillStayInStreamWhenOnlySmallFragmentRemaining) { + buf_.Add(kNow, + DcSctpMessage(StreamID(5), kPPID, + std::vector(kOneFragmentPacketSize * 2))); + buf_.Add(kNow, DcSctpMessage(StreamID(6), kPPID, std::vector(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(5)); + EXPECT_THAT(chunk1.data.payload, SizeIs(kOneFragmentPacketSize)); + + // Now assume that there will be a lot of previous chunks that need to be + // retransmitted, which fills up the next packet and there is little space + // left in the packet for new chunks. What it should NOT do right now is to + // try to send a message from StreamID 6. And it should not try to send a very + // small fragment from StreamID 5 either. So just skip this one. + EXPECT_FALSE(buf_.Produce(kNow, 8).has_value()); + + // When the next produce request comes with a large buffer to fill, continue + // sending from StreamID 5. + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(5)); + EXPECT_THAT(chunk2.data.payload, SizeIs(kOneFragmentPacketSize)); + + // Lastly, produce a message on StreamID 6. + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(6)); + EXPECT_THAT(chunk3.data.payload, SizeIs(1)); + + EXPECT_FALSE(buf_.Produce(kNow, 8).has_value()); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/tx/send_queue.h b/net/dcsctp/tx/send_queue.h index bb5aab2df8..877dbdda59 100644 --- a/net/dcsctp/tx/send_queue.h +++ b/net/dcsctp/tx/send_queue.h @@ -60,7 +60,10 @@ class SendQueue { // receiver that any partially received message fragments should be skipped. 
// This means that any remaining fragments in the Send Queue must be removed // as well so that they are not sent. - virtual void Discard(IsUnordered unordered, + // + // This function returns true if this message had unsent fragments still in + // the queue that were discarded, and false if there were no such fragments. + virtual bool Discard(IsUnordered unordered, StreamID stream_id, MID message_id) = 0; @@ -105,6 +108,20 @@ class SendQueue { // of data loss. However, data loss cannot be completely guaranteed when a // peer restarts. virtual void Reset() = 0; + + // Returns the amount of buffered data. This doesn't include packets that are + // e.g. inflight. + virtual size_t buffered_amount(StreamID stream_id) const = 0; + + // Returns the total amount of buffer data, for all streams. + virtual size_t total_buffered_amount() const = 0; + + // Returns the limit for the `OnBufferedAmountLow` event. Default value is 0. + virtual size_t buffered_amount_low_threshold(StreamID stream_id) const = 0; + + // Sets a limit for the `OnBufferedAmountLow` event. + virtual void SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) = 0; }; } // namespace dcsctp diff --git a/p2p/base/dtls_transport.cc b/p2p/base/dtls_transport.cc index 99ee0f1a16..76b94a8d79 100644 --- a/p2p/base/dtls_transport.cc +++ b/p2p/base/dtls_transport.cc @@ -15,6 +15,7 @@ #include #include "absl/memory/memory.h" +#include "api/dtls_transport_interface.h" #include "api/rtc_event_log/rtc_event_log.h" #include "logging/rtc_event_log/events/rtc_event_dtls_transport_state.h" #include "logging/rtc_event_log/events/rtc_event_dtls_writable_state.h" @@ -148,7 +149,7 @@ DtlsTransport::DtlsTransport(IceTransportInternal* ice_transport, DtlsTransport::~DtlsTransport() = default; -DtlsTransportState DtlsTransport::dtls_state() const { +webrtc::DtlsTransportState DtlsTransport::dtls_state() const { return dtls_state_; } @@ -218,7 +219,7 @@ bool DtlsTransport::GetDtlsRole(rtc::SSLRole* role) const { } bool DtlsTransport::GetSslCipherSuite(int* cipher) { - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { return false; } @@ -276,7 +277,7 @@ bool DtlsTransport::SetRemoteFingerprint(const std::string& digest_alg, remote_fingerprint_value_.size(), &err)) { RTC_LOG(LS_ERROR) << ToString() << ": Couldn't set DTLS certificate digest."; - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); // If the error is "verification failed", don't return false, because // this means the fingerprint was formatted correctly but didn't match // the certificate from the DTLS handshake. Thus the DTLS state should go @@ -290,12 +291,12 @@ bool DtlsTransport::SetRemoteFingerprint(const std::string& digest_alg, // create a new one, resetting our state. 
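On the send_queue.h hunk above: Discard() now reports whether any unsent fragments were actually removed, so a caller can skip follow-up work when there was nothing left to drop. A sketch against the abstract SendQueue interface only; the FORWARD-TSN remark is an assumption about how a caller might use the result, not something this change adds:

    // Abandons the rest of a message.
    void Abandon(dcsctp::SendQueue& queue, dcsctp::IsUnordered unordered,
                 dcsctp::StreamID stream_id, dcsctp::MID message_id) {
      if (queue.Discard(unordered, stream_id, message_id)) {
        // Unsent fragments were dropped; the transmission logic may need to
        // tell the peer to skip them (e.g. FORWARD-TSN, handled by the caller).
      }
    }
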
if (dtls_ && fingerprint_changing) { dtls_.reset(nullptr); - set_dtls_state(DTLS_TRANSPORT_NEW); + set_dtls_state(webrtc::DtlsTransportState::kNew); set_writable(false); } if (!SetupDtls()) { - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); return false; } @@ -373,7 +374,7 @@ bool DtlsTransport::SetupDtls() { } bool DtlsTransport::GetSrtpCryptoSuite(int* cipher) { - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { return false; } @@ -381,7 +382,7 @@ bool DtlsTransport::GetSrtpCryptoSuite(int* cipher) { } bool DtlsTransport::GetSslVersionBytes(int* version) const { - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { return false; } @@ -399,14 +400,14 @@ int DtlsTransport::SendPacket(const char* data, } switch (dtls_state()) { - case DTLS_TRANSPORT_NEW: + case webrtc::DtlsTransportState::kNew: // Can't send data until the connection is active. // TODO(ekr@rtfm.com): assert here if dtls_ is NULL? return -1; - case DTLS_TRANSPORT_CONNECTING: + case webrtc::DtlsTransportState::kConnecting: // Can't send data until the connection is active. return -1; - case DTLS_TRANSPORT_CONNECTED: + case webrtc::DtlsTransportState::kConnected: if (flags & PF_SRTP_BYPASS) { RTC_DCHECK(!srtp_ciphers_.empty()); if (!IsRtpPacket(data, size)) { @@ -419,17 +420,17 @@ int DtlsTransport::SendPacket(const char* data, ? static_cast(size) : -1; } - case DTLS_TRANSPORT_FAILED: + case webrtc::DtlsTransportState::kFailed: // Can't send anything when we're failed. - RTC_LOG(LS_ERROR) - << ToString() - << ": Couldn't send packet due to DTLS_TRANSPORT_FAILED."; + RTC_LOG(LS_ERROR) << ToString() + << ": Couldn't send packet due to " + "webrtc::DtlsTransportState::kFailed."; return -1; - case DTLS_TRANSPORT_CLOSED: + case webrtc::DtlsTransportState::kClosed: // Can't send anything when we're closed. - RTC_LOG(LS_ERROR) - << ToString() - << ": Couldn't send packet due to DTLS_TRANSPORT_CLOSED."; + RTC_LOG(LS_ERROR) << ToString() + << ": Couldn't send packet due to " + "webrtc::DtlsTransportState::kClosed."; return -1; default: RTC_NOTREACHED(); @@ -508,27 +509,30 @@ void DtlsTransport::OnWritableState(rtc::PacketTransportInternal* transport) { } switch (dtls_state()) { - case DTLS_TRANSPORT_NEW: + case webrtc::DtlsTransportState::kNew: MaybeStartDtls(); break; - case DTLS_TRANSPORT_CONNECTED: + case webrtc::DtlsTransportState::kConnected: // Note: SignalWritableState fired by set_writable. set_writable(ice_transport_->writable()); break; - case DTLS_TRANSPORT_CONNECTING: + case webrtc::DtlsTransportState::kConnecting: // Do nothing. break; - case DTLS_TRANSPORT_FAILED: + case webrtc::DtlsTransportState::kFailed: // Should not happen. Do nothing. - RTC_LOG(LS_ERROR) - << ToString() - << ": OnWritableState() called in state DTLS_TRANSPORT_FAILED."; + RTC_LOG(LS_ERROR) << ToString() + << ": OnWritableState() called in state " + "webrtc::DtlsTransportState::kFailed."; break; - case DTLS_TRANSPORT_CLOSED: + case webrtc::DtlsTransportState::kClosed: // Should not happen. Do nothing. 
- RTC_LOG(LS_ERROR) - << ToString() - << ": OnWritableState() called in state DTLS_TRANSPORT_CLOSED."; + RTC_LOG(LS_ERROR) << ToString() + << ": OnWritableState() called in state " + "webrtc::DtlsTransportState::kClosed."; + break; + case webrtc::DtlsTransportState::kNumValues: + RTC_NOTREACHED(); break; } } @@ -540,7 +544,7 @@ void DtlsTransport::OnReceivingState(rtc::PacketTransportInternal* transport) { << ": ice_transport " "receiving state changed to " << ice_transport_->receiving(); - if (!dtls_active_ || dtls_state() == DTLS_TRANSPORT_CONNECTED) { + if (!dtls_active_ || dtls_state() == webrtc::DtlsTransportState::kConnected) { // Note: SignalReceivingState fired by set_receiving. set_receiving(ice_transport_->receiving()); } @@ -562,7 +566,7 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, } switch (dtls_state()) { - case DTLS_TRANSPORT_NEW: + case webrtc::DtlsTransportState::kNew: if (dtls_) { RTC_LOG(LS_INFO) << ToString() << ": Packet received before DTLS started."; @@ -591,8 +595,8 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, } break; - case DTLS_TRANSPORT_CONNECTING: - case DTLS_TRANSPORT_CONNECTED: + case webrtc::DtlsTransportState::kConnecting: + case webrtc::DtlsTransportState::kConnected: // We should only get DTLS or SRTP packets; STUN's already been demuxed. // Is this potentially a DTLS packet? if (IsDtlsPacket(data, size)) { @@ -602,7 +606,7 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, } } else { // Not a DTLS packet; our handshake should be complete by now. - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { RTC_LOG(LS_ERROR) << ToString() << ": Received non-DTLS packet before DTLS " "complete."; @@ -623,8 +627,9 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, SignalReadPacket(this, data, size, packet_time_us, PF_SRTP_BYPASS); } break; - case DTLS_TRANSPORT_FAILED: - case DTLS_TRANSPORT_CLOSED: + case webrtc::DtlsTransportState::kFailed: + case webrtc::DtlsTransportState::kClosed: + case webrtc::DtlsTransportState::kNumValues: // This shouldn't be happening. Drop the packet. break; } @@ -652,7 +657,7 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { if (dtls_->GetState() == rtc::SS_OPEN) { // The check for OPEN shouldn't be necessary but let's make // sure we don't accidentally frob the state if it's closed. - set_dtls_state(DTLS_TRANSPORT_CONNECTED); + set_dtls_state(webrtc::DtlsTransportState::kConnected); set_writable(true); } } @@ -671,7 +676,7 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { // Remote peer shut down the association with no error. RTC_LOG(LS_INFO) << ToString() << ": DTLS transport closed by remote"; set_writable(false); - set_dtls_state(DTLS_TRANSPORT_CLOSED); + set_dtls_state(webrtc::DtlsTransportState::kClosed); SignalClosed(this); } else if (ret == rtc::SR_ERROR) { // Remote peer shut down the association with an error. 
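With the unscoped cricket DTLS_TRANSPORT_* constants removed (see the dtls_transport_internal.h hunk below), every switch over the state names the scoped webrtc::DtlsTransportState values, including kNumValues. A sketch of that pattern, assuming api/dtls_transport_interface.h and rtc_base/checks.h; the helper name is illustrative, not part of this change:

    const char* DtlsStateToString(webrtc::DtlsTransportState state) {
      switch (state) {
        case webrtc::DtlsTransportState::kNew:
          return "new";
        case webrtc::DtlsTransportState::kConnecting:
          return "connecting";
        case webrtc::DtlsTransportState::kConnected:
          return "connected";
        case webrtc::DtlsTransportState::kClosed:
          return "closed";
        case webrtc::DtlsTransportState::kFailed:
          return "failed";
        case webrtc::DtlsTransportState::kNumValues:
          break;  // Not a real state.
      }
      RTC_NOTREACHED();
      return "unknown";
    }
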
@@ -680,7 +685,7 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { << ": Closed by remote with DTLS transport error, code=" << read_error; set_writable(false); - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); SignalClosed(this); } } while (ret == rtc::SR_SUCCESS); @@ -690,10 +695,10 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { set_writable(false); if (!err) { RTC_LOG(LS_INFO) << ToString() << ": DTLS transport closed"; - set_dtls_state(DTLS_TRANSPORT_CLOSED); + set_dtls_state(webrtc::DtlsTransportState::kClosed); } else { RTC_LOG(LS_INFO) << ToString() << ": DTLS transport error, code=" << err; - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); } } } @@ -717,11 +722,11 @@ void DtlsTransport::MaybeStartDtls() { // configuration and therefore are our fault. RTC_NOTREACHED() << "StartSSL failed."; RTC_LOG(LS_ERROR) << ToString() << ": Couldn't start DTLS handshake"; - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); return; } RTC_LOG(LS_INFO) << ToString() << ": DtlsTransport: Started DTLS handshake"; - set_dtls_state(DTLS_TRANSPORT_CONNECTING); + set_dtls_state(webrtc::DtlsTransportState::kConnecting); // Now that the handshake has started, we can process a cached ClientHello // (if one exists). if (cached_client_hello_.size()) { @@ -789,16 +794,17 @@ void DtlsTransport::set_writable(bool writable) { SignalWritableState(this); } -void DtlsTransport::set_dtls_state(DtlsTransportState state) { +void DtlsTransport::set_dtls_state(webrtc::DtlsTransportState state) { if (dtls_state_ == state) { return; } if (event_log_) { - event_log_->Log(std::make_unique( - ConvertDtlsTransportState(state))); + event_log_->Log( + std::make_unique(state)); } - RTC_LOG(LS_VERBOSE) << ToString() << ": set_dtls_state from:" << dtls_state_ - << " to " << state; + RTC_LOG(LS_VERBOSE) << ToString() << ": set_dtls_state from:" + << static_cast(dtls_state_) << " to " + << static_cast(state); dtls_state_ = state; SendDtlsState(this, state); } diff --git a/p2p/base/dtls_transport.h b/p2p/base/dtls_transport.h index f37e468571..0296a742c0 100644 --- a/p2p/base/dtls_transport.h +++ b/p2p/base/dtls_transport.h @@ -16,6 +16,7 @@ #include #include "api/crypto/crypto_options.h" +#include "api/dtls_transport_interface.h" #include "api/sequence_checker.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/ice_transport_internal.h" @@ -109,7 +110,7 @@ class DtlsTransport : public DtlsTransportInternal { ~DtlsTransport() override; - DtlsTransportState dtls_state() const override; + webrtc::DtlsTransportState dtls_state() const override; const std::string& transport_name() const override; int component() const override; @@ -219,12 +220,12 @@ class DtlsTransport : public DtlsTransportInternal { void set_receiving(bool receiving); void set_writable(bool writable); // Sets the DTLS state, signaling if necessary. - void set_dtls_state(DtlsTransportState state); + void set_dtls_state(webrtc::DtlsTransportState state); webrtc::SequenceChecker thread_checker_; const int component_; - DtlsTransportState dtls_state_ = DTLS_TRANSPORT_NEW; + webrtc::DtlsTransportState dtls_state_ = webrtc::DtlsTransportState::kNew; // Underlying ice_transport, not owned by this class. 
IceTransportInternal* const ice_transport_; std::unique_ptr dtls_; // The DTLS stream diff --git a/p2p/base/dtls_transport_internal.cc b/p2p/base/dtls_transport_internal.cc index dd23b1baa7..6997dbc702 100644 --- a/p2p/base/dtls_transport_internal.cc +++ b/p2p/base/dtls_transport_internal.cc @@ -16,22 +16,4 @@ DtlsTransportInternal::DtlsTransportInternal() = default; DtlsTransportInternal::~DtlsTransportInternal() = default; -webrtc::DtlsTransportState ConvertDtlsTransportState( - cricket::DtlsTransportState cricket_state) { - switch (cricket_state) { - case DtlsTransportState::DTLS_TRANSPORT_NEW: - return webrtc::DtlsTransportState::kNew; - case DtlsTransportState::DTLS_TRANSPORT_CONNECTING: - return webrtc::DtlsTransportState::kConnecting; - case DtlsTransportState::DTLS_TRANSPORT_CONNECTED: - return webrtc::DtlsTransportState::kConnected; - case DtlsTransportState::DTLS_TRANSPORT_CLOSED: - return webrtc::DtlsTransportState::kClosed; - case DtlsTransportState::DTLS_TRANSPORT_FAILED: - return webrtc::DtlsTransportState::kFailed; - } - RTC_NOTREACHED(); - return webrtc::DtlsTransportState::kNew; -} - } // namespace cricket diff --git a/p2p/base/dtls_transport_internal.h b/p2p/base/dtls_transport_internal.h index ff71196f34..0b26a7fd7a 100644 --- a/p2p/base/dtls_transport_internal.h +++ b/p2p/base/dtls_transport_internal.h @@ -32,24 +32,6 @@ namespace cricket { -enum DtlsTransportState { - // Haven't started negotiating. - DTLS_TRANSPORT_NEW = static_cast(webrtc::DtlsTransportState::kNew), - // Have started negotiating. - DTLS_TRANSPORT_CONNECTING = - static_cast(webrtc::DtlsTransportState::kConnecting), - // Negotiated, and has a secure connection. - DTLS_TRANSPORT_CONNECTED = - static_cast(webrtc::DtlsTransportState::kConnected), - // Transport is closed. - DTLS_TRANSPORT_CLOSED = static_cast(webrtc::DtlsTransportState::kClosed), - // Failed due to some error in the handshake process. - DTLS_TRANSPORT_FAILED = static_cast(webrtc::DtlsTransportState::kFailed), -}; - -webrtc::DtlsTransportState ConvertDtlsTransportState( - cricket::DtlsTransportState cricket_state); - enum PacketFlags { PF_NORMAL = 0x00, // A normal packet. PF_SRTP_BYPASS = 0x01, // An encrypted SRTP packet; bypass any additional @@ -66,7 +48,7 @@ class DtlsTransportInternal : public rtc::PacketTransportInternal { public: ~DtlsTransportInternal() override; - virtual DtlsTransportState dtls_state() const = 0; + virtual webrtc::DtlsTransportState dtls_state() const = 0; virtual int component() const = 0; @@ -117,24 +99,25 @@ class DtlsTransportInternal : public rtc::PacketTransportInternal { // Expose the underneath IceTransport. virtual IceTransportInternal* ice_transport() = 0; - // F: void(DtlsTransportInternal*, const DtlsTransportState) + // F: void(DtlsTransportInternal*, const webrtc::DtlsTransportState) template - void SubscribeDtlsState(F&& callback) { - dtls_state_callback_list_.AddReceiver(std::forward(callback)); + void SubscribeDtlsTransportState(F&& callback) { + dtls_transport_state_callback_list_.AddReceiver(std::forward(callback)); } template - void SubscribeDtlsState(const void* id, F&& callback) { - dtls_state_callback_list_.AddReceiver(id, std::forward(callback)); + void SubscribeDtlsTransportState(const void* id, F&& callback) { + dtls_transport_state_callback_list_.AddReceiver(id, + std::forward(callback)); } // Unsubscribe the subscription with given id. 
- void UnsubscribeDtlsState(const void* id) { - dtls_state_callback_list_.RemoveReceivers(id); + void UnsubscribeDtlsTransportState(const void* id) { + dtls_transport_state_callback_list_.RemoveReceivers(id); } void SendDtlsState(DtlsTransportInternal* transport, - DtlsTransportState state) { - dtls_state_callback_list_.Send(transport, state); + webrtc::DtlsTransportState state) { + dtls_transport_state_callback_list_.Send(transport, state); } // Emitted whenever the Dtls handshake failed on some transport channel. @@ -155,8 +138,8 @@ class DtlsTransportInternal : public rtc::PacketTransportInternal { RTC_DISALLOW_COPY_AND_ASSIGN(DtlsTransportInternal); webrtc::CallbackList dtls_handshake_error_callback_list_; - webrtc::CallbackList - dtls_state_callback_list_; + webrtc::CallbackList + dtls_transport_state_callback_list_; }; } // namespace cricket diff --git a/p2p/base/dtls_transport_unittest.cc b/p2p/base/dtls_transport_unittest.cc index a2ad213435..f01566d263 100644 --- a/p2p/base/dtls_transport_unittest.cc +++ b/p2p/base/dtls_transport_unittest.cc @@ -15,6 +15,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "p2p/base/fake_ice_transport.h" #include "p2p/base/packet_transport_internal.h" #include "rtc_base/checks.h" @@ -668,18 +669,19 @@ class DtlsEventOrderingTest // Sanity check that the handshake hasn't already finished. EXPECT_FALSE(client1_.dtls_transport()->IsDtlsConnected() || client1_.dtls_transport()->dtls_state() == - DTLS_TRANSPORT_FAILED); + webrtc::DtlsTransportState::kFailed); EXPECT_TRUE_SIMULATED_WAIT( client1_.dtls_transport()->IsDtlsConnected() || client1_.dtls_transport()->dtls_state() == - DTLS_TRANSPORT_FAILED, + webrtc::DtlsTransportState::kFailed, kTimeout, fake_clock_); break; } } - DtlsTransportState expected_final_state = - valid_fingerprint ? DTLS_TRANSPORT_CONNECTED : DTLS_TRANSPORT_FAILED; + webrtc::DtlsTransportState expected_final_state = + valid_fingerprint ? webrtc::DtlsTransportState::kConnected + : webrtc::DtlsTransportState::kFailed; EXPECT_EQ_SIMULATED_WAIT(expected_final_state, client1_.dtls_transport()->dtls_state(), kTimeout, fake_clock_); diff --git a/p2p/base/fake_dtls_transport.h b/p2p/base/fake_dtls_transport.h index 0628c4ce00..e02755c68f 100644 --- a/p2p/base/fake_dtls_transport.h +++ b/p2p/base/fake_dtls_transport.h @@ -17,6 +17,7 @@ #include #include "api/crypto/crypto_options.h" +#include "api/dtls_transport_interface.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/fake_ice_transport.h" #include "rtc_base/fake_ssl_identity.h" @@ -89,7 +90,7 @@ class FakeDtlsTransport : public DtlsTransportInternal { ice_transport_->SetReceiving(receiving); set_receiving(receiving); } - void SetDtlsState(DtlsTransportState state) { + void SetDtlsState(webrtc::DtlsTransportState state) { dtls_state_ = state; SendDtlsState(this, dtls_state_); } @@ -121,7 +122,7 @@ class FakeDtlsTransport : public DtlsTransportInternal { if (!dtls_role_) { dtls_role_ = std::move(rtc::SSL_CLIENT); } - SetDtlsState(DTLS_TRANSPORT_CONNECTED); + SetDtlsState(webrtc::DtlsTransportState::kConnected); ice_transport_->SetDestination( static_cast(dest->ice_transport()), asymmetric); } else { @@ -133,7 +134,7 @@ class FakeDtlsTransport : public DtlsTransportInternal { } // Fake DtlsTransportInternal implementation. 
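Subscribers follow the rename from SubscribeDtlsState to SubscribeDtlsTransportState and now receive webrtc::DtlsTransportState. A sketch of a subscription keyed by an owner pointer so it can be removed later; the Observer type is illustrative, not part of this change:

    struct Observer {
      void OnDtlsState(cricket::DtlsTransportInternal* transport,
                       webrtc::DtlsTransportState state) {
        // e.g. tear down on kFailed or kClosed.
      }
    };

    void Attach(cricket::DtlsTransportInternal* transport, Observer* observer) {
      transport->SubscribeDtlsTransportState(
          observer,
          [observer](cricket::DtlsTransportInternal* t,
                     webrtc::DtlsTransportState state) {
            observer->OnDtlsState(t, state);
          });
    }
    // On teardown: transport->UnsubscribeDtlsTransportState(observer);
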
- DtlsTransportState dtls_state() const override { return dtls_state_; } + webrtc::DtlsTransportState dtls_state() const override { return dtls_state_; } const std::string& transport_name() const override { return transport_name_; } int component() const override { return component_; } const rtc::SSLFingerprint& dtls_fingerprint() const { @@ -295,7 +296,7 @@ class FakeDtlsTransport : public DtlsTransportInternal { int crypto_suite_ = rtc::SRTP_AES128_CM_SHA1_80; absl::optional ssl_cipher_suite_; - DtlsTransportState dtls_state_ = DTLS_TRANSPORT_NEW; + webrtc::DtlsTransportState dtls_state_ = webrtc::DtlsTransportState::kNew; bool receiving_ = false; bool writable_ = false; diff --git a/p2p/base/fake_port_allocator.h b/p2p/base/fake_port_allocator.h index 9e0e333041..efe9a53a16 100644 --- a/p2p/base/fake_port_allocator.h +++ b/p2p/base/fake_port_allocator.h @@ -238,10 +238,19 @@ class FakePortAllocator : public cricket::PortAllocator { bool initialized() const { return initialized_; } + // For testing: Manipulate MdnsObfuscationEnabled() + bool MdnsObfuscationEnabled() const override { + return mdns_obfuscation_enabled_; + } + void SetMdnsObfuscationEnabledForTesting(bool enabled) { + mdns_obfuscation_enabled_ = enabled; + } + private: rtc::Thread* network_thread_; rtc::PacketSocketFactory* factory_; std::unique_ptr owned_factory_; + bool mdns_obfuscation_enabled_ = false; }; } // namespace cricket diff --git a/p2p/base/p2p_transport_channel.cc b/p2p/base/p2p_transport_channel.cc index eff79ab9be..836721c151 100644 --- a/p2p/base/p2p_transport_channel.cc +++ b/p2p/base/p2p_transport_channel.cc @@ -43,6 +43,7 @@ #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" @@ -193,6 +194,7 @@ P2PTransportChannel::P2PTransportChannel( true /* presume_writable_when_fully_relayed */, REGATHER_ON_FAILED_NETWORKS_INTERVAL, RECEIVING_SWITCHING_DELAY) { + TRACE_EVENT0("webrtc", "P2PTransportChannel::P2PTransportChannel"); RTC_DCHECK(allocator_ != nullptr); weak_ping_interval_ = GetWeakPingIntervalInFieldTrial(); // Validate IceConfig even for mostly built-in constant default values in case @@ -247,6 +249,7 @@ P2PTransportChannel::P2PTransportChannel( ice_controller_factory) {} P2PTransportChannel::~P2PTransportChannel() { + TRACE_EVENT0("webrtc", "P2PTransportChannel::~P2PTransportChannel"); RTC_DCHECK_RUN_ON(network_thread_); std::vector copy(connections().begin(), connections().end()); for (Connection* con : copy) { diff --git a/p2p/base/port.cc b/p2p/base/port.cc index d24d40f957..a03a0d6a66 100644 --- a/p2p/base/port.cc +++ b/p2p/base/port.cc @@ -33,6 +33,7 @@ #include "rtc_base/string_utils.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/third_party/base64/base64.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" namespace { @@ -104,16 +105,6 @@ std::string Port::ComputeFoundation(const std::string& type, return rtc::ToString(rtc::ComputeCrc32(sb.Release())); } -CandidateStats::CandidateStats() = default; - -CandidateStats::CandidateStats(const CandidateStats&) = default; - -CandidateStats::CandidateStats(Candidate candidate) { - this->candidate = candidate; -} - -CandidateStats::~CandidateStats() = default; - Port::Port(rtc::Thread* thread, const std::string& type, rtc::PacketSocketFactory* factory, @@ -836,6 +827,7 @@ void Port::Prune() { // 
Call to stop any currently pending operations from running. void Port::CancelPendingTasks() { + TRACE_EVENT0("webrtc", "Port::CancelPendingTasks"); RTC_DCHECK_RUN_ON(thread_); thread_->Clear(this); } diff --git a/p2p/base/port.h b/p2p/base/port.h index 7759ade33b..2c18f1adeb 100644 --- a/p2p/base/port.h +++ b/p2p/base/port.h @@ -99,14 +99,24 @@ class StunStats { // Stats that we can return about a candidate. class CandidateStats { public: - CandidateStats(); - explicit CandidateStats(Candidate candidate); - CandidateStats(const CandidateStats&); - ~CandidateStats(); + CandidateStats() = default; + CandidateStats(const CandidateStats&) = default; + CandidateStats(CandidateStats&&) = default; + CandidateStats(Candidate candidate, + absl::optional stats = absl::nullopt) + : candidate_(std::move(candidate)), stun_stats_(std::move(stats)) {} + ~CandidateStats() = default; - Candidate candidate; + CandidateStats& operator=(const CandidateStats& other) = default; + + const Candidate& candidate() const { return candidate_; } + + const absl::optional& stun_stats() const { return stun_stats_; } + + private: + Candidate candidate_; // STUN port stats if this candidate is a STUN candidate. - absl::optional stun_stats; + absl::optional stun_stats_; }; typedef std::vector CandidateStatsList; diff --git a/p2p/base/port_allocator.cc b/p2p/base/port_allocator.cc index b13896c4bc..d8ff637e2c 100644 --- a/p2p/base/port_allocator.cc +++ b/p2p/base/port_allocator.cc @@ -317,7 +317,8 @@ Candidate PortAllocator::SanitizeCandidate(const Candidate& c) const { // For a local host candidate, we need to conceal its IP address candidate if // the mDNS obfuscation is enabled. bool use_hostname_address = - c.type() == LOCAL_PORT_TYPE && MdnsObfuscationEnabled(); + (c.type() == LOCAL_PORT_TYPE || c.type() == PRFLX_PORT_TYPE) && + MdnsObfuscationEnabled(); // If adapter enumeration is disabled or host candidates are disabled, // clear the raddr of STUN candidates to avoid local address leakage. 
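CandidateStats is now read through accessors, with the candidate and optional STUN stats supplied at construction; this is what the basic_port_allocator.cc change further below relies on. A sketch, assuming the cricket types from port.h and absl/types/optional.h:

    // Builds a stats entry from an already-sanitized candidate.
    cricket::CandidateStats MakeCandidateStats(
        const cricket::Candidate& sanitized,
        absl::optional<cricket::StunStats> stun_stats) {
      cricket::CandidateStats stats(sanitized, std::move(stun_stats));
      // Readers use stats.candidate() and stats.stun_stats() instead of the
      // former public members.
      return stats;
    }
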
bool filter_stun_related_address = diff --git a/p2p/base/port_allocator_unittest.cc b/p2p/base/port_allocator_unittest.cc index 70946a3d81..cbac5cccaf 100644 --- a/p2p/base/port_allocator_unittest.cc +++ b/p2p/base/port_allocator_unittest.cc @@ -305,3 +305,56 @@ TEST_F(PortAllocatorTest, RestrictIceCredentialsChange) { credentials[0].pwd)); allocator_->DiscardCandidatePool(); } + +// Constants for testing candidates +const char kIpv4Address[] = "12.34.56.78"; +const char kIpv4AddressWithPort[] = "12.34.56.78:443"; + +TEST_F(PortAllocatorTest, SanitizeEmptyCandidateDefaultConfig) { + cricket::Candidate input; + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizeIpv4CandidateDefaultConfig) { + cricket::Candidate input(1, "udp", rtc::SocketAddress(kIpv4Address, 443), 1, + "username", "password", cricket::LOCAL_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_EQ(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ(kIpv4Address, output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizeIpv4CandidateMdnsObfuscationEnabled) { + allocator_->SetMdnsObfuscationEnabledForTesting(true); + cricket::Candidate input(1, "udp", rtc::SocketAddress(kIpv4Address, 443), 1, + "username", "password", cricket::LOCAL_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_NE(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizePrflxCandidateMdnsObfuscationEnabled) { + allocator_->SetMdnsObfuscationEnabledForTesting(true); + // Create the candidate from an IP literal. This populates the hostname. + cricket::Candidate input(1, "udp", rtc::SocketAddress(kIpv4Address, 443), 1, + "username", "password", cricket::PRFLX_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_NE(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizeIpv4NonLiteralMdnsObfuscationEnabled) { + // Create the candidate with an empty hostname. 
+ allocator_->SetMdnsObfuscationEnabledForTesting(true); + rtc::IPAddress ip; + EXPECT_TRUE(IPFromString(kIpv4Address, &ip)); + cricket::Candidate input(1, "udp", rtc::SocketAddress(ip, 443), 1, "username", + "password", cricket::LOCAL_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_NE(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} diff --git a/p2p/client/basic_port_allocator.cc b/p2p/client/basic_port_allocator.cc index 7e1f970fad..1d38a4c19f 100644 --- a/p2p/client/basic_port_allocator.cc +++ b/p2p/client/basic_port_allocator.cc @@ -12,12 +12,14 @@ #include #include +#include #include #include #include #include #include "absl/algorithm/container.h" +#include "absl/memory/memory.h" #include "p2p/base/basic_packet_socket_factory.h" #include "p2p/base/port.h" #include "p2p/base/stun_port.h" @@ -27,6 +29,8 @@ #include "rtc_base/checks.h" #include "rtc_base/helpers.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" @@ -35,15 +39,6 @@ using rtc::CreateRandomId; namespace cricket { namespace { -enum { - MSG_CONFIG_START, - MSG_CONFIG_READY, - MSG_ALLOCATE, - MSG_ALLOCATION_PHASE, - MSG_SEQUENCEOBJECTS_CREATED, - MSG_CONFIG_STOP, -}; - const int PHASE_UDP = 0; const int PHASE_RELAY = 1; const int PHASE_TCP = 2; @@ -268,16 +263,18 @@ BasicPortAllocatorSession::BasicPortAllocatorSession( network_manager_started_(false), allocation_sequences_created_(false), turn_port_prune_policy_(allocator->turn_port_prune_policy()) { + TRACE_EVENT0("webrtc", + "BasicPortAllocatorSession::BasicPortAllocatorSession"); allocator_->network_manager()->SignalNetworksChanged.connect( this, &BasicPortAllocatorSession::OnNetworksChanged); allocator_->network_manager()->StartUpdating(); } BasicPortAllocatorSession::~BasicPortAllocatorSession() { + TRACE_EVENT0("webrtc", + "BasicPortAllocatorSession::~BasicPortAllocatorSession"); RTC_DCHECK_RUN_ON(network_thread_); allocator_->network_manager()->StopUpdating(); - if (network_thread_ != NULL) - network_thread_->Clear(this); for (uint32_t i = 0; i < sequences_.size(); ++i) { // AllocationSequence should clear it's map entry for turn ports before @@ -289,8 +286,7 @@ BasicPortAllocatorSession::~BasicPortAllocatorSession() { for (it = ports_.begin(); it != ports_.end(); it++) delete it->port(); - for (uint32_t i = 0; i < configs_.size(); ++i) - delete configs_[i]; + configs_.clear(); for (uint32_t i = 0; i < sequences_.size(); ++i) delete sequences_[i]; @@ -370,7 +366,8 @@ void BasicPortAllocatorSession::StartGettingPorts() { socket_factory_ = owned_socket_factory_.get(); } - network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_START); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this] { GetPortConfigurations(); })); RTC_LOG(LS_INFO) << "Start getting ports with turn_port_prune_policy " << turn_port_prune_policy_; @@ -386,11 +383,12 @@ void BasicPortAllocatorSession::StopGettingPorts() { void BasicPortAllocatorSession::ClearGettingPorts() { RTC_DCHECK_RUN_ON(network_thread_); - network_thread_->Clear(this, MSG_ALLOCATE); + ++allocation_epoch_; for (uint32_t i = 0; i < sequences_.size(); ++i) { sequences_[i]->Stop(); } - network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_STOP); + network_thread_->PostTask( + webrtc::ToQueuedTask(network_safety_, [this] { 
OnConfigStop(); })); state_ = SessionState::CLEARED; } @@ -489,8 +487,10 @@ void BasicPortAllocatorSession::GetCandidateStatsFromReadyPorts( for (auto* port : ports) { auto candidates = port->Candidates(); for (const auto& candidate : candidates) { - CandidateStats candidate_stats(allocator_->SanitizeCandidate(candidate)); - port->GetStunStats(&candidate_stats.stun_stats); + absl::optional stun_stats; + port->GetStunStats(&stun_stats); + CandidateStats candidate_stats(allocator_->SanitizeCandidate(candidate), + std::move(stun_stats)); candidate_stats_list->push_back(std::move(candidate_stats)); } } @@ -574,28 +574,6 @@ bool BasicPortAllocatorSession::CandidatesAllocationDone() const { ports_, [](const PortData& port) { return port.inprogress(); }); } -void BasicPortAllocatorSession::OnMessage(rtc::Message* message) { - switch (message->message_id) { - case MSG_CONFIG_START: - GetPortConfigurations(); - break; - case MSG_CONFIG_READY: - OnConfigReady(static_cast(message->pdata)); - break; - case MSG_ALLOCATE: - OnAllocate(); - break; - case MSG_SEQUENCEOBJECTS_CREATED: - OnAllocationSequenceObjectsCreated(); - break; - case MSG_CONFIG_STOP: - OnConfigStop(); - break; - default: - RTC_NOTREACHED(); - } -} - void BasicPortAllocatorSession::UpdateIceParametersInternal() { RTC_DCHECK_RUN_ON(network_thread_); for (PortData& port : ports_) { @@ -607,26 +585,35 @@ void BasicPortAllocatorSession::UpdateIceParametersInternal() { void BasicPortAllocatorSession::GetPortConfigurations() { RTC_DCHECK_RUN_ON(network_thread_); - PortConfiguration* config = - new PortConfiguration(allocator_->stun_servers(), username(), password()); + auto config = std::make_unique(allocator_->stun_servers(), + username(), password()); for (const RelayServerConfig& turn_server : allocator_->turn_servers()) { config->AddRelay(turn_server); } - ConfigReady(config); + ConfigReady(std::move(config)); } void BasicPortAllocatorSession::ConfigReady(PortConfiguration* config) { RTC_DCHECK_RUN_ON(network_thread_); - network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_READY, config); + ConfigReady(absl::WrapUnique(config)); +} + +void BasicPortAllocatorSession::ConfigReady( + std::unique_ptr config) { + RTC_DCHECK_RUN_ON(network_thread_); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this, config = std::move(config)]() mutable { + OnConfigReady(std::move(config)); + })); } // Adds a configuration to the list. 
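Port-configuration ownership is now explicit: code overriding GetPortConfigurations() hands the result to ConfigReady() as a std::unique_ptr, while the raw-pointer overload remains only as a deprecated shim for downstream callers. A sketch; MyPortAllocatorSession and the empty STUN server set are illustrative:

    // Inside a hypothetical BasicPortAllocatorSession subclass.
    void MyPortAllocatorSession::GetPortConfigurations() {
      auto config = std::make_unique<cricket::PortConfiguration>(
          cricket::ServerAddresses(), username(), password());
      // config->AddRelay(turn_server);  // optional TURN servers
      ConfigReady(std::move(config));   // ownership passes to the session
    }
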
-void BasicPortAllocatorSession::OnConfigReady(PortConfiguration* config) { +void BasicPortAllocatorSession::OnConfigReady( + std::unique_ptr config) { RTC_DCHECK_RUN_ON(network_thread_); - if (config) { - configs_.push_back(config); - } + if (config) + configs_.push_back(std::move(config)); AllocatePorts(); } @@ -664,11 +651,16 @@ void BasicPortAllocatorSession::OnConfigStop() { void BasicPortAllocatorSession::AllocatePorts() { RTC_DCHECK_RUN_ON(network_thread_); - network_thread_->Post(RTC_FROM_HERE, this, MSG_ALLOCATE); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this, allocation_epoch = allocation_epoch_] { + OnAllocate(allocation_epoch); + })); } -void BasicPortAllocatorSession::OnAllocate() { +void BasicPortAllocatorSession::OnAllocate(int allocation_epoch) { RTC_DCHECK_RUN_ON(network_thread_); + if (allocation_epoch != allocation_epoch_) + return; if (network_manager_started_ && !IsStopped()) { bool disable_equivalent_phases = true; @@ -774,7 +766,8 @@ void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent) { done_signal_needed = true; } else { RTC_LOG(LS_INFO) << "Allocate ports on " << networks.size() << " networks"; - PortConfiguration* config = configs_.empty() ? nullptr : configs_.back(); + PortConfiguration* config = + configs_.empty() ? nullptr : configs_.back().get(); for (uint32_t i = 0; i < networks.size(); ++i) { uint32_t sequence_flags = flags(); if ((sequence_flags & DISABLE_ALL_PHASES) == DISABLE_ALL_PHASES) { @@ -814,9 +807,11 @@ void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent) { } AllocationSequence* sequence = - new AllocationSequence(this, networks[i], config, sequence_flags); - sequence->SignalPortAllocationComplete.connect( - this, &BasicPortAllocatorSession::OnPortAllocationComplete); + new AllocationSequence(this, networks[i], config, sequence_flags, + [this, safety_flag = network_safety_.flag()] { + if (safety_flag->alive()) + OnPortAllocationComplete(); + }); sequence->Init(); sequence->Start(); sequences_.push_back(sequence); @@ -824,7 +819,8 @@ void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent) { } } if (done_signal_needed) { - network_thread_->Post(RTC_FROM_HERE, this, MSG_SEQUENCEOBJECTS_CREATED); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this] { OnAllocationSequenceObjectsCreated(); })); } } @@ -1128,8 +1124,7 @@ bool BasicPortAllocatorSession::CandidatePairable(const Candidate& c, !host_candidates_disabled); } -void BasicPortAllocatorSession::OnPortAllocationComplete( - AllocationSequence* seq) { +void BasicPortAllocatorSession::OnPortAllocationComplete() { RTC_DCHECK_RUN_ON(network_thread_); // Send candidate allocation complete signal if all ports are done. 
MaybeSignalCandidatesAllocationDone(); @@ -1220,10 +1215,12 @@ void BasicPortAllocatorSession::PrunePortsAndRemoveCandidates( // AllocationSequence -AllocationSequence::AllocationSequence(BasicPortAllocatorSession* session, - rtc::Network* network, - PortConfiguration* config, - uint32_t flags) +AllocationSequence::AllocationSequence( + BasicPortAllocatorSession* session, + rtc::Network* network, + PortConfiguration* config, + uint32_t flags, + std::function port_allocation_complete_callback) : session_(session), network_(network), config_(config), @@ -1231,7 +1228,9 @@ AllocationSequence::AllocationSequence(BasicPortAllocatorSession* session, flags_(flags), udp_socket_(), udp_port_(NULL), - phase_(0) {} + phase_(0), + port_allocation_complete_callback_( + std::move(port_allocation_complete_callback)) {} void AllocationSequence::Init() { if (IsFlagSet(PORTALLOCATOR_ENABLE_SHARED_SOCKET)) { @@ -1248,6 +1247,7 @@ void AllocationSequence::Init() { } void AllocationSequence::Clear() { + TRACE_EVENT0("webrtc", "AllocationSequence::Clear"); udp_port_ = NULL; relay_ports_.clear(); } @@ -1259,10 +1259,6 @@ void AllocationSequence::OnNetworkFailed() { Stop(); } -AllocationSequence::~AllocationSequence() { - session_->network_thread()->Clear(this); -} - void AllocationSequence::DisableEquivalentPhases(rtc::Network* network, PortConfiguration* config, uint32_t* flags) { @@ -1337,7 +1333,9 @@ void AllocationSequence::DisableEquivalentPhases(rtc::Network* network, void AllocationSequence::Start() { state_ = kRunning; - session_->network_thread()->Post(RTC_FROM_HERE, this, MSG_ALLOCATION_PHASE); + + session_->network_thread()->PostTask(webrtc::ToQueuedTask( + safety_, [this, epoch = epoch_] { Process(epoch); })); // Take a snapshot of the best IP, so that when DisableEquivalentPhases is // called next time, we enable all phases if the best IP has since changed. previous_best_ip_ = network_->GetBestIP(); @@ -1347,16 +1345,18 @@ void AllocationSequence::Stop() { // If the port is completed, don't set it to stopped. if (state_ == kRunning) { state_ = kStopped; - session_->network_thread()->Clear(this, MSG_ALLOCATION_PHASE); + // Cause further Process calls in the previous epoch to be ignored. + ++epoch_; } } -void AllocationSequence::OnMessage(rtc::Message* msg) { +void AllocationSequence::Process(int epoch) { RTC_DCHECK(rtc::Thread::Current() == session_->network_thread()); - RTC_DCHECK(msg->message_id == MSG_ALLOCATION_PHASE); - const char* const PHASE_NAMES[kNumPhases] = {"Udp", "Relay", "Tcp"}; + if (epoch != epoch_) + return; + // Perform all of the phases in the current step. RTC_LOG(LS_INFO) << network_->ToString() << ": Allocation Phase=" << PHASE_NAMES[phase_]; @@ -1382,14 +1382,16 @@ void AllocationSequence::OnMessage(rtc::Message* msg) { if (state() == kRunning) { ++phase_; - session_->network_thread()->PostDelayed(RTC_FROM_HERE, - session_->allocator()->step_delay(), - this, MSG_ALLOCATION_PHASE); + session_->network_thread()->PostDelayedTask( + webrtc::ToQueuedTask(safety_, + [this, epoch = epoch_] { Process(epoch); }), + session_->allocator()->step_delay()); } else { - // If all phases in AllocationSequence are completed, no allocation - // steps needed further. Canceling pending signal. - session_->network_thread()->Clear(this, MSG_ALLOCATION_PHASE); - SignalPortAllocationComplete(this); + // No allocation steps needed further if all phases in AllocationSequence + // are completed. Cause further Process calls in the previous epoch to be + // ignored. 
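The epoch check above is the second cancellation mechanism introduced by this change: the current value of `epoch_` is captured when a task is posted and compared again when it runs, so `Stop()` (or phase completion) can invalidate work that is already sitting in the queue, including the delayed re-posts. A rough sketch of the idea with placeholder names and an arbitrary delay:

```cpp
// Sketch only: `Sequencer`, `Step()` and the 50 ms delay are illustrative;
// the real code is AllocationSequence::Process()/Stop() above.
#include "rtc_base/task_utils/pending_task_safety_flag.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "rtc_base/thread.h"

class Sequencer {
 public:
  explicit Sequencer(rtc::Thread* thread) : thread_(thread) {}

  void Start() {
    thread_->PostTask(
        webrtc::ToQueuedTask(safety_, [this, epoch = epoch_] { Step(epoch); }));
  }

  // Invalidates every Step() already queued: those tasks still run, but they
  // see a stale epoch and return immediately.
  void Stop() { ++epoch_; }

 private:
  void Step(int epoch) {
    if (epoch != epoch_)
      return;  // Posted before the last Stop(); ignore.
    // ... perform one phase of work, then re-post with a delay ...
    thread_->PostDelayedTask(
        webrtc::ToQueuedTask(safety_, [this, epoch = epoch_] { Step(epoch); }),
        /*milliseconds=*/50);
  }

  rtc::Thread* const thread_;
  int epoch_ = 0;
  webrtc::ScopedTaskSafety safety_;  // Guards against use after destruction.
};
```

The safety flag protects against the object being gone; the epoch protects against logically stale work on an object that is still alive. The patch uses both together.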
+ ++epoch_; + port_allocation_complete_callback_(); } } @@ -1657,8 +1659,6 @@ PortConfiguration::PortConfiguration(const ServerAddresses& stun_servers, webrtc::field_trial::IsDisabled("WebRTC-UseTurnServerAsStunServer"); } -PortConfiguration::~PortConfiguration() = default; - ServerAddresses PortConfiguration::StunServers() { if (!stun_address.IsNil() && stun_servers.find(stun_address) == stun_servers.end()) { diff --git a/p2p/client/basic_port_allocator.h b/p2p/client/basic_port_allocator.h index b27016a1dc..77aceb1e9c 100644 --- a/p2p/client/basic_port_allocator.h +++ b/p2p/client/basic_port_allocator.h @@ -22,7 +22,9 @@ #include "rtc_base/checks.h" #include "rtc_base/network.h" #include "rtc_base/system/rtc_export.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace cricket { @@ -106,8 +108,9 @@ enum class SessionState { // process will be started. }; -class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, - public rtc::MessageHandler { +// This class is thread-compatible and assumes it's created, operated upon and +// destroyed on the network thread. +class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession { public: BasicPortAllocatorSession(BasicPortAllocator* allocator, const std::string& content_name, @@ -155,10 +158,11 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, // Adds a port configuration that is now ready. Once we have one for each // network (or a timeout occurs), we will start allocating ports. - virtual void ConfigReady(PortConfiguration* config); - - // MessageHandler. Can be overriden if message IDs do not conflict. - void OnMessage(rtc::Message* message) override; + void ConfigReady(std::unique_ptr config); + // TODO(bugs.webrtc.org/12840) Remove once unused in downstream projects. + ABSL_DEPRECATED( + "Use ConfigReady(std::unique_ptr) instead!") + void ConfigReady(PortConfiguration* config); private: class PortData { @@ -213,10 +217,10 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, State state_ = STATE_INPROGRESS; }; - void OnConfigReady(PortConfiguration* config); + void OnConfigReady(std::unique_ptr config); void OnConfigStop(); void AllocatePorts(); - void OnAllocate(); + void OnAllocate(int allocation_epoch); void DoAllocate(bool disable_equivalent_phases); void OnNetworksChanged(); void OnAllocationSequenceObjectsCreated(); @@ -233,7 +237,7 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, void OnProtocolEnabled(AllocationSequence* seq, ProtocolType proto); void OnPortDestroyed(PortInterface* port); void MaybeSignalCandidatesAllocationDone(); - void OnPortAllocationComplete(AllocationSequence* seq); + void OnPortAllocationComplete(); PortData* FindPort(Port* port); std::vector GetNetworks(); std::vector GetFailedNetworks(); @@ -266,7 +270,7 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, bool allocation_started_; bool network_manager_started_; bool allocation_sequences_created_; - std::vector configs_; + std::vector> configs_; std::vector sequences_; std::vector ports_; std::vector candidate_error_events_; @@ -274,13 +278,15 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, // Policy on how to prune turn ports, taken from the port allocator. 
webrtc::PortPrunePolicy turn_port_prune_policy_; SessionState state_ = SessionState::CLEARED; + int allocation_epoch_ RTC_GUARDED_BY(network_thread_) = 0; + webrtc::ScopedTaskSafety network_safety_; friend class AllocationSequence; }; // Records configuration information useful in creating ports. // TODO(deadbeef): Rename "relay" to "turn_server" in this struct. -struct RTC_EXPORT PortConfiguration : public rtc::MessageData { +struct RTC_EXPORT PortConfiguration { // TODO(jiayl): remove |stun_address| when Chrome is updated. rtc::SocketAddress stun_address; ServerAddresses stun_servers; @@ -300,8 +306,6 @@ struct RTC_EXPORT PortConfiguration : public rtc::MessageData { const std::string& username, const std::string& password); - ~PortConfiguration() override; - // Returns addresses of both the explicitly configured STUN servers, // and TURN servers that should be used as STUN servers. ServerAddresses StunServers(); @@ -323,8 +327,8 @@ class TurnPort; // Performs the allocation of ports, in a sequenced (timed) manner, for a given // network and IP address. -class AllocationSequence : public rtc::MessageHandler, - public sigslot::has_slots<> { +// This class is thread-compatible. +class AllocationSequence : public sigslot::has_slots<> { public: enum State { kInit, // Initial state. @@ -334,11 +338,18 @@ class AllocationSequence : public rtc::MessageHandler, // kInit --> kRunning --> {kCompleted|kStopped} }; + // |port_allocation_complete_callback| is called when AllocationSequence is + // done with allocating ports. This signal is useful when port allocation + // fails which doesn't result in any candidates. Using this signal + // BasicPortAllocatorSession can send its candidate discovery conclusion + // signal. Without this signal, BasicPortAllocatorSession doesn't have any + // event to trigger signal. This can also be achieved by starting a timer in + // BPAS, but this is less deterministic. AllocationSequence(BasicPortAllocatorSession* session, rtc::Network* network, PortConfiguration* config, - uint32_t flags); - ~AllocationSequence() override; + uint32_t flags, + std::function port_allocation_complete_callback); void Init(); void Clear(); void OnNetworkFailed(); @@ -360,17 +371,6 @@ class AllocationSequence : public rtc::MessageHandler, void Start(); void Stop(); - // MessageHandler - void OnMessage(rtc::Message* msg) override; - - // Signal from AllocationSequence, when it's done with allocating ports. - // This signal is useful, when port allocation fails which doesn't result - // in any candidates. Using this signal BasicPortAllocatorSession can send - // its candidate discovery conclusion signal. Without this signal, - // BasicPortAllocatorSession doesn't have any event to trigger signal. This - // can also be achieved by starting timer in BPAS. - sigslot::signal1 SignalPortAllocationComplete; - protected: // For testing. void CreateTurnPort(const RelayServerConfig& config); @@ -378,6 +378,7 @@ class AllocationSequence : public rtc::MessageHandler, private: typedef std::vector ProtocolList; + void Process(int epoch); bool IsFlagSet(uint32_t flag) { return ((flags_ & flag) != 0); } void CreateUDPPorts(); void CreateTCPPorts(); @@ -406,6 +407,12 @@ class AllocationSequence : public rtc::MessageHandler, UDPPort* udp_port_; std::vector relay_ports_; int phase_; + std::function port_allocation_complete_callback_; + // This counter is sampled and passed together with tasks when tasks are + // posted. 
If the sampled counter doesn't match |epoch_| on reception, the + // posted task is ignored. + int epoch_ = 0; + webrtc::ScopedTaskSafety safety_; }; } // namespace cricket diff --git a/pc/BUILD.gn b/pc/BUILD.gn index 3039ec6f66..460462e54a 100644 --- a/pc/BUILD.gn +++ b/pc/BUILD.gn @@ -23,6 +23,20 @@ config("rtc_pc_config") { } } +rtc_library("proxy") { + sources = [ + "proxy.cc", + "proxy.h", + ] + deps = [ + "../api:scoped_refptr", + "../api/task_queue", + "../rtc_base:rtc_base_approved", + "../rtc_base:threading", + "../rtc_base/system:rtc_export", + ] +} + rtc_library("rtc_pc_base") { visibility = [ "*" ] defines = [] @@ -42,14 +56,22 @@ rtc_library("rtc_pc_base") { "ice_transport.h", "jsep_transport.cc", "jsep_transport.h", + "jsep_transport_collection.cc", + "jsep_transport_collection.h", "jsep_transport_controller.cc", "jsep_transport_controller.h", "media_session.cc", "media_session.h", + "media_stream_proxy.h", + "media_stream_track_proxy.h", + "peer_connection_factory_proxy.h", + "peer_connection_proxy.h", "rtcp_mux_filter.cc", "rtcp_mux_filter.h", "rtp_media_utils.cc", "rtp_media_utils.h", + "rtp_receiver_proxy.h", + "rtp_sender_proxy.h", "rtp_transport.cc", "rtp_transport.h", "rtp_transport_internal.h", @@ -59,10 +81,6 @@ rtc_library("rtc_pc_base") { "sctp_transport.h", "sctp_utils.cc", "sctp_utils.h", - "session_description.cc", - "session_description.h", - "simulcast_description.cc", - "simulcast_description.h", "srtp_filter.cc", "srtp_filter.h", "srtp_session.cc", @@ -72,10 +90,15 @@ rtc_library("rtc_pc_base") { "transport_stats.cc", "transport_stats.h", "used_ids.h", + "video_track_source_proxy.cc", + "video_track_source_proxy.h", ] deps = [ ":media_protocol_names", + ":proxy", + ":session_description", + ":simulcast_description", "../api:array_view", "../api:async_dns_resolver", "../api:audio_options_api", @@ -83,6 +106,7 @@ rtc_library("rtc_pc_base") { "../api:function_view", "../api:ice_transport_factory", "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", "../api:packet_socket_factory", "../api:priority", "../api:rtc_error", @@ -127,6 +151,7 @@ rtc_library("rtc_pc_base") { "../rtc_base/network:sent_packet", "../rtc_base/synchronization:mutex", "../rtc_base/system:file_wrapper", + "../rtc_base/system:no_unique_address", "../rtc_base/system:rtc_export", "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:to_queued_task", @@ -150,6 +175,43 @@ rtc_library("rtc_pc_base") { public_configs = [ ":rtc_pc_config" ] } +rtc_source_set("session_description") { + visibility = [ "*" ] + sources = [ + "session_description.cc", + "session_description.h", + ] + deps = [ + ":media_protocol_names", + ":simulcast_description", + "../api:libjingle_peerconnection_api", + "../api:rtp_parameters", + "../api:rtp_transceiver_direction", + "../media:rtc_media_base", + "../p2p:rtc_p2p", + "../rtc_base:checks", + "../rtc_base:socket_address", + "../rtc_base/system:rtc_export", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory:memory", + ] +} + +rtc_source_set("simulcast_description") { + visibility = [ "*" ] + sources = [ + "simulcast_description.cc", + "simulcast_description.h", + ] + deps = [ + "../rtc_base:checks", + "../rtc_base:socket_address", + "../rtc_base/system:rtc_export", + ] +} + rtc_source_set("rtc_pc") { visibility = [ "*" ] allow_poison = [ "audio_codecs" ] # TODO(bugs.webrtc.org/8396): Remove. 
@@ -219,6 +281,7 @@ rtc_library("peerconnection") { ":media_protocol_names", ":media_stream", ":peer_connection_message_handler", + ":proxy", ":remote_audio_source", ":rtc_pc_base", ":rtp_parameters_conversion", @@ -227,6 +290,8 @@ rtc_library("peerconnection") { ":rtp_transceiver", ":rtp_transmission_manager", ":sdp_state_provider", + ":session_description", + ":simulcast_description", ":stats_collector_interface", ":transceiver_list", ":usage_pattern", @@ -276,6 +341,8 @@ rtc_library("peerconnection") { "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", "../call:call_interfaces", + "../call:rtp_interfaces", + "../call:rtp_sender", "../common_video", "../logging:ice_log", "../media:rtc_data_sctp_transport_internal", @@ -301,6 +368,7 @@ rtc_library("peerconnection") { "../rtc_base/system:file_wrapper", "../rtc_base/system:no_unique_address", "../rtc_base/system:rtc_export", + "../rtc_base/system:unused", "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:to_queued_task", "../rtc_base/third_party/base64", @@ -380,10 +448,12 @@ rtc_library("rtp_transceiver") { "rtp_transceiver.h", ] deps = [ + ":proxy", ":rtc_pc_base", ":rtp_parameters_conversion", ":rtp_receiver", ":rtp_sender", + ":session_description", "../api:array_view", "../api:libjingle_peerconnection_api", "../api:rtc_error", @@ -471,6 +541,7 @@ rtc_library("rtp_receiver") { ] deps = [ ":media_stream", + ":rtc_pc_base", ":video_track_source", "../api:libjingle_peerconnection_api", "../api:media_stream_interface", @@ -502,6 +573,7 @@ rtc_library("audio_rtp_receiver") { ":jitter_buffer_delay", ":media_stream", ":remote_audio_source", + ":rtc_pc_base", ":rtp_receiver", "../api:frame_transformer_interface", "../api:libjingle_peerconnection_api", @@ -535,6 +607,7 @@ rtc_library("video_rtp_receiver") { deps = [ ":jitter_buffer_delay", ":media_stream", + ":rtc_pc_base", ":rtp_receiver", ":video_rtp_track_source", ":video_track", @@ -702,6 +775,7 @@ rtc_library("rtp_parameters_conversion") { ] deps = [ ":rtc_pc_base", + ":session_description", "../api:array_view", "../api:libjingle_peerconnection_api", "../api:rtc_error", @@ -723,6 +797,7 @@ rtc_library("dtmf_sender") { "dtmf_sender.h", ] deps = [ + ":proxy", "../api:libjingle_peerconnection_api", "../api:scoped_refptr", "../rtc_base:checks", @@ -831,6 +906,7 @@ if (rtc_include_tests && !build_with_chromium) { ":peerconnection", ":rtc_pc", ":rtc_pc_base", + ":session_description", ":video_rtp_receiver", "../api:array_view", "../api:audio_options_api", @@ -995,12 +1071,14 @@ if (rtc_include_tests && !build_with_chromium) { ":jitter_buffer_delay", ":media_stream", ":peerconnection", + ":proxy", ":remote_audio_source", ":rtc_pc_base", ":rtp_parameters_conversion", ":rtp_receiver", ":rtp_sender", ":rtp_transceiver", + ":session_description", ":usage_pattern", ":video_rtp_receiver", ":video_rtp_track_source", @@ -1056,6 +1134,7 @@ if (rtc_include_tests && !build_with_chromium) { "../rtc_base/synchronization:mutex", "../rtc_base/third_party/base64", "../rtc_base/third_party/sigslot", + "../system_wrappers:field_trial", "../system_wrappers:metrics", "../test:field_trial", "../test:fileutils", @@ -1159,6 +1238,7 @@ if (rtc_include_tests && !build_with_chromium) { ":rtp_receiver", ":rtp_sender", ":rtp_transceiver", + ":session_description", ":usage_pattern", ":video_rtp_receiver", ":video_rtp_track_source", diff --git a/pc/audio_rtp_receiver.cc b/pc/audio_rtp_receiver.cc index 5f815c589e..4efab24d15 100644 --- a/pc/audio_rtp_receiver.cc +++ 
b/pc/audio_rtp_receiver.cc @@ -15,9 +15,9 @@ #include #include -#include "api/media_stream_track_proxy.h" #include "api/sequence_checker.h" #include "pc/audio_track.h" +#include "pc/media_stream_track_proxy.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" diff --git a/pc/audio_rtp_receiver.h b/pc/audio_rtp_receiver.h index 7f2e557126..c3468721d8 100644 --- a/pc/audio_rtp_receiver.h +++ b/pc/audio_rtp_receiver.h @@ -21,7 +21,6 @@ #include "api/dtls_transport_interface.h" #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" -#include "api/media_stream_track_proxy.h" #include "api/media_types.h" #include "api/rtp_parameters.h" #include "api/rtp_receiver_interface.h" @@ -31,6 +30,7 @@ #include "media/base/media_channel.h" #include "pc/audio_track.h" #include "pc/jitter_buffer_delay.h" +#include "pc/media_stream_track_proxy.h" #include "pc/remote_audio_source.h" #include "pc/rtp_receiver.h" #include "rtc_base/ref_counted_object.h" diff --git a/pc/audio_track.cc b/pc/audio_track.cc index 191d4efbc4..be087f693b 100644 --- a/pc/audio_track.cc +++ b/pc/audio_track.cc @@ -32,7 +32,7 @@ AudioTrack::AudioTrack(const std::string& label, } AudioTrack::~AudioTrack() { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); set_state(MediaStreamTrackInterface::kEnded); if (audio_source_) audio_source_->UnregisterObserver(this); @@ -43,24 +43,24 @@ std::string AudioTrack::kind() const { } AudioSourceInterface* AudioTrack::GetSource() const { - RTC_DCHECK(thread_checker_.IsCurrent()); + // Callable from any thread. return audio_source_.get(); } void AudioTrack::AddSink(AudioTrackSinkInterface* sink) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (audio_source_) audio_source_->AddSink(sink); } void AudioTrack::RemoveSink(AudioTrackSinkInterface* sink) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (audio_source_) audio_source_->RemoveSink(sink); } void AudioTrack::OnChanged() { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (audio_source_->state() == MediaSourceInterface::kEnded) { set_state(kEnded); } else { diff --git a/pc/audio_track.h b/pc/audio_track.h index 07511a5c94..8a705cf8fb 100644 --- a/pc/audio_track.h +++ b/pc/audio_track.h @@ -41,13 +41,13 @@ class AudioTrack : public MediaStreamTrack, // MediaStreamTrack implementation. std::string kind() const override; - private: // AudioTrackInterface implementation. AudioSourceInterface* GetSource() const override; void AddSink(AudioTrackSinkInterface* sink) override; void RemoveSink(AudioTrackSinkInterface* sink) override; + private: // ObserverInterface implementation. 
void OnChanged() override; diff --git a/pc/channel.cc b/pc/channel.cc index db8d0e33dd..8630703be1 100644 --- a/pc/channel.cc +++ b/pc/channel.cc @@ -220,6 +220,7 @@ void BaseChannel::Deinit() { } bool BaseChannel::SetRtpTransport(webrtc::RtpTransportInternal* rtp_transport) { + TRACE_EVENT0("webrtc", "BaseChannel::SetRtpTransport"); RTC_DCHECK_RUN_ON(network_thread()); if (rtp_transport == rtp_transport_) { return true; @@ -524,6 +525,7 @@ void BaseChannel::DisableMedia_w() { } void BaseChannel::UpdateWritableState_n() { + TRACE_EVENT0("webrtc", "BaseChannel::UpdateWritableState_n"); if (rtp_transport_->IsWritable(/*rtcp=*/true) && rtp_transport_->IsWritable(/*rtcp=*/false)) { ChannelWritable_n(); @@ -533,6 +535,7 @@ void BaseChannel::UpdateWritableState_n() { } void BaseChannel::ChannelWritable_n() { + TRACE_EVENT0("webrtc", "BaseChannel::ChannelWritable_n"); if (writable_) { return; } @@ -552,6 +555,7 @@ void BaseChannel::ChannelWritable_n() { } void BaseChannel::ChannelNotWritable_n() { + TRACE_EVENT0("webrtc", "BaseChannel::ChannelNotWritable_n"); if (!writable_) { return; } @@ -748,18 +752,12 @@ bool BaseChannel::UpdateRemoteStreams_w( return ret; } -RtpHeaderExtensions BaseChannel::GetFilteredRtpHeaderExtensions( +RtpHeaderExtensions BaseChannel::GetDeduplicatedRtpHeaderExtensions( const RtpHeaderExtensions& extensions) { - if (crypto_options_.srtp.enable_encrypted_rtp_header_extensions) { - RtpHeaderExtensions filtered; - absl::c_copy_if(extensions, std::back_inserter(filtered), - [](const webrtc::RtpExtension& extension) { - return !extension.encrypt; - }); - return filtered; - } - - return webrtc::RtpExtension::FilterDuplicateNonEncrypted(extensions); + return webrtc::RtpExtension::DeduplicateHeaderExtensions( + extensions, crypto_options_.srtp.enable_encrypted_rtp_header_extensions + ? webrtc::RtpExtension::kPreferEncryptedExtension + : webrtc::RtpExtension::kDiscardEncryptedExtension); } void BaseChannel::MaybeAddHandledPayloadType(int payload_type) { @@ -829,7 +827,7 @@ bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content, RTC_LOG(LS_INFO) << "Setting local voice description for " << ToString(); RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(content->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(content->rtp_header_extensions()); // TODO(tommi): There's a hop to the network thread here. // some of the below is also network thread related. 
UpdateRtpHeaderExtensionMap(rtp_header_extensions); @@ -891,7 +889,7 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content, const AudioContentDescription* audio = content->as_audio(); RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(audio->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(audio->rtp_header_extensions()); AudioSendParameters send_params = last_send_params_; RtpSendParametersFromMediaDescription( @@ -991,7 +989,7 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, RTC_LOG(LS_INFO) << "Setting local video description for " << ToString(); RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(content->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(content->rtp_header_extensions()); UpdateRtpHeaderExtensionMap(rtp_header_extensions); media_channel()->SetExtmapAllowMixed(content->extmap_allow_mixed()); @@ -1084,7 +1082,7 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content, const VideoContentDescription* video = content->as_video(); RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(video->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(video->rtp_header_extensions()); VideoSendParameters send_params = last_send_params_; RtpSendParametersFromMediaDescription( diff --git a/pc/channel.h b/pc/channel.h index 76f65cc822..d1dbe2cd6c 100644 --- a/pc/channel.h +++ b/pc/channel.h @@ -272,10 +272,11 @@ class BaseChannel : public ChannelInterface, webrtc::SdpType type, std::string* error_desc) RTC_RUN_ON(worker_thread()) = 0; - // Return a list of RTP header extensions with the non-encrypted extensions - // removed depending on the current crypto_options_ and only if both the - // non-encrypted and encrypted extension is present for the same URI. - RtpHeaderExtensions GetFilteredRtpHeaderExtensions( + + // Returns a list of RTP header extensions where any extension URI is unique. + // Encrypted extensions will be either preferred or discarded, depending on + // the current crypto_options_. 
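The renamed helper is now a thin wrapper around `webrtc::RtpExtension::DeduplicateHeaderExtensions()`, as the channel.cc hunk above shows. A small hedged sketch of the intended behaviour; the extension IDs and the choice of the MID URI are arbitrary example values:

```cpp
// Sketch only: IDs and the URI are example values, not part of the patch.
#include <vector>

#include "api/rtp_parameters.h"

std::vector<webrtc::RtpExtension> DedupExample(bool allow_encrypted) {
  std::vector<webrtc::RtpExtension> extensions = {
      webrtc::RtpExtension("urn:ietf:params:rtp-hdrext:sdes:mid", 1),
      // Same URI again, but negotiated as an encrypted extension.
      webrtc::RtpExtension("urn:ietf:params:rtp-hdrext:sdes:mid", 2,
                           /*encrypt=*/true),
  };
  // Only one entry per URI survives: the encrypted one if the caller prefers
  // encryption, otherwise the unencrypted one.
  return webrtc::RtpExtension::DeduplicateHeaderExtensions(
      extensions, allow_encrypted
                      ? webrtc::RtpExtension::kPreferEncryptedExtension
                      : webrtc::RtpExtension::kDiscardEncryptedExtension);
}
```

With `kPreferEncryptedExtension` the encrypted entry wins when both forms of a URI are present; with `kDiscardEncryptedExtension` encrypted entries are dropped.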
+ RtpHeaderExtensions GetDeduplicatedRtpHeaderExtensions( const RtpHeaderExtensions& extensions); // Add |payload_type| to |demuxer_criteria_| if payload type demuxing is diff --git a/pc/connection_context.cc b/pc/connection_context.cc index 8d6ee636f3..1bb7908f5c 100644 --- a/pc/connection_context.cc +++ b/pc/connection_context.cc @@ -103,11 +103,13 @@ ConnectionContext::ConnectionContext( signaling_thread_->AllowInvokesToThread(network_thread_); worker_thread_->AllowInvokesToThread(network_thread_); if (network_thread_->IsCurrent()) { - network_thread_->DisallowAllInvokes(); + // TODO(https://crbug.com/webrtc/12802) switch to DisallowAllInvokes + network_thread_->AllowInvokesToThread(network_thread_); } else { network_thread_->PostTask(ToQueuedTask([thread = network_thread_] { thread->DisallowBlockingCalls(); - thread->DisallowAllInvokes(); + // TODO(https://crbug.com/webrtc/12802) switch to DisallowAllInvokes + thread->AllowInvokesToThread(thread); })); } diff --git a/pc/data_channel_controller.cc b/pc/data_channel_controller.cc index d8e6b39895..7a6fd3c168 100644 --- a/pc/data_channel_controller.cc +++ b/pc/data_channel_controller.cc @@ -21,7 +21,6 @@ #include "pc/sctp_utils.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" -#include "rtc_base/string_encode.h" #include "rtc_base/task_utils/to_queued_task.h" namespace webrtc { @@ -162,13 +161,13 @@ void DataChannelController::OnReadyToSend() { })); } -void DataChannelController::OnTransportClosed() { +void DataChannelController::OnTransportClosed(RTCError error) { RTC_DCHECK_RUN_ON(network_thread()); signaling_thread()->PostTask( - ToQueuedTask([self = weak_factory_.GetWeakPtr()] { + ToQueuedTask([self = weak_factory_.GetWeakPtr(), error] { if (self) { RTC_DCHECK_RUN_ON(self->signaling_thread()); - self->OnTransportChannelClosed(); + self->OnTransportChannelClosed(error); } })); } @@ -351,14 +350,14 @@ void DataChannelController::OnSctpDataChannelClosed(SctpDataChannel* channel) { } } -void DataChannelController::OnTransportChannelClosed() { +void DataChannelController::OnTransportChannelClosed(RTCError error) { RTC_DCHECK_RUN_ON(signaling_thread()); // Use a temporary copy of the SCTP DataChannel list because the // DataChannel may callback to us and try to modify the list. std::vector> temp_sctp_dcs; temp_sctp_dcs.swap(sctp_data_channels_); for (const auto& channel : temp_sctp_dcs) { - channel->OnTransportChannelClosed(); + channel->OnTransportChannelClosed(error); } } diff --git a/pc/data_channel_controller.h b/pc/data_channel_controller.h index 05fcff0e03..7b1ff26690 100644 --- a/pc/data_channel_controller.h +++ b/pc/data_channel_controller.h @@ -70,7 +70,7 @@ class DataChannelController : public SctpDataChannelProviderInterface, void OnChannelClosing(int channel_id) override; void OnChannelClosed(int channel_id) override; void OnReadyToSend() override; - void OnTransportClosed() override; + void OnTransportClosed(RTCError error) override; // Called from PeerConnection::SetupDataChannelTransport_n void SetupDataChannelTransport_n(); @@ -111,7 +111,7 @@ class DataChannelController : public SctpDataChannelProviderInterface, return SignalSctpDataChannelCreated_; } // Called when the transport for the data channels is closed or destroyed. 
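Now that this callback carries an `RTCError`, the SCTP failure reason can travel from the transport, through `DataChannelController`, into each data channel's `error()`, which the new `TransportGotErrorCode` unit test further down verifies. A hedged sketch of how such an error object is filled in, mirroring that test; the cause code chosen here is just an example:

```cpp
// Sketch mirroring the new unit test; kProtocolViolation is an example cause.
#include <cstdint>

#include "api/rtc_error.h"
#include "media/sctp/sctp_transport_internal.h"

webrtc::RTCError MakeSctpCloseError() {
  webrtc::RTCError error(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA,
                         "Transport channel closed");
  error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE);
  // set_sctp_cause_code() takes the 16-bit SCTP error cause code.
  error.set_sctp_cause_code(
      static_cast<uint16_t>(cricket::SctpErrorCauseCode::kProtocolViolation));
  return error;
}
```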
- void OnTransportChannelClosed(); + void OnTransportChannelClosed(RTCError error); void OnSctpDataChannelClosed(SctpDataChannel* channel); diff --git a/pc/data_channel_integrationtest.cc b/pc/data_channel_integrationtest.cc index 4b6ae1fb79..47ea74a4b2 100644 --- a/pc/data_channel_integrationtest.cc +++ b/pc/data_channel_integrationtest.cc @@ -27,12 +27,16 @@ #include "rtc_base/gunit.h" #include "rtc_base/ref_counted_object.h" #include "rtc_base/virtual_socket_server.h" +#include "system_wrappers/include/field_trial.h" #include "test/gtest.h" namespace webrtc { namespace { +// All tests in this file require SCTP support. +#ifdef WEBRTC_HAVE_SCTP + class DataChannelIntegrationTest : public PeerConnectionIntegrationBaseTest, public ::testing::WithParamInterface< std::tuple> { @@ -42,8 +46,6 @@ class DataChannelIntegrationTest : public PeerConnectionIntegrationBaseTest, std::get<1>(GetParam())) {} }; -GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DataChannelIntegrationTest); - // Fake clock must be set before threads are started to prevent race on // Set/GetClockForTesting(). // To achieve that, multiple inheritance is used as a mixin pattern @@ -63,11 +65,6 @@ class FakeClockForTest : public rtc::ScopedFakeClock { ScopedFakeClock& FakeClock() { return *this; } }; -// Ensure FakeClockForTest is constructed first (see class for rationale). -class DataChannelIntegrationTestWithFakeClock - : public FakeClockForTest, - public DataChannelIntegrationTest {}; - class DataChannelIntegrationTestPlanB : public PeerConnectionIntegrationBaseTest { protected: @@ -75,9 +72,6 @@ class DataChannelIntegrationTestPlanB : PeerConnectionIntegrationBaseTest(SdpSemantics::kPlanB) {} }; -GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST( - DataChannelIntegrationTestWithFakeClock); - class DataChannelIntegrationTestUnifiedPlan : public PeerConnectionIntegrationBaseTest { protected: @@ -85,8 +79,6 @@ class DataChannelIntegrationTestUnifiedPlan : PeerConnectionIntegrationBaseTest(SdpSemantics::kUnifiedPlan) {} }; -#ifdef WEBRTC_HAVE_SCTP - // This test causes a PeerConnection to enter Disconnected state, and // sends data on a DataChannel while disconnected. // The data should be surfaced when the connection reestablishes. @@ -622,7 +614,7 @@ TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDeliveredInReliableMode) { kDefaultTimeout); } -TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDeliveredInUnReliableMode) { +TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDroppedInUnreliableMode) { CreatePeerConnectionWrappers(); ConnectFakeSignaling(); DataChannelInit init; @@ -637,6 +629,9 @@ TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDeliveredInUnReliableMode) { kDefaultTimeout); // Cause a temporary network outage virtual_socket_server()->set_drop_probability(1.0); + // Send a few packets. Note that all get dropped only when all packets + // fit into the receiver receive window/congestion window, so that they + // actually get sent. 
for (int i = 1; i <= 10; i++) { caller()->data_channel()->Send(DataBuffer("Sent while blocked")); } @@ -659,16 +654,119 @@ TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDeliveredInUnReliableMode) { EXPECT_EQ(2u, callee()->data_observer()->received_message_count()); } -INSTANTIATE_TEST_SUITE_P( - DataChannelIntegrationTest, - DataChannelIntegrationTest, - Combine(Values(SdpSemantics::kPlanB, SdpSemantics::kUnifiedPlan), - Values("WebRTC-DataChannel-Dcsctp/Enabled/", - "WebRTC-DataChannel-Dcsctp/Disabled/"))); +TEST_P(DataChannelIntegrationTest, + QueuedPacketsGetDroppedInLifetimeLimitedMode) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + DataChannelInit init; + init.maxRetransmitTime = 1; + init.ordered = false; + caller()->CreateDataChannel(&init); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + caller()->data_channel()->Send(DataBuffer("hello first")); + ASSERT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + // Cause a temporary network outage + virtual_socket_server()->set_drop_probability(1.0); + for (int i = 1; i <= 200; i++) { + caller()->data_channel()->Send(DataBuffer("Sent while blocked")); + } + // Nothing should be delivered during outage. + // We do a short wait to verify that delivery count is still 1, + // and to make sure max packet lifetime (which is in ms) is exceeded. + WAIT(false, 10); + EXPECT_EQ(1u, callee()->data_observer()->received_message_count()); + // Reverse the network outage. + virtual_socket_server()->set_drop_probability(0.0); + // Send a new packet, and wait for it to be delivered. + caller()->data_channel()->Send(DataBuffer("After block")); + EXPECT_EQ_WAIT("After block", callee()->data_observer()->last_message(), + kDefaultTimeout); + // Some messages should be lost, but first and last message should have + // been delivered. + // First, check that the protocol guarantee is preserved. + EXPECT_GT(202u, callee()->data_observer()->received_message_count()); + EXPECT_LE(2u, callee()->data_observer()->received_message_count()); + // Then, check that observed behavior (lose some messages) has not changed + if (webrtc::field_trial::IsEnabled("WebRTC-DataChannel-Dcsctp")) { + // DcSctp loses all messages. This is correct. + EXPECT_EQ(2u, callee()->data_observer()->received_message_count()); + } else { + // Usrsctp loses some messages, but keeps messages not attempted. + // THIS IS THE WRONG BEHAVIOR. According to discussion in + // https://github.com/sctplab/usrsctp/issues/584, all these packets + // should be discarded. + // TODO(bugs.webrtc.org/12731): Fix this. 
+ EXPECT_EQ(90u, callee()->data_observer()->received_message_count()); + } +} + +TEST_P(DataChannelIntegrationTest, + SomeQueuedPacketsGetDroppedInMaxRetransmitsMode) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + DataChannelInit init; + init.maxRetransmits = 0; + init.ordered = false; + caller()->CreateDataChannel(&init); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + caller()->data_channel()->Send(DataBuffer("hello first")); + ASSERT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + // Cause a temporary network outage + virtual_socket_server()->set_drop_probability(1.0); + // Fill the buffer until queued data starts to build + size_t packet_counter = 0; + while (caller()->data_channel()->buffered_amount() < 1 && + packet_counter < 10000) { + packet_counter++; + caller()->data_channel()->Send(DataBuffer("Sent while blocked")); + } + if (caller()->data_channel()->buffered_amount()) { + RTC_LOG(LS_INFO) << "Buffered data after " << packet_counter << " packets"; + } else { + RTC_LOG(LS_INFO) << "No buffered data after " << packet_counter + << " packets"; + } + // Nothing should be delivered during outage. + // We do a short wait to verify that delivery count is still 1. + WAIT(false, 10); + EXPECT_EQ(1u, callee()->data_observer()->received_message_count()); + // Reverse the network outage. + virtual_socket_server()->set_drop_probability(0.0); + // Send a new packet, and wait for it to be delivered. + caller()->data_channel()->Send(DataBuffer("After block")); + EXPECT_EQ_WAIT("After block", callee()->data_observer()->last_message(), + kDefaultTimeout); + // Some messages should be lost, but first and last message should have + // been delivered. + // Due to the fact that retransmissions are only counted when the packet + // goes on the wire, NOT when they are stalled in queue due to + // congestion, we expect some of the packets to be delivered, because + // congestion prevented them from being sent. + // Citation: https://tools.ietf.org/html/rfc7496#section-3.1 + + // First, check that the protocol guarantee is preserved. + EXPECT_GT(packet_counter, + callee()->data_observer()->received_message_count()); + EXPECT_LE(2u, callee()->data_observer()->received_message_count()); + // Then, check that observed behavior (lose between 100 and 200 messages) + // has not changed. + // Usrsctp behavior is different on Android (177) and other platforms (122). + // Dcsctp loses 432 packets. 
+ EXPECT_GT(2 + packet_counter - 100, + callee()->data_observer()->received_message_count()); + EXPECT_LT(2 + packet_counter - 500, + callee()->data_observer()->received_message_count()); +} INSTANTIATE_TEST_SUITE_P( DataChannelIntegrationTest, - DataChannelIntegrationTestWithFakeClock, + DataChannelIntegrationTest, Combine(Values(SdpSemantics::kPlanB, SdpSemantics::kUnifiedPlan), Values("WebRTC-DataChannel-Dcsctp/Enabled/", "WebRTC-DataChannel-Dcsctp/Disabled/"))); diff --git a/pc/data_channel_unittest.cc b/pc/data_channel_unittest.cc index 98c44f26fe..770892cbe1 100644 --- a/pc/data_channel_unittest.cc +++ b/pc/data_channel_unittest.cc @@ -13,6 +13,7 @@ #include #include +#include "media/sctp/sctp_transport_internal.h" #include "pc/sctp_data_channel.h" #include "pc/sctp_utils.h" #include "pc/test/fake_data_channel_provider.h" @@ -635,7 +636,9 @@ TEST_F(SctpDataChannelTest, TransportDestroyedWhileDataBuffered) { // Tell the data channel that its transport is being destroyed. // It should then stop using the transport (allowing us to delete it) and // transition to the "closed" state. - webrtc_data_channel_->OnTransportChannelClosed(); + webrtc::RTCError error(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, ""); + error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE); + webrtc_data_channel_->OnTransportChannelClosed(error); provider_.reset(nullptr); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kClosed, webrtc_data_channel_->state(), kDefaultTimeout); @@ -646,6 +649,31 @@ TEST_F(SctpDataChannelTest, TransportDestroyedWhileDataBuffered) { webrtc_data_channel_->error().error_detail()); } +TEST_F(SctpDataChannelTest, TransportGotErrorCode) { + SetChannelReady(); + + // Tell the data channel that its transport is being destroyed with an + // error code. + // It should then report that error code. + webrtc::RTCError error(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, + "Transport channel closed"); + error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE); + error.set_sctp_cause_code( + static_cast(cricket::SctpErrorCauseCode::kProtocolViolation)); + webrtc_data_channel_->OnTransportChannelClosed(error); + provider_.reset(nullptr); + EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kClosed, + webrtc_data_channel_->state(), kDefaultTimeout); + EXPECT_FALSE(webrtc_data_channel_->error().ok()); + EXPECT_EQ(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, + webrtc_data_channel_->error().type()); + EXPECT_EQ(webrtc::RTCErrorDetailType::SCTP_FAILURE, + webrtc_data_channel_->error().error_detail()); + EXPECT_EQ( + static_cast(cricket::SctpErrorCauseCode::kProtocolViolation), + webrtc_data_channel_->error().sctp_cause_code()); +} + class SctpSidAllocatorTest : public ::testing::Test { protected: SctpSidAllocator allocator_; diff --git a/pc/dtls_srtp_transport.cc b/pc/dtls_srtp_transport.cc index f272ab79cd..ac091c6131 100644 --- a/pc/dtls_srtp_transport.cc +++ b/pc/dtls_srtp_transport.cc @@ -15,6 +15,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/ssl_stream_adapter.h" @@ -114,10 +115,9 @@ bool DtlsSrtpTransport::IsDtlsConnected() { auto rtcp_dtls_transport = rtcp_mux_enabled() ? 
nullptr : rtcp_dtls_transport_; return (rtp_dtls_transport_ && - rtp_dtls_transport_->dtls_state() == - cricket::DTLS_TRANSPORT_CONNECTED && + rtp_dtls_transport_->dtls_state() == DtlsTransportState::kConnected && (!rtcp_dtls_transport || rtcp_dtls_transport->dtls_state() == - cricket::DTLS_TRANSPORT_CONNECTED)); + DtlsTransportState::kConnected)); } bool DtlsSrtpTransport::IsDtlsWritable() { @@ -275,17 +275,16 @@ void DtlsSrtpTransport::SetDtlsTransport( } if (*old_dtls_transport) { - (*old_dtls_transport)->UnsubscribeDtlsState(this); + (*old_dtls_transport)->UnsubscribeDtlsTransportState(this); } *old_dtls_transport = new_dtls_transport; if (new_dtls_transport) { - new_dtls_transport->SubscribeDtlsState( - this, [this](cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { - OnDtlsState(transport, state); - }); + new_dtls_transport->SubscribeDtlsTransportState( + this, + [this](cricket::DtlsTransportInternal* transport, + DtlsTransportState state) { OnDtlsState(transport, state); }); } } @@ -300,7 +299,7 @@ void DtlsSrtpTransport::SetRtcpDtlsTransport( } void DtlsSrtpTransport::OnDtlsState(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { RTC_DCHECK(transport == rtp_dtls_transport_ || transport == rtcp_dtls_transport_); @@ -308,7 +307,7 @@ void DtlsSrtpTransport::OnDtlsState(cricket::DtlsTransportInternal* transport, on_dtls_state_change_(); } - if (state != cricket::DTLS_TRANSPORT_CONNECTED) { + if (state != DtlsTransportState::kConnected) { ResetParams(); return; } diff --git a/pc/dtls_srtp_transport.h b/pc/dtls_srtp_transport.h index bc82fd5a9a..9c52dcf809 100644 --- a/pc/dtls_srtp_transport.h +++ b/pc/dtls_srtp_transport.h @@ -16,6 +16,7 @@ #include "absl/types/optional.h" #include "api/crypto_params.h" +#include "api/dtls_transport_interface.h" #include "api/rtc_error.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/packet_transport_internal.h" @@ -82,7 +83,7 @@ class DtlsSrtpTransport : public SrtpTransport { cricket::DtlsTransportInternal* rtcp_dtls_transport); void OnDtlsState(cricket::DtlsTransportInternal* dtls_transport, - cricket::DtlsTransportState state); + DtlsTransportState state); // Override the SrtpTransport::OnWritableState. 
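This hunk renames the `SubscribeDtlsState()`/`UnsubscribeDtlsState()` pair to `SubscribeDtlsTransportState()`/`UnsubscribeDtlsTransportState()` and switches the callback's state argument to `webrtc::DtlsTransportState`. A hedged usage sketch; the free functions and the `observer_key` parameter are illustrative only:

```cpp
// Sketch only: the patch wires this up inside DtlsSrtpTransport/DtlsTransport.
#include "api/dtls_transport_interface.h"
#include "p2p/base/dtls_transport_internal.h"

void WatchDtlsState(cricket::DtlsTransportInternal* dtls, void* observer_key) {
  // The first argument is an opaque tag used later for unsubscription.
  dtls->SubscribeDtlsTransportState(
      observer_key,
      [](cricket::DtlsTransportInternal* transport,
         webrtc::DtlsTransportState state) {
        if (state == webrtc::DtlsTransportState::kConnected) {
          // Handshake done: SRTP keys can now be extracted.
        }
      });
}

void StopWatching(cricket::DtlsTransportInternal* dtls, void* observer_key) {
  dtls->UnsubscribeDtlsTransportState(observer_key);
}
```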
void OnWritableState(rtc::PacketTransportInternal* packet_transport) override; diff --git a/pc/dtls_transport.cc b/pc/dtls_transport.cc index 1369db1f54..074f44e22b 100644 --- a/pc/dtls_transport.cc +++ b/pc/dtls_transport.cc @@ -13,6 +13,7 @@ #include #include "absl/types/optional.h" +#include "api/dtls_transport_interface.h" #include "api/sequence_checker.h" #include "pc/ice_transport.h" #include "rtc_base/checks.h" @@ -22,26 +23,6 @@ namespace webrtc { -namespace { - -DtlsTransportState TranslateState(cricket::DtlsTransportState internal_state) { - switch (internal_state) { - case cricket::DTLS_TRANSPORT_NEW: - return DtlsTransportState::kNew; - case cricket::DTLS_TRANSPORT_CONNECTING: - return DtlsTransportState::kConnecting; - case cricket::DTLS_TRANSPORT_CONNECTED: - return DtlsTransportState::kConnected; - case cricket::DTLS_TRANSPORT_CLOSED: - return DtlsTransportState::kClosed; - case cricket::DTLS_TRANSPORT_FAILED: - return DtlsTransportState::kFailed; - } - RTC_CHECK_NOTREACHED(); -} - -} // namespace - // Implementation of DtlsTransportInterface DtlsTransport::DtlsTransport( std::unique_ptr internal) @@ -51,9 +32,9 @@ DtlsTransport::DtlsTransport( ice_transport_(rtc::make_ref_counted( internal_dtls_transport_->ice_transport())) { RTC_DCHECK(internal_dtls_transport_.get()); - internal_dtls_transport_->SubscribeDtlsState( + internal_dtls_transport_->SubscribeDtlsTransportState( [this](cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { OnInternalDtlsState(transport, state); }); UpdateInformation(); @@ -90,7 +71,7 @@ void DtlsTransport::Clear() { RTC_DCHECK_RUN_ON(owner_thread_); RTC_DCHECK(internal()); bool must_send_event = - (internal()->dtls_state() != cricket::DTLS_TRANSPORT_CLOSED); + (internal()->dtls_state() != DtlsTransportState::kClosed); // The destructor of cricket::DtlsTransportInternal calls back // into DtlsTransport, so we can't hold the lock while releasing. 
std::unique_ptr transport_to_release; @@ -107,7 +88,7 @@ void DtlsTransport::Clear() { void DtlsTransport::OnInternalDtlsState( cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { RTC_DCHECK_RUN_ON(owner_thread_); RTC_DCHECK(transport == internal()); RTC_DCHECK(state == internal()->dtls_state()); @@ -122,7 +103,7 @@ void DtlsTransport::UpdateInformation() { MutexLock lock(&lock_); if (internal_dtls_transport_) { if (internal_dtls_transport_->dtls_state() == - cricket::DTLS_TRANSPORT_CONNECTED) { + DtlsTransportState::kConnected) { bool success = true; int ssl_cipher_suite; int tls_version; @@ -132,20 +113,19 @@ void DtlsTransport::UpdateInformation() { success &= internal_dtls_transport_->GetSrtpCryptoSuite(&srtp_cipher); if (success) { info_ = DtlsTransportInformation( - TranslateState(internal_dtls_transport_->dtls_state()), tls_version, + internal_dtls_transport_->dtls_state(), tls_version, ssl_cipher_suite, srtp_cipher, internal_dtls_transport_->GetRemoteSSLCertChain()); } else { RTC_LOG(LS_ERROR) << "DtlsTransport in connected state has incomplete " "TLS information"; info_ = DtlsTransportInformation( - TranslateState(internal_dtls_transport_->dtls_state()), - absl::nullopt, absl::nullopt, absl::nullopt, + internal_dtls_transport_->dtls_state(), absl::nullopt, + absl::nullopt, absl::nullopt, internal_dtls_transport_->GetRemoteSSLCertChain()); } } else { - info_ = DtlsTransportInformation( - TranslateState(internal_dtls_transport_->dtls_state())); + info_ = DtlsTransportInformation(internal_dtls_transport_->dtls_state()); } } else { info_ = DtlsTransportInformation(DtlsTransportState::kClosed); diff --git a/pc/dtls_transport.h b/pc/dtls_transport.h index 893b1263ae..cca4cc980a 100644 --- a/pc/dtls_transport.h +++ b/pc/dtls_transport.h @@ -60,7 +60,7 @@ class DtlsTransport : public DtlsTransportInterface { private: void OnInternalDtlsState(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state); + DtlsTransportState state); void UpdateInformation(); DtlsTransportObserverInterface* observer_ = nullptr; diff --git a/pc/dtmf_sender.h b/pc/dtmf_sender.h index 5cf7b2eba1..b64b50e09c 100644 --- a/pc/dtmf_sender.h +++ b/pc/dtmf_sender.h @@ -16,8 +16,8 @@ #include #include "api/dtmf_sender_interface.h" -#include "api/proxy.h" #include "api/scoped_refptr.h" +#include "pc/proxy.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/location.h" #include "rtc_base/ref_count.h" @@ -102,7 +102,6 @@ class DtmfSender : public DtmfSenderInterface, public sigslot::has_slots<> { // Define proxy for DtmfSenderInterface. BEGIN_PRIMARY_PROXY_MAP(DtmfSender) - PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD1(void, RegisterObserver, DtmfSenderObserverInterface*) PROXY_METHOD0(void, UnregisterObserver) @@ -112,7 +111,7 @@ PROXY_CONSTMETHOD0(std::string, tones) PROXY_CONSTMETHOD0(int, duration) PROXY_CONSTMETHOD0(int, inter_tone_gap) PROXY_CONSTMETHOD0(int, comma_delay) -END_PROXY_MAP() +END_PROXY_MAP(DtmfSender) // Get DTMF code from the DTMF event character. 
bool GetDtmfCode(char tone, int* code); diff --git a/pc/g3doc/dtls_transport.md b/pc/g3doc/dtls_transport.md index 331e5b2778..65206dff5d 100644 --- a/pc/g3doc/dtls_transport.md +++ b/pc/g3doc/dtls_transport.md @@ -4,26 +4,50 @@ ## Overview WebRTC uses DTLS in two ways: -* to negotiate keys for SRTP encryption using [DTLS-SRTP](https://www.rfc-editor.org/info/rfc5763) -* as a transport for SCTP which is used by the Datachannel API -The W3C WebRTC API represents this as the [DtlsTransport](https://w3c.github.io/webrtc-pc/#rtcdtlstransport-interface). +* to negotiate keys for SRTP encryption using + [DTLS-SRTP](https://www.rfc-editor.org/info/rfc5763) +* as a transport for SCTP which is used by the Datachannel API -The DTLS handshake happens after the ICE transport becomes writable and has found a valid pair. -It results in a set of keys being derived for DTLS-SRTP as well as a fingerprint of the remote certificate which is compared to the one given in the SDP `a=fingerprint:` line. +The W3C WebRTC API represents this as the +[DtlsTransport](https://w3c.github.io/webrtc-pc/#rtcdtlstransport-interface). + +The DTLS handshake happens after the ICE transport becomes writable and has +found a valid pair. It results in a set of keys being derived for DTLS-SRTP as +well as a fingerprint of the remote certificate which is compared to the one +given in the SDP `a=fingerprint:` line. This documentation provides an overview of how DTLS is implemented, i.e how the following classes interact. ## webrtc::DtlsTransport -The [`webrtc::DtlsTransport`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/dtls_transport.h;l=32;drc=6a55e7307b78edb50f94a1ff1ef8393d58218369) class -is a wrapper around the `cricket::DtlsTransportInternal` and allows registering observers implementing the `webrtc::DtlsTransportObserverInterface. -The [`webrtc::DtlsTransportObserverInterface`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/dtls_transport_interface.h;l=76;drc=34437d5660a80393d631657329ef74c6538be25a) will provide updates to the observers, passing around a snapshot of the transports state such as the connection state, the remote certificate(s) and the SRTP ciphers as [`DtlsTransportInformation`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/dtls_transport_interface.h;l=41;drc=34437d5660a80393d631657329ef74c6538be25a). -##cricket::DtlsTransportInternal -The [`cricket::DtlsTransportInternal`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport_internal.h;l=63;drc=34437d5660a80393d631657329ef74c6538be25a) class is an interface. Its implementation is [`cricket::DtlsTransport`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport.h;l=94;drc=653bab6790ac92c513b7cf4cd3ad59039c589a95). The `cricket::DtlsTransport` sends and receives network packets via an ICE transport. -It also demultiplexes DTLS packets and SRTP packets according to the scheme described in [RFC 5764](https://tools.ietf.org/html/rfc5764#section-5.1.2). +The [`webrtc::DtlsTransport`][1] class is a wrapper around the +`cricket::DtlsTransportInternal` and allows registering observers implementing +the `webrtc::DtlsTransportObserverInterface`. 
The +[`webrtc::DtlsTransportObserverInterface`][2] will provide updates to the +observers, passing around a snapshot of the transports state such as the +connection state, the remote certificate(s) and the SRTP ciphers as +[`DtlsTransportInformation`][3]. + +## cricket::DtlsTransportInternal + +The [`cricket::DtlsTransportInternal`][4] class is an interface. Its +implementation is [`cricket::DtlsTransport`][5]. The `cricket::DtlsTransport` +sends and receives network packets via an ICE transport. It also demultiplexes +DTLS packets and SRTP packets according to the scheme described in +[RFC 5764](https://tools.ietf.org/html/rfc5764#section-5.1.2). ## webrtc::DtlsSrtpTranport -The [`webrtc::DtlsSrtpTransport`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/dtls_srtp_transport.h;l=31;drc=c32f00ea9ddf3267257fe6b45d4d79c6f6bcb829) class -is responsÑ–ble for extracting the SRTP keys after the DTLS handshake as well as protection and unprotection of SRTP packets via its [`cricket::SrtpSession`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=33;drc=be66d95ab7f9428028806bbf66cb83800bda9241). + +The [`webrtc::DtlsSrtpTransport`][6] class is responsÑ–ble for extracting the +SRTP keys after the DTLS handshake as well as protection and unprotection of +SRTP packets via its [`cricket::SrtpSession`][7]. + +[1]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/dtls_transport.h;l=32;drc=6a55e7307b78edb50f94a1ff1ef8393d58218369 +[2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/dtls_transport_interface.h;l=76;drc=34437d5660a80393d631657329ef74c6538be25a +[3]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/dtls_transport_interface.h;l=41;drc=34437d5660a80393d631657329ef74c6538be25a +[4]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport_internal.h;l=63;drc=34437d5660a80393d631657329ef74c6538be25a +[5]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport.h;l=94;drc=653bab6790ac92c513b7cf4cd3ad59039c589a95 +[6]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/dtls_srtp_transport.h;l=31;drc=c32f00ea9ddf3267257fe6b45d4d79c6f6bcb829 +[7]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=33;drc=be66d95ab7f9428028806bbf66cb83800bda9241 diff --git a/pc/g3doc/rtp.md b/pc/g3doc/rtp.md new file mode 100644 index 0000000000..38c1702ad3 --- /dev/null +++ b/pc/g3doc/rtp.md @@ -0,0 +1,56 @@ + + + +# RTP in WebRTC + +WebRTC uses the RTP protocol described in +[RFC3550](https://datatracker.ietf.org/doc/html/rfc3550) for transporting audio +and video. Media is encrypted using [SRTP](./srtp.md). + +## Allocation of payload types + +RTP packets have a payload type field that describes which media codec can be +used to handle a packet. For some (older) codecs like PCMU the payload type is +assigned statically as described in +[RFC3551](https://datatracker.ietf.org/doc/html/rfc3551). For others, it is +assigned dynamically through the SDP. **Note:** there are no guarantees on the +stability of a payload type assignment. + +For this allocation, the range from 96 to 127 is used. When this range is +exhausted, the allocation falls back to the range from 35 to 63 as permitted by +[section 5.1 of RFC3550][1]. 
Note that older versions of WebRTC failed to +recognize payload types in the lower range. Newer codecs (such as flexfec-03 and +AV1) will by default be allocated in that range. + +Payload types in the range 64 to 95 are not used to avoid confusion with RTCP as +described in [RFC5761](https://datatracker.ietf.org/doc/html/rfc5761). + +## Allocation of audio payload types + +Audio payload types are assigned from a table by the [PayloadTypeMapper][2] +class. New audio codecs should be allocated in the lower dynamic range [35,63], +starting at 63, to reduce collisions with payload types + +## Allocation of video payload types + +Video payload types are allocated by the +[GetPayloadTypesAndDefaultCodecs method][3]. The set of codecs depends on the +platform, in particular for H264 codecs and their different profiles. Payload +numbers are assigned ascending from 96 for video codecs and their +[associated retransmission format](https://datatracker.ietf.org/doc/html/rfc4588). +Some codecs like flexfec-03 and AV1 are assigned to the lower range [35,63] for +reasons explained above. When the upper range [96,127] is exhausted, payload +types are assigned to the lower range [35,63], starting at 35. + +## Handling of payload type collisions + +Due to the requirement that payload types must be uniquely identifiable when +using [BUNDLE](https://datatracker.ietf.org/doc/html/rfc8829) collisions between +the assignments of the audio and video payload types may arise. These are +resolved by the [UsedPayloadTypes][4] class which will reassign payload type +numbers descending from 127. + +[1]: https://datatracker.ietf.org/doc/html/rfc3550#section-5.1 +[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/payload_type_mapper.cc;l=25;drc=4f26a3c7e8e20e0e0ca4ca67a6ebdf3f5543dc3f +[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_video_engine.cc;l=119;drc=b412efdb780c86e6530493afa403783d14985347 +[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/used_ids.h;l=94;drc=b412efdb780c86e6530493afa403783d14985347 diff --git a/pc/g3doc/srtp.md b/pc/g3doc/srtp.md new file mode 100644 index 0000000000..47446157c9 --- /dev/null +++ b/pc/g3doc/srtp.md @@ -0,0 +1,72 @@ + + + +# SRTP in WebRTC + +WebRTC mandates encryption of media by means of the Secure Realtime Protocol, or +SRTP, which is described in +[RFC 3711](https://datatracker.ietf.org/doc/html/rfc3711). + +The key negotiation in WebRTC happens using DTLS-SRTP which is described in +[RFC 5764](https://datatracker.ietf.org/doc/html/rfc5764). The older +[SDES protocol](https://datatracker.ietf.org/doc/html/rfc4568) is implemented +but not enabled by default. + +Unencrypted RTP can be enabled for debugging purposes by setting the +PeerConnections [`disable_encryption`][1] option to true. + +## Supported cipher suites + +The implementation supports the following cipher suites: + +* SRTP_AES128_CM_HMAC_SHA1_80 +* SRTP_AEAD_AES_128_GCM +* SRTP_AEAD_AES_256_GCM + +The SRTP_AES128_CM_HMAC_SHA1_32 cipher suite is accepted for audio-only +connections if offered by the other side. It is not actively supported, see +[SelectCrypto][2] for details. + +The cipher suite ordering allows a non-WebRTC peer to prefer GCM cipher suites, +however they are not selected as default by two instances of the WebRTC library. 
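Since the GCM suites are supported but not selected by default between two WebRTC endpoints, an application that wants them (or encrypted RTP header extensions, which the channel.cc changes above interact with) opts in through `webrtc::CryptoOptions`. A minimal sketch, assuming the standard `RTCConfiguration::crypto_options` field:

```cpp
// Sketch only: shows the opt-in knobs; both flags default to false.
#include "api/crypto/crypto_options.h"
#include "api/peer_connection_interface.h"

webrtc::PeerConnectionInterface::RTCConfiguration MakeConfig() {
  webrtc::PeerConnectionInterface::RTCConfiguration config;
  webrtc::CryptoOptions crypto;
  // Offer the AEAD GCM SRTP cipher suites.
  crypto.srtp.enable_gcm_crypto_suites = true;
  // Negotiate encrypted RTP header extensions (RFC 6904).
  crypto.srtp.enable_encrypted_rtp_header_extensions = true;
  config.crypto_options = crypto;
  return config;
}
```

The cipher-suite ordering described above only matters once the remote peer actually offers the GCM suites.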
+
+## cricket::SrtpSession
+
+The [`cricket::SrtpSession`][3] class provides encryption and decryption of
+SRTP packets using [`libsrtp`](https://github.com/cisco/libsrtp). Keys are
+provided by `SrtpTransport` or `DtlsSrtpTransport` in the [`SetSend`][4] and
+[`SetRecv`][5] methods.
+
+Encryption and decryption happen in place in the [`ProtectRtp`][6],
+[`ProtectRtcp`][7], [`UnprotectRtp`][8] and [`UnprotectRtcp`][9] methods. The
+`SrtpSession` class also takes care of initializing and deinitializing `libsrtp`
+by keeping track of how many instances are being used.
+
+## webrtc::SrtpTransport and webrtc::DtlsSrtpTransport
+
+The [`webrtc::SrtpTransport`][10] class controls the `SrtpSession`
+instances for RTP and RTCP. When
+[rtcp-mux](https://datatracker.ietf.org/doc/html/rfc5761) is used, the
+`SrtpSession` for RTCP is not needed.
+
+[`webrtc::DtlsSrtpTransport`][11] is a subclass of `SrtpTransport` that
+extracts the keying material when the DTLS handshake is done and configures it
+in its base class. It also becomes writable only once the DTLS handshake is
+done.
+
+## cricket::SrtpFilter
+
+The [`cricket::SrtpFilter`][12] class is used to negotiate SDES.
+
+[1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/peer_connection_interface.h;l=1413;drc=f467b445631189557d44de86a77ca6a0c3e2108d
+[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/media_session.cc;l=297;drc=3ac73bd0aa5322abee98f1ff8705af64a184bf61
+[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=33;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=40;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[5]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=51;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[6]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=62;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[7]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=69;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[8]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=72;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[9]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=73;drc=be66d95ab7f9428028806bbf66cb83800bda9241
+[10]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_transport.h;l=37;drc=a4d873786f10eedd72de25ad0d94ad7c53c1f68a
+[11]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/dtls_srtp_transport.h;l=31;drc=2f8e0536eb97ce2131e7a74e3ca06077aa0b64b3
+[12]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_filter.h;drc=d15a575ec3528c252419149d35977e55269d8a41
diff --git a/pc/jsep_transport.cc b/pc/jsep_transport.cc
index dc4649bf11..e72088885f 100644
--- a/pc/jsep_transport.cc
+++ b/pc/jsep_transport.cc
@@ -26,6 +26,7 @@
 #include "rtc_base/logging.h"
 #include "rtc_base/ref_counted_object.h"
 #include "rtc_base/strings/string_builder.h"
+#include "rtc_base/trace_event.h"
 
 using webrtc::SdpType;
 
@@ -104,6 +105,7 @@ JsepTransport::JsepTransport(
           ? rtc::make_ref_counted<webrtc::SctpTransport>(
                 std::move(sctp_transport))
           : nullptr) {
+  TRACE_EVENT0("webrtc", "JsepTransport::JsepTransport");
   RTC_DCHECK(ice_transport_);
   RTC_DCHECK(rtp_dtls_transport_);
   // |rtcp_ice_transport_| must be present iff |rtcp_dtls_transport_| is
@@ -129,6 +131,7 @@ JsepTransport::JsepTransport(
 }
 
 JsepTransport::~JsepTransport() {
+  TRACE_EVENT0("webrtc", "JsepTransport::~JsepTransport");
   if (sctp_transport_) {
     sctp_transport_->Clear();
   }
@@ -147,7 +150,7 @@ webrtc::RTCError JsepTransport::SetLocalJsepTransportDescription(
     const JsepTransportDescription& jsep_description,
     SdpType type) {
   webrtc::RTCError error;
-
+  TRACE_EVENT0("webrtc", "JsepTransport::SetLocalJsepTransportDescription");
   RTC_DCHECK_RUN_ON(network_thread_);
 
   IceParameters ice_parameters =
@@ -233,6 +236,7 @@ webrtc::RTCError JsepTransport::SetLocalJsepTransportDescription(
 webrtc::RTCError JsepTransport::SetRemoteJsepTransportDescription(
     const JsepTransportDescription& jsep_description,
     webrtc::SdpType type) {
+  TRACE_EVENT0("webrtc", "JsepTransport::SetRemoteJsepTransportDescription");
   webrtc::RTCError error;
 
   RTC_DCHECK_RUN_ON(network_thread_);
@@ -344,6 +348,7 @@ absl::optional<rtc::SSLRole> JsepTransport::GetDtlsRole() const {
 }
 
 bool JsepTransport::GetStats(TransportStats* stats) {
+  TRACE_EVENT0("webrtc", "JsepTransport::GetStats");
   RTC_DCHECK_RUN_ON(network_thread_);
   stats->transport_name = mid();
   stats->channel_stats.clear();
@@ -362,6 +367,7 @@ bool JsepTransport::GetStats(TransportStats* stats) {
 webrtc::RTCError JsepTransport::VerifyCertificateFingerprint(
     const rtc::RTCCertificate* certificate,
     const rtc::SSLFingerprint* fingerprint) const {
+  TRACE_EVENT0("webrtc", "JsepTransport::VerifyCertificateFingerprint");
   RTC_DCHECK_RUN_ON(network_thread_);
   if (!fingerprint) {
     return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
@@ -400,6 +406,7 @@ void JsepTransport::SetActiveResetSrtpParams(bool active_reset_srtp_params) {
 void JsepTransport::SetRemoteIceParameters(
     const IceParameters& ice_parameters,
     IceTransportInternal* ice_transport) {
+  TRACE_EVENT0("webrtc", "JsepTransport::SetRemoteIceParameters");
   RTC_DCHECK_RUN_ON(network_thread_);
   RTC_DCHECK(ice_transport);
   RTC_DCHECK(remote_description_);
diff --git a/pc/jsep_transport_collection.cc b/pc/jsep_transport_collection.cc
new file mode 100644
index 0000000000..ce068d99fc
--- /dev/null
+++ b/pc/jsep_transport_collection.cc
@@ -0,0 +1,255 @@
+/*
+ *  Copyright 2021 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS. All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "pc/jsep_transport_collection.h" + +#include +#include +#include +#include + +#include "p2p/base/p2p_constants.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +void BundleManager::Update(const cricket::SessionDescription* description) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + bundle_groups_.clear(); + for (const cricket::ContentGroup* new_bundle_group : + description->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE)) { + bundle_groups_.push_back( + std::make_unique(*new_bundle_group)); + RTC_DLOG(LS_VERBOSE) << "Establishing bundle group " + << new_bundle_group->ToString(); + } + established_bundle_groups_by_mid_.clear(); + for (const auto& bundle_group : bundle_groups_) { + for (const std::string& content_name : bundle_group->content_names()) { + established_bundle_groups_by_mid_[content_name] = bundle_group.get(); + } + } +} + +const cricket::ContentGroup* BundleManager::LookupGroupByMid( + const std::string& mid) const { + auto it = established_bundle_groups_by_mid_.find(mid); + return it != established_bundle_groups_by_mid_.end() ? it->second : nullptr; +} +bool BundleManager::IsFirstMidInGroup(const std::string& mid) const { + auto group = LookupGroupByMid(mid); + if (!group) { + return true; // Unbundled MIDs are considered group leaders + } + return mid == *(group->FirstContentName()); +} + +cricket::ContentGroup* BundleManager::LookupGroupByMid(const std::string& mid) { + auto it = established_bundle_groups_by_mid_.find(mid); + return it != established_bundle_groups_by_mid_.end() ? it->second : nullptr; +} + +void BundleManager::DeleteMid(const cricket::ContentGroup* bundle_group, + const std::string& mid) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_LOG(LS_VERBOSE) << "Deleting mid " << mid << " from bundle group " + << bundle_group->ToString(); + // Remove the rejected content from the |bundle_group|. + // The const pointer arg is used to identify the group, we verify + // it before we use it to make a modification. 
+ auto bundle_group_it = std::find_if( + bundle_groups_.begin(), bundle_groups_.end(), + [bundle_group](std::unique_ptr& group) { + return bundle_group == group.get(); + }); + RTC_DCHECK(bundle_group_it != bundle_groups_.end()); + (*bundle_group_it)->RemoveContentName(mid); + established_bundle_groups_by_mid_.erase( + established_bundle_groups_by_mid_.find(mid)); +} + +void BundleManager::DeleteGroup(const cricket::ContentGroup* bundle_group) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DLOG(LS_VERBOSE) << "Deleting bundle group " << bundle_group->ToString(); + + auto bundle_group_it = std::find_if( + bundle_groups_.begin(), bundle_groups_.end(), + [bundle_group](std::unique_ptr& group) { + return bundle_group == group.get(); + }); + RTC_DCHECK(bundle_group_it != bundle_groups_.end()); + auto mid_list = (*bundle_group_it)->content_names(); + for (const auto& content_name : mid_list) { + DeleteMid(bundle_group, content_name); + } + bundle_groups_.erase(bundle_group_it); +} + +void JsepTransportCollection::RegisterTransport( + const std::string& mid, + std::unique_ptr transport) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + SetTransportForMid(mid, transport.get()); + jsep_transports_by_name_[mid] = std::move(transport); + RTC_DCHECK(IsConsistent()); +} + +std::vector JsepTransportCollection::Transports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + std::vector result; + for (auto& kv : jsep_transports_by_name_) { + result.push_back(kv.second.get()); + } + return result; +} + +void JsepTransportCollection::DestroyAllTransports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (const auto& jsep_transport : jsep_transports_by_name_) { + map_change_callback_(jsep_transport.first, nullptr); + } + jsep_transports_by_name_.clear(); + RTC_DCHECK(IsConsistent()); +} + +const cricket::JsepTransport* JsepTransportCollection::GetTransportByName( + const std::string& transport_name) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = jsep_transports_by_name_.find(transport_name); + return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); +} + +cricket::JsepTransport* JsepTransportCollection::GetTransportByName( + const std::string& transport_name) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = jsep_transports_by_name_.find(transport_name); + return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); +} + +cricket::JsepTransport* JsepTransportCollection::GetTransportForMid( + const std::string& mid) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = mid_to_transport_.find(mid); + return it == mid_to_transport_.end() ? nullptr : it->second; +} + +const cricket::JsepTransport* JsepTransportCollection::GetTransportForMid( + const std::string& mid) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = mid_to_transport_.find(mid); + return it == mid_to_transport_.end() ? nullptr : it->second; +} + +bool JsepTransportCollection::SetTransportForMid( + const std::string& mid, + cricket::JsepTransport* jsep_transport) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(jsep_transport); + + auto it = mid_to_transport_.find(mid); + if (it != mid_to_transport_.end() && it->second == jsep_transport) + return true; + + pending_mids_.push_back(mid); + + // The map_change_callback must be called before destroying the + // transport, because it removes references to the transport + // in the RTP demuxer. 
+ bool result = map_change_callback_(mid, jsep_transport); + + if (it == mid_to_transport_.end()) { + mid_to_transport_.insert(std::make_pair(mid, jsep_transport)); + } else { + auto old_transport = it->second; + it->second = jsep_transport; + MaybeDestroyJsepTransport(old_transport); + } + RTC_DCHECK(IsConsistent()); + return result; +} + +void JsepTransportCollection::RemoveTransportForMid(const std::string& mid) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(IsConsistent()); + bool ret = map_change_callback_(mid, nullptr); + // Calling OnTransportChanged with nullptr should always succeed, since it is + // only expected to fail when adding media to a transport (not removing). + RTC_DCHECK(ret); + + auto old_transport = GetTransportForMid(mid); + if (old_transport) { + mid_to_transport_.erase(mid); + MaybeDestroyJsepTransport(old_transport); + } + RTC_DCHECK(IsConsistent()); +} + +void JsepTransportCollection::RollbackTransports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (auto&& mid : pending_mids_) { + RemoveTransportForMid(mid); + } + pending_mids_.clear(); +} + +void JsepTransportCollection::CommitTransports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + pending_mids_.clear(); +} + +bool JsepTransportCollection::TransportInUse( + cricket::JsepTransport* jsep_transport) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (const auto& kv : mid_to_transport_) { + if (kv.second == jsep_transport) { + return true; + } + } + return false; +} + +void JsepTransportCollection::MaybeDestroyJsepTransport( + cricket::JsepTransport* transport) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + // Don't destroy the JsepTransport if there are still media sections referring + // to it. + if (TransportInUse(transport)) { + return; + } + for (const auto& it : jsep_transports_by_name_) { + if (it.second.get() == transport) { + jsep_transports_by_name_.erase(it.first); + state_change_callback_(); + break; + } + } + RTC_DCHECK(IsConsistent()); +} + +bool JsepTransportCollection::IsConsistent() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (const auto& it : jsep_transports_by_name_) { + if (!TransportInUse(it.second.get())) { + RTC_LOG(LS_ERROR) << "Transport registered with mid " << it.first + << " is not in use, transport " << it.second.get(); + return false; + } + const auto& lookup = mid_to_transport_.find(it.first); + if (lookup->second != it.second.get()) { + // Not an error, but unusual. + RTC_DLOG(LS_INFO) << "Note: Mid " << it.first << " was registered to " + << it.second.get() << " but currently maps to " + << lookup->second; + } + } + return true; +} + +} // namespace webrtc diff --git a/pc/jsep_transport_collection.h b/pc/jsep_transport_collection.h new file mode 100644 index 0000000000..0dd528d348 --- /dev/null +++ b/pc/jsep_transport_collection.h @@ -0,0 +1,145 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef PC_JSEP_TRANSPORT_COLLECTION_H_ +#define PC_JSEP_TRANSPORT_COLLECTION_H_ + +#include +#include +#include +#include +#include +#include + +#include "api/sequence_checker.h" +#include "pc/jsep_transport.h" +#include "pc/session_description.h" +#include "rtc_base/checks.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +// This class manages information about RFC 8843 BUNDLE bundles +// in SDP descriptions. + +// This is a work-in-progress. Planned steps: +// 1) Move all Bundle-related data structures from JsepTransport +// into this class. +// 2) Move all Bundle-related functions into this class. +// 3) Move remaining Bundle-related logic into this class. +// Make data members private. +// 4) Refine interface to have comprehensible semantics. +// 5) Add unit tests. +// 6) Change the logic to do what's right. +class BundleManager { + public: + BundleManager() { + // Allow constructor to be called on a different thread. + sequence_checker_.Detach(); + } + const std::vector>& bundle_groups() + const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return bundle_groups_; + } + // Lookup a bundle group by a member mid name. + const cricket::ContentGroup* LookupGroupByMid(const std::string& mid) const; + cricket::ContentGroup* LookupGroupByMid(const std::string& mid); + // Returns true if the MID is the first item of a group, or if + // the MID is not a member of a group. + bool IsFirstMidInGroup(const std::string& mid) const; + // Update the groups description. This completely replaces the group + // description with the one from the SessionDescription. + void Update(const cricket::SessionDescription* description); + // Delete a MID from the group that contains it. + void DeleteMid(const cricket::ContentGroup* bundle_group, + const std::string& mid); + // Delete a group. + void DeleteGroup(const cricket::ContentGroup* bundle_group); + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + std::vector> bundle_groups_ + RTC_GUARDED_BY(sequence_checker_); + std::map + established_bundle_groups_by_mid_; +}; + +// This class keeps the mapping of MIDs to transports. +// It is pulled out here because a lot of the code that deals with +// bundles end up modifying this map, and the two need to be consistent; +// the managers may merge. +class JsepTransportCollection { + public: + JsepTransportCollection(std::function + map_change_callback, + std::function state_change_callback) + : map_change_callback_(map_change_callback), + state_change_callback_(state_change_callback) { + // Allow constructor to be called on a different thread. + sequence_checker_.Detach(); + } + + void RegisterTransport(const std::string& mid, + std::unique_ptr transport); + std::vector Transports(); + void DestroyAllTransports(); + // Lookup a JsepTransport by the MID that was used to register it. + cricket::JsepTransport* GetTransportByName(const std::string& mid); + const cricket::JsepTransport* GetTransportByName( + const std::string& mid) const; + // Lookup a JsepTransport by any MID that refers to it. + cricket::JsepTransport* GetTransportForMid(const std::string& mid); + const cricket::JsepTransport* GetTransportForMid( + const std::string& mid) const; + // Set transport for a MID. This may destroy a transport if it is no + // longer in use. + bool SetTransportForMid(const std::string& mid, + cricket::JsepTransport* jsep_transport); + // Remove a transport for a MID. This may destroy a transport if it is + // no longer in use. 
+ void RemoveTransportForMid(const std::string& mid); + // Roll back pending mid-to-transport mappings. + void RollbackTransports(); + // Commit pending mid-transport mappings (rollback is no longer possible). + void CommitTransports(); + // Returns true if any mid currently maps to this transport. + bool TransportInUse(cricket::JsepTransport* jsep_transport) const; + + private: + // Destroy a transport if it's no longer in use. + void MaybeDestroyJsepTransport(cricket::JsepTransport* transport); + + bool IsConsistent(); // For testing only: Verify internal structure. + + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + // This member owns the JSEP transports. + std::map> + jsep_transports_by_name_ RTC_GUARDED_BY(sequence_checker_); + + // This keeps track of the mapping between media section + // (BaseChannel/SctpTransport) and the JsepTransport underneath. + std::map mid_to_transport_ + RTC_GUARDED_BY(sequence_checker_); + // Keep track of mids that have been mapped to transports. Used for rollback. + std::vector pending_mids_ RTC_GUARDED_BY(sequence_checker_); + // Callback used to inform subscribers of altered transports. + const std::function + map_change_callback_; + // Callback used to inform subscribers of possibly altered state. + const std::function state_change_callback_; +}; + +} // namespace webrtc + +#endif // PC_JSEP_TRANSPORT_COLLECTION_H_ diff --git a/pc/jsep_transport_controller.cc b/pc/jsep_transport_controller.cc index 372f4f69aa..f0e377e048 100644 --- a/pc/jsep_transport_controller.cc +++ b/pc/jsep_transport_controller.cc @@ -13,10 +13,13 @@ #include #include +#include #include +#include #include #include "absl/algorithm/container.h" +#include "api/dtls_transport_interface.h" #include "api/rtp_parameters.h" #include "api/sequence_checker.h" #include "api/transport/enums.h" @@ -28,27 +31,13 @@ #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" -#include "rtc_base/net_helper.h" -#include "rtc_base/socket_address.h" #include "rtc_base/thread.h" +#include "rtc_base/trace_event.h" using webrtc::SdpType; namespace webrtc { -namespace { - -bool IsBundledButNotFirstMid( - const std::map& bundle_groups_by_mid, - const std::string& mid) { - auto it = bundle_groups_by_mid.find(mid); - if (it == bundle_groups_by_mid.end()) - return false; - return mid != *it->second->FirstContentName(); -} - -} // namespace - JsepTransportController::JsepTransportController( rtc::Thread* network_thread, cricket::PortAllocator* port_allocator, @@ -57,6 +46,14 @@ JsepTransportController::JsepTransportController( : network_thread_(network_thread), port_allocator_(port_allocator), async_dns_resolver_factory_(async_dns_resolver_factory), + transports_( + [this](const std::string& mid, cricket::JsepTransport* transport) { + return OnTransportChanged(mid, transport); + }, + [this]() { + RTC_DCHECK_RUN_ON(network_thread_); + UpdateAggregateStates_n(); + }), config_(config), active_reset_srtp_params_(config.active_reset_srtp_params) { // The |transport_observer| is assumed to be non-null. 
@@ -76,6 +73,7 @@ JsepTransportController::~JsepTransportController() { RTCError JsepTransportController::SetLocalDescription( SdpType type, const cricket::SessionDescription* description) { + TRACE_EVENT0("webrtc", "JsepTransportController::SetLocalDescription"); if (!network_thread_->IsCurrent()) { return network_thread_->Invoke( RTC_FROM_HERE, [=] { return SetLocalDescription(type, description); }); @@ -96,6 +94,7 @@ RTCError JsepTransportController::SetLocalDescription( RTCError JsepTransportController::SetRemoteDescription( SdpType type, const cricket::SessionDescription* description) { + TRACE_EVENT0("webrtc", "JsepTransportController::SetRemoteDescription"); if (!network_thread_->IsCurrent()) { return network_thread_->Invoke( RTC_FROM_HERE, [=] { return SetRemoteDescription(type, description); }); @@ -175,8 +174,8 @@ void JsepTransportController::SetIceConfig(const cricket::IceConfig& config) { void JsepTransportController::SetNeedsIceRestartFlag() { RTC_DCHECK_RUN_ON(network_thread_); - for (auto& kv : jsep_transports_by_name_) { - kv.second->SetNeedsIceRestartFlag(); + for (auto& transport : transports_.Transports()) { + transport->SetNeedsIceRestartFlag(); } } @@ -229,8 +228,8 @@ bool JsepTransportController::SetLocalCertificate( // Set certificate for JsepTransport, which verifies it matches the // fingerprint in SDP, and DTLS transport. // Fallback from DTLS to SDES is not supported. - for (auto& kv : jsep_transports_by_name_) { - kv.second->SetLocalCertificate(certificate_); + for (auto& transport : transports_.Transports()) { + transport->SetLocalCertificate(certificate_); } for (auto& dtls : GetDtlsTransports()) { bool set_cert_success = dtls->SetLocalCertificate(certificate_); @@ -370,8 +369,8 @@ void JsepTransportController::SetActiveResetSrtpParams( << "Updating the active_reset_srtp_params for JsepTransportController: " << active_reset_srtp_params; active_reset_srtp_params_ = active_reset_srtp_params; - for (auto& kv : jsep_transports_by_name_) { - kv.second->SetActiveResetSrtpParams(active_reset_srtp_params); + for (auto& transport : transports_.Transports()) { + transport->SetActiveResetSrtpParams(active_reset_srtp_params); } } @@ -381,13 +380,7 @@ void JsepTransportController::RollbackTransports() { return; } RTC_DCHECK_RUN_ON(network_thread_); - for (auto&& mid : pending_mids_) { - RemoveTransportForMid(mid); - } - for (auto&& mid : pending_mids_) { - MaybeDestroyJsepTransport(mid); - } - pending_mids_.clear(); + transports_.RollbackTransports(); } rtc::scoped_refptr @@ -519,9 +512,7 @@ std::vector JsepTransportController::GetDtlsTransports() { RTC_DCHECK_RUN_ON(network_thread_); std::vector dtls_transports; - for (auto it = jsep_transports_by_name_.begin(); - it != jsep_transports_by_name_.end(); ++it) { - auto jsep_transport = it->second.get(); + for (auto jsep_transport : transports_.Transports()) { RTC_DCHECK(jsep_transport); if (jsep_transport->rtp_dtls_transport()) { dtls_transports.push_back(jsep_transport->rtp_dtls_transport()); @@ -538,6 +529,7 @@ RTCError JsepTransportController::ApplyDescription_n( bool local, SdpType type, const cricket::SessionDescription* description) { + TRACE_EVENT0("webrtc", "JsepTransportController::ApplyDescription_n"); RTC_DCHECK(description); if (local) { @@ -551,28 +543,18 @@ RTCError JsepTransportController::ApplyDescription_n( if (!error.ok()) { return error; } - // Established BUNDLE groups by MID. 
- std::map - established_bundle_groups_by_mid; - for (const auto& bundle_group : bundle_groups_) { - for (const std::string& content_name : bundle_group->content_names()) { - established_bundle_groups_by_mid[content_name] = bundle_group.get(); - } - } std::map> merged_encrypted_extension_ids_by_bundle; - if (!bundle_groups_.empty()) { + if (!bundles_.bundle_groups().empty()) { merged_encrypted_extension_ids_by_bundle = - MergeEncryptedHeaderExtensionIdsForBundles( - established_bundle_groups_by_mid, description); + MergeEncryptedHeaderExtensionIdsForBundles(description); } for (const cricket::ContentInfo& content_info : description->contents()) { // Don't create transports for rejected m-lines and bundled m-lines. if (content_info.rejected || - IsBundledButNotFirstMid(established_bundle_groups_by_mid, - content_info.name)) { + !bundles_.IsFirstMidInGroup(content_info.name)) { continue; } error = MaybeCreateJsepTransport(local, content_info, *description); @@ -588,15 +570,13 @@ RTCError JsepTransportController::ApplyDescription_n( const cricket::TransportInfo& transport_info = description->transport_infos()[i]; if (content_info.rejected) { - // This may cause groups to be removed from |bundle_groups_| and - // |established_bundle_groups_by_mid|. - HandleRejectedContent(content_info, established_bundle_groups_by_mid); + // This may cause groups to be removed from |bundles_.bundle_groups()|. + HandleRejectedContent(content_info); continue; } - auto it = established_bundle_groups_by_mid.find(content_info.name); const cricket::ContentGroup* established_bundle_group = - it != established_bundle_groups_by_mid.end() ? it->second : nullptr; + bundles_.LookupGroupByMid(content_info.name); // For bundle members that are not BUNDLE-tagged (not first in the group), // configure their transport to be the same as the BUNDLE-tagged transport. @@ -656,7 +636,7 @@ RTCError JsepTransportController::ApplyDescription_n( } } if (type == SdpType::kAnswer) { - pending_mids_.clear(); + transports_.CommitTransports(); } return RTCError::OK(); } @@ -747,7 +727,7 @@ RTCError JsepTransportController::ValidateAndMaybeUpdateBundleGroups( } } - for (const auto& bundle_group : bundle_groups_) { + for (const auto& bundle_group : bundles_.bundle_groups()) { for (const std::string& content_name : bundle_group->content_names()) { // An answer that removes m= sections from pre-negotiated BUNDLE group // without rejecting it, is invalid. @@ -773,14 +753,10 @@ RTCError JsepTransportController::ValidateAndMaybeUpdateBundleGroups( } if (ShouldUpdateBundleGroup(type, description)) { - bundle_groups_.clear(); - for (const cricket::ContentGroup* new_bundle_group : new_bundle_groups) { - bundle_groups_.push_back( - std::make_unique(*new_bundle_group)); - } + bundles_.Update(description); } - for (const auto& bundle_group : bundle_groups_) { + for (const auto& bundle_group : bundles_.bundle_groups()) { if (!bundle_group->FirstContentName()) continue; @@ -824,47 +800,34 @@ RTCError JsepTransportController::ValidateContent( } void JsepTransportController::HandleRejectedContent( - const cricket::ContentInfo& content_info, - std::map& - established_bundle_groups_by_mid) { + const cricket::ContentInfo& content_info) { // If the content is rejected, let the // BaseChannel/SctpTransport change the RtpTransport/DtlsTransport first, // then destroy the cricket::JsepTransport. - auto it = established_bundle_groups_by_mid.find(content_info.name); cricket::ContentGroup* bundle_group = - it != established_bundle_groups_by_mid.end() ? 
it->second : nullptr; + bundles_.LookupGroupByMid(content_info.name); if (bundle_group && !bundle_group->content_names().empty() && content_info.name == *bundle_group->FirstContentName()) { // Rejecting a BUNDLE group's first mid means we are rejecting the entire // group. for (const auto& content_name : bundle_group->content_names()) { - RemoveTransportForMid(content_name); - // We are about to delete this BUNDLE group, erase all mappings to it. - it = established_bundle_groups_by_mid.find(content_name); - RTC_DCHECK(it != established_bundle_groups_by_mid.end()); - established_bundle_groups_by_mid.erase(it); + transports_.RemoveTransportForMid(content_name); } // Delete the BUNDLE group. - auto bundle_group_it = std::find_if( - bundle_groups_.begin(), bundle_groups_.end(), - [bundle_group](std::unique_ptr& group) { - return bundle_group == group.get(); - }); - RTC_DCHECK(bundle_group_it != bundle_groups_.end()); - bundle_groups_.erase(bundle_group_it); + bundles_.DeleteGroup(bundle_group); } else { - RemoveTransportForMid(content_info.name); + transports_.RemoveTransportForMid(content_info.name); if (bundle_group) { // Remove the rejected content from the |bundle_group|. - bundle_group->RemoveContentName(content_info.name); + bundles_.DeleteMid(bundle_group, content_info.name); } } - MaybeDestroyJsepTransport(content_info.name); } bool JsepTransportController::HandleBundledContent( const cricket::ContentInfo& content_info, const cricket::ContentGroup& bundle_group) { + TRACE_EVENT0("webrtc", "JsepTransportController::HandleBundledContent"); RTC_DCHECK(bundle_group.FirstContentName()); auto jsep_transport = GetJsepTransportByName(*bundle_group.FirstContentName()); @@ -872,49 +835,11 @@ bool JsepTransportController::HandleBundledContent( // If the content is bundled, let the // BaseChannel/SctpTransport change the RtpTransport/DtlsTransport first, // then destroy the cricket::JsepTransport. - if (SetTransportForMid(content_info.name, jsep_transport)) { - // TODO(bugs.webrtc.org/9719) For media transport this is far from ideal, - // because it means that we first create media transport and start - // connecting it, and then we destroy it. We will need to address it before - // video path is enabled. - MaybeDestroyJsepTransport(content_info.name); - return true; - } - return false; -} - -bool JsepTransportController::SetTransportForMid( - const std::string& mid, - cricket::JsepTransport* jsep_transport) { - RTC_DCHECK_RUN_ON(network_thread_); - RTC_DCHECK(jsep_transport); - - auto it = mid_to_transport_.find(mid); - if (it != mid_to_transport_.end() && it->second == jsep_transport) - return true; - - pending_mids_.push_back(mid); - - if (it == mid_to_transport_.end()) { - mid_to_transport_.insert(std::make_pair(mid, jsep_transport)); - } else { - it->second = jsep_transport; - } - - return config_.transport_observer->OnTransportChanged( - mid, jsep_transport->rtp_transport(), jsep_transport->RtpDtlsTransport(), - jsep_transport->data_channel_transport()); -} - -void JsepTransportController::RemoveTransportForMid(const std::string& mid) { - RTC_DCHECK_RUN_ON(network_thread_); - bool ret = config_.transport_observer->OnTransportChanged(mid, nullptr, - nullptr, nullptr); - // Calling OnTransportChanged with nullptr should always succeed, since it is - // only expected to fail when adding media to a transport (not removing). 
- RTC_DCHECK(ret); - - mid_to_transport_.erase(mid); + // TODO(bugs.webrtc.org/9719) For media transport this is far from ideal, + // because it means that we first create media transport and start + // connecting it, and then we destroy it. We will need to address it before + // video path is enabled. + return transports_.SetTransportForMid(content_info.name, jsep_transport); } cricket::JsepTransportDescription @@ -923,6 +848,8 @@ JsepTransportController::CreateJsepTransportDescription( const cricket::TransportInfo& transport_info, const std::vector& encrypted_extension_ids, int rtp_abs_sendtime_extn_id) { + TRACE_EVENT0("webrtc", + "JsepTransportController::CreateJsepTransportDescription"); const cricket::MediaContentDescription* content_desc = content_info.media_description(); RTC_DCHECK(content_desc); @@ -978,20 +905,19 @@ std::vector JsepTransportController::GetEncryptedHeaderExtensionIds( std::map> JsepTransportController::MergeEncryptedHeaderExtensionIdsForBundles( - const std::map& bundle_groups_by_mid, const cricket::SessionDescription* description) { RTC_DCHECK(description); - RTC_DCHECK(!bundle_groups_.empty()); + RTC_DCHECK(!bundles_.bundle_groups().empty()); std::map> merged_encrypted_extension_ids_by_bundle; // Union the encrypted header IDs in the group when bundle is enabled. for (const cricket::ContentInfo& content_info : description->contents()) { - auto it = bundle_groups_by_mid.find(content_info.name); - if (it == bundle_groups_by_mid.end()) + auto group = bundles_.LookupGroupByMid(content_info.name); + if (!group) continue; // Get or create list of IDs for the BUNDLE group. std::vector& merged_ids = - merged_encrypted_extension_ids_by_bundle[it->second]; + merged_encrypted_extension_ids_by_bundle[group]; // Add IDs not already in the list. std::vector extension_ids = GetEncryptedHeaderExtensionIds(content_info); @@ -1016,32 +942,31 @@ int JsepTransportController::GetRtpAbsSendTimeHeaderExtensionId( const webrtc::RtpExtension* send_time_extension = webrtc::RtpExtension::FindHeaderExtensionByUri( content_desc->rtp_header_extensions(), - webrtc::RtpExtension::kAbsSendTimeUri); + webrtc::RtpExtension::kAbsSendTimeUri, + config_.crypto_options.srtp.enable_encrypted_rtp_header_extensions + ? webrtc::RtpExtension::kPreferEncryptedExtension + : webrtc::RtpExtension::kDiscardEncryptedExtension); return send_time_extension ? send_time_extension->id : -1; } const cricket::JsepTransport* JsepTransportController::GetJsepTransportForMid( const std::string& mid) const { - auto it = mid_to_transport_.find(mid); - return it == mid_to_transport_.end() ? nullptr : it->second; + return transports_.GetTransportForMid(mid); } cricket::JsepTransport* JsepTransportController::GetJsepTransportForMid( const std::string& mid) { - auto it = mid_to_transport_.find(mid); - return it == mid_to_transport_.end() ? nullptr : it->second; + return transports_.GetTransportForMid(mid); } const cricket::JsepTransport* JsepTransportController::GetJsepTransportByName( const std::string& transport_name) const { - auto it = jsep_transports_by_name_.find(transport_name); - return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); + return transports_.GetTransportByName(transport_name); } cricket::JsepTransport* JsepTransportController::GetJsepTransportByName( const std::string& transport_name) { - auto it = jsep_transports_by_name_.find(transport_name); - return (it == jsep_transports_by_name_.end()) ? 
nullptr : it->second.get(); + return transports_.GetTransportByName(transport_name); } RTCError JsepTransportController::MaybeCreateJsepTransport( @@ -1114,39 +1039,13 @@ RTCError JsepTransportController::MaybeCreateJsepTransport( jsep_transport->SignalRtcpMuxActive.connect( this, &JsepTransportController::UpdateAggregateStates_n); - SetTransportForMid(content_info.name, jsep_transport.get()); - - jsep_transports_by_name_[content_info.name] = std::move(jsep_transport); + transports_.RegisterTransport(content_info.name, std::move(jsep_transport)); UpdateAggregateStates_n(); return RTCError::OK(); } -void JsepTransportController::MaybeDestroyJsepTransport( - const std::string& mid) { - auto jsep_transport = GetJsepTransportByName(mid); - if (!jsep_transport) { - return; - } - - // Don't destroy the JsepTransport if there are still media sections referring - // to it. - for (const auto& kv : mid_to_transport_) { - if (kv.second == jsep_transport) { - return; - } - } - - jsep_transports_by_name_.erase(mid); - UpdateAggregateStates_n(); -} - void JsepTransportController::DestroyAllJsepTransports_n() { - for (const auto& jsep_transport : jsep_transports_by_name_) { - config_.transport_observer->OnTransportChanged(jsep_transport.first, - nullptr, nullptr, nullptr); - } - - jsep_transports_by_name_.clear(); + transports_.DestroyAllTransports(); } void JsepTransportController::SetIceRole_n(cricket::IceRole ice_role) { @@ -1276,6 +1175,7 @@ void JsepTransportController::OnTransportStateChanged_n( } void JsepTransportController::UpdateAggregateStates_n() { + TRACE_EVENT0("webrtc", "JsepTransportController::UpdateAggregateStates_n"); auto dtls_transports = GetDtlsTransports(); cricket::IceConnectionState new_connection_state = cricket::kIceConnectionConnecting; @@ -1291,7 +1191,7 @@ void JsepTransportController::UpdateAggregateStates_n() { bool all_done_gathering = !dtls_transports.empty(); std::map ice_state_counts; - std::map dtls_state_counts; + std::map dtls_state_counts; for (const auto& dtls : dtls_transports) { any_failed = any_failed || dtls->ice_transport()->GetState() == @@ -1393,16 +1293,15 @@ void JsepTransportController::UpdateAggregateStates_n() { // Note that "connecting" is only a valid state for DTLS transports while // "checking", "completed" and "disconnected" are only valid for ICE // transports. 
- int total_connected = total_ice_connected + - dtls_state_counts[cricket::DTLS_TRANSPORT_CONNECTED]; + int total_connected = + total_ice_connected + dtls_state_counts[DtlsTransportState::kConnected]; int total_dtls_connecting = - dtls_state_counts[cricket::DTLS_TRANSPORT_CONNECTING]; + dtls_state_counts[DtlsTransportState::kConnecting]; int total_failed = - total_ice_failed + dtls_state_counts[cricket::DTLS_TRANSPORT_FAILED]; + total_ice_failed + dtls_state_counts[DtlsTransportState::kFailed]; int total_closed = - total_ice_closed + dtls_state_counts[cricket::DTLS_TRANSPORT_CLOSED]; - int total_new = - total_ice_new + dtls_state_counts[cricket::DTLS_TRANSPORT_NEW]; + total_ice_closed + dtls_state_counts[DtlsTransportState::kClosed]; + int total_new = total_ice_new + dtls_state_counts[DtlsTransportState::kNew]; int total_transports = total_ice * 2; if (total_failed > 0) { @@ -1464,4 +1363,21 @@ void JsepTransportController::OnDtlsHandshakeError( config_.on_dtls_handshake_error_(error); } +bool JsepTransportController::OnTransportChanged( + const std::string& mid, + cricket::JsepTransport* jsep_transport) { + if (config_.transport_observer) { + if (jsep_transport) { + return config_.transport_observer->OnTransportChanged( + mid, jsep_transport->rtp_transport(), + jsep_transport->RtpDtlsTransport(), + jsep_transport->data_channel_transport()); + } else { + return config_.transport_observer->OnTransportChanged(mid, nullptr, + nullptr, nullptr); + } + } + return false; +} + } // namespace webrtc diff --git a/pc/jsep_transport_controller.h b/pc/jsep_transport_controller.h index e3c1187fb4..71b01bffb2 100644 --- a/pc/jsep_transport_controller.h +++ b/pc/jsep_transport_controller.h @@ -31,6 +31,7 @@ #include "api/rtc_error.h" #include "api/rtc_event_log/rtc_event_log.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "api/transport/data_channel_transport_interface.h" #include "api/transport/sctp_transport_factory_interface.h" #include "media/sctp/sctp_transport_internal.h" @@ -48,6 +49,7 @@ #include "pc/dtls_srtp_transport.h" #include "pc/dtls_transport.h" #include "pc/jsep_transport.h" +#include "pc/jsep_transport_collection.h" #include "pc/rtp_transport.h" #include "pc/rtp_transport_internal.h" #include "pc/sctp_transport.h" @@ -55,6 +57,7 @@ #include "pc/srtp_transport.h" #include "pc/transport_stats.h" #include "rtc_base/callback_list.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/helpers.h" @@ -327,18 +330,12 @@ class JsepTransportController : public sigslot::has_slots<> { const cricket::SessionDescription* description); RTCError ValidateContent(const cricket::ContentInfo& content_info); - void HandleRejectedContent(const cricket::ContentInfo& content_info, - std::map& - established_bundle_groups_by_mid) + void HandleRejectedContent(const cricket::ContentInfo& content_info) RTC_RUN_ON(network_thread_); bool HandleBundledContent(const cricket::ContentInfo& content_info, const cricket::ContentGroup& bundle_group) RTC_RUN_ON(network_thread_); - bool SetTransportForMid(const std::string& mid, - cricket::JsepTransport* jsep_transport); - void RemoveTransportForMid(const std::string& mid); - cricket::JsepTransportDescription CreateJsepTransportDescription( const cricket::ContentInfo& content_info, const cricket::TransportInfo& transport_info, @@ -350,7 +347,6 @@ class JsepTransportController : public sigslot::has_slots<> { std::map> MergeEncryptedHeaderExtensionIdsForBundles( - const 
std::map& bundle_groups_by_mid, const cricket::SessionDescription* description); std::vector GetEncryptedHeaderExtensionIds( const cricket::ContentInfo& content_info); @@ -384,8 +380,6 @@ class JsepTransportController : public sigslot::has_slots<> { const cricket::SessionDescription& description) RTC_RUN_ON(network_thread_); - void MaybeDestroyJsepTransport(const std::string& mid) - RTC_RUN_ON(network_thread_); void DestroyAllJsepTransports_n() RTC_RUN_ON(network_thread_); void SetIceRole_n(cricket::IceRole ice_role) RTC_RUN_ON(network_thread_); @@ -452,18 +446,14 @@ class JsepTransportController : public sigslot::has_slots<> { void OnDtlsHandshakeError(rtc::SSLHandshakeError error); + bool OnTransportChanged(const std::string& mid, + cricket::JsepTransport* transport); + rtc::Thread* const network_thread_ = nullptr; cricket::PortAllocator* const port_allocator_ = nullptr; AsyncDnsResolverFactoryInterface* const async_dns_resolver_factory_ = nullptr; - std::map> - jsep_transports_by_name_ RTC_GUARDED_BY(network_thread_); - // This keeps track of the mapping between media section - // (BaseChannel/SctpTransport) and the JsepTransport underneath. - std::map mid_to_transport_ - RTC_GUARDED_BY(network_thread_); - // Keep track of mids that have been mapped to transports. Used for rollback. - std::vector pending_mids_ RTC_GUARDED_BY(network_thread_); + JsepTransportCollection transports_ RTC_GUARDED_BY(network_thread_); // Aggregate states for Transports. // standardized_ice_connection_state_ is intended to replace // ice_connection_state, see bugs.webrtc.org/9308 @@ -483,14 +473,13 @@ class JsepTransportController : public sigslot::has_slots<> { const cricket::SessionDescription* remote_desc_ = nullptr; absl::optional initial_offerer_; - // Use unique_ptr<> to get a stable address. - std::vector> bundle_groups_; - cricket::IceConfig ice_config_; cricket::IceRole ice_role_ = cricket::ICEROLE_CONTROLLING; uint64_t ice_tiebreaker_ = rtc::CreateRandomId64(); rtc::scoped_refptr certificate_; + BundleManager bundles_; + RTC_DISALLOW_COPY_AND_ASSIGN(JsepTransportController); }; diff --git a/pc/jsep_transport_controller_unittest.cc b/pc/jsep_transport_controller_unittest.cc index d13a94cb29..2b261c83c8 100644 --- a/pc/jsep_transport_controller_unittest.cc +++ b/pc/jsep_transport_controller_unittest.cc @@ -13,6 +13,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "p2p/base/dtls_transport_factory.h" #include "p2p/base/fake_dtls_transport.h" #include "p2p/base/fake_ice_transport.h" @@ -693,8 +694,8 @@ TEST_F(JsepTransportControllerTest, combined_connection_state_, kTimeout); EXPECT_EQ(2, combined_connection_state_signal_count_); - fake_audio_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); - fake_video_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); + fake_audio_dtls->SetDtlsState(DtlsTransportState::kConnected); + fake_video_dtls->SetDtlsState(DtlsTransportState::kConnected); // Set the connection count to be 2 and the cricket::FakeIceTransport will set // the transport state to be STATE_CONNECTING. 
fake_video_dtls->fake_ice_transport()->SetConnectionCount(2); @@ -750,8 +751,8 @@ TEST_F(JsepTransportControllerTest, SignalConnectionStateComplete) { combined_connection_state_, kTimeout); EXPECT_EQ(2, combined_connection_state_signal_count_); - fake_audio_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); - fake_video_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); + fake_audio_dtls->SetDtlsState(DtlsTransportState::kConnected); + fake_video_dtls->SetDtlsState(DtlsTransportState::kConnected); // Set the connection count to be 1 and the cricket::FakeIceTransport will set // the transport state to be STATE_COMPLETED. fake_video_dtls->fake_ice_transport()->SetTransportState( @@ -839,7 +840,7 @@ TEST_F(JsepTransportControllerTest, fake_audio_dtls->SetWritable(true); fake_audio_dtls->fake_ice_transport()->SetCandidatesGatheringComplete(); fake_audio_dtls->fake_ice_transport()->SetConnectionCount(1); - fake_audio_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); + fake_audio_dtls->SetDtlsState(DtlsTransportState::kConnected); EXPECT_EQ(1, gathering_state_signal_count_); // Set the remote description and enable the bundle. diff --git a/pc/media_session.cc b/pc/media_session.cc index b4fc63439a..3c73ddf535 100644 --- a/pc/media_session.cc +++ b/pc/media_session.cc @@ -989,68 +989,6 @@ static Codecs MatchCodecPreference( return filtered_codecs; } -static bool FindByUriAndEncryption(const RtpHeaderExtensions& extensions, - const webrtc::RtpExtension& ext_to_match, - webrtc::RtpExtension* found_extension) { - auto it = absl::c_find_if( - extensions, [&ext_to_match](const webrtc::RtpExtension& extension) { - // We assume that all URIs are given in a canonical - // format. - return extension.uri == ext_to_match.uri && - extension.encrypt == ext_to_match.encrypt; - }); - if (it == extensions.end()) { - return false; - } - if (found_extension) { - *found_extension = *it; - } - return true; -} - -static bool FindByUri(const RtpHeaderExtensions& extensions, - const webrtc::RtpExtension& ext_to_match, - webrtc::RtpExtension* found_extension) { - // We assume that all URIs are given in a canonical format. - const webrtc::RtpExtension* found = - webrtc::RtpExtension::FindHeaderExtensionByUri(extensions, - ext_to_match.uri); - if (!found) { - return false; - } - if (found_extension) { - *found_extension = *found; - } - return true; -} - -static bool FindByUriWithEncryptionPreference( - const RtpHeaderExtensions& extensions, - absl::string_view uri_to_match, - bool encryption_preference, - webrtc::RtpExtension* found_extension) { - const webrtc::RtpExtension* unencrypted_extension = nullptr; - for (const webrtc::RtpExtension& extension : extensions) { - // We assume that all URIs are given in a canonical format. - if (extension.uri == uri_to_match) { - if (!encryption_preference || extension.encrypt) { - if (found_extension) { - *found_extension = extension; - } - return true; - } - unencrypted_extension = &extension; - } - } - if (unencrypted_extension) { - if (found_extension) { - *found_extension = *unencrypted_extension; - } - return true; - } - return false; -} - // Adds all extensions from |reference_extensions| to |offered_extensions| that // don't already exist in |offered_extensions| and ensure the IDs don't // collide. 
If an extension is added, it's also added to |regular_extensions| or @@ -1065,22 +1003,28 @@ static void MergeRtpHdrExts(const RtpHeaderExtensions& reference_extensions, RtpHeaderExtensions* encrypted_extensions, UsedRtpHeaderExtensionIds* used_ids) { for (auto reference_extension : reference_extensions) { - if (!FindByUriAndEncryption(*offered_extensions, reference_extension, - nullptr)) { - webrtc::RtpExtension existing; + if (!webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *offered_extensions, reference_extension.uri, + reference_extension.encrypt)) { if (reference_extension.encrypt) { - if (FindByUriAndEncryption(*encrypted_extensions, reference_extension, - &existing)) { - offered_extensions->push_back(existing); + const webrtc::RtpExtension* existing = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *encrypted_extensions, reference_extension.uri, + reference_extension.encrypt); + if (existing) { + offered_extensions->push_back(*existing); } else { used_ids->FindAndSetIdUsed(&reference_extension); encrypted_extensions->push_back(reference_extension); offered_extensions->push_back(reference_extension); } } else { - if (FindByUriAndEncryption(*regular_extensions, reference_extension, - &existing)) { - offered_extensions->push_back(existing); + const webrtc::RtpExtension* existing = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *regular_extensions, reference_extension.uri, + reference_extension.encrypt); + if (existing) { + offered_extensions->push_back(*existing); } else { used_ids->FindAndSetIdUsed(&reference_extension); regular_extensions->push_back(reference_extension); @@ -1091,41 +1035,86 @@ static void MergeRtpHdrExts(const RtpHeaderExtensions& reference_extensions, } } -static void AddEncryptedVersionsOfHdrExts(RtpHeaderExtensions* extensions, - RtpHeaderExtensions* all_extensions, - UsedRtpHeaderExtensionIds* used_ids) { - RtpHeaderExtensions encrypted_extensions; - for (const webrtc::RtpExtension& extension : *extensions) { - webrtc::RtpExtension existing; - // Don't add encrypted extensions again that were already included in a - // previous offer or regular extensions that are also included as encrypted - // extensions. - if (extension.encrypt || - !webrtc::RtpExtension::IsEncryptionSupported(extension.uri) || - (FindByUriWithEncryptionPreference(*extensions, extension.uri, true, - &existing) && - existing.encrypt)) { +static void AddEncryptedVersionsOfHdrExts( + RtpHeaderExtensions* offered_extensions, + RtpHeaderExtensions* encrypted_extensions, + UsedRtpHeaderExtensionIds* used_ids) { + RtpHeaderExtensions encrypted_extensions_to_add; + for (const auto& extension : *offered_extensions) { + // Skip existing encrypted offered extension + if (extension.encrypt) { continue; } - if (FindByUri(*all_extensions, extension, &existing)) { - encrypted_extensions.push_back(existing); - } else { - webrtc::RtpExtension encrypted(extension); - encrypted.encrypt = true; - used_ids->FindAndSetIdUsed(&encrypted); - all_extensions->push_back(encrypted); - encrypted_extensions.push_back(encrypted); + // Skip if we cannot encrypt the extension + if (!webrtc::RtpExtension::IsEncryptionSupported(extension.uri)) { + continue; + } + + // Skip if an encrypted extension with that URI already exists in the + // offered extensions. 
+ const bool have_encrypted_extension = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *offered_extensions, extension.uri, true); + if (have_encrypted_extension) { + continue; } + + // Determine if a shared encrypted extension with that URI already exists. + const webrtc::RtpExtension* shared_encrypted_extension = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *encrypted_extensions, extension.uri, true); + if (shared_encrypted_extension) { + // Re-use the shared encrypted extension + encrypted_extensions_to_add.push_back(*shared_encrypted_extension); + continue; + } + + // None exists. Create a new shared encrypted extension from the + // non-encrypted one. + webrtc::RtpExtension new_encrypted_extension(extension); + new_encrypted_extension.encrypt = true; + used_ids->FindAndSetIdUsed(&new_encrypted_extension); + encrypted_extensions->push_back(new_encrypted_extension); + encrypted_extensions_to_add.push_back(new_encrypted_extension); } - extensions->insert(extensions->end(), encrypted_extensions.begin(), - encrypted_extensions.end()); + + // Append the additional encrypted extensions to be offered + offered_extensions->insert(offered_extensions->end(), + encrypted_extensions_to_add.begin(), + encrypted_extensions_to_add.end()); +} + +// Mostly identical to RtpExtension::FindHeaderExtensionByUri but discards any +// encrypted extensions that this implementation cannot encrypt. +static const webrtc::RtpExtension* FindHeaderExtensionByUriDiscardUnsupported( + const std::vector& extensions, + absl::string_view uri, + webrtc::RtpExtension::Filter filter) { + // Note: While it's technically possible to decrypt extensions that we don't + // encrypt, the symmetric API of libsrtp does not allow us to supply + // different IDs for encryption/decryption of header extensions depending on + // whether the packet is inbound or outbound. Thereby, we are limited to + // what we can send in encrypted form. + if (!webrtc::RtpExtension::IsEncryptionSupported(uri)) { + // If there's no encryption support and we only want encrypted extensions, + // there's no point in continuing the search here. + if (filter == webrtc::RtpExtension::kRequireEncryptedExtension) { + return nullptr; + } + + // Instruct to only return non-encrypted extensions + filter = webrtc::RtpExtension::Filter::kDiscardEncryptedExtension; + } + + return webrtc::RtpExtension::FindHeaderExtensionByUri(extensions, uri, + filter); } static void NegotiateRtpHeaderExtensions( const RtpHeaderExtensions& local_extensions, const RtpHeaderExtensions& offered_extensions, - bool enable_encrypted_rtp_header_extensions, + webrtc::RtpExtension::Filter filter, RtpHeaderExtensions* negotiated_extensions) { // TransportSequenceNumberV2 is not offered by default. The special logic for // the TransportSequenceNumber extensions works as follows: @@ -1134,9 +1123,9 @@ static void NegotiateRtpHeaderExtensions( // V1 and V2 V2 regardless of local_extensions. // V2 V2 regardless of local_extensions. 
const webrtc::RtpExtension* transport_sequence_number_v2_offer = - webrtc::RtpExtension::FindHeaderExtensionByUri( + FindHeaderExtensionByUriDiscardUnsupported( offered_extensions, - webrtc::RtpExtension::kTransportSequenceNumberV2Uri); + webrtc::RtpExtension::kTransportSequenceNumberV2Uri, filter); bool frame_descriptor_in_local = false; bool dependency_descriptor_in_local = false; @@ -1149,10 +1138,10 @@ static void NegotiateRtpHeaderExtensions( dependency_descriptor_in_local = true; else if (ours.uri == webrtc::RtpExtension::kAbsoluteCaptureTimeUri) abs_capture_time_in_local = true; - webrtc::RtpExtension theirs; - if (FindByUriWithEncryptionPreference( - offered_extensions, ours.uri, - enable_encrypted_rtp_header_extensions, &theirs)) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported(offered_extensions, ours.uri, + filter); + if (theirs) { if (transport_sequence_number_v2_offer && ours.uri == webrtc::RtpExtension::kTransportSequenceNumberUri) { // Don't respond to @@ -1162,7 +1151,7 @@ static void NegotiateRtpHeaderExtensions( continue; } else { // We respond with their RTP header extension id. - negotiated_extensions->push_back(theirs); + negotiated_extensions->push_back(*theirs); } } } @@ -1174,28 +1163,35 @@ static void NegotiateRtpHeaderExtensions( // Frame descriptors support. If the extension is not present locally, but is // in the offer, we add it to the list. - webrtc::RtpExtension theirs; - if (!dependency_descriptor_in_local && - FindByUriWithEncryptionPreference( - offered_extensions, webrtc::RtpExtension::kDependencyDescriptorUri, - enable_encrypted_rtp_header_extensions, &theirs)) { - negotiated_extensions->push_back(theirs); - } - if (!frame_descriptor_in_local && - FindByUriWithEncryptionPreference( - offered_extensions, - webrtc::RtpExtension::kGenericFrameDescriptorUri00, - enable_encrypted_rtp_header_extensions, &theirs)) { - negotiated_extensions->push_back(theirs); + if (!dependency_descriptor_in_local) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported( + offered_extensions, webrtc::RtpExtension::kDependencyDescriptorUri, + filter); + if (theirs) { + negotiated_extensions->push_back(*theirs); + } + } + if (!frame_descriptor_in_local) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported( + offered_extensions, + webrtc::RtpExtension::kGenericFrameDescriptorUri00, filter); + if (theirs) { + negotiated_extensions->push_back(*theirs); + } } // Absolute capture time support. If the extension is not present locally, but // is in the offer, we add it to the list. - if (!abs_capture_time_in_local && - FindByUriWithEncryptionPreference( - offered_extensions, webrtc::RtpExtension::kAbsoluteCaptureTimeUri, - enable_encrypted_rtp_header_extensions, &theirs)) { - negotiated_extensions->push_back(theirs); + if (!abs_capture_time_in_local) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported( + offered_extensions, webrtc::RtpExtension::kAbsoluteCaptureTimeUri, + filter); + if (theirs) { + negotiated_extensions->push_back(*theirs); + } } } @@ -1250,10 +1246,14 @@ static bool CreateMediaContentAnswer( bool bundle_enabled, MediaContentDescription* answer) { answer->set_extmap_allow_mixed_enum(offer->extmap_allow_mixed_enum()); + const webrtc::RtpExtension::Filter extensions_filter = + enable_encrypted_rtp_header_extensions + ? 
webrtc::RtpExtension::Filter::kPreferEncryptedExtension + : webrtc::RtpExtension::Filter::kDiscardEncryptedExtension; RtpHeaderExtensions negotiated_rtp_extensions; - NegotiateRtpHeaderExtensions( - local_rtp_extensions, offer->rtp_header_extensions(), - enable_encrypted_rtp_header_extensions, &negotiated_rtp_extensions); + NegotiateRtpHeaderExtensions(local_rtp_extensions, + offer->rtp_header_extensions(), + extensions_filter, &negotiated_rtp_extensions); answer->set_rtp_header_extensions(negotiated_rtp_extensions); answer->set_rtcp_mux(session_options.rtcp_mux_enabled && offer->rtcp_mux()); diff --git a/pc/media_session_unittest.cc b/pc/media_session_unittest.cc index 099195f501..c7c07fc527 100644 --- a/pc/media_session_unittest.cc +++ b/pc/media_session_unittest.cc @@ -139,6 +139,7 @@ static const RtpExtension kAudioRtpExtensionEncrypted1[] = { RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8), RtpExtension("http://google.com/testing/audio_something", 10), RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 12, true), + RtpExtension("http://google.com/testing/audio_something", 11, true), }; static const RtpExtension kAudioRtpExtension2[] = { @@ -161,7 +162,15 @@ static const RtpExtension kAudioRtpExtension3ForEncryption[] = { static const RtpExtension kAudioRtpExtension3ForEncryptionOffer[] = { RtpExtension("http://google.com/testing/audio_something", 2), RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3), - RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14, true), + RtpExtension("http://google.com/testing/audio_something", 14, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, true), +}; + +static const RtpExtension kVideoRtpExtension3ForEncryptionOffer[] = { + RtpExtension("http://google.com/testing/video_something", 4), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3), + RtpExtension("http://google.com/testing/video_something", 12, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, true), }; static const RtpExtension kAudioRtpExtensionAnswer[] = { @@ -180,7 +189,8 @@ static const RtpExtension kVideoRtpExtension1[] = { static const RtpExtension kVideoRtpExtensionEncrypted1[] = { RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14), RtpExtension("http://google.com/testing/video_something", 13), - RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 11, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 9, true), + RtpExtension("http://google.com/testing/video_something", 7, true), }; static const RtpExtension kVideoRtpExtension2[] = { @@ -205,7 +215,7 @@ static const RtpExtension kVideoRtpExtensionAnswer[] = { }; static const RtpExtension kVideoRtpExtensionEncryptedAnswer[] = { - RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 11, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 9, true), }; static const RtpExtension kRtpExtensionTransportSequenceNumber01[] = { @@ -3431,19 +3441,11 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtensionIdReusedEncrypted) { MAKE_VECTOR(kVideoRtpExtension3ForEncryption), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - // The extensions that are shared between audio and video should use the same - // id. 
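The comment removed just above still states the invariant under test: an extension URI shared by audio and video must be offered with the same id, and the same holds for its encrypted variant. That expectation now lives in the named constants themselves; for example, both offer vectors declared earlier in this file carry the encrypted toffset entry with the same id:

// From kAudioRtpExtension3ForEncryptionOffer and kVideoRtpExtension3ForEncryptionOffer:
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, /*encrypt=*/true)  // audio offer
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, /*encrypt=*/true)  // video offer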
- const RtpExtension kExpectedVideoRtpExtension[] = { - kVideoRtpExtension3ForEncryption[0], - kAudioRtpExtension3ForEncryptionOffer[1], - kAudioRtpExtension3ForEncryptionOffer[2], - }; - EXPECT_EQ( MAKE_VECTOR(kAudioRtpExtension3ForEncryptionOffer), GetFirstAudioContentDescription(offer.get())->rtp_header_extensions()); EXPECT_EQ( - MAKE_VECTOR(kExpectedVideoRtpExtension), + MAKE_VECTOR(kVideoRtpExtension3ForEncryptionOffer), GetFirstVideoContentDescription(offer.get())->rtp_header_extensions()); // Nothing should change when creating a new offer @@ -3453,7 +3455,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtensionIdReusedEncrypted) { EXPECT_EQ(MAKE_VECTOR(kAudioRtpExtension3ForEncryptionOffer), GetFirstAudioContentDescription(updated_offer.get()) ->rtp_header_extensions()); - EXPECT_EQ(MAKE_VECTOR(kExpectedVideoRtpExtension), + EXPECT_EQ(MAKE_VECTOR(kVideoRtpExtension3ForEncryptionOffer), GetFirstVideoContentDescription(updated_offer.get()) ->rtp_header_extensions()); } diff --git a/api/media_stream_proxy.h b/pc/media_stream_proxy.h similarity index 82% rename from api/media_stream_proxy.h rename to pc/media_stream_proxy.h index 773c5d8b14..36069a4369 100644 --- a/api/media_stream_proxy.h +++ b/pc/media_stream_proxy.h @@ -8,18 +8,18 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef API_MEDIA_STREAM_PROXY_H_ -#define API_MEDIA_STREAM_PROXY_H_ +#ifndef PC_MEDIA_STREAM_PROXY_H_ +#define PC_MEDIA_STREAM_PROXY_H_ #include #include "api/media_stream_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. +// TODO(deadbeef): Move this to a .cc file. What threads methods are called on +// is an implementation detail. BEGIN_PRIMARY_PROXY_MAP(MediaStream) PROXY_PRIMARY_THREAD_DESTRUCTOR() BYPASS_PROXY_CONSTMETHOD0(std::string, id) @@ -37,8 +37,8 @@ PROXY_METHOD1(bool, RemoveTrack, AudioTrackInterface*) PROXY_METHOD1(bool, RemoveTrack, VideoTrackInterface*) PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -END_PROXY_MAP() +END_PROXY_MAP(MediaStream) } // namespace webrtc -#endif // API_MEDIA_STREAM_PROXY_H_ +#endif // PC_MEDIA_STREAM_PROXY_H_ diff --git a/api/media_stream_track_proxy.h b/pc/media_stream_track_proxy.h similarity index 72% rename from api/media_stream_track_proxy.h rename to pc/media_stream_track_proxy.h index a0fe676d58..f563137c77 100644 --- a/api/media_stream_track_proxy.h +++ b/pc/media_stream_track_proxy.h @@ -11,26 +11,25 @@ // This file includes proxy classes for tracks. The purpose is // to make sure tracks are only accessed from the signaling thread. -#ifndef API_MEDIA_STREAM_TRACK_PROXY_H_ -#define API_MEDIA_STREAM_TRACK_PROXY_H_ +#ifndef PC_MEDIA_STREAM_TRACK_PROXY_H_ +#define PC_MEDIA_STREAM_TRACK_PROXY_H_ #include #include "api/media_stream_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. - +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
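From here on the proxy headers move from api/ to pc/, and END_PROXY_MAP() gains the interface name so each generated class can carry a compile-time "<Name>Proxy" string, which feeds the trace events added in pc/proxy.h later in this patch. For orientation, this is the declaration pattern with the updated macros; FooInterface and its methods are hypothetical, the macros are the real ones:

// Declares the FooProxyWithInternal<> proxy class for a hypothetical FooInterface.
BEGIN_PRIMARY_PROXY_MAP(Foo)
PROXY_PRIMARY_THREAD_DESTRUCTOR()
BYPASS_PROXY_CONSTMETHOD0(std::string, id)  // const state; answered without a thread hop
PROXY_METHOD1(void, SetBar, int)            // marshalled to the primary thread
END_PROXY_MAP(Foo)                          // previously END_PROXY_MAP() took no argument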
BEGIN_PRIMARY_PROXY_MAP(AudioTrack) PROXY_PRIMARY_THREAD_DESTRUCTOR() BYPASS_PROXY_CONSTMETHOD0(std::string, kind) BYPASS_PROXY_CONSTMETHOD0(std::string, id) PROXY_CONSTMETHOD0(TrackState, state) PROXY_CONSTMETHOD0(bool, enabled) -PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource) +BYPASS_PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource) PROXY_METHOD1(void, AddSink, AudioTrackSinkInterface*) PROXY_METHOD1(void, RemoveSink, AudioTrackSinkInterface*) PROXY_METHOD1(bool, GetSignalLevel, int*) @@ -38,28 +37,28 @@ PROXY_METHOD0(rtc::scoped_refptr, GetAudioProcessor) PROXY_METHOD1(bool, set_enabled, bool) PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -END_PROXY_MAP() +END_PROXY_MAP(AudioTrack) BEGIN_PROXY_MAP(VideoTrack) PROXY_PRIMARY_THREAD_DESTRUCTOR() BYPASS_PROXY_CONSTMETHOD0(std::string, kind) BYPASS_PROXY_CONSTMETHOD0(std::string, id) -PROXY_CONSTMETHOD0(TrackState, state) -PROXY_CONSTMETHOD0(bool, enabled) -PROXY_METHOD1(bool, set_enabled, bool) -PROXY_CONSTMETHOD0(ContentHint, content_hint) -PROXY_METHOD1(void, set_content_hint, ContentHint) +PROXY_SECONDARY_CONSTMETHOD0(TrackState, state) +PROXY_SECONDARY_CONSTMETHOD0(bool, enabled) +PROXY_SECONDARY_METHOD1(bool, set_enabled, bool) +PROXY_SECONDARY_CONSTMETHOD0(ContentHint, content_hint) +PROXY_SECONDARY_METHOD1(void, set_content_hint, ContentHint) PROXY_SECONDARY_METHOD2(void, AddOrUpdateSink, rtc::VideoSinkInterface*, const rtc::VideoSinkWants&) PROXY_SECONDARY_METHOD1(void, RemoveSink, rtc::VideoSinkInterface*) -PROXY_CONSTMETHOD0(VideoTrackSourceInterface*, GetSource) +BYPASS_PROXY_CONSTMETHOD0(VideoTrackSourceInterface*, GetSource) PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -END_PROXY_MAP() +END_PROXY_MAP(VideoTrack) } // namespace webrtc -#endif // API_MEDIA_STREAM_TRACK_PROXY_H_ +#endif // PC_MEDIA_STREAM_TRACK_PROXY_H_ diff --git a/pc/peer_connection.cc b/pc/peer_connection.cc index e2b3b61058..276af1787d 100644 --- a/pc/peer_connection.cc +++ b/pc/peer_connection.cc @@ -1279,9 +1279,9 @@ absl::optional PeerConnection::can_trickle_ice_candidates() { "trickle"); } -rtc::scoped_refptr PeerConnection::CreateDataChannel( - const std::string& label, - const DataChannelInit* config) { +RTCErrorOr> +PeerConnection::CreateDataChannelOrError(const std::string& label, + const DataChannelInit* config) { RTC_DCHECK_RUN_ON(signaling_thread()); TRACE_EVENT0("webrtc", "PeerConnection::CreateDataChannel"); @@ -1291,11 +1291,13 @@ rtc::scoped_refptr PeerConnection::CreateDataChannel( if (config) { internal_config.reset(new InternalDataChannelInit(*config)); } + // TODO(bugs.webrtc.org/12796): Return a more specific error. rtc::scoped_refptr channel( data_channel_controller_.InternalCreateDataChannelWithProxy( label, internal_config.get())); if (!channel.get()) { - return nullptr; + return RTCError(RTCErrorType::INTERNAL_ERROR, + "Data channel creation failed"); } // Trigger the onRenegotiationNeeded event for @@ -1881,6 +1883,16 @@ void PeerConnection::SetConnectionState( configuration_.ice_candidate_pool_size, 0, 255, 256); break; } + + // Record whether there was a local or remote provisional answer. 
+ ProvisionalAnswerUsage pranswer = kProvisionalAnswerNotUsed; + if (local_description()->GetType() == SdpType::kPrAnswer) { + pranswer = kProvisionalAnswerLocal; + } else if (remote_description()->GetType() == SdpType::kPrAnswer) { + pranswer = kProvisionalAnswerRemote; + } + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.ProvisionalAnswer", + pranswer, kProvisionalAnswerMax); } } @@ -2186,6 +2198,7 @@ cricket::CandidateStatsList PeerConnection::GetPooledCandidateStats() const { std::map PeerConnection::GetTransportStatsByNames( const std::set& transport_names) { + TRACE_EVENT0("webrtc", "PeerConnection::GetTransportStatsByNames"); RTC_DCHECK_RUN_ON(network_thread()); if (!network_thread_safety_->alive()) return {}; @@ -2634,6 +2647,7 @@ void PeerConnection::OnTransportControllerGatheringState( // Runs on network_thread(). void PeerConnection::ReportTransportStats() { + TRACE_EVENT0("webrtc", "PeerConnection::ReportTransportStats"); rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; std::map> media_types_by_transport_name; @@ -2867,18 +2881,10 @@ std::function PeerConnection::InitializeRtcpCallback() { RTC_DCHECK_RUN_ON(network_thread()); - return [this, flag = worker_thread_safety_]( - const rtc::CopyOnWriteBuffer& packet, int64_t packet_time_us) { + return [this](const rtc::CopyOnWriteBuffer& packet, int64_t packet_time_us) { RTC_DCHECK_RUN_ON(network_thread()); - // TODO(bugs.webrtc.org/11993): We should actually be delivering this call - // directly to the Call class somehow directly on the network thread and not - // incur this hop here. The DeliverPacket() method will eventually just have - // to hop back over to the network thread. - worker_thread()->PostTask(ToQueuedTask(flag, [this, packet, - packet_time_us] { - RTC_DCHECK_RUN_ON(worker_thread()); - call_->Receiver()->DeliverPacket(MediaType::ANY, packet, packet_time_us); - })); + call_ptr_->Receiver()->DeliverPacket(MediaType::ANY, packet, + packet_time_us); }; } diff --git a/pc/peer_connection.h b/pc/peer_connection.h index 7be137a6a8..4476c5d8e1 100644 --- a/pc/peer_connection.h +++ b/pc/peer_connection.h @@ -167,7 +167,7 @@ class PeerConnection : public PeerConnectionInternal, std::vector> GetTransceivers() const override; - rtc::scoped_refptr CreateDataChannel( + RTCErrorOr> CreateDataChannelOrError( const std::string& label, const DataChannelInit* config) override; // WARNING: LEGACY. 
See peerconnectioninterface.h diff --git a/pc/peer_connection_bundle_unittest.cc b/pc/peer_connection_bundle_unittest.cc index fa5be62745..08754c6820 100644 --- a/pc/peer_connection_bundle_unittest.cc +++ b/pc/peer_connection_bundle_unittest.cc @@ -13,7 +13,6 @@ #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/create_peerconnection_factory.h" -#include "api/peer_connection_proxy.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "p2p/base/fake_port_allocator.h" @@ -21,6 +20,7 @@ #include "p2p/client/basic_port_allocator.h" #include "pc/media_session.h" #include "pc/peer_connection.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #ifdef WEBRTC_ANDROID diff --git a/pc/peer_connection_data_channel_unittest.cc b/pc/peer_connection_data_channel_unittest.cc index 157dcd25c8..2544473536 100644 --- a/pc/peer_connection_data_channel_unittest.cc +++ b/pc/peer_connection_data_channel_unittest.cc @@ -19,7 +19,6 @@ #include "api/jsep.h" #include "api/media_types.h" #include "api/peer_connection_interface.h" -#include "api/peer_connection_proxy.h" #include "api/scoped_refptr.h" #include "api/task_queue/default_task_queue_factory.h" #include "media/base/codec.h" @@ -32,6 +31,7 @@ #include "pc/media_session.h" #include "pc/peer_connection.h" #include "pc/peer_connection_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #include "pc/session_description.h" diff --git a/pc/peer_connection_factory.cc b/pc/peer_connection_factory.cc index 89b39e5287..50755a38c7 100644 --- a/pc/peer_connection_factory.cc +++ b/pc/peer_connection_factory.cc @@ -18,17 +18,14 @@ #include "api/call/call_factory_interface.h" #include "api/fec_controller.h" #include "api/ice_transport_interface.h" -#include "api/media_stream_proxy.h" -#include "api/media_stream_track_proxy.h" #include "api/network_state_predictor.h" #include "api/packet_socket_factory.h" -#include "api/peer_connection_factory_proxy.h" -#include "api/peer_connection_proxy.h" #include "api/rtc_event_log/rtc_event_log.h" #include "api/sequence_checker.h" #include "api/transport/bitrate_settings.h" #include "api/units/data_rate.h" #include "call/audio_state.h" +#include "call/rtp_transport_controller_send_factory.h" #include "media/base/media_engine.h" #include "p2p/base/basic_async_resolver_factory.h" #include "p2p/base/basic_packet_socket_factory.h" @@ -38,7 +35,11 @@ #include "pc/audio_track.h" #include "pc/local_audio_source.h" #include "pc/media_stream.h" +#include "pc/media_stream_proxy.h" +#include "pc/media_stream_track_proxy.h" #include "pc/peer_connection.h" +#include "pc/peer_connection_factory_proxy.h" +#include "pc/peer_connection_proxy.h" #include "pc/rtp_parameters_conversion.h" #include "pc/session_description.h" #include "pc/video_track.h" @@ -100,7 +101,11 @@ PeerConnectionFactory::PeerConnectionFactory( std::move(dependencies->network_state_predictor_factory)), injected_network_controller_factory_( std::move(dependencies->network_controller_factory)), - neteq_factory_(std::move(dependencies->neteq_factory)) {} + neteq_factory_(std::move(dependencies->neteq_factory)), + transport_controller_send_factory_( + (dependencies->transport_controller_send_factory) + ? 
std::move(dependencies->transport_controller_send_factory) + : std::make_unique()) {} PeerConnectionFactory::PeerConnectionFactory( PeerConnectionFactoryDependencies dependencies) @@ -139,6 +144,7 @@ RtpCapabilities PeerConnectionFactory::GetRtpSenderCapabilities( case cricket::MEDIA_TYPE_UNSUPPORTED: return RtpCapabilities(); } + RTC_DLOG(LS_ERROR) << "Got unexpected MediaType " << kind; RTC_CHECK_NOTREACHED(); } @@ -165,6 +171,7 @@ RtpCapabilities PeerConnectionFactory::GetRtpReceiverCapabilities( case cricket::MEDIA_TYPE_UNSUPPORTED: return RtpCapabilities(); } + RTC_DLOG(LS_ERROR) << "Got unexpected MediaType " << kind; RTC_CHECK_NOTREACHED(); } @@ -332,7 +339,8 @@ std::unique_ptr PeerConnectionFactory::CreateCall_w( } call_config.trials = &trials(); - + call_config.rtp_transport_controller_send_factory = + transport_controller_send_factory_.get(); return std::unique_ptr( context_->call_factory()->CreateCall(call_config)); } diff --git a/pc/peer_connection_factory.h b/pc/peer_connection_factory.h index bd2efe457e..4946ec6ea2 100644 --- a/pc/peer_connection_factory.h +++ b/pc/peer_connection_factory.h @@ -14,6 +14,7 @@ #include #include + #include #include @@ -36,6 +37,7 @@ #include "api/transport/sctp_transport_factory_interface.h" #include "api/transport/webrtc_key_value_config.h" #include "call/call.h" +#include "call/rtp_transport_controller_send_factory_interface.h" #include "p2p/base/port_allocator.h" #include "pc/channel_manager.h" #include "pc/connection_context.h" @@ -148,6 +150,8 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface { std::unique_ptr injected_network_controller_factory_; std::unique_ptr neteq_factory_; + const std::unique_ptr + transport_controller_send_factory_; }; } // namespace webrtc diff --git a/api/peer_connection_factory_proxy.h b/pc/peer_connection_factory_proxy.h similarity index 85% rename from api/peer_connection_factory_proxy.h rename to pc/peer_connection_factory_proxy.h index de6250fe92..59e373db7b 100644 --- a/api/peer_connection_factory_proxy.h +++ b/pc/peer_connection_factory_proxy.h @@ -8,20 +8,20 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef API_PEER_CONNECTION_FACTORY_PROXY_H_ -#define API_PEER_CONNECTION_FACTORY_PROXY_H_ +#ifndef PC_PEER_CONNECTION_FACTORY_PROXY_H_ +#define PC_PEER_CONNECTION_FACTORY_PROXY_H_ #include #include #include #include "api/peer_connection_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
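With the constructor change above, PeerConnectionFactory now prefers a transport controller send factory supplied through its dependencies and only falls back to the built-in RtpTransportControllerSendFactory. A hedged usage sketch; MyControllerSendFactory is hypothetical and stands for any implementation of RtpTransportControllerSendFactoryInterface:

#include <memory>
#include "api/peer_connection_interface.h"  // PeerConnectionFactoryDependencies

void ConfigureSendController(webrtc::PeerConnectionFactoryDependencies& deps) {
  // MyControllerSendFactory is a placeholder for a custom
  // RtpTransportControllerSendFactoryInterface implementation.
  deps.transport_controller_send_factory =
      std::make_unique<MyControllerSendFactory>();
  // Leaving the field unset keeps the previous behavior: the factory creates
  // the default RtpTransportControllerSendFactory and hands it to each Call.
}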
BEGIN_PROXY_MAP(PeerConnectionFactory) PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD1(void, SetOptions, const Options&) @@ -51,8 +51,8 @@ PROXY_METHOD2(rtc::scoped_refptr, AudioSourceInterface*) PROXY_SECONDARY_METHOD2(bool, StartAecDump, FILE*, int64_t) PROXY_SECONDARY_METHOD0(void, StopAecDump) -END_PROXY_MAP() +END_PROXY_MAP(PeerConnectionFactory) } // namespace webrtc -#endif // API_PEER_CONNECTION_FACTORY_PROXY_H_ +#endif // PC_PEER_CONNECTION_FACTORY_PROXY_H_ diff --git a/pc/peer_connection_histogram_unittest.cc b/pc/peer_connection_histogram_unittest.cc index 97fbde2f86..fa46ce9802 100644 --- a/pc/peer_connection_histogram_unittest.cc +++ b/pc/peer_connection_histogram_unittest.cc @@ -19,7 +19,6 @@ #include "api/jsep.h" #include "api/jsep_session_description.h" #include "api/peer_connection_interface.h" -#include "api/peer_connection_proxy.h" #include "api/rtc_error.h" #include "api/scoped_refptr.h" #include "api/task_queue/default_task_queue_factory.h" @@ -29,6 +28,7 @@ #include "p2p/client/basic_port_allocator.h" #include "pc/peer_connection.h" #include "pc/peer_connection_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #include "pc/test/mock_peer_connection_observers.h" diff --git a/pc/peer_connection_ice_unittest.cc b/pc/peer_connection_ice_unittest.cc index ed30377f20..7971547ffa 100644 --- a/pc/peer_connection_ice_unittest.cc +++ b/pc/peer_connection_ice_unittest.cc @@ -23,10 +23,10 @@ #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/create_peerconnection_factory.h" -#include "api/peer_connection_proxy.h" #include "api/uma_metrics.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/test/fake_audio_capture_module.h" #include "pc/test/mock_peer_connection_observers.h" #include "rtc_base/fake_network.h" diff --git a/api/peer_connection_proxy.h b/pc/peer_connection_proxy.h similarity index 94% rename from api/peer_connection_proxy.h rename to pc/peer_connection_proxy.h index cc9df10eed..7601c9d053 100644 --- a/api/peer_connection_proxy.h +++ b/pc/peer_connection_proxy.h @@ -8,23 +8,23 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef API_PEER_CONNECTION_PROXY_H_ -#define API_PEER_CONNECTION_PROXY_H_ +#ifndef PC_PEER_CONNECTION_PROXY_H_ +#define PC_PEER_CONNECTION_PROXY_H_ #include #include #include #include "api/peer_connection_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { // PeerConnection proxy objects will be constructed with two thread pointers, // signaling and network. The proxy macros don't have 'network' specific macros // and support for a secondary thread is provided via 'SECONDARY' macros. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
BEGIN_PROXY_MAP(PeerConnection) PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD0(rtc::scoped_refptr, local_streams) @@ -76,8 +76,8 @@ PROXY_METHOD2(void, rtc::scoped_refptr, rtc::scoped_refptr) PROXY_METHOD0(void, ClearStatsCache) -PROXY_METHOD2(rtc::scoped_refptr, - CreateDataChannel, +PROXY_METHOD2(RTCErrorOr>, + CreateDataChannelOrError, const std::string&, const DataChannelInit*) PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, local_description) @@ -157,8 +157,8 @@ PROXY_METHOD1(bool, StartRtcEventLog, std::unique_ptr) PROXY_METHOD0(void, StopRtcEventLog) PROXY_METHOD0(void, Close) BYPASS_PROXY_CONSTMETHOD0(rtc::Thread*, signaling_thread) -END_PROXY_MAP() +END_PROXY_MAP(PeerConnection) } // namespace webrtc -#endif // API_PEER_CONNECTION_PROXY_H_ +#endif // PC_PEER_CONNECTION_PROXY_H_ diff --git a/pc/peer_connection_signaling_unittest.cc b/pc/peer_connection_signaling_unittest.cc index a4f05c14c8..1c94570ec7 100644 --- a/pc/peer_connection_signaling_unittest.cc +++ b/pc/peer_connection_signaling_unittest.cc @@ -19,10 +19,10 @@ #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/create_peerconnection_factory.h" #include "api/jsep_session_description.h" -#include "api/peer_connection_proxy.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "pc/peer_connection.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #include "pc/webrtc_sdp.h" diff --git a/pc/peer_connection_wrapper.cc b/pc/peer_connection_wrapper.cc index 6aed8f1de7..3b4d28f0d9 100644 --- a/pc/peer_connection_wrapper.cc +++ b/pc/peer_connection_wrapper.cc @@ -306,7 +306,14 @@ rtc::scoped_refptr PeerConnectionWrapper::AddVideoTrack( rtc::scoped_refptr PeerConnectionWrapper::CreateDataChannel(const std::string& label) { - return pc()->CreateDataChannel(label, nullptr); + auto result = pc()->CreateDataChannelOrError(label, nullptr); + if (!result.ok()) { + RTC_LOG(LS_ERROR) << "CreateDataChannel failed: " + << ToString(result.error().type()) << " " + << result.error().message(); + return nullptr; + } + return result.MoveValue(); } PeerConnectionInterface::SignalingState diff --git a/pc/proxy.cc b/pc/proxy.cc new file mode 100644 index 0000000000..5f4e0b8832 --- /dev/null +++ b/pc/proxy.cc @@ -0,0 +1,25 @@ +/* + * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "pc/proxy.h" + +#include "rtc_base/trace_event.h" + +namespace webrtc { +namespace proxy_internal { +ScopedTrace::ScopedTrace(const char* class_and_method_name) + : class_and_method_name_(class_and_method_name) { + TRACE_EVENT_BEGIN0("webrtc", class_and_method_name_); +} +ScopedTrace::~ScopedTrace() { + TRACE_EVENT_END0("webrtc", class_and_method_name_); +} +} // namespace proxy_internal +} // namespace webrtc diff --git a/api/proxy.h b/pc/proxy.h similarity index 86% rename from api/proxy.h rename to pc/proxy.h index d14e0b2c3e..565ae80175 100644 --- a/api/proxy.h +++ b/pc/proxy.h @@ -56,8 +56,8 @@ // The variant defined with BEGIN_OWNED_PROXY_MAP does not use // refcounting, and instead just takes ownership of the object being proxied. 
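The new pc/proxy.cc above pairs with the TRACE_BOILERPLATE macro added just below: unless RTC_DISABLE_PROXY_TRACE_EVENTS is defined (the #if at the top of pc/proxy.h defines it for non-Chromium builds), every proxied call opens a ScopedTrace named "<Class>Proxy::<method>" before marshalling. Conceptually, one expanded method boils down to the sketch here; the function and trace names are illustrative, the helpers are the real ones:

#include "pc/proxy.h"
#include "rtc_base/string_utils.h"

namespace webrtc {
// Hypothetical helper showing what a traced proxy method reduces to.
void TracedCloseSketch() {
  static constexpr auto kName =
      rtc::MakeCompileTimeString("PeerConnectionProxy")
          .Concat(rtc::MakeCompileTimeString("::"))
          .Concat(rtc::MakeCompileTimeString("Close"));
  proxy_internal::ScopedTrace scoped_trace(kName.string);  // TRACE_EVENT_BEGIN0/END0 pair
  // ...the pre-existing MethodCall marshal to the primary thread follows here...
}
}  // namespace webrtc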
-#ifndef API_PROXY_H_ -#define API_PROXY_H_ +#ifndef PC_PROXY_H_ +#define PC_PROXY_H_ #include #include @@ -71,14 +71,31 @@ #include "rtc_base/event.h" #include "rtc_base/message_handler.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/string_utils.h" #include "rtc_base/system/rtc_export.h" #include "rtc_base/thread.h" +#if !defined(RTC_DISABLE_PROXY_TRACE_EVENTS) && !defined(WEBRTC_CHROMIUM_BUILD) +#define RTC_DISABLE_PROXY_TRACE_EVENTS +#endif + namespace rtc { class Location; } namespace webrtc { +namespace proxy_internal { + +// Class for tracing the lifetime of MethodCall::Marshal. +class ScopedTrace { + public: + explicit ScopedTrace(const char* class_and_method_name); + ~ScopedTrace(); + + private: + const char* const class_and_method_name_; +}; +} // namespace proxy_internal template class ReturnType { @@ -181,6 +198,9 @@ class ConstMethodCall : public QueuedTask { rtc::Event event_; }; +#define PROXY_STRINGIZE_IMPL(x) #x +#define PROXY_STRINGIZE(x) PROXY_STRINGIZE_IMPL(x) + // Helper macros to reduce code duplication. #define PROXY_MAP_BOILERPLATE(c) \ template \ @@ -189,6 +209,7 @@ class ConstMethodCall : public QueuedTask { template \ class c##ProxyWithInternal : public c##Interface { \ protected: \ + static constexpr char proxy_name_[] = #c "Proxy"; \ typedef c##Interface C; \ \ public: \ @@ -198,8 +219,10 @@ class ConstMethodCall : public QueuedTask { // clang-format off // clang-format would put the semicolon alone, // leading to a presubmit error (cpplint.py) -#define END_PROXY_MAP() \ - }; +#define END_PROXY_MAP(c) \ + }; \ + template \ + constexpr char c##ProxyWithInternal::proxy_name_[]; // clang-format on #define PRIMARY_PROXY_MAP_BOILERPLATE(c) \ @@ -299,32 +322,51 @@ class ConstMethodCall : public QueuedTask { \ public: // NOLINTNEXTLINE +#if defined(RTC_DISABLE_PROXY_TRACE_EVENTS) +#define TRACE_BOILERPLATE(method) \ + do { \ + } while (0) +#else // if defined(RTC_DISABLE_PROXY_TRACE_EVENTS) +#define TRACE_BOILERPLATE(method) \ + static constexpr auto class_and_method_name = \ + rtc::MakeCompileTimeString(proxy_name_) \ + .Concat(rtc::MakeCompileTimeString("::")) \ + .Concat(rtc::MakeCompileTimeString(#method)); \ + proxy_internal::ScopedTrace scoped_trace(class_and_method_name.string) + +#endif // if defined(RTC_DISABLE_PROXY_TRACE_EVENTS) + #define PROXY_METHOD0(r, method) \ r method() override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method); \ return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_CONSTMETHOD0(r, method) \ r method() const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method); \ return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD1(r, method, t1) \ r method(t1 a1) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1)); \ return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_CONSTMETHOD1(r, method, t1) \ r method(t1 a1) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1)); \ return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD2(r, method, t1, t2) \ r method(t1 a1, t2 a2) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2)); \ return call.Marshal(RTC_FROM_HERE, primary_thread_); \ @@ -332,6 +374,7 @@ class ConstMethodCall : public QueuedTask { #define PROXY_METHOD3(r, method, t1, t2, t3) \ r method(t1 a1, t2 a2, t3 a3) override { \ + TRACE_BOILERPLATE(method); \ 
MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3)); \ return call.Marshal(RTC_FROM_HERE, primary_thread_); \ @@ -339,6 +382,7 @@ class ConstMethodCall : public QueuedTask { #define PROXY_METHOD4(r, method, t1, t2, t3, t4) \ r method(t1 a1, t2 a2, t3 a3, t4 a4) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3), \ std::move(a4)); \ @@ -347,6 +391,7 @@ class ConstMethodCall : public QueuedTask { #define PROXY_METHOD5(r, method, t1, t2, t3, t4, t5) \ r method(t1 a1, t2 a2, t3 a3, t4 a4, t5 a5) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3), \ std::move(a4), std::move(a5)); \ @@ -356,30 +401,35 @@ class ConstMethodCall : public QueuedTask { // Define methods which should be invoked on the secondary thread. #define PROXY_SECONDARY_METHOD0(r, method) \ r method() override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } #define PROXY_SECONDARY_CONSTMETHOD0(r, method) \ r method() const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } #define PROXY_SECONDARY_METHOD1(r, method, t1) \ r method(t1 a1) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1)); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } #define PROXY_SECONDARY_CONSTMETHOD1(r, method, t1) \ r method(t1 a1) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1)); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } #define PROXY_SECONDARY_METHOD2(r, method, t1, t2) \ r method(t1 a1, t2 a2) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2)); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ @@ -387,6 +437,7 @@ class ConstMethodCall : public QueuedTask { #define PROXY_SECONDARY_CONSTMETHOD2(r, method, t1, t2) \ r method(t1 a1, t2 a2) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1), \ std::move(a2)); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ @@ -394,6 +445,7 @@ class ConstMethodCall : public QueuedTask { #define PROXY_SECONDARY_METHOD3(r, method, t1, t2, t3) \ r method(t1 a1, t2 a2, t3 a3) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3)); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ @@ -401,6 +453,7 @@ class ConstMethodCall : public QueuedTask { #define PROXY_SECONDARY_CONSTMETHOD3(r, method, t1, t2) \ r method(t1 a1, t2 a2, t3 a3) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3)); \ return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ @@ -409,15 +462,12 @@ class ConstMethodCall : public QueuedTask { // For use when returning purely const state (set during construction). // Use with caution. This method should only be used when the return value will // always be the same. 
-#define BYPASS_PROXY_CONSTMETHOD0(r, method) \ - r method() const override { \ - static_assert( \ - std::is_same::value || !std::is_pointer::value, \ - "Type is a pointer"); \ - static_assert(!std::is_reference::value, "Type is a reference"); \ - return c_->method(); \ +#define BYPASS_PROXY_CONSTMETHOD0(r, method) \ + r method() const override { \ + TRACE_BOILERPLATE(method); \ + return c_->method(); \ } } // namespace webrtc -#endif // API_PROXY_H_ +#endif // PC_PROXY_H_ diff --git a/pc/proxy_unittest.cc b/pc/proxy_unittest.cc index f59250d49f..ef3d97eddc 100644 --- a/pc/proxy_unittest.cc +++ b/pc/proxy_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "api/proxy.h" +#include "pc/proxy.h" #include #include @@ -71,7 +71,7 @@ PROXY_CONSTMETHOD0(std::string, ConstMethod0) PROXY_SECONDARY_METHOD1(std::string, Method1, std::string) PROXY_CONSTMETHOD1(std::string, ConstMethod1, std::string) PROXY_SECONDARY_METHOD2(std::string, Method2, std::string, std::string) -END_PROXY_MAP() +END_PROXY_MAP(Fake) // Preprocessor hack to get a proxy class a name different than FakeProxy. #define FakeProxy FakeSignalingProxy @@ -84,7 +84,7 @@ PROXY_CONSTMETHOD0(std::string, ConstMethod0) PROXY_METHOD1(std::string, Method1, std::string) PROXY_CONSTMETHOD1(std::string, ConstMethod1, std::string) PROXY_METHOD2(std::string, Method2, std::string, std::string) -END_PROXY_MAP() +END_PROXY_MAP(Fake) #undef FakeProxy class SignalingProxyTest : public ::testing::Test { @@ -272,7 +272,7 @@ class Foo : public FooInterface { BEGIN_OWNED_PROXY_MAP(Foo) PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD0(void, Bar) -END_PROXY_MAP() +END_PROXY_MAP(Foo) class OwnedProxyTest : public ::testing::Test { public: diff --git a/pc/rtc_stats_collector.cc b/pc/rtc_stats_collector.cc index 7e781a7245..6599d0ef49 100644 --- a/pc/rtc_stats_collector.cc +++ b/pc/rtc_stats_collector.cc @@ -209,20 +209,20 @@ const char* IceCandidatePairStateToRTCStatsIceCandidatePairState( } const char* DtlsTransportStateToRTCDtlsTransportState( - cricket::DtlsTransportState state) { + DtlsTransportState state) { switch (state) { - case cricket::DTLS_TRANSPORT_NEW: + case DtlsTransportState::kNew: return RTCDtlsTransportState::kNew; - case cricket::DTLS_TRANSPORT_CONNECTING: + case DtlsTransportState::kConnecting: return RTCDtlsTransportState::kConnecting; - case cricket::DTLS_TRANSPORT_CONNECTED: + case DtlsTransportState::kConnected: return RTCDtlsTransportState::kConnected; - case cricket::DTLS_TRANSPORT_CLOSED: + case DtlsTransportState::kClosed: return RTCDtlsTransportState::kClosed; - case cricket::DTLS_TRANSPORT_FAILED: + case DtlsTransportState::kFailed: return RTCDtlsTransportState::kFailed; default: - RTC_NOTREACHED(); + RTC_CHECK_NOTREACHED(); return nullptr; } } @@ -265,6 +265,17 @@ const char* QualityLimitationReasonToRTCQualityLimitationReason( RTC_CHECK_NOTREACHED(); } +std::map +QualityLimitationDurationToRTCQualityLimitationDuration( + std::map durations_ms) { + std::map result; + for (const auto& elem : durations_ms) { + result[QualityLimitationReasonToRTCQualityLimitationReason(elem.first)] = + elem.second; + } + return result; +} + double DoubleAudioLevelFromIntAudioLevel(int audio_level) { RTC_DCHECK_GE(audio_level, 0); RTC_DCHECK_LE(audio_level, 32767); @@ -322,6 +333,13 @@ void SetInboundRTPStreamStatsFromMediaReceiverInfo( static_cast(media_receiver_info.header_and_padding_bytes_rcvd); inbound_stats->packets_lost = static_cast(media_receiver_info.packets_lost); + 
inbound_stats->jitter_buffer_delay = + media_receiver_info.jitter_buffer_delay_seconds; + inbound_stats->jitter_buffer_emitted_count = + media_receiver_info.jitter_buffer_emitted_count; + if (media_receiver_info.nacks_sent) { + inbound_stats->nack_count = *media_receiver_info.nacks_sent; + } } std::unique_ptr CreateInboundAudioStreamStats( @@ -342,10 +360,6 @@ std::unique_ptr CreateInboundAudioStreamStats( } inbound_audio->jitter = static_cast(voice_receiver_info.jitter_ms) / rtc::kNumMillisecsPerSec; - inbound_audio->jitter_buffer_delay = - voice_receiver_info.jitter_buffer_delay_seconds; - inbound_audio->jitter_buffer_emitted_count = - voice_receiver_info.jitter_buffer_emitted_count; inbound_audio->total_samples_received = voice_receiver_info.total_samples_received; inbound_audio->concealed_samples = voice_receiver_info.concealed_samples; @@ -443,8 +457,6 @@ void SetInboundRTPStreamStatsFromVideoReceiverInfo( static_cast(video_receiver_info.firs_sent); inbound_video->pli_count = static_cast(video_receiver_info.plis_sent); - inbound_video->nack_count = - static_cast(video_receiver_info.nacks_sent); inbound_video->frames_received = video_receiver_info.frames_received; inbound_video->frames_decoded = video_receiver_info.frames_decoded; inbound_video->frames_dropped = video_receiver_info.frames_dropped; @@ -504,6 +516,7 @@ void SetOutboundRTPStreamStatsFromMediaSenderInfo( static_cast(media_sender_info.header_and_padding_bytes_sent); outbound_stats->retransmitted_bytes_sent = media_sender_info.retransmitted_bytes_sent; + outbound_stats->nack_count = media_sender_info.nacks_rcvd; } void SetOutboundRTPStreamStatsFromVoiceSenderInfo( @@ -538,8 +551,6 @@ void SetOutboundRTPStreamStatsFromVideoSenderInfo( static_cast(video_sender_info.firs_rcvd); outbound_video->pli_count = static_cast(video_sender_info.plis_rcvd); - outbound_video->nack_count = - static_cast(video_sender_info.nacks_rcvd); if (video_sender_info.qp_sum) outbound_video->qp_sum = *video_sender_info.qp_sum; outbound_video->frames_encoded = video_sender_info.frames_encoded; @@ -568,6 +579,9 @@ void SetOutboundRTPStreamStatsFromVideoSenderInfo( outbound_video->quality_limitation_reason = QualityLimitationReasonToRTCQualityLimitationReason( video_sender_info.quality_limitation_reason); + outbound_video->quality_limitation_durations = + QualityLimitationDurationToRTCQualityLimitationDuration( + video_sender_info.quality_limitation_durations_ms); outbound_video->quality_limitation_resolution_changes = video_sender_info.quality_limitation_resolution_changes; // TODO(https://crbug.com/webrtc/10529): When info's |content_info| is @@ -729,10 +743,22 @@ const std::string& ProduceIceCandidateStats(int64_t timestamp_us, return stats->id(); } +template +void SetAudioProcessingStats(StatsType* stats, + const AudioProcessingStats& apm_stats) { + if (apm_stats.echo_return_loss) { + stats->echo_return_loss = *apm_stats.echo_return_loss; + } + if (apm_stats.echo_return_loss_enhancement) { + stats->echo_return_loss_enhancement = + *apm_stats.echo_return_loss_enhancement; + } +} + std::unique_ptr ProduceMediaStreamTrackStatsFromVoiceSenderInfo( int64_t timestamp_us, - const AudioTrackInterface& audio_track, + AudioTrackInterface& audio_track, const cricket::VoiceSenderInfo& voice_sender_info, int attachment_id) { std::unique_ptr audio_track_stats( @@ -747,13 +773,17 @@ ProduceMediaStreamTrackStatsFromVoiceSenderInfo( attachment_id); audio_track_stats->remote_source = false; audio_track_stats->detached = false; - if 
(voice_sender_info.apm_statistics.echo_return_loss) { - audio_track_stats->echo_return_loss = - *voice_sender_info.apm_statistics.echo_return_loss; - } - if (voice_sender_info.apm_statistics.echo_return_loss_enhancement) { - audio_track_stats->echo_return_loss_enhancement = - *voice_sender_info.apm_statistics.echo_return_loss_enhancement; + // Audio processor may be attached to either the track or the send + // stream, so look in both places. + SetAudioProcessingStats(audio_track_stats.get(), + voice_sender_info.apm_statistics); + auto audio_processor(audio_track.GetAudioProcessor()); + if (audio_processor.get()) { + // The |has_remote_tracks| argument is obsolete; makes no difference if it's + // set to true or false. + AudioProcessorInterface::AudioProcessorStatistics ap_stats = + audio_processor->GetStats(/*has_remote_tracks=*/false); + SetAudioProcessingStats(audio_track_stats.get(), ap_stats.apm_statistics); } return audio_track_stats; } @@ -1259,6 +1289,8 @@ void RTCStatsCollector::ProducePartialResultsOnSignalingThreadImpl( void RTCStatsCollector::ProducePartialResultsOnNetworkThread( int64_t timestamp_us, absl::optional sctp_transport_name) { + TRACE_EVENT0("webrtc", + "RTCStatsCollector::ProducePartialResultsOnNetworkThread"); RTC_DCHECK_RUN_ON(network_thread_); rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; @@ -1641,6 +1673,8 @@ void RTCStatsCollector::ProduceMediaSourceStats_s( // create separate media source stats objects on a per-attachment basis. std::unique_ptr media_source_stats; if (track->kind() == MediaStreamTrackInterface::kAudioKind) { + AudioTrackInterface* audio_track = + static_cast(track.get()); auto audio_source_stats = std::make_unique( RTCMediaSourceStatsIDFromKindAndAttachment( cricket::MEDIA_TYPE_AUDIO, sender_internal->AttachmentId()), @@ -1661,8 +1695,21 @@ void RTCStatsCollector::ProduceMediaSourceStats_s( voice_sender_info->total_input_energy; audio_source_stats->total_samples_duration = voice_sender_info->total_input_duration; + SetAudioProcessingStats(audio_source_stats.get(), + voice_sender_info->apm_statistics); } } + // Audio processor may be attached to either the track or the send + // stream, so look in both places. + auto audio_processor(audio_track->GetAudioProcessor()); + if (audio_processor.get()) { + // The |has_remote_tracks| argument is obsolete; makes no difference + // if it's set to true or false. 
+ AudioProcessorInterface::AudioProcessorStatistics ap_stats = + audio_processor->GetStats(/*has_remote_tracks=*/false); + SetAudioProcessingStats(audio_source_stats.get(), + ap_stats.apm_statistics); + } media_source_stats = std::move(audio_source_stats); } else { RTC_DCHECK_EQ(MediaStreamTrackInterface::kVideoKind, track->kind()); diff --git a/pc/rtc_stats_collector_unittest.cc b/pc/rtc_stats_collector_unittest.cc index 6a568390c0..2ac0737715 100644 --- a/pc/rtc_stats_collector_unittest.cc +++ b/pc/rtc_stats_collector_unittest.cc @@ -22,6 +22,7 @@ #include "absl/memory/memory.h" #include "absl/strings/str_replace.h" +#include "api/dtls_transport_interface.h" #include "api/media_stream_track.h" #include "api/rtp_parameters.h" #include "api/stats/rtc_stats_report.h" @@ -199,14 +200,34 @@ std::unique_ptr CreateFakeCandidate( return candidate; } +class FakeAudioProcessor : public AudioProcessorInterface { + public: + FakeAudioProcessor() {} + ~FakeAudioProcessor() {} + + private: + AudioProcessorInterface::AudioProcessorStatistics GetStats( + bool has_recv_streams) override { + AudioProcessorStatistics stats; + stats.apm_statistics.echo_return_loss = 2.0; + stats.apm_statistics.echo_return_loss_enhancement = 3.0; + return stats; + } +}; + class FakeAudioTrackForStats : public MediaStreamTrack { public: static rtc::scoped_refptr Create( const std::string& id, - MediaStreamTrackInterface::TrackState state) { + MediaStreamTrackInterface::TrackState state, + bool create_fake_audio_processor) { rtc::scoped_refptr audio_track_stats( new rtc::RefCountedObject(id)); audio_track_stats->set_state(state); + if (create_fake_audio_processor) { + audio_track_stats->processor_ = + rtc::make_ref_counted(); + } return audio_track_stats; } @@ -221,8 +242,11 @@ class FakeAudioTrackForStats : public MediaStreamTrack { void RemoveSink(webrtc::AudioTrackSinkInterface* sink) override {} bool GetSignalLevel(int* level) override { return false; } rtc::scoped_refptr GetAudioProcessor() override { - return nullptr; + return processor_; } + + private: + rtc::scoped_refptr processor_; }; class FakeVideoTrackSourceForStats : public VideoTrackSourceInterface { @@ -307,9 +331,11 @@ class FakeVideoTrackForStats : public MediaStreamTrack { rtc::scoped_refptr CreateFakeTrack( cricket::MediaType media_type, const std::string& track_id, - MediaStreamTrackInterface::TrackState track_state) { + MediaStreamTrackInterface::TrackState track_state, + bool create_fake_audio_processor = false) { if (media_type == cricket::MEDIA_TYPE_AUDIO) { - return FakeAudioTrackForStats::Create(track_id, track_state); + return FakeAudioTrackForStats::Create(track_id, track_state, + create_fake_audio_processor); } else { RTC_DCHECK_EQ(media_type, cricket::MEDIA_TYPE_VIDEO); return FakeVideoTrackForStats::Create(track_id, track_state, nullptr); @@ -925,9 +951,14 @@ TEST_F(RTCStatsCollectorTest, ToJsonProducesParseableJson) { ExampleStatsGraph graph = SetupExampleStatsGraphForSelectorTests(); rtc::scoped_refptr report = stats_->GetStatsReport(); std::string json_format = report->ToJson(); - Json::Reader reader; + + Json::CharReaderBuilder builder; Json::Value json_value; - ASSERT_TRUE(reader.parse(json_format, json_value)); + std::unique_ptr reader(builder.newCharReader()); + ASSERT_TRUE(reader->parse(json_format.c_str(), + json_format.c_str() + json_format.size(), + &json_value, nullptr)); + // A very brief sanity check on the result. 
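The test change above also migrates off jsoncpp's deprecated Json::Reader. For reference, the replacement pattern shown standalone; the include path is the upstream jsoncpp umbrella header and may be wrapped differently in this tree, and the error-string sink is optional (the test passes nullptr):

#include <json/json.h>
#include <memory>
#include <string>

bool ParseJson(const std::string& json, Json::Value* out) {
  Json::CharReaderBuilder builder;
  std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
  std::string errors;
  return reader->parse(json.data(), json.data() + json.size(), out, &errors);
}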
EXPECT_EQ(report->size(), json_value.size()); } @@ -1706,7 +1737,7 @@ TEST_F(RTCStatsCollectorTest, voice_receiver_info.inserted_samples_for_deceleration = 987; voice_receiver_info.removed_samples_for_acceleration = 876; voice_receiver_info.silent_concealed_samples = 765; - voice_receiver_info.jitter_buffer_delay_seconds = 3456; + voice_receiver_info.jitter_buffer_delay_seconds = 3.456; voice_receiver_info.jitter_buffer_emitted_count = 13; voice_receiver_info.jitter_buffer_target_delay_seconds = 7.894; voice_receiver_info.jitter_buffer_flushes = 7; @@ -1751,7 +1782,7 @@ TEST_F(RTCStatsCollectorTest, expected_remote_audio_track.inserted_samples_for_deceleration = 987; expected_remote_audio_track.removed_samples_for_acceleration = 876; expected_remote_audio_track.silent_concealed_samples = 765; - expected_remote_audio_track.jitter_buffer_delay = 3456; + expected_remote_audio_track.jitter_buffer_delay = 3.456; expected_remote_audio_track.jitter_buffer_emitted_count = 13; expected_remote_audio_track.jitter_buffer_target_delay = 7.894; expected_remote_audio_track.jitter_buffer_flushes = 7; @@ -1923,6 +1954,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Audio) { voice_media_info.receivers[0].local_stats[0].ssrc = 1; voice_media_info.receivers[0].packets_lost = -1; // Signed per RFC3550 voice_media_info.receivers[0].packets_rcvd = 2; + voice_media_info.receivers[0].nacks_sent = 5; voice_media_info.receivers[0].fec_packets_discarded = 5566; voice_media_info.receivers[0].fec_packets_received = 6677; voice_media_info.receivers[0].payload_bytes_rcvd = 3; @@ -1971,6 +2003,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Audio) { expected_audio.transport_id = "RTCTransport_TransportName_1"; expected_audio.codec_id = "RTCCodec_AudioMid_Inbound_42"; expected_audio.packets_received = 2; + expected_audio.nack_count = 5; expected_audio.fec_packets_discarded = 5566; expected_audio.fec_packets_received = 6677; expected_audio.bytes_received = 3; @@ -2037,6 +2070,8 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Video) { video_media_info.receivers[0].total_inter_frame_delay = 0.123; video_media_info.receivers[0].total_squared_inter_frame_delay = 0.00456; video_media_info.receivers[0].jitter_ms = 1199; + video_media_info.receivers[0].jitter_buffer_delay_seconds = 3.456; + video_media_info.receivers[0].jitter_buffer_emitted_count = 13; video_media_info.receivers[0].last_packet_received_timestamp_ms = absl::nullopt; @@ -2084,6 +2119,8 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Video) { expected_video.total_inter_frame_delay = 0.123; expected_video.total_squared_inter_frame_delay = 0.00456; expected_video.jitter = 1.199; + expected_video.jitter_buffer_delay = 3.456; + expected_video.jitter_buffer_emitted_count = 13; // |expected_video.last_packet_received_timestamp| should be undefined. // |expected_video.content_type| should be undefined. // |expected_video.decoder_implementation| should be undefined. 
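The quality_limitation_durations plumbing introduced earlier in rtc_stats_collector.cc (QualityLimitationDurationToRTCQualityLimitationDuration) copies each duration through unchanged and only converts the enum key to its string name; the sender-stats hunks that follow exercise exactly that. A sketch of the expected shape, assuming the stats member is the string-keyed double map implied by the test data:

// Sender reports: quality_limitation_durations_ms[QualityLimitationReason::kBandwidth] = 300
// Stats expose:   {"bandwidth": 300.0}
expected_video.quality_limitation_durations = std::map<std::string, double>{
    {"bandwidth", 300.0},
};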
@@ -2128,6 +2165,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Audio) { voice_media_info.senders[0].payload_bytes_sent = 3; voice_media_info.senders[0].header_and_padding_bytes_sent = 12; voice_media_info.senders[0].retransmitted_bytes_sent = 30; + voice_media_info.senders[0].nacks_rcvd = 31; voice_media_info.senders[0].codec_payload_type = 42; RtpCodecParameters codec_parameters; @@ -2161,6 +2199,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Audio) { expected_audio.bytes_sent = 3; expected_audio.header_bytes_sent = 12; expected_audio.retransmitted_bytes_sent = 30; + expected_audio.nack_count = 31; ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ( @@ -2198,6 +2237,8 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Video) { video_media_info.senders[0].total_packet_send_delay_ms = 10000; video_media_info.senders[0].quality_limitation_reason = QualityLimitationReason::kBandwidth; + video_media_info.senders[0].quality_limitation_durations_ms + [webrtc::QualityLimitationReason::kBandwidth] = 300; video_media_info.senders[0].quality_limitation_resolution_changes = 56u; video_media_info.senders[0].qp_sum = absl::nullopt; video_media_info.senders[0].content_type = VideoContentType::UNSPECIFIED; @@ -2253,6 +2294,9 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Video) { expected_video.total_encoded_bytes_target = 1234; expected_video.total_packet_send_delay = 10.0; expected_video.quality_limitation_reason = "bandwidth"; + expected_video.quality_limitation_durations = std::map{ + std::pair{"bandwidth", 300.0}, + }; expected_video.quality_limitation_resolution_changes = 56u; expected_video.frame_width = 200u; expected_video.frame_height = 100u; @@ -2322,7 +2366,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStats) { rtp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTP; rtp_transport_channel_stats.ice_transport_stats.connection_infos.push_back( rtp_connection_info); - rtp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_NEW; + rtp_transport_channel_stats.dtls_state = DtlsTransportState::kNew; rtp_transport_channel_stats.ice_transport_stats .selected_candidate_pair_changes = 1; pc_->SetTransportStats(kTransportName, {rtp_transport_channel_stats}); @@ -2360,7 +2404,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStats) { cricket::ICE_CANDIDATE_COMPONENT_RTCP; rtcp_transport_channel_stats.ice_transport_stats.connection_infos.push_back( rtcp_connection_info); - rtcp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_CONNECTING; + rtcp_transport_channel_stats.dtls_state = DtlsTransportState::kConnecting; pc_->SetTransportStats(kTransportName, {rtp_transport_channel_stats, rtcp_transport_channel_stats}); @@ -2476,7 +2520,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStatsWithCrypto) { rtp_transport_channel_stats.ice_transport_stats.connection_infos.push_back( rtp_connection_info); // The state must be connected in order for crypto parameters to show up. 
- rtp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_CONNECTED; + rtp_transport_channel_stats.dtls_state = DtlsTransportState::kConnected; rtp_transport_channel_stats.ice_transport_stats .selected_candidate_pair_changes = 1; rtp_transport_channel_stats.ssl_version_bytes = 0x0203; @@ -2520,6 +2564,7 @@ TEST_F(RTCStatsCollectorTest, CollectNoStreamRTCOutboundRTPStreamStats_Audio) { voice_media_info.senders[0].payload_bytes_sent = 3; voice_media_info.senders[0].header_and_padding_bytes_sent = 4; voice_media_info.senders[0].retransmitted_bytes_sent = 30; + voice_media_info.senders[0].nacks_rcvd = 31; voice_media_info.senders[0].codec_payload_type = 42; RtpCodecParameters codec_parameters; @@ -2553,6 +2598,7 @@ TEST_F(RTCStatsCollectorTest, CollectNoStreamRTCOutboundRTPStreamStats_Audio) { expected_audio.bytes_sent = 3; expected_audio.header_bytes_sent = 4; expected_audio.retransmitted_bytes_sent = 30; + expected_audio.nack_count = 31; ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ( @@ -2574,6 +2620,9 @@ TEST_F(RTCStatsCollectorTest, RTCAudioSourceStatsCollectedForSenderWithTrack) { voice_media_info.senders[0].audio_level = 32767; // [0,32767] voice_media_info.senders[0].total_input_energy = 2.0; voice_media_info.senders[0].total_input_duration = 3.0; + voice_media_info.senders[0].apm_statistics.echo_return_loss = 42.0; + voice_media_info.senders[0].apm_statistics.echo_return_loss_enhancement = + 52.0; auto* voice_media_channel = pc_->AddVoiceChannel("AudioMid", "TransportName"); voice_media_channel->SetStats(voice_media_info); stats_->SetupLocalTrackAndSender(cricket::MEDIA_TYPE_AUDIO, @@ -2589,6 +2638,8 @@ TEST_F(RTCStatsCollectorTest, RTCAudioSourceStatsCollectedForSenderWithTrack) { expected_audio.audio_level = 1.0; // [0,1] expected_audio.total_audio_energy = 2.0; expected_audio.total_samples_duration = 3.0; + expected_audio.echo_return_loss = 42.0; + expected_audio.echo_return_loss_enhancement = 52.0; ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ(report->Get(expected_audio.id())->cast_to(), @@ -2963,11 +3014,11 @@ TEST_P(RTCStatsCollectorTestWithParamKind, cricket::TransportChannelStats rtp_transport_channel_stats; rtp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTP; - rtp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_NEW; + rtp_transport_channel_stats.dtls_state = DtlsTransportState::kNew; cricket::TransportChannelStats rtcp_transport_channel_stats; rtcp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTCP; - rtcp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_NEW; + rtcp_transport_channel_stats.dtls_state = DtlsTransportState::kNew; pc_->SetTransportStats("TransportName", {rtp_transport_channel_stats, rtcp_transport_channel_stats}); AddSenderInfoAndMediaChannel("TransportName", {report_block_data}, @@ -3050,6 +3101,64 @@ TEST_F(RTCStatsCollectorTest, EXPECT_FALSE(report->Get("RTCVideoSource_42")); } +// Test collecting echo return loss stats from the audio processor attached to +// the track, rather than the voice sender info. 
+TEST_F(RTCStatsCollectorTest, CollectEchoReturnLossFromTrackAudioProcessor) { + rtc::scoped_refptr local_stream = + MediaStream::Create("LocalStreamId"); + pc_->mutable_local_streams()->AddStream(local_stream); + + // Local audio track + rtc::scoped_refptr local_audio_track = + CreateFakeTrack(cricket::MEDIA_TYPE_AUDIO, "LocalAudioTrackID", + MediaStreamTrackInterface::kEnded, + /*create_fake_audio_processor=*/true); + local_stream->AddTrack( + static_cast(local_audio_track.get())); + + cricket::VoiceSenderInfo voice_sender_info_ssrc1; + voice_sender_info_ssrc1.local_stats.push_back(cricket::SsrcSenderInfo()); + voice_sender_info_ssrc1.local_stats[0].ssrc = 1; + + stats_->CreateMockRtpSendersReceiversAndChannels( + {std::make_pair(local_audio_track.get(), voice_sender_info_ssrc1)}, {}, + {}, {}, {local_stream->id()}, {}); + + rtc::scoped_refptr report = stats_->GetStatsReport(); + + RTCMediaStreamTrackStats expected_local_audio_track_ssrc1( + IdForType(report), report->timestamp_us(), + RTCMediaStreamTrackKind::kAudio); + expected_local_audio_track_ssrc1.track_identifier = local_audio_track->id(); + expected_local_audio_track_ssrc1.media_source_id = + "RTCAudioSource_11"; // Attachment ID = SSRC + 10 + expected_local_audio_track_ssrc1.remote_source = false; + expected_local_audio_track_ssrc1.ended = true; + expected_local_audio_track_ssrc1.detached = false; + expected_local_audio_track_ssrc1.echo_return_loss = 2.0; + expected_local_audio_track_ssrc1.echo_return_loss_enhancement = 3.0; + ASSERT_TRUE(report->Get(expected_local_audio_track_ssrc1.id())) + << "Did not find " << expected_local_audio_track_ssrc1.id() << " in " + << report->ToJson(); + EXPECT_EQ(expected_local_audio_track_ssrc1, + report->Get(expected_local_audio_track_ssrc1.id()) + ->cast_to()); + + RTCAudioSourceStats expected_audio("RTCAudioSource_11", + report->timestamp_us()); + expected_audio.track_identifier = "LocalAudioTrackID"; + expected_audio.kind = "audio"; + expected_audio.audio_level = 0; + expected_audio.total_audio_energy = 0; + expected_audio.total_samples_duration = 0; + expected_audio.echo_return_loss = 2.0; + expected_audio.echo_return_loss_enhancement = 3.0; + + ASSERT_TRUE(report->Get(expected_audio.id())); + EXPECT_EQ(report->Get(expected_audio.id())->cast_to(), + expected_audio); +} + TEST_F(RTCStatsCollectorTest, GetStatsWithSenderSelector) { ExampleStatsGraph graph = SetupExampleStatsGraphForSelectorTests(); // Expected stats graph when filtered by sender: diff --git a/pc/rtc_stats_integrationtest.cc b/pc/rtc_stats_integrationtest.cc index d92e7ff29b..2dfe1b5cd5 100644 --- a/pc/rtc_stats_integrationtest.cc +++ b/pc/rtc_stats_integrationtest.cc @@ -840,11 +840,12 @@ class RTCStatsReportVerifier { verifier.TestMemberIsUndefined(inbound_stream.frames_per_second); } verifier.TestMemberIsUndefined(inbound_stream.frame_bit_depth); + verifier.TestMemberIsNonNegative( + inbound_stream.jitter_buffer_delay); + verifier.TestMemberIsNonNegative( + inbound_stream.jitter_buffer_emitted_count); if (inbound_stream.media_type.is_defined() && *inbound_stream.media_type == "video") { - verifier.TestMemberIsUndefined(inbound_stream.jitter_buffer_delay); - verifier.TestMemberIsUndefined( - inbound_stream.jitter_buffer_emitted_count); verifier.TestMemberIsUndefined(inbound_stream.total_samples_received); verifier.TestMemberIsUndefined(inbound_stream.concealed_samples); verifier.TestMemberIsUndefined(inbound_stream.silent_concealed_samples); @@ -864,10 +865,6 @@ class RTCStatsReportVerifier { 
verifier.TestMemberIsUndefined(inbound_stream.fir_count); verifier.TestMemberIsUndefined(inbound_stream.pli_count); verifier.TestMemberIsUndefined(inbound_stream.nack_count); - verifier.TestMemberIsNonNegative( - inbound_stream.jitter_buffer_delay); - verifier.TestMemberIsNonNegative( - inbound_stream.jitter_buffer_emitted_count); verifier.TestMemberIsPositive( inbound_stream.total_samples_received); verifier.TestMemberIsNonNegative( @@ -937,7 +934,6 @@ class RTCStatsReportVerifier { RTCVideoSourceStats::kType); verifier.TestMemberIsNonNegative(outbound_stream.fir_count); verifier.TestMemberIsNonNegative(outbound_stream.pli_count); - verifier.TestMemberIsNonNegative(outbound_stream.nack_count); if (*outbound_stream.frames_encoded > 0) { verifier.TestMemberIsNonNegative(outbound_stream.qp_sum); } else { @@ -946,11 +942,11 @@ class RTCStatsReportVerifier { } else { verifier.TestMemberIsUndefined(outbound_stream.fir_count); verifier.TestMemberIsUndefined(outbound_stream.pli_count); - verifier.TestMemberIsUndefined(outbound_stream.nack_count); verifier.TestMemberIsIDReference(outbound_stream.media_source_id, RTCAudioSourceStats::kType); verifier.TestMemberIsUndefined(outbound_stream.qp_sum); } + verifier.TestMemberIsNonNegative(outbound_stream.nack_count); verifier.TestMemberIsOptionalIDReference( outbound_stream.remote_id, RTCRemoteInboundRtpStreamStats::kType); verifier.TestMemberIsNonNegative(outbound_stream.packets_sent); @@ -973,6 +969,8 @@ class RTCStatsReportVerifier { verifier.TestMemberIsNonNegative( outbound_stream.total_packet_send_delay); verifier.TestMemberIsDefined(outbound_stream.quality_limitation_reason); + verifier.TestMemberIsDefined( + outbound_stream.quality_limitation_durations); verifier.TestMemberIsNonNegative( outbound_stream.quality_limitation_resolution_changes); // The integration test is not set up to test screen share; don't require @@ -1005,6 +1003,8 @@ class RTCStatsReportVerifier { // TODO(https://crbug.com/webrtc/10635): Implement for audio as well. verifier.TestMemberIsUndefined(outbound_stream.total_packet_send_delay); verifier.TestMemberIsUndefined(outbound_stream.quality_limitation_reason); + verifier.TestMemberIsUndefined( + outbound_stream.quality_limitation_durations); verifier.TestMemberIsUndefined( outbound_stream.quality_limitation_resolution_changes); verifier.TestMemberIsUndefined(outbound_stream.content_type); @@ -1078,6 +1078,12 @@ class RTCStatsReportVerifier { verifier.TestMemberIsNonNegative(audio_source.audio_level); verifier.TestMemberIsPositive(audio_source.total_audio_energy); verifier.TestMemberIsPositive(audio_source.total_samples_duration); + // TODO(hbos): |echo_return_loss| and |echo_return_loss_enhancement| are + // flaky on msan bot (sometimes defined, sometimes undefined). Should the + // test run until available or is there a way to have it always be + // defined? 
crbug.com/627816 + verifier.MarkMemberTested(audio_source.echo_return_loss, true); + verifier.MarkMemberTested(audio_source.echo_return_loss_enhancement, true); return verifier.ExpectAllMembersSuccessfullyTested(); } diff --git a/pc/rtp_receiver.cc b/pc/rtp_receiver.cc index 88f32d88e5..2444c9b60d 100644 --- a/pc/rtp_receiver.cc +++ b/pc/rtp_receiver.cc @@ -15,8 +15,8 @@ #include #include -#include "api/media_stream_proxy.h" #include "pc/media_stream.h" +#include "pc/media_stream_proxy.h" #include "rtc_base/location.h" namespace webrtc { diff --git a/pc/rtp_receiver_proxy.h b/pc/rtp_receiver_proxy.h new file mode 100644 index 0000000000..d4114e0f0b --- /dev/null +++ b/pc/rtp_receiver_proxy.h @@ -0,0 +1,54 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_RTP_RECEIVER_PROXY_H_ +#define PC_RTP_RECEIVER_PROXY_H_ + +#include +#include + +#include "api/rtp_receiver_interface.h" +#include "pc/proxy.h" + +namespace webrtc { + +// Define proxy for RtpReceiverInterface. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. +BEGIN_PROXY_MAP(RtpReceiver) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +BYPASS_PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) +PROXY_CONSTMETHOD0(std::vector, stream_ids) +PROXY_CONSTMETHOD0(std::vector>, + streams) +BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) +BYPASS_PROXY_CONSTMETHOD0(std::string, id) +PROXY_SECONDARY_CONSTMETHOD0(RtpParameters, GetParameters) +PROXY_METHOD1(void, SetObserver, RtpReceiverObserverInterface*) +PROXY_SECONDARY_METHOD1(void, + SetJitterBufferMinimumDelay, + absl::optional) +PROXY_SECONDARY_CONSTMETHOD0(std::vector, GetSources) +// TODO(bugs.webrtc.org/12772): Remove. +PROXY_SECONDARY_METHOD1(void, + SetFrameDecryptor, + rtc::scoped_refptr) +// TODO(bugs.webrtc.org/12772): Remove. +PROXY_SECONDARY_CONSTMETHOD0(rtc::scoped_refptr, + GetFrameDecryptor) +PROXY_SECONDARY_METHOD1(void, + SetDepacketizerToDecoderFrameTransformer, + rtc::scoped_refptr) +END_PROXY_MAP(RtpReceiver) + +} // namespace webrtc + +#endif // PC_RTP_RECEIVER_PROXY_H_ diff --git a/pc/rtp_sender_proxy.h b/pc/rtp_sender_proxy.h new file mode 100644 index 0000000000..2f8fe2c0bf --- /dev/null +++ b/pc/rtp_sender_proxy.h @@ -0,0 +1,51 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_RTP_SENDER_PROXY_H_ +#define PC_RTP_SENDER_PROXY_H_ + +#include +#include + +#include "api/rtp_sender_interface.h" +#include "pc/proxy.h" + +namespace webrtc { + +// Define proxy for RtpSenderInterface. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
+BEGIN_PRIMARY_PROXY_MAP(RtpSender) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) +PROXY_CONSTMETHOD0(uint32_t, ssrc) +BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) +BYPASS_PROXY_CONSTMETHOD0(std::string, id) +PROXY_CONSTMETHOD0(std::vector, stream_ids) +PROXY_CONSTMETHOD0(std::vector, init_send_encodings) +PROXY_CONSTMETHOD0(RtpParameters, GetParameters) +PROXY_METHOD1(RTCError, SetParameters, const RtpParameters&) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, GetDtmfSender) +PROXY_METHOD1(void, + SetFrameEncryptor, + rtc::scoped_refptr) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, + GetFrameEncryptor) +PROXY_METHOD1(void, SetStreams, const std::vector&) +PROXY_METHOD1(void, + SetEncoderToPacketizerFrameTransformer, + rtc::scoped_refptr) +END_PROXY_MAP(RtpSender) + +} // namespace webrtc + +#endif // PC_RTP_SENDER_PROXY_H_ diff --git a/pc/rtp_transceiver.h b/pc/rtp_transceiver.h index 35dea25a7b..6b1307b1db 100644 --- a/pc/rtp_transceiver.h +++ b/pc/rtp_transceiver.h @@ -21,19 +21,19 @@ #include "absl/types/optional.h" #include "api/array_view.h" #include "api/media_types.h" -#include "api/proxy.h" #include "api/rtc_error.h" #include "api/rtp_parameters.h" -#include "api/rtp_receiver_interface.h" -#include "api/rtp_sender_interface.h" #include "api/rtp_transceiver_direction.h" #include "api/rtp_transceiver_interface.h" #include "api/scoped_refptr.h" #include "api/task_queue/task_queue_base.h" #include "pc/channel_interface.h" #include "pc/channel_manager.h" +#include "pc/proxy.h" #include "pc/rtp_receiver.h" +#include "pc/rtp_receiver_proxy.h" #include "pc/rtp_sender.h" +#include "pc/rtp_sender_proxy.h" #include "rtc_base/ref_counted_object.h" #include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/third_party/sigslot/sigslot.h" @@ -310,7 +310,7 @@ PROXY_CONSTMETHOD0(std::vector, PROXY_METHOD1(webrtc::RTCError, SetOfferedRtpHeaderExtensions, rtc::ArrayView) -END_PROXY_MAP() +END_PROXY_MAP(RtpTransceiver) } // namespace webrtc diff --git a/pc/sctp_data_channel.cc b/pc/sctp_data_channel.cc index 682d76829c..0e4ef7de88 100644 --- a/pc/sctp_data_channel.cc +++ b/pc/sctp_data_channel.cc @@ -15,13 +15,14 @@ #include #include -#include "api/proxy.h" #include "media/sctp/sctp_transport_internal.h" +#include "pc/proxy.h" #include "pc/sctp_utils.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/system/unused.h" #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" @@ -65,7 +66,7 @@ PROXY_CONSTMETHOD0(uint64_t, buffered_amount) PROXY_METHOD0(void, Close) // TODO(bugs.webrtc.org/11547): Change to run on the network thread. PROXY_METHOD1(bool, Send, const DataBuffer&) -END_PROXY_MAP() +END_PROXY_MAP(DataChannel) } // namespace @@ -178,6 +179,7 @@ SctpDataChannel::SctpDataChannel(const InternalDataChannelInit& config, observer_(nullptr), provider_(provider) { RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_UNUSED(network_thread_); } bool SctpDataChannel::Init() { @@ -381,13 +383,11 @@ void SctpDataChannel::OnTransportChannelCreated() { } } -void SctpDataChannel::OnTransportChannelClosed() { - // The SctpTransport is unusable (for example, because the SCTP m= section - // was rejected, or because the DTLS transport closed), so we need to close - // abruptly. 
- RTCError error = RTCError(RTCErrorType::OPERATION_ERROR_WITH_DATA, - "Transport channel closed"); - error.set_error_detail(RTCErrorDetailType::SCTP_FAILURE); +void SctpDataChannel::OnTransportChannelClosed(RTCError error) { + // The SctpTransport is unusable, which could be due to multiple reasons: + // - the SCTP m= section was rejected + // - the DTLS transport is closed + // - the SCTP transport is closed CloseAbruptlyWithError(std::move(error)); } diff --git a/pc/sctp_data_channel.h b/pc/sctp_data_channel.h index 1d7a3c73f4..b0df48758b 100644 --- a/pc/sctp_data_channel.h +++ b/pc/sctp_data_channel.h @@ -177,8 +177,6 @@ class SctpDataChannel : public DataChannelInterface, void CloseAbruptlyWithError(RTCError error); // Specializations of CloseAbruptlyWithError void CloseAbruptlyWithDataChannelFailure(const std::string& message); - void CloseAbruptlyWithSctpCauseCode(const std::string& message, - uint16_t cause_code); // Slots for provider to connect signals to. // @@ -209,7 +207,7 @@ class SctpDataChannel : public DataChannelInterface, // Called when the transport channel is unusable. // This method makes sure the DataChannel is disconnected and changes state // to kClosed. - void OnTransportChannelClosed(); + void OnTransportChannelClosed(RTCError error); DataChannelStats GetStats() const; diff --git a/pc/sctp_data_channel_transport.cc b/pc/sctp_data_channel_transport.cc index bb81156a23..f01f86ebd8 100644 --- a/pc/sctp_data_channel_transport.cc +++ b/pc/sctp_data_channel_transport.cc @@ -102,9 +102,9 @@ void SctpDataChannelTransport::OnClosingProcedureComplete(int channel_id) { } } -void SctpDataChannelTransport::OnClosedAbruptly() { +void SctpDataChannelTransport::OnClosedAbruptly(RTCError error) { if (sink_) { - sink_->OnTransportClosed(); + sink_->OnTransportClosed(error); } } diff --git a/pc/sctp_data_channel_transport.h b/pc/sctp_data_channel_transport.h index 30818abc4e..4b89205ea1 100644 --- a/pc/sctp_data_channel_transport.h +++ b/pc/sctp_data_channel_transport.h @@ -41,7 +41,7 @@ class SctpDataChannelTransport : public DataChannelTransportInterface, const rtc::CopyOnWriteBuffer& buffer); void OnClosingProcedureStartedRemotely(int channel_id); void OnClosingProcedureComplete(int channel_id); - void OnClosedAbruptly(); + void OnClosedAbruptly(RTCError error); cricket::SctpTransportInternal* const sctp_transport_; diff --git a/pc/sctp_transport.cc b/pc/sctp_transport.cc index 14a09d77e0..7d4e4551f1 100644 --- a/pc/sctp_transport.cc +++ b/pc/sctp_transport.cc @@ -14,6 +14,7 @@ #include #include "absl/types/optional.h" +#include "api/dtls_transport_interface.h" #include "api/sequence_checker.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" @@ -95,9 +96,9 @@ void SctpTransport::SetDtlsTransport( if (transport) { internal_sctp_transport_->SetDtlsTransport(transport->internal()); - transport->internal()->SubscribeDtlsState( + transport->internal()->SubscribeDtlsTransportState( [this](cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { OnDtlsStateChange(transport, state); }); if (info_.state() == SctpTransportState::kNew) { @@ -159,11 +160,11 @@ void SctpTransport::OnAssociationChangeCommunicationUp() { } void SctpTransport::OnDtlsStateChange(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { RTC_DCHECK_RUN_ON(owner_thread_); RTC_CHECK(transport == dtls_transport_->internal()); - if (state == cricket::DTLS_TRANSPORT_CLOSED || - state ==
cricket::DTLS_TRANSPORT_FAILED) { + if (state == DtlsTransportState::kClosed || + state == DtlsTransportState::kFailed) { UpdateInformation(SctpTransportState::kClosed); // TODO(http://bugs.webrtc.org/11090): Close all the data channels } diff --git a/pc/sctp_transport.h b/pc/sctp_transport.h index a8bc45b770..87fde53d97 100644 --- a/pc/sctp_transport.h +++ b/pc/sctp_transport.h @@ -71,7 +71,7 @@ class SctpTransport : public SctpTransportInterface, void OnInternalClosingProcedureStartedRemotely(int sid); void OnInternalClosingProcedureComplete(int sid); void OnDtlsStateChange(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state); + DtlsTransportState state); // NOTE: |owner_thread_| is the thread that the SctpTransport object is // constructed on. In the context of PeerConnection, it's the network thread. diff --git a/pc/sctp_transport_unittest.cc b/pc/sctp_transport_unittest.cc index b4618edbff..679b481f4c 100644 --- a/pc/sctp_transport_unittest.cc +++ b/pc/sctp_transport_unittest.cc @@ -14,6 +14,7 @@ #include #include "absl/memory/memory.h" +#include "api/dtls_transport_interface.h" #include "p2p/base/fake_dtls_transport.h" #include "pc/dtls_transport.h" #include "rtc_base/gunit.h" @@ -204,7 +205,7 @@ TEST_F(SctpTransportTest, CloseWhenTransportCloses) { ASSERT_EQ_WAIT(SctpTransportState::kConnected, observer_.State(), kDefaultTimeout); static_cast(dtls_transport_->internal()) - ->SetDtlsState(cricket::DTLS_TRANSPORT_CLOSED); + ->SetDtlsState(DtlsTransportState::kClosed); ASSERT_EQ_WAIT(SctpTransportState::kClosed, observer_.State(), kDefaultTimeout); } diff --git a/pc/sdp_offer_answer.cc b/pc/sdp_offer_answer.cc index 33dab0b9cb..533bd84dbe 100644 --- a/pc/sdp_offer_answer.cc +++ b/pc/sdp_offer_answer.cc @@ -23,7 +23,6 @@ #include "api/array_view.h" #include "api/crypto/crypto_options.h" #include "api/dtls_transport_interface.h" -#include "api/media_stream_proxy.h" #include "api/rtp_parameters.h" #include "api/rtp_receiver_interface.h" #include "api/rtp_sender_interface.h" @@ -41,6 +40,7 @@ #include "pc/data_channel_utils.h" #include "pc/dtls_transport.h" #include "pc/media_stream.h" +#include "pc/media_stream_proxy.h" #include "pc/peer_connection.h" #include "pc/peer_connection_message_handler.h" #include "pc/rtp_media_utils.h" @@ -60,6 +60,7 @@ #include "rtc_base/strings/string_builder.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/trace_event.h" +#include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" using cricket::ContentInfo; @@ -87,6 +88,9 @@ namespace { typedef webrtc::PeerConnectionInterface::RTCOfferAnswerOptions RTCOfferAnswerOptions; +constexpr const char* kAlwaysAllowPayloadTypeDemuxingFieldTrialName = + "WebRTC-AlwaysAllowPayloadTypeDemuxing"; + // Error messages const char kInvalidSdp[] = "Invalid session description."; const char kInvalidCandidates[] = "Description contains invalid candidates."; @@ -544,13 +548,17 @@ RTCError UpdateSimulcastLayerStatusInSender( } bool SimulcastIsRejected(const ContentInfo* local_content, - const MediaContentDescription& answer_media_desc) { + const MediaContentDescription& answer_media_desc, + bool enable_encrypted_rtp_header_extensions) { bool simulcast_offered = local_content && local_content->media_description() && local_content->media_description()->HasSimulcast(); bool simulcast_answered = answer_media_desc.HasSimulcast(); bool rids_supported = RtpExtension::FindHeaderExtensionByUri( - answer_media_desc.rtp_header_extensions(), 
RtpExtension::kRidUri); + answer_media_desc.rtp_header_extensions(), RtpExtension::kRidUri, + enable_encrypted_rtp_header_extensions + ? RtpExtension::Filter::kPreferEncryptedExtension + : RtpExtension::Filter::kDiscardEncryptedExtension); return simulcast_offered && (!simulcast_answered || !rids_supported); } @@ -734,6 +742,17 @@ rtc::scoped_refptr LookupDtlsTransportByMid( [controller, &mid] { return controller->LookupDtlsTransportByMid(mid); }); } +bool ContentHasHeaderExtension(const cricket::ContentInfo& content_info, + absl::string_view header_extension_uri) { + for (const RtpExtension& rtp_header_extension : + content_info.media_description()->rtp_header_extensions()) { + if (rtp_header_extension.uri == header_extension_uri) { + return true; + } + } + return false; +} + } // namespace // Used by parameterless SetLocalDescription() to create an offer or answer. @@ -1247,6 +1266,7 @@ RTCError SdpOfferAnswerHandler::ApplyLocalDescription( std::unique_ptr desc, const std::map& bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::ApplyLocalDescription"); RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(desc); @@ -1536,6 +1556,7 @@ RTCError SdpOfferAnswerHandler::ApplyRemoteDescription( std::unique_ptr desc, const std::map& bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::ApplyRemoteDescription"); RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(desc); @@ -2021,6 +2042,7 @@ void SdpOfferAnswerHandler::DoCreateOffer( void SdpOfferAnswerHandler::CreateAnswer( CreateSessionDescriptionObserver* observer, const PeerConnectionInterface::RTCOfferAnswerOptions& options) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateAnswer"); RTC_DCHECK_RUN_ON(signaling_thread()); // Chain this operation. If asynchronous operations are pending on the chain, // this operation will be queued to be invoked, otherwise the contents of the @@ -2321,6 +2343,7 @@ AddIceCandidateResult SdpOfferAnswerHandler::AddIceCandidateInternal( void SdpOfferAnswerHandler::AddIceCandidate( std::unique_ptr candidate, std::function callback) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::AddIceCandidate"); RTC_DCHECK_RUN_ON(signaling_thread()); // Chain this operation. 
If asynchronous operations are pending on the chain, // this operation will be queued to be invoked, otherwise the contents of the @@ -2452,6 +2475,7 @@ PeerConnectionInterface::SignalingState SdpOfferAnswerHandler::signaling_state() void SdpOfferAnswerHandler::ChangeSignalingState( PeerConnectionInterface::SignalingState signaling_state) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::ChangeSignalingState"); RTC_DCHECK_RUN_ON(signaling_thread()); if (signaling_state_ == signaling_state) { return; @@ -2656,6 +2680,7 @@ void SdpOfferAnswerHandler::OnVideoTrackRemoved(VideoTrackInterface* track, } RTCError SdpOfferAnswerHandler::Rollback(SdpType desc_type) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::Rollback"); auto state = signaling_state(); if (state != PeerConnectionInterface::kHaveLocalOffer && state != PeerConnectionInterface::kHaveRemoteOffer) { @@ -3115,6 +3140,8 @@ RTCError SdpOfferAnswerHandler::UpdateTransceiversAndDataChannels( const SessionDescriptionInterface* old_remote_description, const std::map& bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", + "SdpOfferAnswerHandler::UpdateTransceiversAndDataChannels"); RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(IsUnifiedPlan()); @@ -3201,6 +3228,7 @@ SdpOfferAnswerHandler::AssociateTransceiver( const ContentInfo& content, const ContentInfo* old_local_content, const ContentInfo* old_remote_content) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::AssociateTransceiver"); RTC_DCHECK(IsUnifiedPlan()); #if RTC_DCHECK_IS_ON // If this is an offer then the m= section might be recycled. If the m= @@ -3277,7 +3305,9 @@ SdpOfferAnswerHandler::AssociateTransceiver( // Check if the offer indicated simulcast but the answer rejected it. // This can happen when simulcast is not supported on the remote party. 
- if (SimulcastIsRejected(old_local_content, *media_desc)) { + if (SimulcastIsRejected(old_local_content, *media_desc, + pc_->GetCryptoOptions() + .srtp.enable_encrypted_rtp_header_extensions)) { RTC_HISTOGRAM_BOOLEAN(kSimulcastDisabled, true); RTCError error = DisableSimulcastInSender(transceiver->internal()->sender_internal()); @@ -3332,6 +3362,7 @@ RTCError SdpOfferAnswerHandler::UpdateTransceiverChannel( transceiver, const cricket::ContentInfo& content, const cricket::ContentGroup* bundle_group) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::UpdateTransceiverChannel"); RTC_DCHECK(IsUnifiedPlan()); RTC_DCHECK(transceiver); cricket::ChannelInterface* channel = transceiver->internal()->channel(); @@ -3364,8 +3395,14 @@ RTCError SdpOfferAnswerHandler::UpdateDataChannel( const cricket::ContentInfo& content, const cricket::ContentGroup* bundle_group) { if (content.rejected) { - RTC_LOG(LS_INFO) << "Rejected data channel, mid=" << content.mid(); - DestroyDataChannelTransport(); + RTC_LOG(LS_INFO) << "Rejected data channel transport with mid=" + << content.mid(); + + rtc::StringBuilder sb; + sb << "Rejected data channel transport with mid=" << content.mid(); + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, sb.Release()); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + DestroyDataChannelTransport(error); } else { if (!data_channel_controller()->data_channel_transport()) { RTC_LOG(LS_INFO) << "Creating data channel, mid=" << content.mid(); @@ -4016,6 +4053,7 @@ void SdpOfferAnswerHandler::RemoveSenders(cricket::MediaType media_type) { void SdpOfferAnswerHandler::UpdateLocalSenders( const std::vector& streams, cricket::MediaType media_type) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::UpdateLocalSenders"); RTC_DCHECK_RUN_ON(signaling_thread()); std::vector* current_senders = rtp_manager()->GetLocalSenderInfos(media_type); @@ -4058,6 +4096,7 @@ void SdpOfferAnswerHandler::UpdateRemoteSendersList( bool default_sender_needed, cricket::MediaType media_type, StreamCollection* new_streams) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::UpdateRemoteSendersList"); RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(!IsUnifiedPlan()); @@ -4157,6 +4196,7 @@ void SdpOfferAnswerHandler::UpdateRemoteSendersList( } void SdpOfferAnswerHandler::EnableSending() { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::EnableSending"); RTC_DCHECK_RUN_ON(signaling_thread()); for (const auto& transceiver : transceivers()->ListInternal()) { cricket::ChannelInterface* channel = transceiver->channel(); @@ -4171,6 +4211,7 @@ RTCError SdpOfferAnswerHandler::PushdownMediaDescription( cricket::ContentSource source, const std::map& bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::PushdownMediaDescription"); const SessionDescriptionInterface* sdesc = (source == cricket::CS_LOCAL ? 
local_description() : remote_description()); @@ -4264,6 +4305,7 @@ RTCError SdpOfferAnswerHandler::PushdownMediaDescription( RTCError SdpOfferAnswerHandler::PushdownTransportDescription( cricket::ContentSource source, SdpType type) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::PushdownTransportDescription"); RTC_DCHECK_RUN_ON(signaling_thread()); if (source == cricket::CS_LOCAL) { @@ -4280,6 +4322,7 @@ RTCError SdpOfferAnswerHandler::PushdownTransportDescription( } void SdpOfferAnswerHandler::RemoveStoppedTransceivers() { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::RemoveStoppedTransceivers"); RTC_DCHECK_RUN_ON(signaling_thread()); // 3.2.10.1: For each transceiver in the connection's set of transceivers // run the following steps: @@ -4332,8 +4375,18 @@ void SdpOfferAnswerHandler::RemoveUnusedChannels( } const cricket::ContentInfo* data_info = cricket::GetFirstDataContent(desc); - if (!data_info || data_info->rejected) { - DestroyDataChannelTransport(); + if (!data_info) { + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, + "No data channel section in the description."); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + DestroyDataChannelTransport(error); + } else if (data_info->rejected) { + rtc::StringBuilder sb; + sb << "Rejected data channel with mid=" << data_info->name << "."; + + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, sb.Release()); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + DestroyDataChannelTransport(error); } } @@ -4499,6 +4552,7 @@ RTCErrorOr SdpOfferAnswerHandler::FindContentInfo( } RTCError SdpOfferAnswerHandler::CreateChannels(const SessionDescription& desc) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateChannels"); // Creating the media channels. Transports should already have been created // at this point. RTC_DCHECK_RUN_ON(signaling_thread()); @@ -4539,6 +4593,7 @@ RTCError SdpOfferAnswerHandler::CreateChannels(const SessionDescription& desc) { // TODO(steveanton): Perhaps this should be managed by the RtpTransceiver. cricket::VoiceChannel* SdpOfferAnswerHandler::CreateVoiceChannel( const std::string& mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateVoiceChannel"); RTC_DCHECK_RUN_ON(signaling_thread()); if (!channel_manager()->media_engine()) return nullptr; @@ -4557,6 +4612,7 @@ cricket::VoiceChannel* SdpOfferAnswerHandler::CreateVoiceChannel( // TODO(steveanton): Perhaps this should be managed by the RtpTransceiver. 
cricket::VideoChannel* SdpOfferAnswerHandler::CreateVideoChannel( const std::string& mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateVideoChannel"); RTC_DCHECK_RUN_ON(signaling_thread()); if (!channel_manager()->media_engine()) return nullptr; @@ -4594,6 +4650,7 @@ bool SdpOfferAnswerHandler::CreateDataChannel(const std::string& mid) { void SdpOfferAnswerHandler::DestroyTransceiverChannel( rtc::scoped_refptr> transceiver) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DestroyTransceiverChannel"); RTC_DCHECK(transceiver); RTC_LOG_THREAD_BLOCK_COUNT(); @@ -4624,12 +4681,12 @@ void SdpOfferAnswerHandler::DestroyTransceiverChannel( } } -void SdpOfferAnswerHandler::DestroyDataChannelTransport() { +void SdpOfferAnswerHandler::DestroyDataChannelTransport(RTCError error) { RTC_DCHECK_RUN_ON(signaling_thread()); const bool has_sctp = pc_->sctp_mid().has_value(); if (has_sctp) - data_channel_controller()->OnTransportChannelClosed(); + data_channel_controller()->OnTransportChannelClosed(error); pc_->network_thread()->Invoke(RTC_FROM_HERE, [this] { RTC_DCHECK_RUN_ON(pc_->network_thread()); @@ -4642,6 +4699,7 @@ void SdpOfferAnswerHandler::DestroyDataChannelTransport() { void SdpOfferAnswerHandler::DestroyChannelInterface( cricket::ChannelInterface* channel) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DestroyChannelInterface"); RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(channel_manager()->media_engine()); RTC_DCHECK(channel); @@ -4701,7 +4759,7 @@ void SdpOfferAnswerHandler::DestroyAllChannels() { } } - DestroyDataChannelTransport(); + DestroyDataChannelTransport({}); } void SdpOfferAnswerHandler::GenerateMediaDescriptionOptions( @@ -4796,6 +4854,8 @@ bool SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState( cricket::ContentSource source, const std::map& bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", + "SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState"); RTC_DCHECK_RUN_ON(signaling_thread()); // We may need to delete any created default streams and disable creation of // new ones on the basis of payload type. This is needed to avoid SSRC @@ -4811,10 +4871,13 @@ bool SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState( struct PayloadTypes { std::set audio_payload_types; std::set video_payload_types; - bool pt_demuxing_enabled_audio = true; - bool pt_demuxing_enabled_video = true; + bool pt_demuxing_possible_audio = true; + bool pt_demuxing_possible_video = true; }; std::map payload_types_by_bundle; + // If the MID is missing from *any* receiving m= section, this is set to true. + bool mid_header_extension_missing_audio = false; + bool mid_header_extension_missing_video = false; for (auto& content_info : sdesc->description()->contents()) { auto it = bundle_groups_by_mid.find(content_info.name); const cricket::ContentGroup* bundle_group = @@ -4838,26 +4901,34 @@ bool SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState( } switch (content_info.media_description()->type()) { case cricket::MediaType::MEDIA_TYPE_AUDIO: { + if (!mid_header_extension_missing_audio) { + mid_header_extension_missing_audio = + !ContentHasHeaderExtension(content_info, RtpExtension::kMidUri); + } const cricket::AudioContentDescription* audio_desc = content_info.media_description()->as_audio(); for (const cricket::AudioCodec& audio : audio_desc->codecs()) { if (payload_types->audio_payload_types.count(audio.id)) { // Two m= sections are using the same payload type, thus demuxing // by payload type is not possible. 
- payload_types->pt_demuxing_enabled_audio = false; + payload_types->pt_demuxing_possible_audio = false; } payload_types->audio_payload_types.insert(audio.id); } break; } case cricket::MediaType::MEDIA_TYPE_VIDEO: { + if (!mid_header_extension_missing_video) { + mid_header_extension_missing_video = + !ContentHasHeaderExtension(content_info, RtpExtension::kMidUri); + } const cricket::VideoContentDescription* video_desc = content_info.media_description()->as_video(); for (const cricket::VideoCodec& video : video_desc->codecs()) { if (payload_types->video_payload_types.count(video.id)) { // Two m= sections are using the same payload type, thus demuxing // by payload type is not possible. - payload_types->pt_demuxing_enabled_video = false; + payload_types->pt_demuxing_possible_video = false; } payload_types->video_payload_types.insert(video.id); } @@ -4891,9 +4962,39 @@ bool SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState( if (channels_to_update.empty()) { return true; } + + // In Unified Plan, payload type demuxing is useful for legacy endpoints that + // don't support the MID header extension, but it can also cause incorrect + // forwarding of packets when going from one m= section to multiple m= + // sections in the same BUNDLE. This only happens if media arrives prior to + // negotiation, but this can cause missing video and unsignalled ssrc bugs + // severe enough to warrant disabling PT demuxing in such cases. Therefore, if + // a MID header extension is present on all m= sections for a given kind + // (audio/video) then we use that as an OK to disable payload type demuxing in + // BUNDLEs of that kind. However, if PT demuxing was ever turned on (e.g. MID + // was ever removed on ANY m= section of that kind) then we continue to allow + // PT demuxing in order to prevent disabling it in follow-up O/A exchanges and + // allowing early media by PT. + bool bundled_pt_demux_allowed_audio = !IsUnifiedPlan() || + mid_header_extension_missing_audio || + pt_demuxing_has_been_used_audio_; + bool bundled_pt_demux_allowed_video = !IsUnifiedPlan() || + mid_header_extension_missing_video || + pt_demuxing_has_been_used_video_; + // Kill switch for the above change. + if (field_trial::IsEnabled(kAlwaysAllowPayloadTypeDemuxingFieldTrialName)) { + // TODO(https://crbug.com/webrtc/12814): If disabling PT-based demux does + // not trigger regressions, remove this kill switch. + bundled_pt_demux_allowed_audio = true; + bundled_pt_demux_allowed_video = true; + } + return pc_->worker_thread()->Invoke( RTC_FROM_HERE, - [&channels_to_update, &bundle_groups_by_mid, &payload_types_by_bundle]() { + [&channels_to_update, &bundle_groups_by_mid, &payload_types_by_bundle, + bundled_pt_demux_allowed_audio, bundled_pt_demux_allowed_video, + pt_demuxing_has_been_used_audio = &pt_demuxing_has_been_used_audio_, + pt_demuxing_has_been_used_video = &pt_demuxing_has_been_used_video_]() { for (const auto& it : channels_to_update) { RtpTransceiverDirection local_direction = it.first; cricket::ChannelInterface* channel = it.second; @@ -4903,17 +5004,27 @@ bool SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState( bundle_it != bundle_groups_by_mid.end() ?
bundle_it->second : nullptr; if (media_type == cricket::MediaType::MEDIA_TYPE_AUDIO) { - if (!channel->SetPayloadTypeDemuxingEnabled( - (!bundle_group || payload_types_by_bundle[bundle_group] - .pt_demuxing_enabled_audio) && - RtpTransceiverDirectionHasRecv(local_direction))) { + bool pt_demux_enabled = + RtpTransceiverDirectionHasRecv(local_direction) && + (!bundle_group || (bundled_pt_demux_allowed_audio && + payload_types_by_bundle[bundle_group] + .pt_demuxing_possible_audio)); + if (pt_demux_enabled) { + *pt_demuxing_has_been_used_audio = true; + } + if (!channel->SetPayloadTypeDemuxingEnabled(pt_demux_enabled)) { return false; } } else if (media_type == cricket::MediaType::MEDIA_TYPE_VIDEO) { - if (!channel->SetPayloadTypeDemuxingEnabled( - (!bundle_group || payload_types_by_bundle[bundle_group] - .pt_demuxing_enabled_video) && - RtpTransceiverDirectionHasRecv(local_direction))) { + bool pt_demux_enabled = + RtpTransceiverDirectionHasRecv(local_direction) && + (!bundle_group || (bundled_pt_demux_allowed_video && + payload_types_by_bundle[bundle_group] + .pt_demuxing_possible_video)); + if (pt_demux_enabled) { + *pt_demuxing_has_been_used_video = true; + } + if (!channel->SetPayloadTypeDemuxingEnabled(pt_demux_enabled)) { return false; } } diff --git a/pc/sdp_offer_answer.h b/pc/sdp_offer_answer.h index 1ef124baec..f86b900b91 100644 --- a/pc/sdp_offer_answer.h +++ b/pc/sdp_offer_answer.h @@ -13,6 +13,7 @@ #include #include + #include #include #include @@ -520,7 +521,7 @@ class SdpOfferAnswerHandler : public SdpStateProvider, // Destroys the RTP data channel transport and/or the SCTP data channel // transport and clears it. - void DestroyDataChannelTransport(); + void DestroyDataChannelTransport(RTCError error); // Destroys the given ChannelInterface. // The channel cannot be accessed after this method is called. @@ -629,6 +630,11 @@ class SdpOfferAnswerHandler : public SdpStateProvider, uint32_t negotiation_needed_event_id_ = 0; bool update_negotiation_needed_on_empty_chain_ RTC_GUARDED_BY(signaling_thread()) = false; + // If PT demuxing is successfully negotiated one time we will allow PT + // demuxing for the rest of the session so that PT-based apps default to PT + // demuxing in follow-up O/A exchanges. + bool pt_demuxing_has_been_used_audio_ = false; + bool pt_demuxing_has_been_used_video_ = false; // In Unified Plan, if we encounter remote SDP that does not contain an a=msid // line we create and use a stream with a random ID for our receivers. This is diff --git a/pc/session_description.cc b/pc/session_description.cc index 35b732d649..7b878cbf7b 100644 --- a/pc/session_description.cc +++ b/pc/session_description.cc @@ -85,6 +85,18 @@ bool ContentGroup::RemoveContentName(const std::string& content_name) { return true; } +std::string ContentGroup::ToString() const { + rtc::StringBuilder acc; + acc << semantics_ << "("; + if (!content_names_.empty()) { + for (const auto& name : content_names_) { + acc << name << " "; + } + } + acc << ")"; + return acc.Release(); +} + SessionDescription::SessionDescription() = default; SessionDescription::SessionDescription(const SessionDescription&) = default; diff --git a/pc/session_description.h b/pc/session_description.h index 96aa996752..a20caf624a 100644 --- a/pc/session_description.h +++ b/pc/session_description.h @@ -143,6 +143,11 @@ class MediaContentDescription { cryptos_ = cryptos; } + // List of RTP header extensions. 
URIs are **NOT** guaranteed to be unique + // as they can appear twice when both encrypted and non-encrypted extensions + // are present. + // Use RtpExtension::FindHeaderExtensionByUri for finding and + // RtpExtension::DeduplicateHeaderExtensions for filtering. virtual const RtpHeaderExtensions& rtp_header_extensions() const { return rtp_header_extensions_; } @@ -483,6 +488,8 @@ class ContentGroup { bool HasContentName(const std::string& content_name) const; void AddContentName(const std::string& content_name); bool RemoveContentName(const std::string& content_name); + // for debugging + std::string ToString() const; private: std::string semantics_; diff --git a/pc/srtp_transport.cc b/pc/srtp_transport.cc index ee073497e7..c90b3fa227 100644 --- a/pc/srtp_transport.cc +++ b/pc/srtp_transport.cc @@ -201,12 +201,12 @@ bool SrtpTransport::SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, void SrtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { + TRACE_EVENT0("webrtc", "SrtpTransport::OnRtpPacketReceived"); if (!IsSrtpActive()) { RTC_LOG(LS_WARNING) << "Inactive SRTP transport received an RTP packet. Drop it."; return; } - TRACE_EVENT0("webrtc", "SRTP Decode"); char* data = packet.MutableData(); int len = rtc::checked_cast(packet.size()); if (!UnprotectRtp(data, len, &len)) { @@ -233,12 +233,12 @@ void SrtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet, void SrtpTransport::OnRtcpPacketReceived(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { + TRACE_EVENT0("webrtc", "SrtpTransport::OnRtcpPacketReceived"); if (!IsSrtpActive()) { RTC_LOG(LS_WARNING) << "Inactive SRTP transport received an RTCP packet. Drop it."; return; } - TRACE_EVENT0("webrtc", "SRTP Decode"); char* data = packet.MutableData(); int len = rtc::checked_cast(packet.size()); if (!UnprotectRtcp(data, len, &len)) { diff --git a/pc/stats_collector.cc b/pc/stats_collector.cc index 6d4c224cb6..eb2176ed38 100644 --- a/pc/stats_collector.cc +++ b/pc/stats_collector.cc @@ -50,6 +50,7 @@ #include "rtc_base/string_encode.h" #include "rtc_base/thread.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" namespace webrtc { @@ -318,6 +319,10 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, if (info.qp_sum) report->AddInt64(StatsReport::kStatsValueNameQpSum, *info.qp_sum); + if (info.nacks_sent) { + report->AddInt(StatsReport::kStatsValueNameNacksSent, *info.nacks_sent); + } + const IntForAdd ints[] = { {StatsReport::kStatsValueNameCurrentDelayMs, info.current_delay_ms}, {StatsReport::kStatsValueNameDecodeMs, info.decode_ms}, @@ -331,7 +336,6 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, {StatsReport::kStatsValueNameMaxDecodeMs, info.max_decode_ms}, {StatsReport::kStatsValueNameMinPlayoutDelayMs, info.min_playout_delay_ms}, - {StatsReport::kStatsValueNameNacksSent, info.nacks_sent}, {StatsReport::kStatsValueNamePacketsLost, info.packets_lost}, {StatsReport::kStatsValueNamePacketsReceived, info.packets_rcvd}, {StatsReport::kStatsValueNamePlisSent, info.plis_sent}, @@ -810,7 +814,7 @@ StatsReport* StatsCollector::AddConnectionInfoReport( StatsReport* StatsCollector::AddCandidateReport( const cricket::CandidateStats& candidate_stats, bool local) { - const auto& candidate = candidate_stats.candidate; + const auto& candidate = candidate_stats.candidate(); StatsReport::Id id(StatsReport::NewCandidateId(local, candidate.id())); StatsReport* report = reports_.Find(id); if (!report) { @@ -833,8 +837,8 
@@ StatsReport* StatsCollector::AddCandidateReport( } report->set_timestamp(stats_gathering_started_); - if (local && candidate_stats.stun_stats.has_value()) { - const auto& stun_stats = candidate_stats.stun_stats.value(); + if (local && candidate_stats.stun_stats().has_value()) { + const auto& stun_stats = candidate_stats.stun_stats().value(); report->AddInt64(StatsReport::kStatsValueNameSentStunKeepaliveRequests, stun_stats.stun_binding_requests_sent); report->AddInt64(StatsReport::kStatsValueNameRecvStunKeepaliveResponses, @@ -849,6 +853,7 @@ StatsReport* StatsCollector::AddCandidateReport( } std::map StatsCollector::ExtractSessionInfo() { + TRACE_EVENT0("webrtc", "StatsCollector::ExtractSessionInfo"); RTC_DCHECK_RUN_ON(pc_->signaling_thread()); SessionStats stats; @@ -870,6 +875,7 @@ StatsCollector::SessionStats StatsCollector::ExtractSessionInfo_n( RtpTransceiverProxyWithInternal>>& transceivers, absl::optional sctp_transport_name, absl::optional sctp_mid) { + TRACE_EVENT0("webrtc", "StatsCollector::ExtractSessionInfo_n"); RTC_DCHECK_RUN_ON(pc_->network_thread()); rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; SessionStats stats; diff --git a/pc/test/fake_peer_connection_base.h b/pc/test/fake_peer_connection_base.h index 1acf86fdac..7970dd0f0f 100644 --- a/pc/test/fake_peer_connection_base.h +++ b/pc/test/fake_peer_connection_base.h @@ -120,10 +120,11 @@ class FakePeerConnectionBase : public PeerConnectionInternal { return nullptr; } - rtc::scoped_refptr CreateDataChannel( + RTCErrorOr> CreateDataChannelOrError( const std::string& label, const DataChannelInit* config) override { - return nullptr; + return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, + "Fake function called"); } const SessionDescriptionInterface* local_description() const override { diff --git a/pc/test/integration_test_helpers.h b/pc/test/integration_test_helpers.h index f015312e8f..117f1b428b 100644 --- a/pc/test/integration_test_helpers.h +++ b/pc/test/integration_test_helpers.h @@ -37,7 +37,6 @@ #include "api/media_stream_interface.h" #include "api/media_types.h" #include "api/peer_connection_interface.h" -#include "api/peer_connection_proxy.h" #include "api/rtc_error.h" #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/rtc_event_log/rtc_event_log_factory_interface.h" @@ -84,6 +83,7 @@ #include "pc/media_session.h" #include "pc/peer_connection.h" #include "pc/peer_connection_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/rtp_media_utils.h" #include "pc/session_description.h" #include "pc/test/fake_audio_capture_module.h" diff --git a/pc/test/mock_delayable.h b/pc/test/mock_delayable.h deleted file mode 100644 index bef07c1970..0000000000 --- a/pc/test/mock_delayable.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef PC_TEST_MOCK_DELAYABLE_H_ -#define PC_TEST_MOCK_DELAYABLE_H_ - -#include - -#include "absl/types/optional.h" -#include "media/base/delayable.h" -#include "test/gmock.h" - -namespace webrtc { - -class MockDelayable : public cricket::Delayable { - public: - MOCK_METHOD(bool, - SetBaseMinimumPlayoutDelayMs, - (uint32_t ssrc, int delay_ms), - (override)); - MOCK_METHOD(absl::optional, - GetBaseMinimumPlayoutDelayMs, - (uint32_t ssrc), - (const, override)); -}; - -} // namespace webrtc - -#endif // PC_TEST_MOCK_DELAYABLE_H_ diff --git a/pc/test/peer_connection_test_wrapper.cc b/pc/test/peer_connection_test_wrapper.cc index 56e81ec0b3..8fdfb1bbb8 100644 --- a/pc/test/peer_connection_test_wrapper.cc +++ b/pc/test/peer_connection_test_wrapper.cc @@ -140,7 +140,14 @@ rtc::scoped_refptr PeerConnectionTestWrapper::CreateDataChannel( const std::string& label, const webrtc::DataChannelInit& init) { - return peer_connection_->CreateDataChannel(label, &init); + auto result = peer_connection_->CreateDataChannelOrError(label, &init); + if (!result.ok()) { + RTC_LOG(LS_ERROR) << "CreateDataChannel failed: " + << ToString(result.error().type()) << " " + << result.error().message(); + return nullptr; + } + return result.MoveValue(); } void PeerConnectionTestWrapper::WaitForNegotiation() { diff --git a/pc/track_media_info_map_unittest.cc b/pc/track_media_info_map_unittest.cc index 2a4889a576..1d5caacddb 100644 --- a/pc/track_media_info_map_unittest.cc +++ b/pc/track_media_info_map_unittest.cc @@ -31,6 +31,45 @@ namespace webrtc { namespace { +class MockVideoTrack : public VideoTrackInterface { + public: + // NotifierInterface + MOCK_METHOD(void, + RegisterObserver, + (ObserverInterface * observer), + (override)); + MOCK_METHOD(void, + UnregisterObserver, + (ObserverInterface * observer), + (override)); + + // MediaStreamTrackInterface + MOCK_METHOD(std::string, kind, (), (const, override)); + MOCK_METHOD(std::string, id, (), (const, override)); + MOCK_METHOD(bool, enabled, (), (const, override)); + MOCK_METHOD(bool, set_enabled, (bool enable), (override)); + MOCK_METHOD(TrackState, state, (), (const, override)); + + // VideoSourceInterface + MOCK_METHOD(void, + AddOrUpdateSink, + (rtc::VideoSinkInterface * sink, + const rtc::VideoSinkWants& wants), + (override)); + // RemoveSink must guarantee that at the time the method returns, + // there are no current and no future calls to VideoSinkInterface::OnFrame.
+ MOCK_METHOD(void, + RemoveSink, + (rtc::VideoSinkInterface * sink), + (override)); + + // VideoTrackInterface + MOCK_METHOD(VideoTrackSourceInterface*, GetSource, (), (const, override)); + + MOCK_METHOD(ContentHint, content_hint, (), (const, override)); + MOCK_METHOD(void, set_content_hint, (ContentHint hint), (override)); +}; + RtpParameters CreateRtpParametersWithSsrcs( std::initializer_list ssrcs) { RtpParameters params; @@ -79,23 +118,35 @@ rtc::scoped_refptr CreateMockRtpReceiver( return receiver; } +rtc::scoped_refptr CreateVideoTrack( + const std::string& id) { + return VideoTrack::Create(id, FakeVideoTrackSource::Create(false), + rtc::Thread::Current()); +} + +rtc::scoped_refptr CreateMockVideoTrack( + const std::string& id) { + auto track = rtc::make_ref_counted(); + EXPECT_CALL(*track, kind()) + .WillRepeatedly(::testing::Return(VideoTrack::kVideoKind)); + return track; +} + class TrackMediaInfoMapTest : public ::testing::Test { public: TrackMediaInfoMapTest() : TrackMediaInfoMapTest(true) {} - explicit TrackMediaInfoMapTest(bool use_current_thread) + explicit TrackMediaInfoMapTest(bool use_real_video_track) : voice_media_info_(new cricket::VoiceMediaInfo()), video_media_info_(new cricket::VideoMediaInfo()), local_audio_track_(AudioTrack::Create("LocalAudioTrack", nullptr)), remote_audio_track_(AudioTrack::Create("RemoteAudioTrack", nullptr)), - local_video_track_(VideoTrack::Create( - "LocalVideoTrack", - FakeVideoTrackSource::Create(false), - use_current_thread ? rtc::Thread::Current() : nullptr)), - remote_video_track_(VideoTrack::Create( - "RemoteVideoTrack", - FakeVideoTrackSource::Create(false), - use_current_thread ? rtc::Thread::Current() : nullptr)) {} + local_video_track_(use_real_video_track + ? CreateVideoTrack("LocalVideoTrack") + : CreateMockVideoTrack("LocalVideoTrack")), + remote_video_track_(use_real_video_track + ? 
CreateVideoTrack("RemoteVideoTrack") + : CreateMockVideoTrack("LocalVideoTrack")) {} ~TrackMediaInfoMapTest() { // If we have a map the ownership has been passed to the map, only delete if @@ -179,8 +230,8 @@ class TrackMediaInfoMapTest : public ::testing::Test { std::unique_ptr map_; rtc::scoped_refptr local_audio_track_; rtc::scoped_refptr remote_audio_track_; - rtc::scoped_refptr local_video_track_; - rtc::scoped_refptr remote_video_track_; + rtc::scoped_refptr local_video_track_; + rtc::scoped_refptr remote_video_track_; }; } // namespace diff --git a/pc/transport_stats.h b/pc/transport_stats.h index 7cb95f4ad2..173af91fba 100644 --- a/pc/transport_stats.h +++ b/pc/transport_stats.h @@ -14,6 +14,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/ice_transport_internal.h" #include "p2p/base/port.h" @@ -30,7 +31,7 @@ struct TransportChannelStats { int ssl_version_bytes = 0; int srtp_crypto_suite = rtc::SRTP_INVALID_CRYPTO_SUITE; int ssl_cipher_suite = rtc::TLS_NULL_WITH_NULL_NULL; - DtlsTransportState dtls_state = DTLS_TRANSPORT_NEW; + webrtc::DtlsTransportState dtls_state = webrtc::DtlsTransportState::kNew; IceTransportStats ice_transport_stats; }; diff --git a/pc/used_ids.h b/pc/used_ids.h index 5960197344..62b2faa018 100644 --- a/pc/used_ids.h +++ b/pc/used_ids.h @@ -108,7 +108,7 @@ class UsedPayloadTypes : public UsedIds { private: static const int kFirstDynamicPayloadTypeLowerRange = 35; - static const int kLastDynamicPayloadTypeLowerRange = 65; + static const int kLastDynamicPayloadTypeLowerRange = 63; static const int kFirstDynamicPayloadTypeUpperRange = 96; static const int kLastDynamicPayloadTypeUpperRange = 127; diff --git a/pc/video_rtp_receiver.cc b/pc/video_rtp_receiver.cc index 34cfe96f28..8db4d9f02f 100644 --- a/pc/video_rtp_receiver.cc +++ b/pc/video_rtp_receiver.cc @@ -16,7 +16,7 @@ #include #include "api/video/recordable_encoded_frame.h" -#include "api/video_track_source_proxy.h" +#include "api/video_track_source_proxy_factory.h" #include "pc/video_track.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" @@ -41,16 +41,15 @@ VideoRtpReceiver::VideoRtpReceiver( track_(VideoTrackProxyWithInternal::Create( rtc::Thread::Current(), worker_thread, - VideoTrack::Create( - receiver_id, - VideoTrackSourceProxy::Create(rtc::Thread::Current(), - worker_thread, - source_), - worker_thread))), + VideoTrack::Create(receiver_id, + CreateVideoTrackSourceProxy(rtc::Thread::Current(), + worker_thread, + source_), + worker_thread))), attachment_id_(GenerateUniqueId()) { RTC_DCHECK(worker_thread_); SetStreams(streams); - source_->SetState(MediaSourceInterface::kLive); + RTC_DCHECK_EQ(source_->state(), MediaSourceInterface::kLive); } VideoRtpReceiver::~VideoRtpReceiver() { diff --git a/pc/video_rtp_receiver.h b/pc/video_rtp_receiver.h index 89e15a5c79..f59db7a840 100644 --- a/pc/video_rtp_receiver.h +++ b/pc/video_rtp_receiver.h @@ -21,7 +21,6 @@ #include "api/dtls_transport_interface.h" #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" -#include "api/media_stream_track_proxy.h" #include "api/media_types.h" #include "api/rtp_parameters.h" #include "api/rtp_receiver_interface.h" @@ -33,6 +32,7 @@ #include "api/video/video_source_interface.h" #include "media/base/media_channel.h" #include "pc/jitter_buffer_delay.h" +#include "pc/media_stream_track_proxy.h" #include "pc/rtp_receiver.h" #include "pc/video_rtp_track_source.h" #include "pc/video_track.h" diff --git 
a/pc/video_track.cc b/pc/video_track.cc index b4f511b5fb..d0246faa87 100644 --- a/pc/video_track.cc +++ b/pc/video_track.cc @@ -11,6 +11,7 @@ #include "pc/video_track.h" #include +#include #include #include "api/notifier.h" @@ -28,10 +29,16 @@ VideoTrack::VideoTrack(const std::string& label, worker_thread_(worker_thread), video_source_(video_source), content_hint_(ContentHint::kNone) { + RTC_DCHECK_RUN_ON(&signaling_thread_); + // Detach the thread checker for VideoSourceBaseGuarded since we'll make calls + // to VideoSourceBaseGuarded on the worker thread, but we're currently on the + // signaling thread. + source_sequence_.Detach(); video_source_->RegisterObserver(this); } VideoTrack::~VideoTrack() { + RTC_DCHECK_RUN_ON(&signaling_thread_); video_source_->UnregisterObserver(this); } @@ -43,26 +50,31 @@ std::string VideoTrack::kind() const { // thread. void VideoTrack::AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) { - RTC_DCHECK(worker_thread_->IsCurrent()); - VideoSourceBase::AddOrUpdateSink(sink, wants); + RTC_DCHECK_RUN_ON(worker_thread_); + VideoSourceBaseGuarded::AddOrUpdateSink(sink, wants); rtc::VideoSinkWants modified_wants = wants; modified_wants.black_frames = !enabled(); video_source_->AddOrUpdateSink(sink, modified_wants); } void VideoTrack::RemoveSink(rtc::VideoSinkInterface* sink) { - RTC_DCHECK(worker_thread_->IsCurrent()); - VideoSourceBase::RemoveSink(sink); + RTC_DCHECK_RUN_ON(worker_thread_); + VideoSourceBaseGuarded::RemoveSink(sink); video_source_->RemoveSink(sink); } +VideoTrackSourceInterface* VideoTrack::GetSource() const { + // Callable from any thread. + return video_source_.get(); +} + VideoTrackInterface::ContentHint VideoTrack::content_hint() const { - RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); return content_hint_; } void VideoTrack::set_content_hint(ContentHint hint) { - RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); if (content_hint_ == hint) return; content_hint_ = hint; @@ -70,25 +82,36 @@ void VideoTrack::set_content_hint(ContentHint hint) { } bool VideoTrack::set_enabled(bool enable) { - RTC_DCHECK(signaling_thread_checker_.IsCurrent()); - worker_thread_->Invoke(RTC_FROM_HERE, [enable, this] { - RTC_DCHECK(worker_thread_->IsCurrent()); - for (auto& sink_pair : sink_pairs()) { - rtc::VideoSinkWants modified_wants = sink_pair.wants; - modified_wants.black_frames = !enable; - video_source_->AddOrUpdateSink(sink_pair.sink, modified_wants); - } - }); + RTC_DCHECK_RUN_ON(worker_thread_); + for (auto& sink_pair : sink_pairs()) { + rtc::VideoSinkWants modified_wants = sink_pair.wants; + modified_wants.black_frames = !enable; + video_source_->AddOrUpdateSink(sink_pair.sink, modified_wants); + } return MediaStreamTrack::set_enabled(enable); } +bool VideoTrack::enabled() const { + RTC_DCHECK_RUN_ON(worker_thread_); + return MediaStreamTrack::enabled(); +} + +MediaStreamTrackInterface::TrackState VideoTrack::state() const { + RTC_DCHECK_RUN_ON(worker_thread_); + return MediaStreamTrack::state(); +} + void VideoTrack::OnChanged() { - RTC_DCHECK(signaling_thread_checker_.IsCurrent()); - if (video_source_->state() == MediaSourceInterface::kEnded) { - set_state(kEnded); - } else { - set_state(kLive); - } + RTC_DCHECK_RUN_ON(&signaling_thread_); + worker_thread_->Invoke( + RTC_FROM_HERE, [this, state = video_source_->state()]() { + // TODO(tommi): Calling set_state() this way isn't ideal since we're + // currently blocking the signaling thread and set_state() 
may + // internally fire notifications via `FireOnChanged()` which may further + // amplify the blocking effect on the signaling thread. + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + set_state(state == MediaSourceInterface::kEnded ? kEnded : kLive); + }); } rtc::scoped_refptr VideoTrack::Create( diff --git a/pc/video_track.h b/pc/video_track.h index bff63fcb96..e840c8097f 100644 --- a/pc/video_track.h +++ b/pc/video_track.h @@ -27,7 +27,7 @@ namespace webrtc { class VideoTrack : public MediaStreamTrack, - public rtc::VideoSourceBase, + public rtc::VideoSourceBaseGuarded, public ObserverInterface { public: static rtc::scoped_refptr Create( @@ -38,13 +38,13 @@ class VideoTrack : public MediaStreamTrack, void AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) override; void RemoveSink(rtc::VideoSinkInterface* sink) override; + VideoTrackSourceInterface* GetSource() const override; - VideoTrackSourceInterface* GetSource() const override { - return video_source_.get(); - } ContentHint content_hint() const override; void set_content_hint(ContentHint hint) override; bool set_enabled(bool enable) override; + bool enabled() const override; + MediaStreamTrackInterface::TrackState state() const override; std::string kind() const override; protected: @@ -57,10 +57,10 @@ class VideoTrack : public MediaStreamTrack, // Implements ObserverInterface. Observes |video_source_| state. void OnChanged() override; + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker signaling_thread_; rtc::Thread* const worker_thread_; - SequenceChecker signaling_thread_checker_; - rtc::scoped_refptr video_source_; - ContentHint content_hint_ RTC_GUARDED_BY(signaling_thread_checker_); + const rtc::scoped_refptr video_source_; + ContentHint content_hint_ RTC_GUARDED_BY(worker_thread_); }; } // namespace webrtc diff --git a/pc/video_track_source.cc b/pc/video_track_source.cc index f45d44aa32..d15eaaf43c 100644 --- a/pc/video_track_source.cc +++ b/pc/video_track_source.cc @@ -15,7 +15,7 @@ namespace webrtc { VideoTrackSource::VideoTrackSource(bool remote) - : state_(kInitializing), remote_(remote) { + : state_(kLive), remote_(remote) { worker_thread_checker_.Detach(); } diff --git a/pc/video_track_source_proxy.cc b/pc/video_track_source_proxy.cc new file mode 100644 index 0000000000..309c1f20f8 --- /dev/null +++ b/pc/video_track_source_proxy.cc @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/video_track_source_proxy.h" + +#include "api/media_stream_interface.h" +#include "api/video_track_source_proxy_factory.h" + +namespace webrtc { + +rtc::scoped_refptr CreateVideoTrackSourceProxy( + rtc::Thread* signaling_thread, + rtc::Thread* worker_thread, + VideoTrackSourceInterface* source) { + return VideoTrackSourceProxy::Create(signaling_thread, worker_thread, source); +} + +} // namespace webrtc diff --git a/api/video_track_source_proxy.h b/pc/video_track_source_proxy.h similarity index 85% rename from api/video_track_source_proxy.h rename to pc/video_track_source_proxy.h index 0b60d20de5..8914dd0525 100644 --- a/api/video_track_source_proxy.h +++ b/pc/video_track_source_proxy.h @@ -8,18 +8,18 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef API_VIDEO_TRACK_SOURCE_PROXY_H_ -#define API_VIDEO_TRACK_SOURCE_PROXY_H_ +#ifndef PC_VIDEO_TRACK_SOURCE_PROXY_H_ +#define PC_VIDEO_TRACK_SOURCE_PROXY_H_ #include "api/media_stream_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { // Makes sure the real VideoTrackSourceInterface implementation is destroyed on // the signaling thread and marshals all method calls to the signaling thread. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. BEGIN_PROXY_MAP(VideoTrackSource) PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_CONSTMETHOD0(SourceState, state) @@ -42,8 +42,8 @@ PROXY_SECONDARY_METHOD1(void, PROXY_SECONDARY_METHOD1(void, RemoveEncodedSink, rtc::VideoSinkInterface*) -END_PROXY_MAP() +END_PROXY_MAP(VideoTrackSource) } // namespace webrtc -#endif // API_VIDEO_TRACK_SOURCE_PROXY_H_ +#endif // PC_VIDEO_TRACK_SOURCE_PROXY_H_ diff --git a/rtc_base/BUILD.gn b/rtc_base/BUILD.gn index 501ca01541..8dc89fafba 100644 --- a/rtc_base/BUILD.gn +++ b/rtc_base/BUILD.gn @@ -160,7 +160,6 @@ rtc_library("rtc_base_approved") { public_deps += [ # no-presubmit-check TODO(webrtc:8603) ":atomicops", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -279,7 +278,6 @@ rtc_library("logging") { libs = [] deps = [ ":checks", - ":criticalsection", ":macromagic", ":platform_thread_types", ":stringutils", @@ -505,7 +503,6 @@ if (rtc_enable_libevent) { ] deps = [ ":checks", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -552,7 +549,6 @@ if (is_win) { ] deps = [ ":checks", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -576,7 +572,6 @@ rtc_library("rtc_task_queue_stdlib") { ] deps = [ ":checks", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -642,10 +637,6 @@ rtc_library("rtc_stats_counters") { config("rtc_json_suppressions") { if (!is_win || is_clang) { cflags_cc = [ - # TODO(bugs.webrtc.org/10770): Update jsoncpp API usage and remove - # -Wno-deprecated-declarations. - "-Wno-deprecated-declarations", - # TODO(bugs.webrtc.org/10814): Remove -Wno-undef as soon as it get # removed upstream. 
"-Wno-undef", @@ -1147,7 +1138,6 @@ rtc_library("testclient") { "test_client.h", ] deps = [ - ":criticalsection", ":gunit_helpers", ":rtc_base", ":rtc_base_tests_utils", @@ -1381,6 +1371,7 @@ if (rtc_include_tests) { ":async_socket", ":bounded_inline_vector", ":checks", + ":criticalsection", ":divide_round", ":gunit_helpers", ":ip_address", @@ -1408,6 +1399,7 @@ if (rtc_include_tests) { "../test:fileutils", "../test:test_main", "../test:test_support", + "containers:unittests", "memory:unittests", "synchronization:mutex", "task_utils:to_queued_task", diff --git a/rtc_base/containers/BUILD.gn b/rtc_base/containers/BUILD.gn new file mode 100644 index 0000000000..f303e706e4 --- /dev/null +++ b/rtc_base/containers/BUILD.gn @@ -0,0 +1,59 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../webrtc.gni") + +rtc_library("flat_containers_internal") { + sources = [ + "as_const.h", + "flat_tree.cc", + "flat_tree.h", + "identity.h", + "invoke.h", + "move_only_int.h", + "not_fn.h", + "void_t.h", + ] + deps = [ + "..:checks", + "../system:no_unique_address", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] + visibility = [ ":*" ] +} + +rtc_source_set("flat_set") { + sources = [ "flat_set.h" ] + deps = [ ":flat_containers_internal" ] +} + +rtc_source_set("flat_map") { + sources = [ "flat_map.h" ] + deps = [ + ":flat_containers_internal", + "..:checks", + ] +} + +rtc_library("unittests") { + testonly = true + sources = [ + "flat_map_unittest.cc", + "flat_set_unittest.cc", + "flat_tree_unittest.cc", + ] + deps = [ + ":flat_containers_internal", + ":flat_map", + ":flat_set", + "../../test:test_support", + "//testing/gmock:gmock", + "//testing/gtest:gtest", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] +} diff --git a/rtc_base/containers/as_const.h b/rtc_base/containers/as_const.h new file mode 100644 index 0000000000..a41b3bc378 --- /dev/null +++ b/rtc_base/containers/as_const.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_AS_CONST_H_ +#define RTC_BASE_CONTAINERS_AS_CONST_H_ + +#include + +namespace webrtc { + +// C++14 implementation of C++17's std::as_const(): +// https://en.cppreference.com/w/cpp/utility/as_const +template +constexpr std::add_const_t& as_const(T& t) noexcept { + return t; +} + +template +void as_const(const T&& t) = delete; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_AS_CONST_H_ diff --git a/rtc_base/containers/flat_map.h b/rtc_base/containers/flat_map.h new file mode 100644 index 0000000000..1dfae51655 --- /dev/null +++ b/rtc_base/containers/flat_map.h @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_FLAT_MAP_H_ +#define RTC_BASE_CONTAINERS_FLAT_MAP_H_ + +#include +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/containers/flat_tree.h" + +namespace webrtc { + +namespace flat_containers_internal { + +// An implementation of the flat_tree GetKeyFromValue template parameter that +// extracts the key as the first element of a pair. +struct GetFirst { + template + constexpr const Key& operator()(const std::pair& p) const { + return p.first; + } +}; + +} // namespace flat_containers_internal + +// flat_map is a container with a std::map-like interface that stores its +// contents in a sorted container, by default a vector. +// +// Its implementation mostly tracks the corresponding standardization proposal +// https://wg21.link/P0429, except that the storage of keys and values is not +// split. +// +// PROS +// +// - Good memory locality. +// - Low overhead, especially for smaller maps. +// - Performance is good for more workloads than you might expect (see +// //base/containers/README.md in Chromium repository) +// - Supports C++14 map interface. +// +// CONS +// +// - Inserts and removals are O(n). +// +// IMPORTANT NOTES +// +// - Iterators are invalidated across mutations. This means that the following +// line of code has undefined behavior since adding a new element could +// resize the container, invalidating all iterators: +// container["new element"] = it.second; +// - If possible, construct a flat_map in one operation by inserting into +// a container and moving that container into the flat_map constructor. +// +// QUICK REFERENCE +// +// Most of the core functionality is inherited from flat_tree. Please see +// flat_tree.h for more details for most of these functions. As a quick +// reference, the functions available are: +// +// Constructors (inputs need not be sorted): +// flat_map(const flat_map&); +// flat_map(flat_map&&); +// flat_map(InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_map(const container_type& items, +// const Compare& compare = Compare()); +// flat_map(container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. +// flat_map(std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Constructors (inputs need to be sorted): +// flat_map(sorted_unique_t, +// InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_map(sorted_unique_t, +// const container_type& items, +// const Compare& compare = Compare()); +// flat_map(sorted_unique_t, +// container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. 
+// flat_map(sorted_unique_t, +// std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Assignment functions: +// flat_map& operator=(const flat_map&); +// flat_map& operator=(flat_map&&); +// flat_map& operator=(initializer_list); +// +// Memory management functions: +// void reserve(size_t); +// size_t capacity() const; +// void shrink_to_fit(); +// +// Size management functions: +// void clear(); +// size_t size() const; +// size_t max_size() const; +// bool empty() const; +// +// Iterator functions: +// iterator begin(); +// const_iterator begin() const; +// const_iterator cbegin() const; +// iterator end(); +// const_iterator end() const; +// const_iterator cend() const; +// reverse_iterator rbegin(); +// const reverse_iterator rbegin() const; +// const_reverse_iterator crbegin() const; +// reverse_iterator rend(); +// const_reverse_iterator rend() const; +// const_reverse_iterator crend() const; +// +// Insert and accessor functions: +// mapped_type& operator[](const key_type&); +// mapped_type& operator[](key_type&&); +// mapped_type& at(const K&); +// const mapped_type& at(const K&) const; +// pair insert(const value_type&); +// pair insert(value_type&&); +// iterator insert(const_iterator hint, const value_type&); +// iterator insert(const_iterator hint, value_type&&); +// void insert(InputIterator first, InputIterator last); +// pair insert_or_assign(K&&, M&&); +// iterator insert_or_assign(const_iterator hint, K&&, M&&); +// pair emplace(Args&&...); +// iterator emplace_hint(const_iterator, Args&&...); +// pair try_emplace(K&&, Args&&...); +// iterator try_emplace(const_iterator hint, K&&, Args&&...); + +// Underlying type functions: +// container_type extract() &&; +// void replace(container_type&&); +// +// Erase functions: +// iterator erase(iterator); +// iterator erase(const_iterator); +// iterator erase(const_iterator first, const_iterator& last); +// template size_t erase(const K& key); +// +// Comparators (see std::map documentation). 
+// key_compare key_comp() const; +// value_compare value_comp() const; +// +// Search functions: +// template size_t count(const K&) const; +// template iterator find(const K&); +// template const_iterator find(const K&) const; +// template bool contains(const K&) const; +// template pair equal_range(const K&); +// template iterator lower_bound(const K&); +// template const_iterator lower_bound(const K&) const; +// template iterator upper_bound(const K&); +// template const_iterator upper_bound(const K&) const; +// +// General functions: +// void swap(flat_map&); +// +// Non-member operators: +// bool operator==(const flat_map&, const flat_map); +// bool operator!=(const flat_map&, const flat_map); +// bool operator<(const flat_map&, const flat_map); +// bool operator>(const flat_map&, const flat_map); +// bool operator>=(const flat_map&, const flat_map); +// bool operator<=(const flat_map&, const flat_map); +// +template , + class Container = std::vector>> +class flat_map : public ::webrtc::flat_containers_internal::flat_tree< + Key, + flat_containers_internal::GetFirst, + Compare, + Container> { + private: + using tree = typename ::webrtc::flat_containers_internal:: + flat_tree; + + public: + using key_type = typename tree::key_type; + using mapped_type = Mapped; + using value_type = typename tree::value_type; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using iterator = typename tree::iterator; + using const_iterator = typename tree::const_iterator; + using reverse_iterator = typename tree::reverse_iterator; + using const_reverse_iterator = typename tree::const_reverse_iterator; + using container_type = typename tree::container_type; + + // -------------------------------------------------------------------------- + // Lifetime and assignments. + // + // Note: we explicitly bring operator= in because otherwise + // flat_map<...> x; + // x = {...}; + // Would first create a flat_map and then move assign it. This most likely + // would be optimized away but still affects our debug builds. + + using tree::tree; + using tree::operator=; + + // Out-of-bound calls to at() will CHECK. + template + mapped_type& at(const K& key); + template + const mapped_type& at(const K& key) const; + + // -------------------------------------------------------------------------- + // Map-specific insert operations. + // + // Normal insert() functions are inherited from flat_tree. + // + // Assume that every operation invalidates iterators and references. + // Insertion of one element can take O(size). + + mapped_type& operator[](const key_type& key); + mapped_type& operator[](key_type&& key); + + template + std::pair insert_or_assign(K&& key, M&& obj); + template + iterator insert_or_assign(const_iterator hint, K&& key, M&& obj); + + template + std::enable_if_t::value, + std::pair> + try_emplace(K&& key, Args&&... args); + + template + std::enable_if_t::value, iterator> + try_emplace(const_iterator hint, K&& key, Args&&... args); + + // -------------------------------------------------------------------------- + // General operations. + // + // Assume that swap invalidates iterators and references. + + void swap(flat_map& other) noexcept; + + friend void swap(flat_map& lhs, flat_map& rhs) noexcept { lhs.swap(rhs); } +}; + +// ---------------------------------------------------------------------------- +// Lookups. 
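To make the quick reference above concrete, here is a small usage sketch. It is illustrative only and not part of the diff; the helper name FlatMapExample is mine, and the default comparator and std::vector backing store are assumed since the template arguments are not reproduced above.

#include <string>

#include "rtc_base/containers/flat_map.h"

void FlatMapExample() {
  webrtc::flat_map<std::string, int> counts;
  counts["ssrc"] = 1;                  // operator[] default-constructs, then assigns.
  counts.insert({"rtx", 2});           // insert() keeps the underlying vector sorted by key.
  counts.insert_or_assign("ssrc", 3);  // Overwrites the existing mapped value.

  if (counts.contains("rtx")) {
    // at() RTC_CHECKs on a missing key, unlike operator[].
    int rtx_count = counts.at("rtx");
    (void)rtx_count;
  }

  // Erasures are O(n); EraseIf() removes all matching entries in one pass.
  webrtc::EraseIf(counts, [](const auto& kv) { return kv.second > 2; });
}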
+ +template +template +auto flat_map::at(const K& key) + -> mapped_type& { + iterator found = tree::find(key); + RTC_CHECK(found != tree::end()); + return found->second; +} + +template +template +auto flat_map::at(const K& key) const + -> const mapped_type& { + const_iterator found = tree::find(key); + RTC_CHECK(found != tree::cend()); + return found->second; +} + +// ---------------------------------------------------------------------------- +// Insert operations. + +template +auto flat_map::operator[](const key_type& key) + -> mapped_type& { + iterator found = tree::lower_bound(key); + if (found == tree::end() || tree::key_comp()(key, found->first)) + found = tree::unsafe_emplace(found, key, mapped_type()); + return found->second; +} + +template +auto flat_map::operator[](key_type&& key) + -> mapped_type& { + iterator found = tree::lower_bound(key); + if (found == tree::end() || tree::key_comp()(key, found->first)) + found = tree::unsafe_emplace(found, std::move(key), mapped_type()); + return found->second; +} + +template +template +auto flat_map::insert_or_assign(K&& key, + M&& obj) + -> std::pair { + auto result = + tree::emplace_key_args(key, std::forward(key), std::forward(obj)); + if (!result.second) + result.first->second = std::forward(obj); + return result; +} + +template +template +auto flat_map::insert_or_assign( + const_iterator hint, + K&& key, + M&& obj) -> iterator { + auto result = tree::emplace_hint_key_args(hint, key, std::forward(key), + std::forward(obj)); + if (!result.second) + result.first->second = std::forward(obj); + return result.first; +} + +template +template +auto flat_map::try_emplace(K&& key, + Args&&... args) + -> std::enable_if_t::value, + std::pair> { + return tree::emplace_key_args( + key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); +} + +template +template +auto flat_map::try_emplace(const_iterator hint, + K&& key, + Args&&... args) + -> std::enable_if_t::value, iterator> { + return tree::emplace_hint_key_args( + hint, key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)) + .first; +} + +// ---------------------------------------------------------------------------- +// General operations. + +template +void flat_map::swap(flat_map& other) noexcept { + tree::swap(other); +} + +// Erases all elements that match predicate. It has O(size) complexity. +// +// flat_map last_times; +// ... +// EraseIf(last_times, +// [&](const auto& element) { return now - element.second > kLimit; }); + +// NOLINTNEXTLINE(misc-unused-using-decls) +using ::webrtc::flat_containers_internal::EraseIf; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_FLAT_MAP_H_ diff --git a/rtc_base/containers/flat_map_unittest.cc b/rtc_base/containers/flat_map_unittest.cc new file mode 100644 index 0000000000..8f0b77fc30 --- /dev/null +++ b/rtc_base/containers/flat_map_unittest.cc @@ -0,0 +1,454 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. 
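As a companion to the insert_or_assign() and try_emplace() definitions above, and to the unit tests that follow, a short sketch of how the two differ. Illustrative only, not part of the diff; the function name is mine.

#include <string>

#include "rtc_base/containers/flat_map.h"

void InsertOrAssignVsTryEmplace() {
  webrtc::flat_map<int, std::string> m;

  // insert_or_assign() always ends with the supplied value in the map:
  // it inserts when the key is absent and assigns when it is present.
  m.insert_or_assign(1, "first");
  m.insert_or_assign(1, "second");  // m.at(1) == "second".

  // try_emplace() only constructs the mapped value when the key is absent;
  // on an existing key it is a no-op and reports failure via .second.
  auto result = m.try_emplace(1, "third");
  // result.second == false and m.at(1) is still "second".
  (void)result;
}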
+ +#include "rtc_base/containers/flat_map.h" + +#include +#include +#include + +#include "rtc_base/containers/move_only_int.h" +#include "test/gmock.h" +#include "test/gtest.h" + +// A flat_map is basically a interface to flat_tree. So several basic +// operations are tested to make sure things are set up properly, but the bulk +// of the tests are in flat_tree_unittests.cc. + +using ::testing::ElementsAre; + +namespace webrtc { + +namespace { + +struct Unsortable { + int value; +}; + +bool operator==(const Unsortable& lhs, const Unsortable& rhs) { + return lhs.value == rhs.value; +} + +bool operator<(const Unsortable& lhs, const Unsortable& rhs) = delete; +bool operator<=(const Unsortable& lhs, const Unsortable& rhs) = delete; +bool operator>(const Unsortable& lhs, const Unsortable& rhs) = delete; +bool operator>=(const Unsortable& lhs, const Unsortable& rhs) = delete; + +TEST(FlatMap, IncompleteType) { + struct A { + using Map = flat_map; + int data; + Map set_with_incomplete_type; + Map::iterator it; + Map::const_iterator cit; + + // We do not declare operator< because clang complains that it's unused. + }; + + A a; +} + +TEST(FlatMap, RangeConstructor) { + flat_map::value_type input_vals[] = { + {1, 1}, {1, 2}, {1, 3}, {2, 1}, {2, 2}, {2, 3}, {3, 1}, {3, 2}, {3, 3}}; + + flat_map first(std::begin(input_vals), std::end(input_vals)); + EXPECT_THAT(first, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 1), + std::make_pair(3, 1))); +} + +TEST(FlatMap, MoveConstructor) { + using pair = std::pair; + + flat_map original; + original.insert(pair(MoveOnlyInt(1), MoveOnlyInt(1))); + original.insert(pair(MoveOnlyInt(2), MoveOnlyInt(2))); + original.insert(pair(MoveOnlyInt(3), MoveOnlyInt(3))); + original.insert(pair(MoveOnlyInt(4), MoveOnlyInt(4))); + + flat_map moved(std::move(original)); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +TEST(FlatMap, VectorConstructor) { + using IntPair = std::pair; + using IntMap = flat_map; + std::vector vect{{1, 1}, {1, 2}, {2, 1}}; + IntMap map(std::move(vect)); + EXPECT_THAT(map, ElementsAre(IntPair(1, 1), IntPair(2, 1))); +} + +TEST(FlatMap, InitializerListConstructor) { + flat_map cont( + {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {1, 2}, {10, 10}, {8, 8}}); + EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2), + std::make_pair(3, 3), std::make_pair(4, 4), + std::make_pair(5, 5), std::make_pair(8, 8), + std::make_pair(10, 10))); +} + +TEST(FlatMap, SortedRangeConstructor) { + using PairType = std::pair; + using MapType = flat_map; + MapType::value_type input_vals[] = {{1, {1}}, {2, {1}}, {3, {1}}}; + MapType map(sorted_unique, std::begin(input_vals), std::end(input_vals)); + EXPECT_THAT( + map, ElementsAre(PairType(1, {1}), PairType(2, {1}), PairType(3, {1}))); +} + +TEST(FlatMap, SortedCopyFromVectorConstructor) { + using PairType = std::pair; + using MapType = flat_map; + std::vector vect{{1, {1}}, {2, {1}}}; + MapType map(sorted_unique, vect); + EXPECT_THAT(map, ElementsAre(PairType(1, {1}), PairType(2, {1}))); +} + +TEST(FlatMap, SortedMoveFromVectorConstructor) { + using PairType = std::pair; + using MapType = flat_map; + std::vector vect{{1, {1}}, {2, {1}}}; + MapType map(sorted_unique, std::move(vect)); + EXPECT_THAT(map, ElementsAre(PairType(1, {1}), PairType(2, {1}))); +} + +TEST(FlatMap, SortedInitializerListConstructor) { + using PairType = std::pair; + flat_map map( + 
sorted_unique, + {{1, {1}}, {2, {2}}, {3, {3}}, {4, {4}}, {5, {5}}, {8, {8}}, {10, {10}}}); + EXPECT_THAT(map, + ElementsAre(PairType(1, {1}), PairType(2, {2}), PairType(3, {3}), + PairType(4, {4}), PairType(5, {5}), PairType(8, {8}), + PairType(10, {10}))); +} + +TEST(FlatMap, InitializerListAssignment) { + flat_map cont; + cont = {{1, 1}, {2, 2}}; + EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); +} + +TEST(FlatMap, InsertFindSize) { + flat_map s; + s.insert(std::make_pair(1, 1)); + s.insert(std::make_pair(1, 1)); + s.insert(std::make_pair(2, 2)); + + EXPECT_EQ(2u, s.size()); + EXPECT_EQ(std::make_pair(1, 1), *s.find(1)); + EXPECT_EQ(std::make_pair(2, 2), *s.find(2)); + EXPECT_EQ(s.end(), s.find(7)); +} + +TEST(FlatMap, CopySwap) { + flat_map original; + original.insert({1, 1}); + original.insert({2, 2}); + EXPECT_THAT(original, + ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); + + flat_map copy(original); + EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); + + copy.erase(copy.begin()); + copy.insert({10, 10}); + EXPECT_THAT(copy, ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10))); + + original.swap(copy); + EXPECT_THAT(original, + ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10))); + EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); +} + +// operator[](const Key&) +TEST(FlatMap, SubscriptConstKey) { + flat_map m; + + // Default construct elements that don't exist yet. + int& s = m["a"]; + EXPECT_EQ(0, s); + EXPECT_EQ(1u, m.size()); + + // The returned mapped reference should refer into the map. + s = 22; + EXPECT_EQ(22, m["a"]); + + // Overwrite existing elements. + m["a"] = 44; + EXPECT_EQ(44, m["a"]); +} + +// operator[](Key&&) +TEST(FlatMap, SubscriptMoveOnlyKey) { + flat_map m; + + // Default construct elements that don't exist yet. + int& s = m[MoveOnlyInt(1)]; + EXPECT_EQ(0, s); + EXPECT_EQ(1u, m.size()); + + // The returned mapped reference should refer into the map. + s = 22; + EXPECT_EQ(22, m[MoveOnlyInt(1)]); + + // Overwrite existing elements. + m[MoveOnlyInt(1)] = 44; + EXPECT_EQ(44, m[MoveOnlyInt(1)]); +} + +// Mapped& at(const Key&) +// const Mapped& at(const Key&) const +TEST(FlatMap, AtFunction) { + flat_map m = {{1, "a"}, {2, "b"}}; + + // Basic Usage. + EXPECT_EQ("a", m.at(1)); + EXPECT_EQ("b", m.at(2)); + + // Const reference works. + const std::string& const_ref = webrtc::as_const(m).at(1); + EXPECT_EQ("a", const_ref); + + // Reference works, can operate on the string. + m.at(1)[0] = 'x'; + EXPECT_EQ("x", m.at(1)); + + // Out-of-bounds will CHECK. + EXPECT_DEATH_IF_SUPPORTED(m.at(-1), ""); + EXPECT_DEATH_IF_SUPPORTED({ m.at(-1)[0] = 'z'; }, ""); + + // Heterogeneous look-up works. + flat_map m2 = {{"a", 1}, {"b", 2}}; + EXPECT_EQ(1, m2.at(absl::string_view("a"))); + EXPECT_EQ(2, webrtc::as_const(m2).at(absl::string_view("b"))); +} + +// insert_or_assign(K&&, M&&) +TEST(FlatMap, InsertOrAssignMoveOnlyKey) { + flat_map m; + + // Initial insertion should return an iterator to the element and set the + // second pair member to |true|. The inserted key and value should be moved + // from. 
+ MoveOnlyInt key(1); + MoveOnlyInt val(22); + auto result = m.insert_or_assign(std::move(key), std::move(val)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(22, result.first->second.data()); + EXPECT_TRUE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val.data()); // moved from + + // Second call with same key should result in an assignment, overwriting the + // old value. Assignment should be indicated by setting the second pair member + // to |false|. Only the inserted value should be moved from, the key should be + // left intact. + key = MoveOnlyInt(1); + val = MoveOnlyInt(44); + result = m.insert_or_assign(std::move(key), std::move(val)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(44, result.first->second.data()); + EXPECT_FALSE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(0, val.data()); // moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.insert_or_assign(MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +// insert_or_assign(const_iterator hint, K&&, M&&) +TEST(FlatMap, InsertOrAssignMoveOnlyKeyWithHint) { + flat_map m; + + // Initial insertion should return an iterator to the element. The inserted + // key and value should be moved from. + MoveOnlyInt key(1); + MoveOnlyInt val(22); + auto result = m.insert_or_assign(m.end(), std::move(key), std::move(val)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(22, result->second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val.data()); // moved from + + // Second call with same key should result in an assignment, overwriting the + // old value. Only the inserted value should be moved from, the key should be + // left intact. + key = MoveOnlyInt(1); + val = MoveOnlyInt(44); + result = m.insert_or_assign(m.end(), std::move(key), std::move(val)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(44, result->second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(0, val.data()); // moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.insert_or_assign(map.end(), MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +// try_emplace(K&&, Args&&...) +TEST(FlatMap, TryEmplaceMoveOnlyKey) { + flat_map> m; + + // Trying to emplace into an empty map should succeed. Insertion should return + // an iterator to the element and set the second pair member to |true|. The + // inserted key and value should be moved from. + MoveOnlyInt key(1); + MoveOnlyInt val1(22); + MoveOnlyInt val2(44); + // Test piecewise construction of mapped_type. + auto result = m.try_emplace(std::move(key), std::move(val1), std::move(val2)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(22, result.first->second.first.data()); + EXPECT_EQ(44, result.first->second.second.data()); + EXPECT_TRUE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val1.data()); // moved from + EXPECT_EQ(0, val2.data()); // moved from + + // Second call with same key should result in a no-op, returning an iterator + // to the existing element and returning false as the second pair member. + // Key and values that were attempted to be inserted should be left intact. 
+ key = MoveOnlyInt(1); + auto paired_val = std::make_pair(MoveOnlyInt(33), MoveOnlyInt(55)); + // Test construction of mapped_type from pair. + result = m.try_emplace(std::move(key), std::move(paired_val)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(22, result.first->second.first.data()); + EXPECT_EQ(44, result.first->second.second.data()); + EXPECT_FALSE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(33, paired_val.first.data()); // not moved from + EXPECT_EQ(55, paired_val.second.data()); // not moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.try_emplace(MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +// try_emplace(const_iterator hint, K&&, Args&&...) +TEST(FlatMap, TryEmplaceMoveOnlyKeyWithHint) { + flat_map> m; + + // Trying to emplace into an empty map should succeed. Insertion should return + // an iterator to the element. The inserted key and value should be moved + // from. + MoveOnlyInt key(1); + MoveOnlyInt val1(22); + MoveOnlyInt val2(44); + // Test piecewise construction of mapped_type. + auto result = + m.try_emplace(m.end(), std::move(key), std::move(val1), std::move(val2)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(22, result->second.first.data()); + EXPECT_EQ(44, result->second.second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val1.data()); // moved from + EXPECT_EQ(0, val2.data()); // moved from + + // Second call with same key should result in a no-op, returning an iterator + // to the existing element. Key and values that were attempted to be inserted + // should be left intact. + key = MoveOnlyInt(1); + val1 = MoveOnlyInt(33); + val2 = MoveOnlyInt(55); + auto paired_val = std::make_pair(MoveOnlyInt(33), MoveOnlyInt(55)); + // Test construction of mapped_type from pair. + result = m.try_emplace(m.end(), std::move(key), std::move(paired_val)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(22, result->second.first.data()); + EXPECT_EQ(44, result->second.second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(33, paired_val.first.data()); // not moved from + EXPECT_EQ(55, paired_val.second.data()); // not moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.try_emplace(map.end(), MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +TEST(FlatMap, UsingTransparentCompare) { + using ExplicitInt = MoveOnlyInt; + flat_map m; + const auto& m1 = m; + int x = 0; + + // Check if we can use lookup functions without converting to key_type. + // Correctness is checked in flat_tree tests. + m.count(x); + m1.count(x); + m.find(x); + m1.find(x); + m.equal_range(x); + m1.equal_range(x); + m.lower_bound(x); + m1.lower_bound(x); + m.upper_bound(x); + m1.upper_bound(x); + m.erase(x); + + // Check if we broke overload resolution. 
+ m.emplace(ExplicitInt(0), 0); + m.emplace(ExplicitInt(1), 0); + m.erase(m.begin()); + m.erase(m.cbegin()); +} + +TEST(FlatMap, SupportsEraseIf) { + flat_map m; + m.insert(std::make_pair(MoveOnlyInt(1), MoveOnlyInt(1))); + m.insert(std::make_pair(MoveOnlyInt(2), MoveOnlyInt(2))); + m.insert(std::make_pair(MoveOnlyInt(3), MoveOnlyInt(3))); + m.insert(std::make_pair(MoveOnlyInt(4), MoveOnlyInt(4))); + m.insert(std::make_pair(MoveOnlyInt(5), MoveOnlyInt(5))); + + EraseIf(m, [to_be_removed = MoveOnlyInt(2)]( + const std::pair& e) { + return e.first == to_be_removed; + }); + + EXPECT_EQ(m.size(), 4u); + ASSERT_TRUE(m.find(MoveOnlyInt(1)) != m.end()); + ASSERT_FALSE(m.find(MoveOnlyInt(2)) != m.end()); + ASSERT_TRUE(m.find(MoveOnlyInt(3)) != m.end()); + ASSERT_TRUE(m.find(MoveOnlyInt(4)) != m.end()); + ASSERT_TRUE(m.find(MoveOnlyInt(5)) != m.end()); +} + +} // namespace +} // namespace webrtc diff --git a/rtc_base/containers/flat_set.h b/rtc_base/containers/flat_set.h new file mode 100644 index 0000000000..e088cc5314 --- /dev/null +++ b/rtc_base/containers/flat_set.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_FLAT_SET_H_ +#define RTC_BASE_CONTAINERS_FLAT_SET_H_ + +#include +#include + +#include "rtc_base/containers/flat_tree.h" +#include "rtc_base/containers/identity.h" + +namespace webrtc { + +// flat_set is a container with a std::set-like interface that stores its +// contents in a sorted container, by default a vector. +// +// Its implementation mostly tracks the corresponding standardization proposal +// https://wg21.link/P1222. +// +// +// PROS +// +// - Good memory locality. +// - Low overhead, especially for smaller sets. +// - Performance is good for more workloads than you might expect (see +// //base/containers/README.md in Chromium repository) +// - Supports C++14 set interface. +// +// CONS +// +// - Inserts and removals are O(n). +// +// IMPORTANT NOTES +// +// - Iterators are invalidated across mutations. +// - If possible, construct a flat_set in one operation by inserting into +// a container and moving that container into the flat_set constructor. +// - For multiple removals use base::EraseIf() which is O(n) rather than +// O(n * removed_items). +// +// QUICK REFERENCE +// +// Most of the core functionality is inherited from flat_tree. Please see +// flat_tree.h for more details for most of these functions. As a quick +// reference, the functions available are: +// +// Constructors (inputs need not be sorted): +// flat_set(const flat_set&); +// flat_set(flat_set&&); +// flat_set(InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_set(const container_type& items, +// const Compare& compare = Compare()); +// flat_set(container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. 
+// flat_set(std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Constructors (inputs need to be sorted): +// flat_set(sorted_unique_t, +// InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_set(sorted_unique_t, +// const container_type& items, +// const Compare& compare = Compare()); +// flat_set(sorted_unique_t, +// container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. +// flat_set(sorted_unique_t, +// std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Assignment functions: +// flat_set& operator=(const flat_set&); +// flat_set& operator=(flat_set&&); +// flat_set& operator=(initializer_list); +// +// Memory management functions: +// void reserve(size_t); +// size_t capacity() const; +// void shrink_to_fit(); +// +// Size management functions: +// void clear(); +// size_t size() const; +// size_t max_size() const; +// bool empty() const; +// +// Iterator functions: +// iterator begin(); +// const_iterator begin() const; +// const_iterator cbegin() const; +// iterator end(); +// const_iterator end() const; +// const_iterator cend() const; +// reverse_iterator rbegin(); +// const reverse_iterator rbegin() const; +// const_reverse_iterator crbegin() const; +// reverse_iterator rend(); +// const_reverse_iterator rend() const; +// const_reverse_iterator crend() const; +// +// Insert and accessor functions: +// pair insert(const key_type&); +// pair insert(key_type&&); +// void insert(InputIterator first, InputIterator last); +// iterator insert(const_iterator hint, const key_type&); +// iterator insert(const_iterator hint, key_type&&); +// pair emplace(Args&&...); +// iterator emplace_hint(const_iterator, Args&&...); +// +// Underlying type functions: +// container_type extract() &&; +// void replace(container_type&&); +// +// Erase functions: +// iterator erase(iterator); +// iterator erase(const_iterator); +// iterator erase(const_iterator first, const_iterator& last); +// template size_t erase(const K& key); +// +// Comparators (see std::set documentation). +// key_compare key_comp() const; +// value_compare value_comp() const; +// +// Search functions: +// template size_t count(const K&) const; +// template iterator find(const K&); +// template const_iterator find(const K&) const; +// template bool contains(const K&) const; +// template pair equal_range(K&); +// template iterator lower_bound(const K&); +// template const_iterator lower_bound(const K&) const; +// template iterator upper_bound(const K&); +// template const_iterator upper_bound(const K&) const; +// +// General functions: +// void swap(flat_set&); +// +// Non-member operators: +// bool operator==(const flat_set&, const flat_set); +// bool operator!=(const flat_set&, const flat_set); +// bool operator<(const flat_set&, const flat_set); +// bool operator>(const flat_set&, const flat_set); +// bool operator>=(const flat_set&, const flat_set); +// bool operator<=(const flat_set&, const flat_set); +// +template , + class Container = std::vector> +using flat_set = typename ::webrtc::flat_containers_internal:: + flat_tree; + +// ---------------------------------------------------------------------------- +// General operations. + +// Erases all elements that match predicate. It has O(size) complexity. +// +// flat_set numbers; +// ... 
+// EraseIf(numbers, [](int number) { return number % 2 == 1; }); + +// NOLINTNEXTLINE(misc-unused-using-decls) +using ::webrtc::flat_containers_internal::EraseIf; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_FLAT_SET_H_ diff --git a/rtc_base/containers/flat_set_unittest.cc b/rtc_base/containers/flat_set_unittest.cc new file mode 100644 index 0000000000..617db92440 --- /dev/null +++ b/rtc_base/containers/flat_set_unittest.cc @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#include "rtc_base/containers/flat_set.h" + +#include +#include +#include +#include + +#include "rtc_base/containers/move_only_int.h" +#include "test/gmock.h" +#include "test/gtest.h" + +// A flat_set is basically a interface to flat_tree. So several basic +// operations are tested to make sure things are set up properly, but the bulk +// of the tests are in flat_tree_unittests.cc. + +using ::testing::ElementsAre; + +namespace webrtc { +namespace { + +TEST(FlatSet, IncompleteType) { + struct A { + using Set = flat_set; + int data; + Set set_with_incomplete_type; + Set::iterator it; + Set::const_iterator cit; + + // We do not declare operator< because clang complains that it's unused. + }; + + A a; +} + +TEST(FlatSet, RangeConstructor) { + flat_set::value_type input_vals[] = {1, 1, 1, 2, 2, 2, 3, 3, 3}; + + flat_set cont(std::begin(input_vals), std::end(input_vals)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3)); +} + +TEST(FlatSet, MoveConstructor) { + int input_range[] = {1, 2, 3, 4}; + + flat_set original(std::begin(input_range), + std::end(input_range)); + flat_set moved(std::move(original)); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +TEST(FlatSet, InitializerListConstructor) { + flat_set cont({1, 2, 3, 4, 5, 6, 10, 8}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); +} + +TEST(FlatSet, InsertFindSize) { + flat_set s; + s.insert(1); + s.insert(1); + s.insert(2); + + EXPECT_EQ(2u, s.size()); + EXPECT_EQ(1, *s.find(1)); + EXPECT_EQ(2, *s.find(2)); + EXPECT_EQ(s.end(), s.find(7)); +} + +TEST(FlatSet, CopySwap) { + flat_set original; + original.insert(1); + original.insert(2); + EXPECT_THAT(original, ElementsAre(1, 2)); + + flat_set copy(original); + EXPECT_THAT(copy, ElementsAre(1, 2)); + + copy.erase(copy.begin()); + copy.insert(10); + EXPECT_THAT(copy, ElementsAre(2, 10)); + + original.swap(copy); + EXPECT_THAT(original, ElementsAre(2, 10)); + EXPECT_THAT(copy, ElementsAre(1, 2)); +} + +TEST(FlatSet, UsingTransparentCompare) { + using ExplicitInt = webrtc::MoveOnlyInt; + flat_set s; + const auto& s1 = s; + int x = 0; + + // Check if we can use lookup functions without converting to key_type. + // Correctness is checked in flat_tree tests. + s.count(x); + s1.count(x); + s.find(x); + s1.find(x); + s.equal_range(x); + s1.equal_range(x); + s.lower_bound(x); + s1.lower_bound(x); + s.upper_bound(x); + s1.upper_bound(x); + s.erase(x); + + // Check if we broke overload resolution. 
+ s.emplace(0); + s.emplace(1); + s.erase(s.begin()); + s.erase(s.cbegin()); +} + +TEST(FlatSet, SupportsEraseIf) { + flat_set s; + s.emplace(MoveOnlyInt(1)); + s.emplace(MoveOnlyInt(2)); + s.emplace(MoveOnlyInt(3)); + s.emplace(MoveOnlyInt(4)); + s.emplace(MoveOnlyInt(5)); + + EraseIf(s, [to_be_removed = MoveOnlyInt(2)](const MoveOnlyInt& elem) { + return elem == to_be_removed; + }); + + EXPECT_EQ(s.size(), 4u); + ASSERT_TRUE(s.find(MoveOnlyInt(1)) != s.end()); + ASSERT_FALSE(s.find(MoveOnlyInt(2)) != s.end()); + ASSERT_TRUE(s.find(MoveOnlyInt(3)) != s.end()); + ASSERT_TRUE(s.find(MoveOnlyInt(4)) != s.end()); + ASSERT_TRUE(s.find(MoveOnlyInt(5)) != s.end()); +} +} // namespace +} // namespace webrtc diff --git a/api/proxy.cc b/rtc_base/containers/flat_tree.cc similarity index 58% rename from api/proxy.cc rename to rtc_base/containers/flat_tree.cc index 67318e7dab..9e86db191a 100644 --- a/api/proxy.cc +++ b/rtc_base/containers/flat_tree.cc @@ -1,5 +1,5 @@ /* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,5 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "api/proxy.h" +// This implementation is borrowed from Chromium. +#include "rtc_base/containers/flat_tree.h" + +namespace webrtc { + +sorted_unique_t sorted_unique; + +} // namespace webrtc diff --git a/rtc_base/containers/flat_tree.h b/rtc_base/containers/flat_tree.h new file mode 100644 index 0000000000..1b02cce1b4 --- /dev/null +++ b/rtc_base/containers/flat_tree.h @@ -0,0 +1,1102 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_FLAT_TREE_H_ +#define RTC_BASE_CONTAINERS_FLAT_TREE_H_ + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "rtc_base/checks.h" +#include "rtc_base/containers/as_const.h" +#include "rtc_base/containers/not_fn.h" +#include "rtc_base/containers/void_t.h" +#include "rtc_base/system/no_unique_address.h" + +namespace webrtc { +// Tag type that allows skipping the sort_and_unique step when constructing a +// flat_tree in case the underlying container is already sorted and has no +// duplicate elements. +struct sorted_unique_t { + constexpr sorted_unique_t() = default; +}; +extern sorted_unique_t sorted_unique; + +namespace flat_containers_internal { + +// Helper functions used in RTC_DCHECKs below to make sure that inputs tagged +// with sorted_unique are indeed sorted and unique. +template +constexpr bool is_sorted_and_unique(const Range& range, Comp comp) { + // Being unique implies that there are no adjacent elements that + // compare equal. So this checks that each element is strictly less + // than the element after it. + return absl::c_adjacent_find(range, webrtc::not_fn(comp)) == std::end(range); +} + +// This is a convenience trait inheriting from std::true_type if Iterator is at +// least a ForwardIterator and thus supports multiple passes over a range. 
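The is_sorted_and_unique() helper above relies on the observation that a strictly increasing range has no adjacent pair (a, b) with !(a < b). A standalone sketch of that check, illustrative only: it uses std::not_fn from C++17 in place of webrtc::not_fn and a plain std::vector<int>.

#include <algorithm>
#include <cassert>
#include <functional>
#include <vector>

bool IsSortedAndUnique(const std::vector<int>& range) {
  // adjacent_find() returns the first pair (a, b) with !(a < b), i.e. a >= b.
  // Finding none means every element is strictly less than its successor.
  return std::adjacent_find(range.begin(), range.end(),
                            std::not_fn(std::less<int>())) == range.end();
}

int main() {
  assert(IsSortedAndUnique({1, 2, 3}));
  assert(!IsSortedAndUnique({1, 2, 2, 3}));  // Duplicate: 2 >= 2.
  assert(!IsSortedAndUnique({2, 1, 3}));     // Out of order: 2 >= 1.
  return 0;
}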
+template +using is_multipass = + std::is_base_of::iterator_category>; + +// Uses SFINAE to detect whether type has is_transparent member. +template +struct IsTransparentCompare : std::false_type {}; +template +struct IsTransparentCompare> + : std::true_type {}; + +// Helper inspired by C++20's std::to_array to convert a C-style array to a +// std::array. As opposed to the C++20 version this implementation does not +// provide an overload for rvalues and does not strip cv qualifers from the +// returned std::array::value_type. The returned value_type needs to be +// specified explicitly, allowing the construction of std::arrays with const +// elements. +// +// Reference: https://en.cppreference.com/w/cpp/container/array/to_array +template +constexpr std::array ToArrayImpl(const T (&data)[N], + std::index_sequence) { + return {{data[I]...}}; +} + +template +constexpr std::array ToArray(const T (&data)[N]) { + return ToArrayImpl(data, std::make_index_sequence()); +} + +// std::pair's operator= is not constexpr prior to C++20. Thus we need this +// small helper to invoke operator= on the .first and .second member explicitly. +template +constexpr void Assign(T& lhs, T&& rhs) { + lhs = std::move(rhs); +} + +template +constexpr void Assign(std::pair& lhs, std::pair&& rhs) { + Assign(lhs.first, std::move(rhs.first)); + Assign(lhs.second, std::move(rhs.second)); +} + +// constexpr swap implementation. std::swap is not constexpr prior to C++20. +template +constexpr void Swap(T& lhs, T& rhs) { + T tmp = std::move(lhs); + Assign(lhs, std::move(rhs)); + Assign(rhs, std::move(tmp)); +} + +// constexpr prev implementation. std::prev is not constexpr prior to C++17. +template +constexpr BidirIt Prev(BidirIt it) { + return --it; +} + +// constexpr next implementation. std::next is not constexpr prior to C++17. +template +constexpr InputIt Next(InputIt it) { + return ++it; +} + +// constexpr sort implementation. std::sort is not constexpr prior to C++20. +// While insertion sort has a quadratic worst case complexity, it was chosen +// because it has linear complexity for nearly sorted data, is stable, and +// simple to implement. +template +constexpr void InsertionSort(BidirIt first, BidirIt last, const Compare& comp) { + if (first == last) + return; + + for (auto it = Next(first); it != last; ++it) { + for (auto curr = it; curr != first && comp(*curr, *Prev(curr)); --curr) + Swap(*curr, *Prev(curr)); + } +} + +// Implementation ------------------------------------------------------------- + +// Implementation for the sorted associative flat_set and flat_map using a +// sorted vector as the backing store. Do not use directly. +// +// The use of "value" in this is like std::map uses, meaning it's the thing +// contained (in the case of map it's a pair). The Key is how +// things are looked up. In the case of a set, Key == Value. In the case of +// a map, the Key is a component of a Value. +// +// The helper class GetKeyFromValue provides the means to extract a key from a +// value for comparison purposes. It should implement: +// const Key& operator()(const Value&). +template +class flat_tree { + public: + // -------------------------------------------------------------------------- + // Types. + // + using key_type = Key; + using key_compare = KeyCompare; + using value_type = typename Container::value_type; + + // Wraps the templated key comparison to compare values. 
+ struct value_compare { + constexpr bool operator()(const value_type& left, + const value_type& right) const { + GetKeyFromValue extractor; + return comp(extractor(left), extractor(right)); + } + + RTC_NO_UNIQUE_ADDRESS key_compare comp; + }; + + using pointer = typename Container::pointer; + using const_pointer = typename Container::const_pointer; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using iterator = typename Container::iterator; + using const_iterator = typename Container::const_iterator; + using reverse_iterator = typename Container::reverse_iterator; + using const_reverse_iterator = typename Container::const_reverse_iterator; + using container_type = Container; + + // -------------------------------------------------------------------------- + // Lifetime. + // + // Constructors that take range guarantee O(N * log^2(N)) + O(N) complexity + // and take O(N * log(N)) + O(N) if extra memory is available (N is a range + // length). + // + // Assume that move constructors invalidate iterators and references. + // + // The constructors that take ranges, lists, and vectors do not require that + // the input be sorted. + // + // When passing the webrtc::sorted_unique tag as the first argument no sort + // and unique step takes places. This is useful if the underlying container + // already has the required properties. + + flat_tree() = default; + flat_tree(const flat_tree&) = default; + flat_tree(flat_tree&&) = default; + + explicit flat_tree(const key_compare& comp); + + template + flat_tree(InputIterator first, + InputIterator last, + const key_compare& comp = key_compare()); + + flat_tree(const container_type& items, + const key_compare& comp = key_compare()); + + explicit flat_tree(container_type&& items, + const key_compare& comp = key_compare()); + + flat_tree(std::initializer_list ilist, + const key_compare& comp = key_compare()); + + template + flat_tree(sorted_unique_t, + InputIterator first, + InputIterator last, + const key_compare& comp = key_compare()); + + flat_tree(sorted_unique_t, + const container_type& items, + const key_compare& comp = key_compare()); + + constexpr flat_tree(sorted_unique_t, + container_type&& items, + const key_compare& comp = key_compare()); + + flat_tree(sorted_unique_t, + std::initializer_list ilist, + const key_compare& comp = key_compare()); + + ~flat_tree() = default; + + // -------------------------------------------------------------------------- + // Assignments. + // + // Assume that move assignment invalidates iterators and references. + + flat_tree& operator=(const flat_tree&) = default; + flat_tree& operator=(flat_tree&&) = default; + // Takes the first if there are duplicates in the initializer list. + flat_tree& operator=(std::initializer_list ilist); + + // -------------------------------------------------------------------------- + // Memory management. + // + // Beware that shrink_to_fit() simply forwards the request to the + // container_type and its implementation is free to optimize otherwise and + // leave capacity() to be greater that its size. + // + // reserve() and shrink_to_fit() invalidate iterators and references. + + void reserve(size_type new_capacity); + size_type capacity() const; + void shrink_to_fit(); + + // -------------------------------------------------------------------------- + // Size management. 
+ // + // clear() leaves the capacity() of the flat_tree unchanged. + + void clear(); + + constexpr size_type size() const; + constexpr size_type max_size() const; + constexpr bool empty() const; + + // -------------------------------------------------------------------------- + // Iterators. + // + // Iterators follow the ordering defined by the key comparator used in + // construction of the flat_tree. + + iterator begin(); + constexpr const_iterator begin() const; + const_iterator cbegin() const; + + iterator end(); + constexpr const_iterator end() const; + const_iterator cend() const; + + reverse_iterator rbegin(); + const_reverse_iterator rbegin() const; + const_reverse_iterator crbegin() const; + + reverse_iterator rend(); + const_reverse_iterator rend() const; + const_reverse_iterator crend() const; + + // -------------------------------------------------------------------------- + // Insert operations. + // + // Assume that every operation invalidates iterators and references. + // Insertion of one element can take O(size). Capacity of flat_tree grows in + // an implementation-defined manner. + // + // NOTE: Prefer to build a new flat_tree from a std::vector (or similar) + // instead of calling insert() repeatedly. + + std::pair insert(const value_type& val); + std::pair insert(value_type&& val); + + iterator insert(const_iterator position_hint, const value_type& x); + iterator insert(const_iterator position_hint, value_type&& x); + + // This method inserts the values from the range [first, last) into the + // current tree. + template + void insert(InputIterator first, InputIterator last); + + template + std::pair emplace(Args&&... args); + + template + iterator emplace_hint(const_iterator position_hint, Args&&... args); + + // -------------------------------------------------------------------------- + // Underlying type operations. + // + // Assume that either operation invalidates iterators and references. + + // Extracts the container_type and returns it to the caller. Ensures that + // `this` is `empty()` afterwards. + container_type extract() &&; + + // Replaces the container_type with `body`. Expects that `body` is sorted + // and has no repeated elements with regard to value_comp(). + void replace(container_type&& body); + + // -------------------------------------------------------------------------- + // Erase operations. + // + // Assume that every operation invalidates iterators and references. + // + // erase(position), erase(first, last) can take O(size). + // erase(key) may take O(size) + O(log(size)). + // + // Prefer webrtc::EraseIf() or some other variation on erase(remove(), end()) + // idiom when deleting multiple non-consecutive elements. + + iterator erase(iterator position); + // Artificially templatized to break ambiguity if `iterator` and + // `const_iterator` are the same type. + template + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + template + size_type erase(const K& key); + + // -------------------------------------------------------------------------- + // Comparators. + + constexpr key_compare key_comp() const; + constexpr value_compare value_comp() const; + + // -------------------------------------------------------------------------- + // Search operations. + // + // Search operations have O(log(size)) complexity. 
+ + template + size_type count(const K& key) const; + + template + iterator find(const K& key); + + template + const_iterator find(const K& key) const; + + template + bool contains(const K& key) const; + + template + std::pair equal_range(const K& key); + + template + std::pair equal_range(const K& key) const; + + template + iterator lower_bound(const K& key); + + template + const_iterator lower_bound(const K& key) const; + + template + iterator upper_bound(const K& key); + + template + const_iterator upper_bound(const K& key) const; + + // -------------------------------------------------------------------------- + // General operations. + // + // Assume that swap invalidates iterators and references. + // + // Implementation note: currently we use operator==() and operator<() on + // std::vector, because they have the same contract we need, so we use them + // directly for brevity and in case it is more optimal than calling equal() + // and lexicograhpical_compare(). If the underlying container type is changed, + // this code may need to be modified. + + void swap(flat_tree& other) noexcept; + + friend bool operator==(const flat_tree& lhs, const flat_tree& rhs) { + return lhs.body_ == rhs.body_; + } + + friend bool operator!=(const flat_tree& lhs, const flat_tree& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const flat_tree& lhs, const flat_tree& rhs) { + return lhs.body_ < rhs.body_; + } + + friend bool operator>(const flat_tree& lhs, const flat_tree& rhs) { + return rhs < lhs; + } + + friend bool operator>=(const flat_tree& lhs, const flat_tree& rhs) { + return !(lhs < rhs); + } + + friend bool operator<=(const flat_tree& lhs, const flat_tree& rhs) { + return !(lhs > rhs); + } + + friend void swap(flat_tree& lhs, flat_tree& rhs) noexcept { lhs.swap(rhs); } + + protected: + // Emplaces a new item into the tree that is known not to be in it. This + // is for implementing map operator[]. + template + iterator unsafe_emplace(const_iterator position, Args&&... args); + + // Attempts to emplace a new element with key |key|. Only if |key| is not yet + // present, construct value_type from |args| and insert it. Returns an + // iterator to the element with key |key| and a bool indicating whether an + // insertion happened. + template + std::pair emplace_key_args(const K& key, Args&&... args); + + // Similar to |emplace_key_args|, but checks |hint| first as a possible + // insertion position. + template + std::pair emplace_hint_key_args(const_iterator hint, + const K& key, + Args&&... args); + + private: + // Helper class for e.g. lower_bound that can compare a value on the left + // to a key on the right. + struct KeyValueCompare { + // The key comparison object must outlive this class. + explicit KeyValueCompare(const key_compare& comp) : comp_(comp) {} + + template + bool operator()(const T& lhs, const U& rhs) const { + return comp_(extract_if_value_type(lhs), extract_if_value_type(rhs)); + } + + private: + const key_type& extract_if_value_type(const value_type& v) const { + GetKeyFromValue extractor; + return extractor(v); + } + + template + const K& extract_if_value_type(const K& k) const { + return k; + } + + const key_compare& comp_; + }; + + iterator const_cast_it(const_iterator c_it) { + auto distance = std::distance(cbegin(), c_it); + return std::next(begin(), distance); + } + + // This method is inspired by both std::map::insert(P&&) and + // std::map::insert_or_assign(const K&, V&&). 
It inserts val if an equivalent + // element is not present yet, otherwise it overwrites. It returns an iterator + // to the modified element and a flag indicating whether insertion or + // assignment happened. + template + std::pair insert_or_assign(V&& val) { + auto position = lower_bound(GetKeyFromValue()(val)); + + if (position == end() || value_comp()(val, *position)) + return {body_.emplace(position, std::forward(val)), true}; + + *position = std::forward(val); + return {position, false}; + } + + // This method is similar to insert_or_assign, with the following differences: + // - Instead of searching [begin(), end()) it only searches [first, last). + // - In case no equivalent element is found, val is appended to the end of the + // underlying body and an iterator to the next bigger element in [first, + // last) is returned. + template + std::pair append_or_assign(iterator first, + iterator last, + V&& val) { + auto position = std::lower_bound(first, last, val, value_comp()); + + if (position == last || value_comp()(val, *position)) { + // emplace_back might invalidate position, which is why distance needs to + // be cached. + const difference_type distance = std::distance(begin(), position); + body_.emplace_back(std::forward(val)); + return {std::next(begin(), distance), true}; + } + + *position = std::forward(val); + return {position, false}; + } + + // This method is similar to insert, with the following differences: + // - Instead of searching [begin(), end()) it only searches [first, last). + // - In case no equivalent element is found, val is appended to the end of the + // underlying body and an iterator to the next bigger element in [first, + // last) is returned. + template + std::pair append_unique(iterator first, + iterator last, + V&& val) { + auto position = std::lower_bound(first, last, val, value_comp()); + + if (position == last || value_comp()(val, *position)) { + // emplace_back might invalidate position, which is why distance needs to + // be cached. + const difference_type distance = std::distance(begin(), position); + body_.emplace_back(std::forward(val)); + return {std::next(begin(), distance), true}; + } + + return {position, false}; + } + + void sort_and_unique(iterator first, iterator last) { + // Preserve stability for the unique code below. + std::stable_sort(first, last, value_comp()); + + // lhs is already <= rhs due to sort, therefore !(lhs < rhs) <=> lhs == rhs. + auto equal_comp = webrtc::not_fn(value_comp()); + erase(std::unique(first, last, equal_comp), last); + } + + void sort_and_unique() { sort_and_unique(begin(), end()); } + + // To support comparators that may not be possible to default-construct, we + // have to store an instance of Compare. Since Compare commonly is stateless, + // we use the RTC_NO_UNIQUE_ADDRESS attribute to save space. + RTC_NO_UNIQUE_ADDRESS key_compare comp_; + // Declare after |key_compare_comp_| to workaround GCC ICE. For details + // see https://crbug.com/1156268 + container_type body_; + + // If the compare is not transparent we want to construct key_type once. + template + using KeyTypeOrK = typename std:: + conditional::value, K, key_type>::type; +}; + +// ---------------------------------------------------------------------------- +// Lifetime. 
+ +template +flat_tree::flat_tree( + const KeyCompare& comp) + : comp_(comp) {} + +template +template +flat_tree::flat_tree( + InputIterator first, + InputIterator last, + const KeyCompare& comp) + : comp_(comp), body_(first, last) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + const container_type& items, + const KeyCompare& comp) + : comp_(comp), body_(items) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + container_type&& items, + const KeyCompare& comp) + : comp_(comp), body_(std::move(items)) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + std::initializer_list ilist, + const KeyCompare& comp) + : flat_tree(std::begin(ilist), std::end(ilist), comp) {} + +template +template +flat_tree::flat_tree( + sorted_unique_t, + InputIterator first, + InputIterator last, + const KeyCompare& comp) + : comp_(comp), body_(first, last) { + RTC_DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +flat_tree::flat_tree( + sorted_unique_t, + const container_type& items, + const KeyCompare& comp) + : comp_(comp), body_(items) { + RTC_DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +constexpr flat_tree::flat_tree( + sorted_unique_t, + container_type&& items, + const KeyCompare& comp) + : comp_(comp), body_(std::move(items)) { + RTC_DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +flat_tree::flat_tree( + sorted_unique_t, + std::initializer_list ilist, + const KeyCompare& comp) + : flat_tree(sorted_unique, std::begin(ilist), std::end(ilist), comp) {} + +// ---------------------------------------------------------------------------- +// Assignments. + +template +auto flat_tree::operator=( + std::initializer_list ilist) -> flat_tree& { + body_ = ilist; + sort_and_unique(); + return *this; +} + +// ---------------------------------------------------------------------------- +// Memory management. + +template +void flat_tree::reserve( + size_type new_capacity) { + body_.reserve(new_capacity); +} + +template +auto flat_tree::capacity() const + -> size_type { + return body_.capacity(); +} + +template +void flat_tree::shrink_to_fit() { + body_.shrink_to_fit(); +} + +// ---------------------------------------------------------------------------- +// Size management. + +template +void flat_tree::clear() { + body_.clear(); +} + +template +constexpr auto flat_tree::size() + const -> size_type { + return body_.size(); +} + +template +constexpr auto +flat_tree::max_size() const + -> size_type { + return body_.max_size(); +} + +template +constexpr bool flat_tree::empty() + const { + return body_.empty(); +} + +// ---------------------------------------------------------------------------- +// Iterators. 
+ +template +auto flat_tree::begin() + -> iterator { + return body_.begin(); +} + +template +constexpr auto flat_tree::begin() + const -> const_iterator { + return std::begin(body_); +} + +template +auto flat_tree::cbegin() const + -> const_iterator { + return body_.cbegin(); +} + +template +auto flat_tree::end() -> iterator { + return body_.end(); +} + +template +constexpr auto flat_tree::end() + const -> const_iterator { + return std::end(body_); +} + +template +auto flat_tree::cend() const + -> const_iterator { + return body_.cend(); +} + +template +auto flat_tree::rbegin() + -> reverse_iterator { + return body_.rbegin(); +} + +template +auto flat_tree::rbegin() const + -> const_reverse_iterator { + return body_.rbegin(); +} + +template +auto flat_tree::crbegin() const + -> const_reverse_iterator { + return body_.crbegin(); +} + +template +auto flat_tree::rend() + -> reverse_iterator { + return body_.rend(); +} + +template +auto flat_tree::rend() const + -> const_reverse_iterator { + return body_.rend(); +} + +template +auto flat_tree::crend() const + -> const_reverse_iterator { + return body_.crend(); +} + +// ---------------------------------------------------------------------------- +// Insert operations. +// +// Currently we use position_hint the same way as eastl or boost: +// https://github.com/electronicarts/EASTL/blob/master/include/EASTL/vector_set.h#L493 + +template +auto flat_tree::insert( + const value_type& val) -> std::pair { + return emplace_key_args(GetKeyFromValue()(val), val); +} + +template +auto flat_tree::insert( + value_type&& val) -> std::pair { + return emplace_key_args(GetKeyFromValue()(val), std::move(val)); +} + +template +auto flat_tree::insert( + const_iterator position_hint, + const value_type& val) -> iterator { + return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), val) + .first; +} + +template +auto flat_tree::insert( + const_iterator position_hint, + value_type&& val) -> iterator { + return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), + std::move(val)) + .first; +} + +template +template +void flat_tree::insert( + InputIterator first, + InputIterator last) { + if (first == last) + return; + + // Dispatch to single element insert if the input range contains a single + // element. + if (is_multipass() && std::next(first) == last) { + insert(end(), *first); + return; + } + + // Provide a convenience lambda to obtain an iterator pointing past the last + // old element. This needs to be dymanic due to possible re-allocations. + auto middle = [this, size = size()] { return std::next(begin(), size); }; + + // For batch updates initialize the first insertion point. + difference_type pos_first_new = size(); + + // Loop over the input range while appending new values and overwriting + // existing ones, if applicable. Keep track of the first insertion point. + for (; first != last; ++first) { + std::pair result = append_unique(begin(), middle(), *first); + if (result.second) { + pos_first_new = + std::min(pos_first_new, std::distance(begin(), result.first)); + } + } + + // The new elements might be unordered and contain duplicates, so post-process + // the just inserted elements and merge them with the rest, inserting them at + // the previously found spot. + sort_and_unique(middle(), end()); + std::inplace_merge(std::next(begin(), pos_first_new), middle(), end(), + value_comp()); +} + +template +template +auto flat_tree::emplace( + Args&&... 
args) -> std::pair { + return insert(value_type(std::forward(args)...)); +} + +template +template +auto flat_tree::emplace_hint( + const_iterator position_hint, + Args&&... args) -> iterator { + return insert(position_hint, value_type(std::forward(args)...)); +} + +// ---------------------------------------------------------------------------- +// Underlying type operations. + +template +auto flat_tree:: + extract() && -> container_type { + return std::exchange(body_, container_type()); +} + +template +void flat_tree::replace( + container_type&& body) { + // Ensure that `body` is sorted and has no repeated elements according to + // `value_comp()`. + RTC_DCHECK(is_sorted_and_unique(body, value_comp())); + body_ = std::move(body); +} + +// ---------------------------------------------------------------------------- +// Erase operations. + +template +auto flat_tree::erase( + iterator position) -> iterator { + RTC_CHECK(position != body_.end()); + return body_.erase(position); +} + +template +template +auto flat_tree::erase( + const_iterator position) -> iterator { + RTC_CHECK(position != body_.end()); + return body_.erase(position); +} + +template +template +auto flat_tree::erase(const K& val) + -> size_type { + auto eq_range = equal_range(val); + auto res = std::distance(eq_range.first, eq_range.second); + erase(eq_range.first, eq_range.second); + return res; +} + +template +auto flat_tree::erase( + const_iterator first, + const_iterator last) -> iterator { + return body_.erase(first, last); +} + +// ---------------------------------------------------------------------------- +// Comparators. + +template +constexpr auto +flat_tree::key_comp() const + -> key_compare { + return comp_; +} + +template +constexpr auto +flat_tree::value_comp() const + -> value_compare { + return value_compare{comp_}; +} + +// ---------------------------------------------------------------------------- +// Search operations. + +template +template +auto flat_tree::count( + const K& key) const -> size_type { + auto eq_range = equal_range(key); + return std::distance(eq_range.first, eq_range.second); +} + +template +template +auto flat_tree::find(const K& key) + -> iterator { + return const_cast_it(webrtc::as_const(*this).find(key)); +} + +template +template +auto flat_tree::find( + const K& key) const -> const_iterator { + auto eq_range = equal_range(key); + return (eq_range.first == eq_range.second) ? 
end() : eq_range.first; +} + +template +template +bool flat_tree::contains( + const K& key) const { + auto lower = lower_bound(key); + return lower != end() && !comp_(key, GetKeyFromValue()(*lower)); +} + +template +template +auto flat_tree::equal_range( + const K& key) -> std::pair { + auto res = webrtc::as_const(*this).equal_range(key); + return {const_cast_it(res.first), const_cast_it(res.second)}; +} + +template +template +auto flat_tree::equal_range( + const K& key) const -> std::pair { + auto lower = lower_bound(key); + + KeyValueCompare comp(comp_); + if (lower == end() || comp(key, *lower)) + return {lower, lower}; + + return {lower, std::next(lower)}; +} + +template +template +auto flat_tree::lower_bound( + const K& key) -> iterator { + return const_cast_it(webrtc::as_const(*this).lower_bound(key)); +} + +template +template +auto flat_tree::lower_bound( + const K& key) const -> const_iterator { + static_assert(std::is_convertible&, const K&>::value, + "Requested type cannot be bound to the container's key_type " + "which is required for a non-transparent compare."); + + const KeyTypeOrK& key_ref = key; + + KeyValueCompare comp(comp_); + return absl::c_lower_bound(*this, key_ref, comp); +} + +template +template +auto flat_tree::upper_bound( + const K& key) -> iterator { + return const_cast_it(webrtc::as_const(*this).upper_bound(key)); +} + +template +template +auto flat_tree::upper_bound( + const K& key) const -> const_iterator { + static_assert(std::is_convertible&, const K&>::value, + "Requested type cannot be bound to the container's key_type " + "which is required for a non-transparent compare."); + + const KeyTypeOrK& key_ref = key; + + KeyValueCompare comp(comp_); + return absl::c_upper_bound(*this, key_ref, comp); +} + +// ---------------------------------------------------------------------------- +// General operations. + +template +void flat_tree::swap( + flat_tree& other) noexcept { + std::swap(*this, other); +} + +template +template +auto flat_tree::unsafe_emplace( + const_iterator position, + Args&&... args) -> iterator { + return body_.emplace(position, std::forward(args)...); +} + +template +template +auto flat_tree::emplace_key_args( + const K& key, + Args&&... args) -> std::pair { + auto lower = lower_bound(key); + if (lower == end() || comp_(key, GetKeyFromValue()(*lower))) + return {unsafe_emplace(lower, std::forward(args)...), true}; + return {lower, false}; +} + +template +template +auto flat_tree:: + emplace_hint_key_args(const_iterator hint, const K& key, Args&&... args) + -> std::pair { + KeyValueCompare comp(comp_); + if ((hint == begin() || comp(*std::prev(hint), key))) { + if (hint == end() || comp(key, *hint)) { + // *(hint - 1) < key < *hint => key did not exist and hint is correct. + return {unsafe_emplace(hint, std::forward(args)...), true}; + } + if (!comp(*hint, key)) { + // key == *hint => no-op, return correct hint. + return {const_cast_it(hint), false}; + } + } + // hint was not helpful, dispatch to hintless version. + return emplace_key_args(key, std::forward(args)...); +} + +// ---------------------------------------------------------------------------- +// Free functions. + +// Erases all elements that match predicate. It has O(size) complexity. 
+template +size_t EraseIf( + webrtc::flat_containers_internal:: + flat_tree& container, + Predicate pred) { + auto it = std::remove_if(container.begin(), container.end(), + std::forward(pred)); + size_t removed = std::distance(it, container.end()); + container.erase(it, container.end()); + return removed; +} + +} // namespace flat_containers_internal +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_FLAT_TREE_H_ diff --git a/rtc_base/containers/flat_tree_unittest.cc b/rtc_base/containers/flat_tree_unittest.cc new file mode 100644 index 0000000000..9bb803d16d --- /dev/null +++ b/rtc_base/containers/flat_tree_unittest.cc @@ -0,0 +1,1484 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#include "rtc_base/containers/flat_tree.h" + +// Following tests are ported and extended tests from libcpp for std::set. +// They can be found here: +// https://github.com/llvm/llvm-project/tree/main/libcxx/test/std/containers/associative/set +// +// Not ported tests: +// * No tests with PrivateConstructor and std::less<> changed to std::less +// These tests have to do with C++14 std::less<> +// http://en.cppreference.com/w/cpp/utility/functional/less_void +// and add support for templated versions of lookup functions. +// Because we use same implementation, we figured that it's OK just to check +// compilation and this is what we do in flat_set_unittest/flat_map_unittest. +// * No tests for max_size() +// Has to do with allocator support. +// * No tests with DefaultOnly. +// Standard containers allocate each element in the separate node on the heap +// and then manipulate these nodes. Flat containers store their elements in +// contiguous memory and move them around, type is required to be movable. +// * No tests for N3644. +// This proposal suggests that all default constructed iterators compare +// equal. Currently we use std::vector iterators and they don't implement +// this. +// * No tests with min_allocator and no tests counting allocations. +// Flat sets currently don't support allocators. 
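// --- Editorial usage sketch, not part of the patch. It shows the EraseIf()
// free function defined at the end of flat_tree.h above, which removes all
// matching elements in a single O(size) pass (a loop of erase(key) calls
// would cost O(size) per removed element). The IntSet alias is assumed for
// illustration only and mirrors the test aliases below.

#include <cstddef>
#include <functional>
#include <vector>

#include "rtc_base/containers/flat_tree.h"
#include "rtc_base/containers/identity.h"

using IntSet = webrtc::flat_containers_internal::
    flat_tree<int, webrtc::identity, std::less<int>, std::vector<int>>;

void DropEvenElements() {
  IntSet set({1, 2, 3, 4, 5});
  // Removes 2 and 4; `removed` is 2 and `set` now holds {1, 3, 5}.
  size_t removed = webrtc::flat_containers_internal::EraseIf(
      set, [](int value) { return value % 2 == 0; });
  (void)removed;
}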
+ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtc_base/containers/identity.h" +#include "rtc_base/containers/move_only_int.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace flat_containers_internal { +namespace { + +template +class InputIterator { + public: + using iterator_category = std::input_iterator_tag; + using value_type = typename std::iterator_traits::value_type; + using difference_type = typename std::iterator_traits::difference_type; + using pointer = It; + using reference = typename std::iterator_traits::reference; + + InputIterator() : it_() {} + explicit InputIterator(It it) : it_(it) {} + + reference operator*() const { return *it_; } + pointer operator->() const { return it_; } + + InputIterator& operator++() { + ++it_; + return *this; + } + InputIterator operator++(int) { + InputIterator tmp(*this); + ++(*this); + return tmp; + } + + friend bool operator==(const InputIterator& lhs, const InputIterator& rhs) { + return lhs.it_ == rhs.it_; + } + friend bool operator!=(const InputIterator& lhs, const InputIterator& rhs) { + return !(lhs == rhs); + } + + private: + It it_; +}; + +template +InputIterator MakeInputIterator(It it) { + return InputIterator(it); +} + +class Emplaceable { + public: + Emplaceable() : Emplaceable(0, 0.0) {} + Emplaceable(int i, double d) : int_(i), double_(d) {} + Emplaceable(Emplaceable&& other) : int_(other.int_), double_(other.double_) { + other.int_ = 0; + other.double_ = 0.0; + } + Emplaceable(const Emplaceable&) = delete; + Emplaceable& operator=(const Emplaceable&) = delete; + + Emplaceable& operator=(Emplaceable&& other) { + int_ = other.int_; + other.int_ = 0; + double_ = other.double_; + other.double_ = 0.0; + return *this; + } + + friend bool operator==(const Emplaceable& lhs, const Emplaceable& rhs) { + return std::tie(lhs.int_, lhs.double_) == std::tie(rhs.int_, rhs.double_); + } + + friend bool operator<(const Emplaceable& lhs, const Emplaceable& rhs) { + return std::tie(lhs.int_, lhs.double_) < std::tie(rhs.int_, rhs.double_); + } + + private: + int int_; + double double_; +}; + +struct TemplateConstructor { + template + explicit TemplateConstructor(const T&) {} + + friend bool operator<(const TemplateConstructor&, + const TemplateConstructor&) { + return false; + } +}; + +class NonDefaultConstructibleCompare { + public: + explicit NonDefaultConstructibleCompare(int) {} + + template + bool operator()(const T& lhs, const T& rhs) const { + return std::less()(lhs, rhs); + } +}; + +template +struct LessByFirst { + bool operator()(const PairType& lhs, const PairType& rhs) const { + return lhs.first < rhs.first; + } +}; + +// Common test trees. 
+template +using TypedTree = flat_tree, + ContainerT>; +using IntTree = TypedTree>; +using IntPair = std::pair; +using IntPairTree = + flat_tree, std::vector>; +using MoveOnlyTree = + flat_tree, std::vector>; +using EmplaceableTree = + flat_tree, std::vector>; +using ReversedTree = + flat_tree, std::vector>; + +using TreeWithStrangeCompare = + flat_tree>; + +using ::testing::ElementsAre; +using ::testing::IsEmpty; + +template +class FlatTreeTest : public testing::Test {}; +TYPED_TEST_SUITE_P(FlatTreeTest); + +TEST(FlatTree, IsMultipass) { + static_assert(!is_multipass>(), + "InputIterator is not multipass"); + static_assert(!is_multipass>(), + "OutputIterator is not multipass"); + + static_assert(is_multipass::iterator>(), + "ForwardIterator is multipass"); + static_assert(is_multipass::iterator>(), + "BidirectionalIterator is multipass"); + static_assert(is_multipass::iterator>(), + "RandomAccessIterator is multipass"); +} + +// Tests that the compiler generated move operators propagrate noexcept +// specifiers. +TEST(FlatTree, NoExcept) { + struct MoveThrows { + MoveThrows(MoveThrows&&) noexcept(false) {} + MoveThrows& operator=(MoveThrows&&) noexcept(false) { return *this; } + }; + + using MoveThrowsTree = + flat_tree, std::array>; + + static_assert(std::is_nothrow_move_constructible::value, + "Error: IntTree is not nothrow move constructible"); + static_assert(std::is_nothrow_move_assignable::value, + "Error: IntTree is not nothrow move assignable"); + + static_assert(!std::is_nothrow_move_constructible::value, + "Error: MoveThrowsTree is nothrow move constructible"); + static_assert(!std::is_nothrow_move_assignable::value, + "Error: MoveThrowsTree is nothrow move assignable"); +} + +// ---------------------------------------------------------------------------- +// Class. + +// Check that flat_tree and its iterators can be instantiated with an +// incomplete type. + +TEST(FlatTree, IncompleteType) { + struct A { + using Tree = flat_tree, std::vector>; + int data; + Tree set_with_incomplete_type; + Tree::iterator it; + Tree::const_iterator cit; + + // We do not declare operator< because clang complains that it's unused. + }; + + A a; +} + +TEST(FlatTree, Stability) { + using Pair = std::pair; + + using Tree = flat_tree, std::vector>; + + // Constructors are stable. + Tree cont({{0, 0}, {1, 0}, {0, 1}, {2, 0}, {0, 2}, {1, 1}}); + + auto AllOfSecondsAreZero = [&cont] { + return absl::c_all_of(cont, + [](const Pair& elem) { return elem.second == 0; }); + }; + + EXPECT_TRUE(AllOfSecondsAreZero()) << "constructor should be stable"; + + // Should not replace existing. + cont.insert(Pair(0, 2)); + cont.insert(Pair(1, 2)); + cont.insert(Pair(2, 2)); + + EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable"; + + cont.insert(Pair(3, 0)); + cont.insert(Pair(3, 2)); + + EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable"; +} + +// ---------------------------------------------------------------------------- +// Types. + +// key_type +// key_compare +// value_type +// value_compare +// pointer +// const_pointer +// reference +// const_reference +// size_type +// difference_type +// iterator +// const_iterator +// reverse_iterator +// const_reverse_iterator + +TEST(FlatTree, Types) { + // These are guaranteed to be portable. 
+ static_assert((std::is_same::value), ""); + static_assert((std::is_same::value), ""); + static_assert((std::is_same, IntTree::key_compare>::value), ""); + static_assert((std::is_same::value), ""); + static_assert((std::is_same::value), + ""); + static_assert((std::is_same::value), ""); + static_assert((std::is_same::value), ""); +} + +// ---------------------------------------------------------------------------- +// Lifetime. + +// flat_tree() +// flat_tree(const Compare& comp) + +TYPED_TEST_P(FlatTreeTest, DefaultConstructor) { + { + TypedTree cont; + EXPECT_THAT(cont, ElementsAre()); + } + + { + TreeWithStrangeCompare cont(NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre()); + } +} + +// flat_tree(const flat_tree& x) + +TYPED_TEST_P(FlatTreeTest, CopyConstructor) { + TypedTree original({1, 2, 3, 4}); + TypedTree copied(original); + + EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4)); + + EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(original, ElementsAre(1, 2, 3, 4)); + EXPECT_EQ(original, copied); +} + +// flat_tree(flat_tree&& x) + +TEST(FlatTree, MoveConstructor) { + int input_range[] = {1, 2, 3, 4}; + + MoveOnlyTree original(std::begin(input_range), std::end(input_range)); + MoveOnlyTree moved(std::move(original)); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +// flat_tree(InputIterator first, +// InputIterator last, +// const Compare& comp = Compare()) + +TEST(FlatTree, RangeConstructor) { + { + IntPair input_vals[] = {{1, 1}, {1, 2}, {2, 1}, {2, 2}, {1, 3}, + {2, 3}, {3, 1}, {3, 2}, {3, 3}}; + + IntPairTree first_of(MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals))); + EXPECT_THAT(first_of, + ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1))); + } + { + TreeWithStrangeCompare::value_type input_vals[] = {1, 1, 1, 2, 2, + 2, 3, 3, 3}; + + TreeWithStrangeCompare cont(MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals)), + NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3)); + } +} + +// flat_tree(const container_type&) + +TYPED_TEST_P(FlatTreeTest, ContainerCopyConstructor) { + TypeParam items = {1, 2, 3, 4}; + TypedTree tree(items); + + EXPECT_THAT(tree, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(items, ElementsAre(1, 2, 3, 4)); +} + +// flat_tree(container_type&&) + +TEST(FlatTree, ContainerMoveConstructor) { + using Pair = std::pair; + + // Construct an unsorted vector with a duplicate item in it. Sorted by the + // first item, the second allows us to test for stability. Using a move + // only type to ensure the vector is not copied. + std::vector storage; + storage.push_back(Pair(2, MoveOnlyInt(0))); + storage.push_back(Pair(1, MoveOnlyInt(0))); + storage.push_back(Pair(2, MoveOnlyInt(1))); + + using Tree = flat_tree, std::vector>; + Tree tree(std::move(storage)); + + // The list should be two items long, with only the first "2" saved. 
+ ASSERT_EQ(2u, tree.size()); + const Pair& zeroth = *tree.begin(); + ASSERT_EQ(1, zeroth.first); + ASSERT_EQ(0, zeroth.second.data()); + + const Pair& first = *(tree.begin() + 1); + ASSERT_EQ(2, first.first); + ASSERT_EQ(0, first.second.data()); +} + +// flat_tree(std::initializer_list ilist, +// const Compare& comp = Compare()) + +TYPED_TEST_P(FlatTreeTest, InitializerListConstructor) { + { + TypedTree cont({1, 2, 3, 4, 5, 6, 10, 8}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + TypedTree cont({1, 2, 3, 4, 5, 6, 10, 8}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + TreeWithStrangeCompare cont({1, 2, 3, 4, 5, 6, 10, 8}, + NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + IntPairTree first_of({{1, 1}, {2, 1}, {1, 2}}); + EXPECT_THAT(first_of, ElementsAre(IntPair(1, 1), IntPair(2, 1))); + } +} + +// flat_tree(sorted_unique_t, +// InputIterator first, +// InputIterator last, +// const Compare& comp = Compare()) + +TEST(FlatTree, SortedUniqueRangeConstructor) { + { + IntPair input_vals[] = {{1, 1}, {2, 1}, {3, 1}}; + + IntPairTree first_of(sorted_unique, + MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals))); + EXPECT_THAT(first_of, + ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1))); + } + { + TreeWithStrangeCompare::value_type input_vals[] = {1, 2, 3}; + + TreeWithStrangeCompare cont(sorted_unique, + MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals)), + NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3)); + } +} + +// flat_tree(sorted_unique_t, const container_type&) + +TYPED_TEST_P(FlatTreeTest, SortedUniqueContainerCopyConstructor) { + TypeParam items = {1, 2, 3, 4}; + TypedTree tree(sorted_unique, items); + + EXPECT_THAT(tree, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(items, ElementsAre(1, 2, 3, 4)); +} + +// flat_tree(sorted_unique_t, std::vector&&) + +TEST(FlatTree, SortedUniqueVectorMoveConstructor) { + using Pair = std::pair; + + std::vector storage; + storage.push_back(Pair(1, MoveOnlyInt(0))); + storage.push_back(Pair(2, MoveOnlyInt(0))); + + using Tree = flat_tree, std::vector>; + Tree tree(sorted_unique, std::move(storage)); + + ASSERT_EQ(2u, tree.size()); + const Pair& zeroth = *tree.begin(); + ASSERT_EQ(1, zeroth.first); + ASSERT_EQ(0, zeroth.second.data()); + + const Pair& first = *(tree.begin() + 1); + ASSERT_EQ(2, first.first); + ASSERT_EQ(0, first.second.data()); +} + +// flat_tree(sorted_unique_t, +// std::initializer_list ilist, +// const Compare& comp = Compare()) + +TYPED_TEST_P(FlatTreeTest, SortedUniqueInitializerListConstructor) { + { + TypedTree cont(sorted_unique, {1, 2, 3, 4, 5, 6, 8, 10}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + TypedTree cont(sorted_unique, {1, 2, 3, 4, 5, 6, 8, 10}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + TreeWithStrangeCompare cont(sorted_unique, {1, 2, 3, 4, 5, 6, 8, 10}, + NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + IntPairTree first_of(sorted_unique, {{1, 1}, {2, 1}}); + EXPECT_THAT(first_of, ElementsAre(IntPair(1, 1), IntPair(2, 1))); + } +} + +// ---------------------------------------------------------------------------- +// Assignments. 
+ +// flat_tree& operator=(const flat_tree&) + +TYPED_TEST_P(FlatTreeTest, CopyAssignable) { + TypedTree original({1, 2, 3, 4}); + TypedTree copied; + copied = original; + + EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(original, ElementsAre(1, 2, 3, 4)); + EXPECT_EQ(original, copied); +} + +// flat_tree& operator=(flat_tree&&) + +TEST(FlatTree, MoveAssignable) { + int input_range[] = {1, 2, 3, 4}; + + MoveOnlyTree original(std::begin(input_range), std::end(input_range)); + MoveOnlyTree moved; + moved = std::move(original); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +// flat_tree& operator=(std::initializer_list ilist) + +TYPED_TEST_P(FlatTreeTest, InitializerListAssignable) { + TypedTree cont({0}); + cont = {1, 2, 3, 4, 5, 6, 10, 8}; + + EXPECT_EQ(0U, cont.count(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); +} + +// -------------------------------------------------------------------------- +// Memory management. + +// void reserve(size_type new_capacity) + +TEST(FlatTreeTest, Reserve) { + IntTree cont({1, 2, 3}); + + cont.reserve(5); + EXPECT_LE(5U, cont.capacity()); +} + +// size_type capacity() const + +TEST(FlatTreeTest, Capacity) { + IntTree cont({1, 2, 3}); + + EXPECT_LE(cont.size(), cont.capacity()); + cont.reserve(5); + EXPECT_LE(cont.size(), cont.capacity()); +} + +// void shrink_to_fit() + +TEST(FlatTreeTest, ShrinkToFit) { + IntTree cont({1, 2, 3}); + + IntTree::size_type capacity_before = cont.capacity(); + cont.shrink_to_fit(); + EXPECT_GE(capacity_before, cont.capacity()); +} + +// ---------------------------------------------------------------------------- +// Size management. + +// void clear() + +TYPED_TEST_P(FlatTreeTest, Clear) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + cont.clear(); + EXPECT_THAT(cont, ElementsAre()); +} + +// size_type size() const + +TYPED_TEST_P(FlatTreeTest, Size) { + TypedTree cont; + + EXPECT_EQ(0U, cont.size()); + cont.insert(2); + EXPECT_EQ(1U, cont.size()); + cont.insert(1); + EXPECT_EQ(2U, cont.size()); + cont.insert(3); + EXPECT_EQ(3U, cont.size()); + cont.erase(cont.begin()); + EXPECT_EQ(2U, cont.size()); + cont.erase(cont.begin()); + EXPECT_EQ(1U, cont.size()); + cont.erase(cont.begin()); + EXPECT_EQ(0U, cont.size()); +} + +// bool empty() const + +TYPED_TEST_P(FlatTreeTest, Empty) { + TypedTree cont; + + EXPECT_TRUE(cont.empty()); + cont.insert(1); + EXPECT_FALSE(cont.empty()); + cont.clear(); + EXPECT_TRUE(cont.empty()); +} + +// ---------------------------------------------------------------------------- +// Iterators. 
+ +// iterator begin() +// const_iterator begin() const +// iterator end() +// const_iterator end() const +// +// reverse_iterator rbegin() +// const_reverse_iterator rbegin() const +// reverse_iterator rend() +// const_reverse_iterator rend() const +// +// const_iterator cbegin() const +// const_iterator cend() const +// const_reverse_iterator crbegin() const +// const_reverse_iterator crend() const + +TYPED_TEST_P(FlatTreeTest, Iterators) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + auto size = + static_cast::difference_type>(cont.size()); + + EXPECT_EQ(size, std::distance(cont.begin(), cont.end())); + EXPECT_EQ(size, std::distance(cont.cbegin(), cont.cend())); + EXPECT_EQ(size, std::distance(cont.rbegin(), cont.rend())); + EXPECT_EQ(size, std::distance(cont.crbegin(), cont.crend())); + + { + auto it = cont.begin(); + auto c_it = cont.cbegin(); + EXPECT_EQ(it, c_it); + for (int j = 1; it != cont.end(); ++it, ++c_it, ++j) { + EXPECT_EQ(j, *it); + EXPECT_EQ(j, *c_it); + } + } + { + auto rit = cont.rbegin(); + auto c_rit = cont.crbegin(); + EXPECT_EQ(rit, c_rit); + for (int j = static_cast(size); rit != cont.rend(); + ++rit, ++c_rit, --j) { + EXPECT_EQ(j, *rit); + EXPECT_EQ(j, *c_rit); + } + } +} + +// ---------------------------------------------------------------------------- +// Insert operations. + +// pair insert(const value_type& val) + +TYPED_TEST_P(FlatTreeTest, InsertLValue) { + TypedTree cont; + + int value = 2; + std::pair::iterator, bool> result = + cont.insert(value); + EXPECT_TRUE(result.second); + EXPECT_EQ(cont.begin(), result.first); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(2, *result.first); + + value = 1; + result = cont.insert(value); + EXPECT_TRUE(result.second); + EXPECT_EQ(cont.begin(), result.first); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(1, *result.first); + + value = 3; + result = cont.insert(value); + EXPECT_TRUE(result.second); + EXPECT_EQ(std::prev(cont.end()), result.first); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, *result.first); + + value = 3; + result = cont.insert(value); + EXPECT_FALSE(result.second); + EXPECT_EQ(std::prev(cont.end()), result.first); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, *result.first); +} + +// pair insert(value_type&& val) + +TEST(FlatTree, InsertRValue) { + MoveOnlyTree cont; + + std::pair result = cont.insert(MoveOnlyInt(2)); + EXPECT_TRUE(result.second); + EXPECT_EQ(cont.begin(), result.first); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(2, result.first->data()); + + result = cont.insert(MoveOnlyInt(1)); + EXPECT_TRUE(result.second); + EXPECT_EQ(cont.begin(), result.first); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(1, result.first->data()); + + result = cont.insert(MoveOnlyInt(3)); + EXPECT_TRUE(result.second); + EXPECT_EQ(std::prev(cont.end()), result.first); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, result.first->data()); + + result = cont.insert(MoveOnlyInt(3)); + EXPECT_FALSE(result.second); + EXPECT_EQ(std::prev(cont.end()), result.first); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, result.first->data()); +} + +// iterator insert(const_iterator position_hint, const value_type& val) + +TYPED_TEST_P(FlatTreeTest, InsertPositionLValue) { + TypedTree cont; + + auto result = cont.insert(cont.cend(), 2); + EXPECT_EQ(cont.begin(), result); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(2, *result); + + result = cont.insert(cont.cend(), 1); + EXPECT_EQ(cont.begin(), result); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(1, *result); + + result = cont.insert(cont.cend(), 3); + EXPECT_EQ(std::prev(cont.end()), result); + 
EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, *result); + + result = cont.insert(cont.cend(), 3); + EXPECT_EQ(std::prev(cont.end()), result); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, *result); +} + +// iterator insert(const_iterator position_hint, value_type&& val) + +TEST(FlatTree, InsertPositionRValue) { + MoveOnlyTree cont; + + auto result = cont.insert(cont.cend(), MoveOnlyInt(2)); + EXPECT_EQ(cont.begin(), result); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(2, result->data()); + + result = cont.insert(cont.cend(), MoveOnlyInt(1)); + EXPECT_EQ(cont.begin(), result); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(1, result->data()); + + result = cont.insert(cont.cend(), MoveOnlyInt(3)); + EXPECT_EQ(std::prev(cont.end()), result); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, result->data()); + + result = cont.insert(cont.cend(), MoveOnlyInt(3)); + EXPECT_EQ(std::prev(cont.end()), result); + EXPECT_EQ(3U, cont.size()); + EXPECT_EQ(3, result->data()); +} + +// template +// void insert(InputIterator first, InputIterator last); + +TEST(FlatTree, InsertIterIter) { + struct GetKeyFromIntIntPair { + const int& operator()(const std::pair& p) const { + return p.first; + } + }; + + using IntIntMap = flat_tree, + std::vector>; + + { + IntIntMap cont; + IntPair int_pairs[] = {{3, 1}, {1, 1}, {4, 1}, {2, 1}}; + cont.insert(std::begin(int_pairs), std::end(int_pairs)); + EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1), + IntPair(4, 1))); + } + + { + IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}}); + std::vector int_pairs; + cont.insert(std::begin(int_pairs), std::end(int_pairs)); + EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1), + IntPair(4, 1))); + } + + { + IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}}); + IntPair int_pairs[] = {{1, 1}}; + cont.insert(std::begin(int_pairs), std::end(int_pairs)); + EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1), + IntPair(4, 1))); + } + + { + IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}}); + IntPair int_pairs[] = {{5, 1}}; + cont.insert(std::begin(int_pairs), std::end(int_pairs)); + EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1), + IntPair(4, 1), IntPair(5, 1))); + } + + { + IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}}); + IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}}; + cont.insert(std::begin(int_pairs), std::end(int_pairs)); + EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1), + IntPair(4, 1))); + } + + { + IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}}); + IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}, {7, 2}, {6, 2}, + {8, 2}, {5, 2}, {5, 3}, {6, 3}, {7, 3}, {8, 3}}; + cont.insert(std::begin(int_pairs), std::end(int_pairs)); + EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1), + IntPair(4, 1), IntPair(5, 2), IntPair(6, 2), + IntPair(7, 2), IntPair(8, 2))); + } +} + +// template +// pair emplace(Args&&... 
args) + +TYPED_TEST_P(FlatTreeTest, Emplace) { + { + EmplaceableTree cont; + + std::pair result = cont.emplace(); + EXPECT_TRUE(result.second); + EXPECT_EQ(cont.begin(), result.first); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(Emplaceable(), *cont.begin()); + + result = cont.emplace(2, 3.5); + EXPECT_TRUE(result.second); + EXPECT_EQ(std::next(cont.begin()), result.first); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(Emplaceable(2, 3.5), *result.first); + + result = cont.emplace(2, 3.5); + EXPECT_FALSE(result.second); + EXPECT_EQ(std::next(cont.begin()), result.first); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(Emplaceable(2, 3.5), *result.first); + } + { + TypedTree cont; + + std::pair::iterator, bool> result = + cont.emplace(2); + EXPECT_TRUE(result.second); + EXPECT_EQ(cont.begin(), result.first); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(2, *result.first); + } +} + +// template +// iterator emplace_hint(const_iterator position_hint, Args&&... args) + +TYPED_TEST_P(FlatTreeTest, EmplacePosition) { + { + EmplaceableTree cont; + + auto result = cont.emplace_hint(cont.cend()); + EXPECT_EQ(cont.begin(), result); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(Emplaceable(), *cont.begin()); + + result = cont.emplace_hint(cont.cend(), 2, 3.5); + EXPECT_EQ(std::next(cont.begin()), result); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(Emplaceable(2, 3.5), *result); + + result = cont.emplace_hint(cont.cbegin(), 2, 3.5); + EXPECT_EQ(std::next(cont.begin()), result); + EXPECT_EQ(2U, cont.size()); + EXPECT_EQ(Emplaceable(2, 3.5), *result); + } + { + TypedTree cont; + + auto result = cont.emplace_hint(cont.cend(), 2); + EXPECT_EQ(cont.begin(), result); + EXPECT_EQ(1U, cont.size()); + EXPECT_EQ(2, *result); + } +} + +// ---------------------------------------------------------------------------- +// Underlying type operations. + +// underlying_type extract() && +TYPED_TEST_P(FlatTreeTest, Extract) { + TypedTree cont; + cont.emplace(3); + cont.emplace(1); + cont.emplace(2); + cont.emplace(4); + + TypeParam body = std::move(cont).extract(); + EXPECT_THAT(cont, IsEmpty()); + EXPECT_THAT(body, ElementsAre(1, 2, 3, 4)); +} + +// replace(underlying_type&&) +TYPED_TEST_P(FlatTreeTest, Replace) { + TypeParam body = {1, 2, 3, 4}; + TypedTree cont; + cont.replace(std::move(body)); + + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4)); +} + +// ---------------------------------------------------------------------------- +// Erase operations. 
+ +// iterator erase(const_iterator position_hint) + +TYPED_TEST_P(FlatTreeTest, ErasePosition) { + { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + auto it = cont.erase(std::next(cont.cbegin(), 3)); + EXPECT_EQ(std::next(cont.begin(), 3), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 0)); + EXPECT_EQ(cont.begin(), it); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 5)); + EXPECT_EQ(cont.end(), it); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7)); + + it = cont.erase(std::next(cont.cbegin(), 1)); + EXPECT_EQ(std::next(cont.begin()), it); + EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7)); + + it = cont.erase(std::next(cont.cbegin(), 2)); + EXPECT_EQ(std::next(cont.begin(), 2), it); + EXPECT_THAT(cont, ElementsAre(2, 5, 7)); + + it = cont.erase(std::next(cont.cbegin(), 2)); + EXPECT_EQ(std::next(cont.begin(), 2), it); + EXPECT_THAT(cont, ElementsAre(2, 5)); + + it = cont.erase(std::next(cont.cbegin(), 0)); + EXPECT_EQ(std::next(cont.begin(), 0), it); + EXPECT_THAT(cont, ElementsAre(5)); + + it = cont.erase(cont.cbegin()); + EXPECT_EQ(cont.begin(), it); + EXPECT_EQ(cont.end(), it); + } + // This is LWG #2059. + // There is a potential ambiguity between erase with an iterator and erase + // with a key, if key has a templated constructor. + { + using T = TemplateConstructor; + + flat_tree, std::vector> cont; + T v(0); + + auto it = cont.find(v); + if (it != cont.end()) + cont.erase(it); + } +} + +// iterator erase(const_iterator first, const_iterator last) + +TYPED_TEST_P(FlatTreeTest, EraseRange) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + auto it = + cont.erase(std::next(cont.cbegin(), 5), std::next(cont.cbegin(), 5)); + EXPECT_EQ(std::next(cont.begin(), 5), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 3), std::next(cont.cbegin(), 4)); + EXPECT_EQ(std::next(cont.begin(), 3), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 2), std::next(cont.cbegin(), 5)); + EXPECT_EQ(std::next(cont.begin(), 2), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 0), std::next(cont.cbegin(), 2)); + EXPECT_EQ(std::next(cont.begin(), 0), it); + EXPECT_THAT(cont, ElementsAre(7, 8)); + + it = cont.erase(cont.cbegin(), cont.cend()); + EXPECT_EQ(cont.begin(), it); + EXPECT_EQ(cont.end(), it); +} + +// size_type erase(const key_type& key) + +TYPED_TEST_P(FlatTreeTest, EraseKey) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + EXPECT_EQ(0U, cont.erase(9)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8)); + + EXPECT_EQ(1U, cont.erase(4)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8)); + + EXPECT_EQ(1U, cont.erase(1)); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8)); + + EXPECT_EQ(1U, cont.erase(8)); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7)); + + EXPECT_EQ(1U, cont.erase(3)); + EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7)); + + EXPECT_EQ(1U, cont.erase(6)); + EXPECT_THAT(cont, ElementsAre(2, 5, 7)); + + EXPECT_EQ(1U, cont.erase(7)); + EXPECT_THAT(cont, ElementsAre(2, 5)); + + EXPECT_EQ(1U, cont.erase(2)); + EXPECT_THAT(cont, ElementsAre(5)); + + EXPECT_EQ(1U, cont.erase(5)); + EXPECT_THAT(cont, ElementsAre()); +} + +TYPED_TEST_P(FlatTreeTest, EraseEndDeath) { + { + TypedTree tree; + ASSERT_DEATH_IF_SUPPORTED(tree.erase(tree.cend()), ""); + } + + { + TypedTree tree = {1, 2, 3, 4}; + 
ASSERT_DEATH_IF_SUPPORTED(tree.erase(tree.find(5)), ""); + } +} + +// ---------------------------------------------------------------------------- +// Comparators. + +// key_compare key_comp() const + +TEST(FlatTree, KeyComp) { + ReversedTree cont({1, 2, 3, 4, 5}); + + EXPECT_TRUE(absl::c_is_sorted(cont, cont.key_comp())); + int new_elements[] = {6, 7, 8, 9, 10}; + std::copy(std::begin(new_elements), std::end(new_elements), + std::inserter(cont, cont.end())); + EXPECT_TRUE(absl::c_is_sorted(cont, cont.key_comp())); +} + +// value_compare value_comp() const + +TEST(FlatTree, ValueComp) { + ReversedTree cont({1, 2, 3, 4, 5}); + + EXPECT_TRUE(absl::c_is_sorted(cont, cont.value_comp())); + int new_elements[] = {6, 7, 8, 9, 10}; + std::copy(std::begin(new_elements), std::end(new_elements), + std::inserter(cont, cont.end())); + EXPECT_TRUE(absl::c_is_sorted(cont, cont.value_comp())); +} + +// ---------------------------------------------------------------------------- +// Search operations. + +// size_type count(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, Count) { + const TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_EQ(1U, cont.count(5)); + EXPECT_EQ(1U, cont.count(6)); + EXPECT_EQ(1U, cont.count(7)); + EXPECT_EQ(1U, cont.count(8)); + EXPECT_EQ(1U, cont.count(9)); + EXPECT_EQ(1U, cont.count(10)); + EXPECT_EQ(1U, cont.count(11)); + EXPECT_EQ(1U, cont.count(12)); + EXPECT_EQ(0U, cont.count(4)); +} + +// iterator find(const key_type& key) +// const_iterator find(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, Find) { + { + TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_EQ(cont.begin(), cont.find(5)); + EXPECT_EQ(std::next(cont.begin()), cont.find(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4)); + } + { + const TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_EQ(cont.begin(), cont.find(5)); + EXPECT_EQ(std::next(cont.begin()), cont.find(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4)); + } +} + +// bool contains(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, Contains) { + const TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_TRUE(cont.contains(5)); + EXPECT_TRUE(cont.contains(6)); + EXPECT_TRUE(cont.contains(7)); + EXPECT_TRUE(cont.contains(8)); + EXPECT_TRUE(cont.contains(9)); + EXPECT_TRUE(cont.contains(10)); + EXPECT_TRUE(cont.contains(11)); + EXPECT_TRUE(cont.contains(12)); + EXPECT_FALSE(cont.contains(4)); +} + +// pair equal_range(const key_type& key) +// pair equal_range(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, EqualRange) { + { + TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + std::pair::iterator, + typename TypedTree::iterator> + result = cont.equal_range(5); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(7); + 
EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(9); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(11); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(13); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(15); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(17); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(19); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + result = cont.equal_range(4); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 0), result.second); + result = cont.equal_range(6); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(8); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(10); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(12); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(14); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(16); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(18); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(20); + EXPECT_EQ(std::next(cont.begin(), 8), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + } + { + const TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + std::pair::const_iterator, + typename TypedTree::const_iterator> + result = cont.equal_range(5); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(7); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(9); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(11); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(13); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(15); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(17); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(19); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + result 
= cont.equal_range(4); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 0), result.second); + result = cont.equal_range(6); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(8); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(10); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(12); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(14); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(16); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(18); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(20); + EXPECT_EQ(std::next(cont.begin(), 8), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + } +} + +// iterator lower_bound(const key_type& key); +// const_iterator lower_bound(const key_type& key) const; + +TYPED_TEST_P(FlatTreeTest, LowerBound) { + { + TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(cont.begin(), cont.lower_bound(5)); + EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20)); + } + { + const TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(cont.begin(), cont.lower_bound(5)); + EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16)); + 
EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20)); + } +} + +// iterator upper_bound(const key_type& key) +// const_iterator upper_bound(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, UpperBound) { + { + TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20)); + } + { + const TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20)); + } +} + +// ---------------------------------------------------------------------------- +// General operations. + +// void swap(flat_tree& other) +// void swap(flat_tree& lhs, flat_tree& rhs) + +TYPED_TEST_P(FlatTreeTest, Swap) { + TypedTree x({1, 2, 3}); + TypedTree y({4}); + swap(x, y); + EXPECT_THAT(x, ElementsAre(4)); + EXPECT_THAT(y, ElementsAre(1, 2, 3)); + + y.swap(x); + EXPECT_THAT(x, ElementsAre(1, 2, 3)); + EXPECT_THAT(y, ElementsAre(4)); +} + +// bool operator==(const flat_tree& lhs, const flat_tree& rhs) +// bool operator!=(const flat_tree& lhs, const flat_tree& rhs) +// bool operator<(const flat_tree& lhs, const flat_tree& rhs) +// bool operator>(const flat_tree& lhs, const flat_tree& rhs) +// bool operator<=(const flat_tree& lhs, const flat_tree& rhs) +// bool operator>=(const flat_tree& lhs, const flat_tree& rhs) + +TEST(FlatTree, Comparison) { + // Provided comparator does not participate in comparison. 
+ ReversedTree biggest({3}); + ReversedTree smallest({1}); + ReversedTree middle({1, 2}); + + EXPECT_EQ(biggest, biggest); + EXPECT_NE(biggest, smallest); + EXPECT_LT(smallest, middle); + EXPECT_LE(smallest, middle); + EXPECT_LE(middle, middle); + EXPECT_GT(biggest, middle); + EXPECT_GE(biggest, middle); + EXPECT_GE(biggest, biggest); +} + +TYPED_TEST_P(FlatTreeTest, SupportsEraseIf) { + TypedTree x; + EXPECT_EQ(0u, EraseIf(x, [](int) { return false; })); + EXPECT_THAT(x, ElementsAre()); + + x = {1, 2, 3}; + EXPECT_EQ(1u, EraseIf(x, [](int elem) { return !(elem & 1); })); + EXPECT_THAT(x, ElementsAre(1, 3)); + + x = {1, 2, 3, 4}; + EXPECT_EQ(2u, EraseIf(x, [](int elem) { return elem & 1; })); + EXPECT_THAT(x, ElementsAre(2, 4)); +} + +REGISTER_TYPED_TEST_SUITE_P(FlatTreeTest, + DefaultConstructor, + CopyConstructor, + ContainerCopyConstructor, + InitializerListConstructor, + SortedUniqueContainerCopyConstructor, + SortedUniqueInitializerListConstructor, + CopyAssignable, + InitializerListAssignable, + Clear, + Size, + Empty, + Iterators, + InsertLValue, + InsertPositionLValue, + Emplace, + EmplacePosition, + Extract, + Replace, + ErasePosition, + EraseRange, + EraseKey, + EraseEndDeath, + Count, + Find, + Contains, + EqualRange, + LowerBound, + UpperBound, + Swap, + SupportsEraseIf); + +using IntSequenceContainers = + ::testing::Types, std::vector>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, FlatTreeTest, IntSequenceContainers); + +} // namespace +} // namespace flat_containers_internal +} // namespace webrtc diff --git a/rtc_base/containers/identity.h b/rtc_base/containers/identity.h new file mode 100644 index 0000000000..29592931bd --- /dev/null +++ b/rtc_base/containers/identity.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_IDENTITY_H_ +#define RTC_BASE_CONTAINERS_IDENTITY_H_ + +#include + +namespace webrtc { + +// Implementation of C++20's std::identity. +// +// Reference: +// - https://en.cppreference.com/w/cpp/utility/functional/identity +// - https://wg21.link/func.identity +struct identity { + template + constexpr T&& operator()(T&& t) const noexcept { + return std::forward(t); + } + + using is_transparent = void; +}; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_IDENTITY_H_ diff --git a/rtc_base/containers/invoke.h b/rtc_base/containers/invoke.h new file mode 100644 index 0000000000..5d17a70beb --- /dev/null +++ b/rtc_base/containers/invoke.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. 
+
+#ifndef RTC_BASE_CONTAINERS_INVOKE_H_
+#define RTC_BASE_CONTAINERS_INVOKE_H_
+
+#include <type_traits>
+#include <utility>
+
+namespace webrtc {
+
+namespace invoke_internal {
+
+// Helper struct and alias to deduce the class type from a member function
+// pointer or member object pointer.
+template <typename DecayedF>
+struct member_pointer_class {};
+
+template <typename ReturnT, typename ClassT>
+struct member_pointer_class<ReturnT ClassT::*> {
+  using type = ClassT;
+};
+
+template <typename DecayedF>
+using member_pointer_class_t = typename member_pointer_class<DecayedF>::type;
+
+// Utility struct to detect specializations of std::reference_wrapper.
+template <typename T>
+struct is_reference_wrapper : std::false_type {};
+
+template <typename T>
+struct is_reference_wrapper<std::reference_wrapper<T>> : std::true_type {};
+
+// Small helpers used below in invoke_internal::invoke to make the SFINAE more
+// concise.
+template <typename F>
+const bool& IsMemFunPtr =
+    std::is_member_function_pointer<std::decay_t<F>>::value;
+
+template <typename F>
+const bool& IsMemObjPtr = std::is_member_object_pointer<std::decay_t<F>>::value;
+
+template <typename F,
+          typename T1,
+          typename MemPtrClass = member_pointer_class_t<std::decay_t<F>>>
+const bool& IsMemPtrToBaseOf =
+    std::is_base_of<MemPtrClass, std::decay_t<T1>>::value;
+
+template <typename T1>
+const bool& IsRefWrapper = is_reference_wrapper<std::decay_t<T1>>::value;
+
+template <bool B>
+using EnableIf = std::enable_if_t<B, bool>;
+
+// Invokes a member function pointer on a reference to an object of a suitable
+// type. Covers bullet 1 of the INVOKE definition.
+//
+// Reference: https://wg21.link/func.require#1.1
+template <typename F,
+          typename T1,
+          typename... Args,
+          EnableIf<IsMemFunPtr<F> && IsMemPtrToBaseOf<F, T1>> = true>
+constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1, Args&&... args) {
+  return (std::forward<T1>(t1).*f)(std::forward<Args>(args)...);
+}
+
+// Invokes a member function pointer on a std::reference_wrapper to an object of
+// a suitable type. Covers bullet 2 of the INVOKE definition.
+//
+// Reference: https://wg21.link/func.require#1.2
+template <typename F,
+          typename T1,
+          typename... Args,
+          EnableIf<IsMemFunPtr<F> && IsRefWrapper<T1>> = true>
+constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1, Args&&... args) {
+  return (t1.get().*f)(std::forward<Args>(args)...);
+}
+
+// Invokes a member function pointer on a pointer-like type to an object of a
+// suitable type. Covers bullet 3 of the INVOKE definition.
+//
+// Reference: https://wg21.link/func.require#1.3
+template <typename F,
+          typename T1,
+          typename... Args,
+          EnableIf<IsMemFunPtr<F> && !IsMemPtrToBaseOf<F, T1> &&
+                   !IsRefWrapper<T1>> = true>
+constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1, Args&&... args) {
+  return ((*std::forward<T1>(t1)).*f)(std::forward<Args>(args)...);
+}
+
+// Invokes a member object pointer on a reference to an object of a suitable
+// type. Covers bullet 4 of the INVOKE definition.
+//
+// Reference: https://wg21.link/func.require#1.4
+template <typename F,
+          typename T1,
+          EnableIf<IsMemObjPtr<F> && IsMemPtrToBaseOf<F, T1>> = true>
+constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1) {
+  return std::forward<T1>(t1).*f;
+}
+
+// Invokes a member object pointer on a std::reference_wrapper to an object of
+// a suitable type. Covers bullet 5 of the INVOKE definition.
+//
+// Reference: https://wg21.link/func.require#1.5
+template <typename F,
+          typename T1,
+          EnableIf<IsMemObjPtr<F> && IsRefWrapper<T1>> = true>
+constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1) {
+  return t1.get().*f;
+}
+
+// Invokes a member object pointer on a pointer-like type to an object of a
+// suitable type. Covers bullet 6 of the INVOKE definition.
+//
+// Reference: https://wg21.link/func.require#1.6
+template <typename F,
+          typename T1,
+          EnableIf<IsMemObjPtr<F> && !IsMemPtrToBaseOf<F, T1> &&
+                   !IsRefWrapper<T1>> = true>
+constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1) {
+  return (*std::forward<T1>(t1)).*f;
+}
+
+// Invokes a regular function or function object. Covers bullet 7 of the INVOKE
+// definition.
+//
+// Reference: https://wg21.link/func.require#1.7
+template <typename F, typename... Args>
+constexpr decltype(auto) InvokeImpl(F&& f, Args&&... args) {
+  return std::forward<F>(f)(std::forward<Args>(args)...);
+}
+
+}  // namespace invoke_internal
+
+// Implementation of C++17's std::invoke.
This is not based on implementation +// referenced in original std::invoke proposal, but rather a manual +// implementation, so that it can be constexpr. +// +// References: +// - https://wg21.link/n4169#implementability +// - https://en.cppreference.com/w/cpp/utility/functional/invoke +// - https://wg21.link/func.invoke +template +constexpr decltype(auto) invoke(F&& f, Args&&... args) { + return invoke_internal::InvokeImpl(std::forward(f), + std::forward(args)...); +} + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_INVOKE_H_ diff --git a/rtc_base/containers/move_only_int.h b/rtc_base/containers/move_only_int.h new file mode 100644 index 0000000000..8f745aa688 --- /dev/null +++ b/rtc_base/containers/move_only_int.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_MOVE_ONLY_INT_H_ +#define RTC_BASE_CONTAINERS_MOVE_ONLY_INT_H_ + +namespace webrtc { + +// A move-only class that holds an integer. This is designed for testing +// containers. See also CopyOnlyInt. +class MoveOnlyInt { + public: + explicit MoveOnlyInt(int data = 1) : data_(data) {} + MoveOnlyInt(const MoveOnlyInt& other) = delete; + MoveOnlyInt& operator=(const MoveOnlyInt& other) = delete; + MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; } + ~MoveOnlyInt() { data_ = 0; } + + MoveOnlyInt& operator=(MoveOnlyInt&& other) { + data_ = other.data_; + other.data_ = 0; + return *this; + } + + friend bool operator==(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return lhs.data_ == rhs.data_; + } + + friend bool operator!=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !operator==(lhs, rhs); + } + + friend bool operator<(const MoveOnlyInt& lhs, int rhs) { + return lhs.data_ < rhs; + } + + friend bool operator<(int lhs, const MoveOnlyInt& rhs) { + return lhs < rhs.data_; + } + + friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return lhs.data_ < rhs.data_; + } + + friend bool operator>(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !(lhs < rhs); + } + + int data() const { return data_; } + + private: + volatile int data_; +}; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_MOVE_ONLY_INT_H_ diff --git a/rtc_base/containers/not_fn.h b/rtc_base/containers/not_fn.h new file mode 100644 index 0000000000..39cfd2763c --- /dev/null +++ b/rtc_base/containers/not_fn.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. 
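The invoke.h and identity.h headers added above give the new flat containers constexpr-friendly stand-ins for std::invoke and std::identity. A minimal, illustrative-only sketch of how the seven INVOKE bullets map onto calls follows; the example namespace, Foo, Add, and Demo are invented for the demo and are not part of this patch.

#include <functional>

#include "rtc_base/containers/identity.h"
#include "rtc_base/containers/invoke.h"

namespace example {

struct Foo {
  int value = 7;
  int Twice() const { return 2 * value; }
};

inline int Add(int a, int b) { return a + b; }

inline void Demo() {
  Foo foo;
  int sum = webrtc::invoke(Add, 2, 3);                 // Bullet 7: plain callable.
  int doubled = webrtc::invoke(&Foo::Twice, foo);      // Bullet 1: member function, object reference.
  int via_ptr = webrtc::invoke(&Foo::Twice, &foo);     // Bullet 3: member function, pointer-like object.
  int raw = webrtc::invoke(&Foo::value, foo);          // Bullet 4: member object, object reference.
  int wrapped = webrtc::invoke(&Foo::value, std::ref(foo));  // Bullet 5: via std::reference_wrapper.
  // identity forwards its argument unchanged; the flat containers use it as
  // the trivial key projection.
  int total = webrtc::identity()(sum + doubled + via_ptr + raw + wrapped);
  (void)total;
}

}  // namespace example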
+ +#ifndef RTC_BASE_CONTAINERS_NOT_FN_H_ +#define RTC_BASE_CONTAINERS_NOT_FN_H_ + +#include +#include + +#include "rtc_base/containers/invoke.h" + +namespace webrtc { + +namespace not_fn_internal { + +template +struct NotFnImpl { + F f; + + template + constexpr decltype(auto) operator()(Args&&... args) & noexcept { + return !webrtc::invoke(f, std::forward(args)...); + } + + template + constexpr decltype(auto) operator()(Args&&... args) const& noexcept { + return !webrtc::invoke(f, std::forward(args)...); + } + + template + constexpr decltype(auto) operator()(Args&&... args) && noexcept { + return !webrtc::invoke(std::move(f), std::forward(args)...); + } + + template + constexpr decltype(auto) operator()(Args&&... args) const&& noexcept { + return !webrtc::invoke(std::move(f), std::forward(args)...); + } +}; + +} // namespace not_fn_internal + +// Implementation of C++17's std::not_fn. +// +// Reference: +// - https://en.cppreference.com/w/cpp/utility/functional/not_fn +// - https://wg21.link/func.not.fn +template +constexpr not_fn_internal::NotFnImpl> not_fn(F&& f) { + return {std::forward(f)}; +} + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_NOT_FN_H_ diff --git a/rtc_base/containers/void_t.h b/rtc_base/containers/void_t.h new file mode 100644 index 0000000000..62c57d4bec --- /dev/null +++ b/rtc_base/containers/void_t.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_VOID_T_H_ +#define RTC_BASE_CONTAINERS_VOID_T_H_ + +namespace webrtc { +namespace void_t_internal { +// Implementation detail of webrtc::void_t below. +template +struct make_void { + using type = void; +}; + +} // namespace void_t_internal + +// webrtc::void_t is an implementation of std::void_t from C++17. +// +// We use |webrtc::void_t_internal::make_void| as a helper struct to avoid a +// C++14 defect: +// http://en.cppreference.com/w/cpp/types/void_t +// http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558 +template +using void_t = typename ::webrtc::void_t_internal::make_void::type; +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_VOID_T_H_ diff --git a/rtc_base/openssl_adapter.cc b/rtc_base/openssl_adapter.cc index e5c2c42761..c381f04899 100644 --- a/rtc_base/openssl_adapter.cc +++ b/rtc_base/openssl_adapter.cc @@ -289,8 +289,8 @@ int OpenSSLAdapter::BeginSSL() { RTC_LOG(LS_INFO) << "OpenSSLAdapter::BeginSSL: " << ssl_host_name_; RTC_DCHECK(state_ == SSL_CONNECTING); - int err = 0; - BIO* bio = nullptr; + // Cleanup action to deal with on error cleanup a bit cleaner. + EarlyExitCatcher early_exit_catcher(*this); // First set up the context. 
We should either have a factory, with its own // pre-existing context, or be running standalone, in which case we will @@ -301,26 +301,22 @@ int OpenSSLAdapter::BeginSSL() { } if (!ssl_ctx_) { - err = -1; - goto ssl_error; + return -1; } if (identity_ && !identity_->ConfigureIdentity(ssl_ctx_)) { - SSL_CTX_free(ssl_ctx_); - err = -1; - goto ssl_error; + return -1; } - bio = BIO_new_socket(socket_); + std::unique_ptr bio{BIO_new_socket(socket_), + ::BIO_free}; if (!bio) { - err = -1; - goto ssl_error; + return -1; } ssl_ = SSL_new(ssl_ctx_); if (!ssl_) { - err = -1; - goto ssl_error; + return -1; } SSL_set_app_data(ssl_, this); @@ -346,8 +342,7 @@ int OpenSSLAdapter::BeginSSL() { if (cached) { if (SSL_set_session(ssl_, cached) == 0) { RTC_LOG(LS_WARNING) << "Failed to apply SSL session from cache"; - err = -1; - goto ssl_error; + return -1; } RTC_LOG(LS_INFO) << "Attempting to resume SSL session to " @@ -377,24 +372,16 @@ int OpenSSLAdapter::BeginSSL() { // Now that the initial config is done, transfer ownership of |bio| to the // SSL object. If ContinueSSL() fails, the bio will be freed in Cleanup(). - SSL_set_bio(ssl_, bio, bio); - bio = nullptr; + SSL_set_bio(ssl_, bio.get(), bio.get()); + bio.release(); // Do the connect. - err = ContinueSSL(); + int err = ContinueSSL(); if (err != 0) { - goto ssl_error; - } - - return err; - -ssl_error: - Cleanup(); - if (bio) { - BIO_free(bio); + return err; } - - return err; + early_exit_catcher.disable(); + return 0; } int OpenSSLAdapter::ContinueSSL() { @@ -981,6 +968,9 @@ SSL_CTX* OpenSSLAdapter::CreateContext(SSLMode mode, bool enable_cache) { SSL_CTX_set_custom_verify(ctx, SSL_VERIFY_PEER, SSLVerifyCallback); #else SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, SSLVerifyCallback); + // Verify certificate chains up to a depth of 4. This is not + // needed for DTLS-SRTP which uses self-signed certificates + // (so the depth is 0) but is required to support TURN/TLS. SSL_CTX_set_verify_depth(ctx, 4); #endif // Use defaults, but disable HMAC-SHA256 and HMAC-SHA384 ciphers @@ -1057,4 +1047,17 @@ OpenSSLAdapter* OpenSSLAdapterFactory::CreateAdapter(AsyncSocket* socket) { ssl_cert_verifier_); } +OpenSSLAdapter::EarlyExitCatcher::EarlyExitCatcher(OpenSSLAdapter& adapter_ptr) + : adapter_ptr_(adapter_ptr) {} + +void OpenSSLAdapter::EarlyExitCatcher::disable() { + disabled_ = true; +} + +OpenSSLAdapter::EarlyExitCatcher::~EarlyExitCatcher() { + if (!disabled_) { + adapter_ptr_.Cleanup(); + } +} + } // namespace rtc diff --git a/rtc_base/openssl_adapter.h b/rtc_base/openssl_adapter.h index 76b003a7dd..9b2a36e00f 100644 --- a/rtc_base/openssl_adapter.h +++ b/rtc_base/openssl_adapter.h @@ -89,6 +89,16 @@ class OpenSSLAdapter final : public SSLAdapter, void OnCloseEvent(AsyncSocket* socket, int err) override; private: + class EarlyExitCatcher { + public: + EarlyExitCatcher(OpenSSLAdapter& adapter_ptr); + void disable(); + ~EarlyExitCatcher(); + + private: + bool disabled_ = false; + OpenSSLAdapter& adapter_ptr_; + }; enum SSLState { SSL_NONE, SSL_WAIT, @@ -202,6 +212,10 @@ class OpenSSLAdapterFactory : public SSLAdapterFactory { friend class OpenSSLAdapter; }; +// The EarlyExitCatcher is responsible for calling OpenSSLAdapter::Cleanup on +// destruction. By doing this we have scoped cleanup which can be disabled if +// there were no errors, aka early exits. 
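The EarlyExitCatcher described in the comment above is a small scope guard: every early "return -1" in BeginSSL() now triggers Cleanup() automatically, and only the success path opts out via disable(). The stand-alone sketch below shows the idiom with invented names (Resource, ScopedCleanup, Connect); it is not code from this patch.

#include <cstdio>

// Generic stand-in for a resource that needs Cleanup() on failure paths.
class Resource {
 public:
  void Cleanup() { std::puts("cleaned up"); }
};

// Runs the cleanup routine on destruction unless disable() was called,
// mirroring OpenSSLAdapter::EarlyExitCatcher.
class ScopedCleanup {
 public:
  explicit ScopedCleanup(Resource& r) : resource_(r) {}
  void disable() { disabled_ = true; }
  ~ScopedCleanup() {
    if (!disabled_)
      resource_.Cleanup();
  }

 private:
  bool disabled_ = false;
  Resource& resource_;
};

int Connect(Resource& r, bool step1_ok, bool step2_ok) {
  ScopedCleanup catcher(r);
  if (!step1_ok)
    return -1;  // ~ScopedCleanup runs Cleanup(), replacing the old goto label.
  if (!step2_ok)
    return -1;  // Same here.
  catcher.disable();  // Success: keep the state, skip cleanup.
  return 0;
}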
+ std::string TransformAlpnProtocols(const std::vector& protos); } // namespace rtc diff --git a/rtc_base/openssl_certificate.cc b/rtc_base/openssl_certificate.cc index bd9bb04fd4..802787dcfb 100644 --- a/rtc_base/openssl_certificate.cc +++ b/rtc_base/openssl_certificate.cc @@ -59,27 +59,30 @@ static X509* MakeCertificate(EVP_PKEY* pkey, const SSLIdentityParams& params) { RTC_LOG(LS_INFO) << "Making certificate for " << params.common_name; ASN1_INTEGER* asn1_serial_number = nullptr; - BIGNUM* serial_number = nullptr; - X509* x509 = nullptr; - X509_NAME* name = nullptr; + std::unique_ptr serial_number{nullptr, + ::BN_free}; + std::unique_ptr x509{nullptr, ::X509_free}; + std::unique_ptr name{ + nullptr, ::X509_NAME_free}; time_t epoch_off = 0; // Time offset since epoch. - - if ((x509 = X509_new()) == nullptr) { - goto error; + x509.reset(X509_new()); + if (x509 == nullptr) { + return nullptr; } - if (!X509_set_pubkey(x509, pkey)) { - goto error; + if (!X509_set_pubkey(x509.get(), pkey)) { + return nullptr; } // serial number - temporary reference to serial number inside x509 struct - if ((serial_number = BN_new()) == nullptr || - !BN_pseudo_rand(serial_number, SERIAL_RAND_BITS, 0, 0) || - (asn1_serial_number = X509_get_serialNumber(x509)) == nullptr || - !BN_to_ASN1_INTEGER(serial_number, asn1_serial_number)) { - goto error; + serial_number.reset(BN_new()); + if (serial_number == nullptr || + !BN_pseudo_rand(serial_number.get(), SERIAL_RAND_BITS, 0, 0) || + (asn1_serial_number = X509_get_serialNumber(x509.get())) == nullptr || + !BN_to_ASN1_INTEGER(serial_number.get(), asn1_serial_number)) { + return nullptr; } // Set version to X509.V3 - if (!X509_set_version(x509, 2L)) { - goto error; + if (!X509_set_version(x509.get(), 2L)) { + return nullptr; } // There are a lot of possible components for the name entries. In @@ -89,31 +92,27 @@ static X509* MakeCertificate(EVP_PKEY* pkey, const SSLIdentityParams& params) { // arbitrary common_name. Note that this certificate goes out in // clear during SSL negotiation, so there may be a privacy issue in // putting anything recognizable here. 
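The MakeCertificate() rewrite that follows leans on std::unique_ptr with OpenSSL free functions as custom deleters, which is what lets the goto-based error ladder become plain early returns. A reduced, illustrative-only sketch of that pattern is below; MakeEmptyCertificateOrNull and the pointer aliases are invented for the example.

#include <memory>

#include <openssl/bn.h>
#include <openssl/x509.h>

// The deleter is part of the unique_ptr type, so failures free everything
// automatically and the success path hands ownership back with release().
using BignumPtr = std::unique_ptr<BIGNUM, decltype(&::BN_free)>;
using X509Ptr = std::unique_ptr<X509, decltype(&::X509_free)>;

X509* MakeEmptyCertificateOrNull() {
  X509Ptr x509{X509_new(), ::X509_free};
  if (!x509)
    return nullptr;  // Nothing allocated yet, nothing to free.
  BignumPtr serial{BN_new(), ::BN_free};
  if (!serial)
    return nullptr;  // x509 is freed automatically here.
  return x509.release();  // Success: transfer ownership to the caller.
}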
- if ((name = X509_NAME_new()) == nullptr || - !X509_NAME_add_entry_by_NID(name, NID_commonName, MBSTRING_UTF8, + name.reset(X509_NAME_new()); + if (name == nullptr || + !X509_NAME_add_entry_by_NID(name.get(), NID_commonName, MBSTRING_UTF8, (unsigned char*)params.common_name.c_str(), -1, -1, 0) || - !X509_set_subject_name(x509, name) || !X509_set_issuer_name(x509, name)) { - goto error; + !X509_set_subject_name(x509.get(), name.get()) || + !X509_set_issuer_name(x509.get(), name.get())) { + return nullptr; } - if (!X509_time_adj(X509_get_notBefore(x509), params.not_before, &epoch_off) || - !X509_time_adj(X509_get_notAfter(x509), params.not_after, &epoch_off)) { - goto error; + if (!X509_time_adj(X509_get_notBefore(x509.get()), params.not_before, + &epoch_off) || + !X509_time_adj(X509_get_notAfter(x509.get()), params.not_after, + &epoch_off)) { + return nullptr; } - if (!X509_sign(x509, pkey, EVP_sha256())) { - goto error; + if (!X509_sign(x509.get(), pkey, EVP_sha256())) { + return nullptr; } - BN_free(serial_number); - X509_NAME_free(name); RTC_LOG(LS_INFO) << "Returning certificate"; - return x509; - -error: - BN_free(serial_number); - X509_NAME_free(name); - X509_free(x509); - return nullptr; + return x509.release(); } } // namespace diff --git a/rtc_base/openssl_stream_adapter.cc b/rtc_base/openssl_stream_adapter.cc index ab2289b1f3..aa0bc3d40c 100644 --- a/rtc_base/openssl_stream_adapter.cc +++ b/rtc_base/openssl_stream_adapter.cc @@ -834,7 +834,12 @@ void OpenSSLStreamAdapter::SetTimeout(int delay_ms) { if (flag->alive()) { RTC_DLOG(LS_INFO) << "DTLS timeout expired"; timeout_task_.Stop(); - DTLSv1_handle_timeout(ssl_); + int res = DTLSv1_handle_timeout(ssl_); + if (res > 0) { + RTC_LOG(LS_INFO) << "DTLS retransmission"; + } else if (res < 0) { + RTC_LOG(LS_INFO) << "DTLSv1_handle_timeout() return -1"; + } ContinueSSL(); } else { RTC_NOTREACHED(); diff --git a/rtc_base/platform_thread_unittest.cc b/rtc_base/platform_thread_unittest.cc index 0da822cf85..b60d2131b7 100644 --- a/rtc_base/platform_thread_unittest.cc +++ b/rtc_base/platform_thread_unittest.cc @@ -29,10 +29,12 @@ TEST(PlatformThreadTest, StartFinalize) { EXPECT_FALSE(thread.empty()); thread.Finalize(); EXPECT_TRUE(thread.empty()); - thread = PlatformThread::SpawnDetached([] {}, "2"); + rtc::Event done; + thread = PlatformThread::SpawnDetached([&] { done.Set(); }, "2"); EXPECT_FALSE(thread.empty()); thread.Finalize(); EXPECT_TRUE(thread.empty()); + done.Wait(30000); } TEST(PlatformThreadTest, MovesEmpty) { @@ -47,10 +49,12 @@ TEST(PlatformThreadTest, MovesHandles) { PlatformThread thread2 = std::move(thread1); EXPECT_TRUE(thread1.empty()); EXPECT_FALSE(thread2.empty()); - thread1 = PlatformThread::SpawnDetached([] {}, "2"); + rtc::Event done; + thread1 = PlatformThread::SpawnDetached([&] { done.Set(); }, "2"); thread2 = std::move(thread1); EXPECT_TRUE(thread1.empty()); EXPECT_FALSE(thread2.empty()); + done.Wait(30000); } TEST(PlatformThreadTest, diff --git a/rtc_base/random.cc b/rtc_base/random.cc index 5deb621727..5206b817f3 100644 --- a/rtc_base/random.cc +++ b/rtc_base/random.cc @@ -49,14 +49,14 @@ int32_t Random::Rand(int32_t low, int32_t high) { template <> float Random::Rand() { double result = NextOutput() - 1; - result = result / 0xFFFFFFFFFFFFFFFEull; + result = result / static_cast(0xFFFFFFFFFFFFFFFFull); return static_cast(result); } template <> double Random::Rand() { double result = NextOutput() - 1; - result = result / 0xFFFFFFFFFFFFFFFEull; + result = result / static_cast(0xFFFFFFFFFFFFFFFFull); return result; 
} @@ -72,8 +72,10 @@ double Random::Gaussian(double mean, double standard_deviation) { // in the range [1, 2^64-1]. Normally this behavior is a bit frustrating, // but here it is exactly what we need. const double kPi = 3.14159265358979323846; - double u1 = static_cast(NextOutput()) / 0xFFFFFFFFFFFFFFFFull; - double u2 = static_cast(NextOutput()) / 0xFFFFFFFFFFFFFFFFull; + double u1 = static_cast(NextOutput()) / + static_cast(0xFFFFFFFFFFFFFFFFull); + double u2 = static_cast(NextOutput()) / + static_cast(0xFFFFFFFFFFFFFFFFull); return mean + standard_deviation * sqrt(-2 * log(u1)) * cos(2 * kPi * u2); } diff --git a/rtc_base/rtc_certificate.cc b/rtc_base/rtc_certificate.cc index 937defc6c2..496b4ac4b4 100644 --- a/rtc_base/rtc_certificate.cc +++ b/rtc_base/rtc_certificate.cc @@ -46,11 +46,6 @@ const SSLCertificate& RTCCertificate::GetSSLCertificate() const { return identity_->certificate(); } -// Deprecated: TODO(benwright) - Remove once chromium is updated. -const SSLCertificate& RTCCertificate::ssl_certificate() const { - return identity_->certificate(); -} - const SSLCertChain& RTCCertificate::GetSSLCertificateChain() const { return identity_->cert_chain(); } diff --git a/rtc_base/rtc_certificate.h b/rtc_base/rtc_certificate.h index ce9aa47512..fa026ec331 100644 --- a/rtc_base/rtc_certificate.h +++ b/rtc_base/rtc_certificate.h @@ -16,6 +16,7 @@ #include #include +#include "absl/base/attributes.h" #include "api/ref_counted_base.h" #include "api/scoped_refptr.h" #include "rtc_base/system/rtc_export.h" @@ -65,9 +66,6 @@ class RTC_EXPORT RTCCertificate final const SSLCertificate& GetSSLCertificate() const; const SSLCertChain& GetSSLCertificateChain() const; - // Deprecated: TODO(benwright) - Remove once chromium is updated. - const SSLCertificate& ssl_certificate() const; - // TODO(hbos): If possible, remove once RTCCertificate and its // GetSSLCertificate() is used in all relevant places. Should not pass around // raw SSLIdentity* for the sake of accessing SSLIdentity::certificate(). diff --git a/rtc_base/string_utils.h b/rtc_base/string_utils.h index 23c55cb893..d844e5e125 100644 --- a/rtc_base/string_utils.h +++ b/rtc_base/string_utils.h @@ -88,6 +88,43 @@ std::string string_trim(const std::string& s); // TODO(jonasolsson): replace with absl::Hex when that becomes available. std::string ToHex(const int i); +// CompileTimeString comprises of a string-like object which can be used as a +// regular const char* in compile time and supports concatenation. Useful for +// concatenating constexpr strings in for example macro declarations. +namespace rtc_base_string_utils_internal { +template +struct CompileTimeString { + char string[NPlus1] = {0}; + constexpr CompileTimeString() = default; + template + explicit constexpr CompileTimeString(const char (&chars)[MPlus1]) { + char* chars_pointer = string; + for (auto c : chars) + *chars_pointer++ = c; + } + template + constexpr auto Concat(CompileTimeString b) { + CompileTimeString result; + char* chars_pointer = result.string; + for (auto c : string) + *chars_pointer++ = c; + chars_pointer = result.string + NPlus1 - 1; + for (auto c : b.string) + *chars_pointer++ = c; + result.string[NPlus1 + MPlus1 - 2] = 0; + return result; + } + constexpr operator const char*() { return string; } +}; +} // namespace rtc_base_string_utils_internal + +// Makes a constexpr CompileTimeString without having to specify X +// explicitly. 
+template +constexpr auto MakeCompileTimeString(const char (&a)[N]) { + return rtc_base_string_utils_internal::CompileTimeString(a); +} + } // namespace rtc #endif // RTC_BASE_STRING_UTILS_H_ diff --git a/rtc_base/string_utils_unittest.cc b/rtc_base/string_utils_unittest.cc index 2fa1f220ac..120f7e60f5 100644 --- a/rtc_base/string_utils_unittest.cc +++ b/rtc_base/string_utils_unittest.cc @@ -39,4 +39,29 @@ TEST(string_toutf, Empty) { #endif // WEBRTC_WIN +TEST(CompileTimeString, MakeActsLikeAString) { + EXPECT_STREQ(MakeCompileTimeString("abc123"), "abc123"); +} + +TEST(CompileTimeString, ConvertibleToStdString) { + EXPECT_EQ(std::string(MakeCompileTimeString("abab")), "abab"); +} + +namespace detail { +constexpr bool StringEquals(const char* a, const char* b) { + while (*a && *a == *b) + a++, b++; + return *a == *b; +} +} // namespace detail + +static_assert(detail::StringEquals(MakeCompileTimeString("handellm"), + "handellm"), + "String should initialize."); + +static_assert(detail::StringEquals(MakeCompileTimeString("abc123").Concat( + MakeCompileTimeString("def456ghi")), + "abc123def456ghi"), + "Strings should concatenate."); + } // namespace rtc diff --git a/rtc_base/strings/json.cc b/rtc_base/strings/json.cc index 8a544a0c0d..99664404cf 100644 --- a/rtc_base/strings/json.cc +++ b/rtc_base/strings/json.cc @@ -286,9 +286,9 @@ bool GetDoubleFromJsonObject(const Json::Value& in, } std::string JsonValueToString(const Json::Value& json) { - Json::FastWriter w; - std::string value = w.write(json); - return value.substr(0, value.size() - 1); // trim trailing newline + Json::StreamWriterBuilder builder; + std::string output = Json::writeString(builder, json); + return output.substr(0, output.size() - 1); // trim trailing newline } } // namespace rtc diff --git a/rtc_base/synchronization/sequence_checker_internal.cc b/rtc_base/synchronization/sequence_checker_internal.cc index 7b66d8020a..63badd9538 100644 --- a/rtc_base/synchronization/sequence_checker_internal.cc +++ b/rtc_base/synchronization/sequence_checker_internal.cc @@ -107,8 +107,9 @@ std::string SequenceCheckerImpl::ExpectationToString() const { std::string ExpectationToString(const SequenceCheckerImpl* checker) { #if RTC_DCHECK_IS_ON return checker->ExpectationToString(); -#endif +#else return std::string(); +#endif } } // namespace webrtc_sequence_checker_internal diff --git a/rtc_base/system/unused.h b/rtc_base/system/unused.h index 084c526626..a5732a7e84 100644 --- a/rtc_base/system/unused.h +++ b/rtc_base/system/unused.h @@ -13,7 +13,7 @@ // Prevent the compiler from warning about an unused variable. For example: // int result = DoSomething(); -// assert(result == 17); +// RTC_DCHECK(result == 17); // RTC_UNUSED(result); // Note: In most cases it is better to remove the unused variable rather than // suppressing the compiler warning. diff --git a/rtc_base/task_queue_stdlib.cc b/rtc_base/task_queue_stdlib.cc index 548f7ef69a..41da285ee7 100644 --- a/rtc_base/task_queue_stdlib.cc +++ b/rtc_base/task_queue_stdlib.cc @@ -82,16 +82,9 @@ class TaskQueueStdlib final : public TaskQueueBase { // Indicates if the thread has started. rtc::Event started_; - // Indicates if the thread has stopped. - rtc::Event stopped_; - // Signaled whenever a new task is pending. rtc::Event flag_notify_; - // Contains the active worker thread assigned to processing - // tasks (including delayed tasks). - rtc::PlatformThread thread_; - Mutex pending_lock_; // Indicates if the worker thread needs to shutdown now. 
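The next hunk moves rtc::PlatformThread thread_ to the end of TaskQueueStdlib so that the worker thread is constructed only after every member it touches. The sketch below shows the underlying rule using std::thread and an invented Worker class; it is not WebRTC code.

#include <thread>
#include <vector>

// Members are constructed in declaration order. Because thread_ is declared
// last, data_ is fully constructed before the thread starts and can touch it;
// if the order were reversed, the lambda could race with data_'s constructor.
class Worker {
 public:
  Worker() : thread_([this] { data_.push_back(1); }) {}
  ~Worker() { thread_.join(); }  // Join in the destructor body, before any
                                 // member is destroyed.

 private:
  std::vector<int> data_;  // Constructed before thread_ starts.
  std::thread thread_;     // Deliberately declared last.
};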
@@ -114,12 +107,17 @@ class TaskQueueStdlib final : public TaskQueueBase { // std::unique_ptr out of the queue without the presence of a hack. std::map> delayed_queue_ RTC_GUARDED_BY(pending_lock_); + + // Contains the active worker thread assigned to processing + // tasks (including delayed tasks). + // Placing this last ensures the thread doesn't touch uninitialized attributes + // throughout it's lifetime. + rtc::PlatformThread thread_; }; TaskQueueStdlib::TaskQueueStdlib(absl::string_view queue_name, rtc::ThreadPriority priority) : started_(/*manual_reset=*/false, /*initially_signaled=*/false), - stopped_(/*manual_reset=*/false, /*initially_signaled=*/false), flag_notify_(/*manual_reset=*/false, /*initially_signaled=*/false), thread_(rtc::PlatformThread::SpawnJoinable( [this] { @@ -141,8 +139,6 @@ void TaskQueueStdlib::Delete() { NotifyWake(); - stopped_.Wait(rtc::Event::kForever); - thread_.Finalize(); delete this; } @@ -243,8 +239,6 @@ void TaskQueueStdlib::ProcessTasks() { else flag_notify_.Wait(task.sleep_time_ms_); } - - stopped_.Set(); } void TaskQueueStdlib::NotifyWake() { diff --git a/rtc_base/task_utils/BUILD.gn b/rtc_base/task_utils/BUILD.gn index ca9a14a324..64a041908e 100644 --- a/rtc_base/task_utils/BUILD.gn +++ b/rtc_base/task_utils/BUILD.gn @@ -14,6 +14,7 @@ rtc_library("repeating_task") { "repeating_task.h", ] deps = [ + ":pending_task_safety_flag", ":to_queued_task", "..:logging", "..:timeutils", diff --git a/rtc_base/task_utils/repeating_task.cc b/rtc_base/task_utils/repeating_task.cc index 574e6331f1..9636680cb4 100644 --- a/rtc_base/task_utils/repeating_task.cc +++ b/rtc_base/task_utils/repeating_task.cc @@ -12,32 +12,36 @@ #include "absl/memory/memory.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/time_utils.h" namespace webrtc { namespace webrtc_repeating_task_impl { -RepeatingTaskBase::RepeatingTaskBase(TaskQueueBase* task_queue, - TimeDelta first_delay, - Clock* clock) +RepeatingTaskBase::RepeatingTaskBase( + TaskQueueBase* task_queue, + TimeDelta first_delay, + Clock* clock, + rtc::scoped_refptr alive_flag) : task_queue_(task_queue), clock_(clock), - next_run_time_(clock_->CurrentTime() + first_delay) {} + next_run_time_(clock_->CurrentTime() + first_delay), + alive_flag_(std::move(alive_flag)) {} RepeatingTaskBase::~RepeatingTaskBase() = default; bool RepeatingTaskBase::Run() { RTC_DCHECK_RUN_ON(task_queue_); // Return true to tell the TaskQueue to destruct this object. - if (next_run_time_.IsPlusInfinity()) + if (!alive_flag_->alive()) return true; TimeDelta delay = RunClosure(); // The closure might have stopped this task, in which case we return true to // destruct this object. 
- if (next_run_time_.IsPlusInfinity()) + if (!alive_flag_->alive()) return true; RTC_DCHECK(delay.IsFinite()); @@ -53,33 +57,11 @@ bool RepeatingTaskBase::Run() { return false; } -void RepeatingTaskBase::Stop() { - RTC_DCHECK_RUN_ON(task_queue_); - RTC_DCHECK(next_run_time_.IsFinite()); - next_run_time_ = Timestamp::PlusInfinity(); -} - } // namespace webrtc_repeating_task_impl -RepeatingTaskHandle::RepeatingTaskHandle(RepeatingTaskHandle&& other) - : repeating_task_(other.repeating_task_) { - other.repeating_task_ = nullptr; -} - -RepeatingTaskHandle& RepeatingTaskHandle::operator=( - RepeatingTaskHandle&& other) { - repeating_task_ = other.repeating_task_; - other.repeating_task_ = nullptr; - return *this; -} - -RepeatingTaskHandle::RepeatingTaskHandle( - webrtc_repeating_task_impl::RepeatingTaskBase* repeating_task) - : repeating_task_(repeating_task) {} - void RepeatingTaskHandle::Stop() { if (repeating_task_) { - repeating_task_->Stop(); + repeating_task_->SetNotAlive(); repeating_task_ = nullptr; } } diff --git a/rtc_base/task_utils/repeating_task.h b/rtc_base/task_utils/repeating_task.h index 487b7d19d4..d5066fdb5c 100644 --- a/rtc_base/task_utils/repeating_task.h +++ b/rtc_base/task_utils/repeating_task.h @@ -19,22 +19,19 @@ #include "api/task_queue/task_queue_base.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "system_wrappers/include/clock.h" namespace webrtc { - -class RepeatingTaskHandle; - namespace webrtc_repeating_task_impl { class RepeatingTaskBase : public QueuedTask { public: RepeatingTaskBase(TaskQueueBase* task_queue, TimeDelta first_delay, - Clock* clock); + Clock* clock, + rtc::scoped_refptr alive_flag); ~RepeatingTaskBase() override; - void Stop(); - private: virtual TimeDelta RunClosure() = 0; @@ -42,9 +39,10 @@ class RepeatingTaskBase : public QueuedTask { TaskQueueBase* const task_queue_; Clock* const clock_; - // This is always finite, except for the special case where it's PlusInfinity - // to signal that the task should stop. + // This is always finite. Timestamp next_run_time_ RTC_GUARDED_BY(task_queue_); + rtc::scoped_refptr alive_flag_ + RTC_GUARDED_BY(task_queue_); }; // The template closure pattern is based on rtc::ClosureTask. 
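With RepeatingTaskBase now holding a PendingTaskSafetyFlag (wired up in the hunks that follow), RepeatingTaskHandle no longer stores a raw pointer into the task queue, so Stop() stays valid even after the queue has deleted the task object. A hypothetical call site is sketched below; StartStatsPoll and StopStatsPoll are invented names, and Stop() must still run on the task queue that owns the task.

#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"
#include "rtc_base/task_utils/repeating_task.h"

// Start a task that repeats every 100 ms, beginning after a 100 ms delay.
webrtc::RepeatingTaskHandle StartStatsPoll(webrtc::TaskQueueBase* queue) {
  return webrtc::RepeatingTaskHandle::DelayedStart(
      queue, webrtc::TimeDelta::Millis(100), [] {
        // ... poll something ...
        return webrtc::TimeDelta::Millis(100);  // Delay until the next run.
      });
}

// Must be called on `queue`. Marks the safety flag as not alive; the next
// Run() then returns true and the queue destroys the task.
void StopStatsPoll(webrtc::RepeatingTaskHandle& handle) {
  handle.Stop();
}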
@@ -54,8 +52,12 @@ class RepeatingTaskImpl final : public RepeatingTaskBase { RepeatingTaskImpl(TaskQueueBase* task_queue, TimeDelta first_delay, Closure&& closure, - Clock* clock) - : RepeatingTaskBase(task_queue, first_delay, clock), + Clock* clock, + rtc::scoped_refptr alive_flag) + : RepeatingTaskBase(task_queue, + first_delay, + clock, + std::move(alive_flag)), closure_(std::forward(closure)) { static_assert( std::is_same static RepeatingTaskHandle Start(TaskQueueBase* task_queue, Closure&& closure, Clock* clock = Clock::GetRealTimeClock()) { - auto repeating_task = std::make_unique< - webrtc_repeating_task_impl::RepeatingTaskImpl>( - task_queue, TimeDelta::Zero(), std::forward(closure), clock); - auto* repeating_task_ptr = repeating_task.get(); - task_queue->PostTask(std::move(repeating_task)); - return RepeatingTaskHandle(repeating_task_ptr); + auto alive_flag = PendingTaskSafetyFlag::CreateDetached(); + task_queue->PostTask( + std::make_unique< + webrtc_repeating_task_impl::RepeatingTaskImpl>( + task_queue, TimeDelta::Zero(), std::forward(closure), + clock, alive_flag)); + return RepeatingTaskHandle(std::move(alive_flag)); } // DelayedStart is equivalent to Start except that the first invocation of the @@ -113,12 +114,14 @@ class RepeatingTaskHandle { TimeDelta first_delay, Closure&& closure, Clock* clock = Clock::GetRealTimeClock()) { - auto repeating_task = std::make_unique< - webrtc_repeating_task_impl::RepeatingTaskImpl>( - task_queue, first_delay, std::forward(closure), clock); - auto* repeating_task_ptr = repeating_task.get(); - task_queue->PostDelayedTask(std::move(repeating_task), first_delay.ms()); - return RepeatingTaskHandle(repeating_task_ptr); + auto alive_flag = PendingTaskSafetyFlag::CreateDetached(); + task_queue->PostDelayedTask( + std::make_unique< + webrtc_repeating_task_impl::RepeatingTaskImpl>( + task_queue, first_delay, std::forward(closure), clock, + alive_flag), + first_delay.ms()); + return RepeatingTaskHandle(std::move(alive_flag)); } // Stops future invocations of the repeating task closure. Can only be called @@ -127,15 +130,15 @@ class RepeatingTaskHandle { // closure itself. void Stop(); - // Returns true if Start() or DelayedStart() was called most recently. Returns - // false initially and if Stop() or PostStop() was called most recently. + // Returns true until Stop() was called. + // Can only be called from the TaskQueue where the task is running. bool Running() const; private: explicit RepeatingTaskHandle( - webrtc_repeating_task_impl::RepeatingTaskBase* repeating_task); - // Owned by the task queue. 
- webrtc_repeating_task_impl::RepeatingTaskBase* repeating_task_ = nullptr; + rtc::scoped_refptr alive_flag) + : repeating_task_(std::move(alive_flag)) {} + rtc::scoped_refptr repeating_task_; }; } // namespace webrtc diff --git a/rtc_base/task_utils/repeating_task_unittest.cc b/rtc_base/task_utils/repeating_task_unittest.cc index 2fb15d1e5a..b23284f988 100644 --- a/rtc_base/task_utils/repeating_task_unittest.cc +++ b/rtc_base/task_utils/repeating_task_unittest.cc @@ -276,4 +276,22 @@ TEST(RepeatingTaskTest, ClockIntegration) { handle.Stop(); } +TEST(RepeatingTaskTest, CanBeStoppedAfterTaskQueueDeletedTheRepeatingTask) { + std::unique_ptr repeating_task; + + MockTaskQueue task_queue; + EXPECT_CALL(task_queue, PostDelayedTask) + .WillOnce([&](std::unique_ptr task, uint32_t milliseconds) { + repeating_task = std::move(task); + }); + + RepeatingTaskHandle handle = + RepeatingTaskHandle::DelayedStart(&task_queue, TimeDelta::Millis(100), + [] { return TimeDelta::Millis(100); }); + + // shutdown task queue: delete all pending tasks and run 'regular' task. + repeating_task = nullptr; + handle.Stop(); +} + } // namespace webrtc diff --git a/rtc_base/third_party/base64/BUILD.gn b/rtc_base/third_party/base64/BUILD.gn index db03e0273d..969c7c0c64 100644 --- a/rtc_base/third_party/base64/BUILD.gn +++ b/rtc_base/third_party/base64/BUILD.gn @@ -14,5 +14,8 @@ rtc_library("base64") { "base64.cc", "base64.h", ] - deps = [ "../../system:rtc_export" ] + deps = [ + "../..:checks", + "../../system:rtc_export", + ] } diff --git a/rtc_base/third_party/base64/base64.cc b/rtc_base/third_party/base64/base64.cc index 53ff6b9d54..b9acf9a4c9 100644 --- a/rtc_base/third_party/base64/base64.cc +++ b/rtc_base/third_party/base64/base64.cc @@ -19,6 +19,8 @@ #include #include +#include "rtc_base/checks.h" + using std::vector; namespace rtc { @@ -95,7 +97,7 @@ bool Base64::IsBase64Encoded(const std::string& str) { void Base64::EncodeFromArray(const void* data, size_t len, std::string* result) { - assert(nullptr != result); + RTC_DCHECK(result); result->clear(); result->resize(((len + 2) / 3) * 4); const unsigned char* byte_data = static_cast(data); @@ -223,15 +225,15 @@ bool Base64::DecodeFromArrayTemplate(const char* data, DecodeFlags flags, T* result, size_t* data_used) { - assert(nullptr != result); - assert(flags <= (DO_PARSE_MASK | DO_PAD_MASK | DO_TERM_MASK)); + RTC_DCHECK(result); + RTC_DCHECK_LE(flags, (DO_PARSE_MASK | DO_PAD_MASK | DO_TERM_MASK)); const DecodeFlags parse_flags = flags & DO_PARSE_MASK; const DecodeFlags pad_flags = flags & DO_PAD_MASK; const DecodeFlags term_flags = flags & DO_TERM_MASK; - assert(0 != parse_flags); - assert(0 != pad_flags); - assert(0 != term_flags); + RTC_DCHECK_NE(0, parse_flags); + RTC_DCHECK_NE(0, pad_flags); + RTC_DCHECK_NE(0, term_flags); result->clear(); result->reserve(len); diff --git a/rtc_base/thread.cc b/rtc_base/thread.cc index 2a5d5eccdd..8ca9ce76a8 100644 --- a/rtc_base/thread.cc +++ b/rtc_base/thread.cc @@ -929,6 +929,7 @@ void Thread::Send(const Location& posted_from, msg.pdata = pdata; if (IsCurrent()) { #if RTC_DCHECK_IS_ON + RTC_DCHECK(this->IsInvokeToThreadAllowed(this)); RTC_DCHECK_RUN_ON(this); could_be_blocking_call_count_++; #endif diff --git a/rtc_base/virtual_socket_server.cc b/rtc_base/virtual_socket_server.cc index 8140fcb6aa..f5e993645e 100644 --- a/rtc_base/virtual_socket_server.cc +++ b/rtc_base/virtual_socket_server.cc @@ -19,7 +19,6 @@ #include "absl/algorithm/container.h" #include "rtc_base/checks.h" -#include 
"rtc_base/deprecated/recursive_critical_section.h" #include "rtc_base/fake_clock.h" #include "rtc_base/logging.h" #include "rtc_base/physical_socket_server.h" @@ -164,6 +163,8 @@ int VirtualSocket::Close() { } if (SOCK_STREAM == type_) { + webrtc::MutexLock lock(&mutex_); + // Cancel pending sockets if (listen_queue_) { while (!listen_queue_->empty()) { @@ -173,7 +174,6 @@ int VirtualSocket::Close() { server_->Disconnect(addr); listen_queue_->pop_front(); } - delete listen_queue_; listen_queue_ = nullptr; } // Disconnect stream sockets @@ -231,6 +231,8 @@ int VirtualSocket::RecvFrom(void* pv, if (timestamp) { *timestamp = -1; } + + webrtc::MutexLock lock(&mutex_); // If we don't have a packet, then either error or wait for one to arrive. if (recv_buffer_.empty()) { if (async_) { @@ -273,6 +275,7 @@ int VirtualSocket::RecvFrom(void* pv, } int VirtualSocket::Listen(int backlog) { + webrtc::MutexLock lock(&mutex_); RTC_DCHECK(SOCK_STREAM == type_); RTC_DCHECK(CS_CLOSED == state_); if (local_addr_.IsNil()) { @@ -280,12 +283,13 @@ int VirtualSocket::Listen(int backlog) { return -1; } RTC_DCHECK(nullptr == listen_queue_); - listen_queue_ = new ListenQueue; + listen_queue_ = std::make_unique(); state_ = CS_CONNECTING; return 0; } VirtualSocket* VirtualSocket::Accept(SocketAddress* paddr) { + webrtc::MutexLock lock(&mutex_); if (nullptr == listen_queue_) { error_ = EINVAL; return nullptr; @@ -304,7 +308,7 @@ VirtualSocket* VirtualSocket::Accept(SocketAddress* paddr) { delete socket; continue; } - socket->CompleteConnect(remote_addr, false); + socket->CompleteConnect(remote_addr); if (paddr) { *paddr = remote_addr; } @@ -341,47 +345,57 @@ int VirtualSocket::SetOption(Option opt, int value) { } void VirtualSocket::OnMessage(Message* pmsg) { - if (pmsg->message_id == MSG_ID_PACKET) { - RTC_DCHECK(nullptr != pmsg->pdata); - Packet* packet = static_cast(pmsg->pdata); - - recv_buffer_.push_back(packet); - - if (async_) { - SignalReadEvent(this); - } - } else if (pmsg->message_id == MSG_ID_CONNECT) { - RTC_DCHECK(nullptr != pmsg->pdata); - MessageAddress* data = static_cast(pmsg->pdata); - if (listen_queue_ != nullptr) { - listen_queue_->push_back(data->addr); - if (async_) { - SignalReadEvent(this); + bool signal_read_event = false; + bool signal_close_event = false; + bool signal_connect_event = false; + int error_to_signal = 0; + { + webrtc::MutexLock lock(&mutex_); + if (pmsg->message_id == MSG_ID_PACKET) { + RTC_DCHECK(nullptr != pmsg->pdata); + Packet* packet = static_cast(pmsg->pdata); + + recv_buffer_.push_back(packet); + signal_read_event = async_; + } else if (pmsg->message_id == MSG_ID_CONNECT) { + RTC_DCHECK(nullptr != pmsg->pdata); + MessageAddress* data = static_cast(pmsg->pdata); + if (listen_queue_ != nullptr) { + listen_queue_->push_back(data->addr); + signal_read_event = async_; + } else if ((SOCK_STREAM == type_) && (CS_CONNECTING == state_)) { + CompleteConnect(data->addr); + signal_connect_event = async_; + } else { + RTC_LOG(LS_VERBOSE) + << "Socket at " << local_addr_.ToString() << " is not listening"; + server_->Disconnect(data->addr); } - } else if ((SOCK_STREAM == type_) && (CS_CONNECTING == state_)) { - CompleteConnect(data->addr, true); - } else { - RTC_LOG(LS_VERBOSE) << "Socket at " << local_addr_.ToString() - << " is not listening"; - server_->Disconnect(data->addr); - } - delete data; - } else if (pmsg->message_id == MSG_ID_DISCONNECT) { - RTC_DCHECK(SOCK_STREAM == type_); - if (CS_CLOSED != state_) { - int error = (CS_CONNECTING == state_) ? 
ECONNREFUSED : 0; - state_ = CS_CLOSED; - remote_addr_.Clear(); - if (async_) { - SignalCloseEvent(this, error); + delete data; + } else if (pmsg->message_id == MSG_ID_DISCONNECT) { + RTC_DCHECK(SOCK_STREAM == type_); + if (CS_CLOSED != state_) { + error_to_signal = (CS_CONNECTING == state_) ? ECONNREFUSED : 0; + state_ = CS_CLOSED; + remote_addr_.Clear(); + signal_close_event = async_; } + } else if (pmsg->message_id == MSG_ID_SIGNALREADEVENT) { + signal_read_event = !recv_buffer_.empty(); + } else { + RTC_NOTREACHED(); } - } else if (pmsg->message_id == MSG_ID_SIGNALREADEVENT) { - if (!recv_buffer_.empty()) { - SignalReadEvent(this); - } - } else { - RTC_NOTREACHED(); + } + // Signal events without holding `mutex_`, to avoid recursive locking, as well + // as issues with sigslot and lock order. + if (signal_read_event) { + SignalReadEvent(this); + } + if (signal_close_event) { + SignalCloseEvent(this, error_to_signal); + } + if (signal_connect_event) { + SignalConnectEvent(this); } } @@ -416,14 +430,11 @@ int VirtualSocket::InitiateConnect(const SocketAddress& addr, bool use_delay) { return 0; } -void VirtualSocket::CompleteConnect(const SocketAddress& addr, bool notify) { +void VirtualSocket::CompleteConnect(const SocketAddress& addr) { RTC_DCHECK(CS_CONNECTING == state_); remote_addr_ = addr; state_ = CS_CONNECTED; server_->AddConnection(remote_addr_, local_addr_, this); - if (async_ && notify) { - SignalConnectEvent(this); - } } int VirtualSocket::SendUdp(const void* pv, @@ -475,7 +486,7 @@ void VirtualSocket::OnSocketServerReadyToSend() { } void VirtualSocket::SetToBlocked() { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); ready_to_send_ = false; error_ = EWOULDBLOCK; } @@ -525,7 +536,7 @@ int64_t VirtualSocket::UpdateOrderedDelivery(int64_t ts) { } size_t VirtualSocket::PurgeNetworkPackets(int64_t cur_time) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); while (!network_.empty() && (network_.front().done_time <= cur_time)) { RTC_DCHECK(network_size_ >= network_.front().size); diff --git a/rtc_base/virtual_socket_server.h b/rtc_base/virtual_socket_server.h index faf31f007a..6c58a4bdfe 100644 --- a/rtc_base/virtual_socket_server.h +++ b/rtc_base/virtual_socket_server.h @@ -17,11 +17,11 @@ #include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/deprecated/recursive_critical_section.h" #include "rtc_base/event.h" #include "rtc_base/fake_clock.h" #include "rtc_base/message_handler.h" #include "rtc_base/socket_server.h" +#include "rtc_base/synchronization/mutex.h" namespace rtc { @@ -394,22 +394,23 @@ class VirtualSocket : public AsyncSocket, typedef std::map OptionsMap; int InitiateConnect(const SocketAddress& addr, bool use_delay); - void CompleteConnect(const SocketAddress& addr, bool notify); + void CompleteConnect(const SocketAddress& addr); int SendUdp(const void* pv, size_t cb, const SocketAddress& addr); int SendTcp(const void* pv, size_t cb); void OnSocketServerReadyToSend(); - VirtualSocketServer* server_; - int type_; - bool async_; + VirtualSocketServer* const server_; + const int type_; + const bool async_; ConnState state_; int error_; SocketAddress local_addr_; SocketAddress remote_addr_; // Pending sockets which can be Accepted - ListenQueue* listen_queue_; + std::unique_ptr listen_queue_ RTC_GUARDED_BY(mutex_) + RTC_PT_GUARDED_BY(mutex_); // Data which tcp has buffered for sending SendBuffer send_buffer_; @@ -417,8 +418,8 @@ class VirtualSocket : public AsyncSocket, // Set back to true when the socket can send 
again. bool ready_to_send_ = true; - // Critical section to protect the recv_buffer and queue_ - RecursiveCriticalSection crit_; + // Mutex to protect the recv_buffer and listen_queue_ + webrtc::Mutex mutex_; // Network model that enforces bandwidth and capacity constraints NetworkQueue network_; @@ -428,7 +429,7 @@ class VirtualSocket : public AsyncSocket, int64_t last_delivery_time_ = 0; // Data which has been received from the network - RecvBuffer recv_buffer_; + RecvBuffer recv_buffer_ RTC_GUARDED_BY(mutex_); // The amount of data which is in flight or in recv_buffer_ size_t recv_buffer_size_; diff --git a/rtc_tools/BUILD.gn b/rtc_tools/BUILD.gn index fb8bbe9718..b841228a8e 100644 --- a/rtc_tools/BUILD.gn +++ b/rtc_tools/BUILD.gn @@ -243,6 +243,7 @@ if (!is_component_build) { "../call:call_interfaces", "../common_video", "../media:rtc_internal_video_codecs", + "../modules/rtp_rtcp:rtp_rtcp_format", "../modules/video_coding:video_coding_utility", "../rtc_base:checks", "../rtc_base:rtc_json", @@ -397,6 +398,7 @@ if (!build_with_chromium) { "../rtc_base:rtc_base_approved", "../rtc_base:rtc_numerics", "../rtc_base:stringutils", + "../system_wrappers", "../test:explicit_key_value_config", ] absl_deps = [ diff --git a/rtc_tools/frame_analyzer/video_geometry_aligner.cc b/rtc_tools/frame_analyzer/video_geometry_aligner.cc index db397bc3a5..88da26d4d0 100644 --- a/rtc_tools/frame_analyzer/video_geometry_aligner.cc +++ b/rtc_tools/frame_analyzer/video_geometry_aligner.cc @@ -61,7 +61,7 @@ rtc::scoped_refptr CropAndZoom( adjusted_frame->MutableDataY(), adjusted_frame->StrideY(), adjusted_frame->MutableDataU(), adjusted_frame->StrideU(), adjusted_frame->MutableDataV(), adjusted_frame->StrideV(), - frame->width(), frame->height(), libyuv::kFilterBilinear); + frame->width(), frame->height(), libyuv::kFilterBox); return adjusted_frame; } diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc index 51cc3b9245..02184a64ea 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc +++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc @@ -307,10 +307,6 @@ std::unique_ptr CreateNetEqTestAndRun( input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt, cn_types, forbidden_types)); - NetEq::Config config; - config.max_packets_in_buffer = 200; - config.enable_fast_accelerate = true; - std::unique_ptr output(new test::VoidAudioSink()); rtc::scoped_refptr decoder_factory = @@ -330,6 +326,7 @@ std::unique_ptr CreateNetEqTestAndRun( callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer(); callbacks.get_audio_callback = neteq_stats_getter.get(); + NetEq::Config config; test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr, /*factory=*/nullptr, std::move(input), std::move(output), callbacks); diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.cc b/rtc_tools/rtc_event_log_visualizer/analyzer.cc index 7690d7d6ea..0f727f2815 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyzer.cc +++ b/rtc_tools/rtc_event_log_visualizer/analyzer.cc @@ -1267,7 +1267,7 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { const RtpPacketType& rtp_packet = *rtp_iterator->second; if (rtp_packet.rtp.header.extension.hasTransportSequenceNumber) { RtpPacketSendInfo packet_info; - packet_info.ssrc = rtp_packet.rtp.header.ssrc; + packet_info.media_ssrc = rtp_packet.rtp.header.ssrc; packet_info.transport_sequence_number = rtp_packet.rtp.header.extension.transportSequenceNumber; 
packet_info.rtp_sequence_number = rtp_packet.rtp.header.sequenceNumber; diff --git a/rtc_tools/rtc_event_log_visualizer/log_simulation.cc b/rtc_tools/rtc_event_log_visualizer/log_simulation.cc index 0e5b5d04a9..c0b418de4b 100644 --- a/rtc_tools/rtc_event_log_visualizer/log_simulation.cc +++ b/rtc_tools/rtc_event_log_visualizer/log_simulation.cc @@ -14,6 +14,7 @@ #include "logging/rtc_event_log/rtc_event_processor.h" #include "modules/rtp_rtcp/source/time_util.h" +#include "system_wrappers/include/clock.h" namespace webrtc { @@ -83,7 +84,7 @@ void LogBasedNetworkControllerSimulation::OnPacketSent( } RtpPacketSendInfo packet_info; - packet_info.ssrc = packet.ssrc; + packet_info.media_ssrc = packet.ssrc; packet_info.transport_sequence_number = packet.transport_seq_no; packet_info.rtp_sequence_number = packet.stream_seq_no; packet_info.length = packet.size; @@ -142,11 +143,13 @@ void LogBasedNetworkControllerSimulation::OnReceiverReport( HandleStateUpdate(controller_->OnTransportLossReport(msg)); } + Clock* clock = Clock::GetRealTimeClock(); TimeDelta rtt = TimeDelta::PlusInfinity(); for (auto& rb : report.rr.report_blocks()) { if (rb.last_sr()) { + Timestamp report_log_time = Timestamp::Micros(report.log_time_us()); uint32_t receive_time_ntp = - CompactNtp(TimeMicrosToNtp(report.log_time_us())); + CompactNtp(clock->ConvertTimestampToNtpTime(report_log_time)); uint32_t rtt_ntp = receive_time_ntp - rb.delay_since_last_sr() - rb.last_sr(); rtt = std::min(rtt, TimeDelta::Millis(CompactNtpRttToMs(rtt_ntp))); diff --git a/rtc_tools/rtp_generator/rtp_generator.cc b/rtc_tools/rtp_generator/rtp_generator.cc index 3180897ec5..c2fc1cff06 100644 --- a/rtc_tools/rtp_generator/rtp_generator.cc +++ b/rtc_tools/rtp_generator/rtp_generator.cc @@ -136,10 +136,15 @@ absl::optional ParseRtpGeneratorOptionsFromFile( } // Parse the file as JSON - Json::Reader json_reader; + Json::CharReaderBuilder builder; Json::Value json; - if (!json_reader.parse(raw_json_buffer.data(), json)) { - RTC_LOG(LS_ERROR) << "Unable to parse the corpus config json file"; + std::string error_message; + std::unique_ptr json_reader(builder.newCharReader()); + if (!json_reader->parse(raw_json_buffer.data(), + raw_json_buffer.data() + raw_json_buffer.size(), + &json, &error_message)) { + RTC_LOG(LS_ERROR) << "Unable to parse the corpus config json file. Error:" + << error_message; return absl::nullopt; } diff --git a/rtc_tools/video_replay.cc b/rtc_tools/video_replay.cc index 08f4c7accc..62981b6b1a 100644 --- a/rtc_tools/video_replay.cc +++ b/rtc_tools/video_replay.cc @@ -25,6 +25,7 @@ #include "call/call.h" #include "common_video/libyuv/include/webrtc_libyuv.h" #include "media/engine/internal_decoder_factory.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/video_coding/utility/ivf_file_writer.h" #include "rtc_base/checks.h" #include "rtc_base/string_to_number.h" @@ -39,7 +40,6 @@ #include "test/gtest.h" #include "test/null_transport.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" #include "test/run_loop.h" #include "test/run_test.h" #include "test/test_video_capturer.h" @@ -74,20 +74,15 @@ ABSL_FLAG(int, webrtc::test::CallTest::kRtxRedPayloadType, "RED over RTX payload type"); -// Flag for SSRC. 
-const std::string& DefaultSsrc() { - static const std::string ssrc = - std::to_string(webrtc::test::CallTest::kVideoSendSsrcs[0]); - return ssrc; -} -ABSL_FLAG(std::string, ssrc, DefaultSsrc().c_str(), "Incoming SSRC"); - -const std::string& DefaultSsrcRtx() { - static const std::string ssrc_rtx = - std::to_string(webrtc::test::CallTest::kSendRtxSsrcs[0]); - return ssrc_rtx; -} -ABSL_FLAG(std::string, ssrc_rtx, DefaultSsrcRtx().c_str(), "Incoming RTX SSRC"); +// Flag for SSRC and RTX SSRC. +ABSL_FLAG(uint32_t, + ssrc, + webrtc::test::CallTest::kVideoSendSsrcs[0], + "Incoming SSRC"); +ABSL_FLAG(uint32_t, + ssrc_rtx, + webrtc::test::CallTest::kSendRtxSsrcs[0], + "Incoming RTX SSRC"); // Flag for abs-send-time id. ABSL_FLAG(int, abs_send_time_id, -1, "RTP extension ID for abs-send-time"); @@ -137,10 +132,6 @@ static bool ValidatePayloadType(int32_t payload_type) { return payload_type > 0 && payload_type <= 127; } -static bool ValidateSsrc(const char* ssrc_string) { - return rtc::StringToNumber(ssrc_string).has_value(); -} - static bool ValidateOptionalPayloadType(int32_t payload_type) { return payload_type == -1 || ValidatePayloadType(payload_type); } @@ -174,11 +165,11 @@ static int RedPayloadTypeRtx() { } static uint32_t Ssrc() { - return rtc::StringToNumber(absl::GetFlag(FLAGS_ssrc)).value(); + return absl::GetFlag(FLAGS_ssrc); } static uint32_t SsrcRtx() { - return rtc::StringToNumber(absl::GetFlag(FLAGS_ssrc_rtx)).value(); + return absl::GetFlag(FLAGS_ssrc_rtx); } static int AbsSendTimeId() { @@ -289,7 +280,7 @@ class DecoderIvfFileWriter : public test::FakeDecoder { video_codec_type_ = VideoCodecType::kVideoCodecH264; } else { RTC_LOG(LS_ERROR) << "Unsupported video codec " << codec; - RTC_DCHECK(false); + RTC_NOTREACHED(); } } ~DecoderIvfFileWriter() override { file_writer_->Close(); } @@ -399,11 +390,14 @@ class RtpReplayer final { std::stringstream raw_json_buffer; raw_json_buffer << config_file.rdbuf(); std::string raw_json = raw_json_buffer.str(); - Json::Reader json_reader; + Json::CharReaderBuilder builder; Json::Value json_configs; - if (!json_reader.parse(raw_json, json_configs)) { + std::string error_message; + std::unique_ptr json_reader(builder.newCharReader()); + if (!json_reader->parse(raw_json.data(), raw_json.data() + raw_json.size(), + &json_configs, &error_message)) { fprintf(stderr, "Error parsing JSON config\n"); - fprintf(stderr, "%s\n", json_reader.getFormatedErrorMessages().c_str()); + fprintf(stderr, "%s\n", error_message.c_str()); return nullptr; } @@ -544,11 +538,11 @@ class RtpReplayer final { if (!rtp_reader->NextPacket(&packet)) { break; } - RTPHeader header; - std::unique_ptr parser(RtpHeaderParser::CreateForTest()); - parser->Parse(packet.data, packet.length, &header); - if (header.timestamp < start_timestamp || - header.timestamp > stop_timestamp) { + rtc::CopyOnWriteBuffer packet_buffer(packet.data, packet.length); + RtpPacket header; + header.Parse(packet_buffer); + if (header.Timestamp() < start_timestamp || + header.Timestamp() > stop_timestamp) { continue; } @@ -560,10 +554,9 @@ class RtpReplayer final { ++num_packets; PacketReceiver::DeliveryStatus result = PacketReceiver::DELIVERY_OK; worker_thread->PostTask(ToQueuedTask([&]() { - result = call->Receiver()->DeliverPacket( - webrtc::MediaType::VIDEO, - rtc::CopyOnWriteBuffer(packet.data, packet.length), - /* packet_time_us */ -1); + result = call->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, + std::move(packet_buffer), + /* packet_time_us */ -1); event.Set(); })); 
event.Wait(/*give_up_after_ms=*/10000); @@ -571,21 +564,17 @@ class RtpReplayer final { case PacketReceiver::DELIVERY_OK: break; case PacketReceiver::DELIVERY_UNKNOWN_SSRC: { - if (unknown_packets[header.ssrc] == 0) - fprintf(stderr, "Unknown SSRC: %u!\n", header.ssrc); - ++unknown_packets[header.ssrc]; + if (unknown_packets[header.Ssrc()] == 0) + fprintf(stderr, "Unknown SSRC: %u!\n", header.Ssrc()); + ++unknown_packets[header.Ssrc()]; break; } case PacketReceiver::DELIVERY_PACKET_ERROR: { fprintf(stderr, "Packet error, corrupt packets or incorrect setup?\n"); - RTPHeader header; - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - parser->Parse(packet.data, packet.length, &header); fprintf(stderr, "Packet len=%zu pt=%u seq=%u ts=%u ssrc=0x%8x\n", - packet.length, header.payloadType, header.sequenceNumber, - header.timestamp, header.ssrc); + packet.length, header.PayloadType(), header.SequenceNumber(), + header.Timestamp(), header.Ssrc()); break; } } @@ -617,8 +606,6 @@ int main(int argc, char* argv[]) { ValidateOptionalPayloadType(absl::GetFlag(FLAGS_red_payload_type_rtx))); RTC_CHECK( ValidateOptionalPayloadType(absl::GetFlag(FLAGS_ulpfec_payload_type))); - RTC_CHECK(ValidateSsrc(absl::GetFlag(FLAGS_ssrc).c_str())); - RTC_CHECK(ValidateSsrc(absl::GetFlag(FLAGS_ssrc_rtx).c_str())); RTC_CHECK( ValidateRtpHeaderExtensionId(absl::GetFlag(FLAGS_abs_send_time_id))); RTC_CHECK(ValidateRtpHeaderExtensionId( diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn index aafacb312b..1553e3a92a 100644 --- a/sdk/BUILD.gn +++ b/sdk/BUILD.gn @@ -390,6 +390,7 @@ if (is_ios || is_mac) { "../api/video:video_rtp_headers", "../common_video", "../media:rtc_media_base", + "../pc:rtc_pc_base", "../rtc_base", "../rtc_base:checks", "../rtc_base:threading", @@ -433,7 +434,7 @@ if (is_ios || is_mac) { ] } - rtc_library("video_objc") { + rtc_library("opengl_objc") { sources = [ "objc/components/renderer/opengl/RTCDefaultShader.h", "objc/components/renderer/opengl/RTCDefaultShader.mm", @@ -462,6 +463,10 @@ if (is_ios || is_mac) { ] } + # TODO(bugs.webrtc.org/12937): Remove OpenGL deprecation warning + # workaround. + defines = [ "GLES_SILENCE_DEPRECATION" ] + deps = [ ":base_objc", ":helpers_objc", @@ -485,9 +490,12 @@ if (is_ios || is_mac) { ] } - rtc_library("ui_objc") { + rtc_library("opengl_ui_objc") { visibility = [ "*" ] - allow_poison = [ "audio_codecs" ] # TODO(bugs.webrtc.org/8396): Remove. + allow_poison = [ + "audio_codecs", # TODO(bugs.webrtc.org/8396): Remove. + "default_task_queue", + ] if (is_ios) { sources = [ "objc/components/renderer/opengl/RTCDisplayLinkTimer.h", @@ -495,6 +503,10 @@ if (is_ios || is_mac) { "objc/components/renderer/opengl/RTCEAGLVideoView.h", "objc/components/renderer/opengl/RTCEAGLVideoView.m", ] + + # TODO(bugs.webrtc.org/12937): Remove OpenGL deprecation warning + # workaround. + defines = [ "GLES_SILENCE_DEPRECATION" ] } if (is_mac) { sources = [ @@ -506,61 +518,59 @@ if (is_ios || is_mac) { deps = [ ":base_objc", ":helpers_objc", - ":video_objc", + ":metal_objc", + ":opengl_objc", ":videocapture_objc", ":videoframebuffer_objc", ] } - if (rtc_use_metal_rendering) { - rtc_library("metal_objc") { - visibility = [ "*" ] - allow_poison = [ - "audio_codecs", # TODO(bugs.webrtc.org/8396): Remove. 
- "default_task_queue", - ] - sources = [ - "objc/components/renderer/metal/RTCMTLI420Renderer.h", - "objc/components/renderer/metal/RTCMTLI420Renderer.mm", - "objc/components/renderer/metal/RTCMTLRenderer+Private.h", - "objc/components/renderer/metal/RTCMTLRenderer.h", - "objc/components/renderer/metal/RTCMTLRenderer.mm", - ] - if (is_ios) { - sources += [ - "objc/components/renderer/metal/RTCMTLNV12Renderer.h", - "objc/components/renderer/metal/RTCMTLNV12Renderer.mm", - "objc/components/renderer/metal/RTCMTLRGBRenderer.h", - "objc/components/renderer/metal/RTCMTLRGBRenderer.mm", - "objc/components/renderer/metal/RTCMTLVideoView.h", - "objc/components/renderer/metal/RTCMTLVideoView.m", - ] - } - frameworks = [ - "CoreVideo.framework", - "Metal.framework", - "MetalKit.framework", + rtc_library("metal_objc") { + visibility = [ "*" ] + allow_poison = [ + "audio_codecs", # TODO(bugs.webrtc.org/8396): Remove. + "default_task_queue", + ] + sources = [ + "objc/components/renderer/metal/RTCMTLI420Renderer.h", + "objc/components/renderer/metal/RTCMTLI420Renderer.mm", + "objc/components/renderer/metal/RTCMTLNV12Renderer.h", + "objc/components/renderer/metal/RTCMTLNV12Renderer.mm", + "objc/components/renderer/metal/RTCMTLRGBRenderer.h", + "objc/components/renderer/metal/RTCMTLRGBRenderer.mm", + "objc/components/renderer/metal/RTCMTLRenderer+Private.h", + "objc/components/renderer/metal/RTCMTLRenderer.h", + "objc/components/renderer/metal/RTCMTLRenderer.mm", + ] + frameworks = [ + "CoreVideo.framework", + "Metal.framework", + "MetalKit.framework", + ] + if (is_ios) { + sources += [ + "objc/components/renderer/metal/RTCMTLVideoView.h", + "objc/components/renderer/metal/RTCMTLVideoView.m", ] - if (is_mac) { - sources += [ - "objc/components/renderer/metal/RTCMTLNSVideoView.h", - "objc/components/renderer/metal/RTCMTLNSVideoView.m", - ] - frameworks += [ "AppKit.framework" ] - } - deps = [ - ":base_objc", - ":peerconnectionfactory_base_objc", - ":video_objc", - ":videoframebuffer_objc", - "../api/video:video_frame", - "../api/video:video_rtp_headers", - "../rtc_base:checks", - "../rtc_base:rtc_base_approved", + } + if (is_mac) { + sources += [ + "objc/components/renderer/metal/RTCMTLNSVideoView.h", + "objc/components/renderer/metal/RTCMTLNSVideoView.m", ] - configs += [ "..:common_objc" ] - public_configs = [ ":common_config_objc" ] + frameworks += [ "AppKit.framework" ] } + deps = [ + ":base_objc", + ":peerconnectionfactory_base_objc", + ":videoframebuffer_objc", + "../api/video:video_frame", + "../api/video:video_rtp_headers", + "../rtc_base:checks", + "../rtc_base:rtc_base_approved", + ] + configs += [ "..:common_objc" ] + public_configs = [ ":common_config_objc" ] } # TODO(bugs.webrtc.org/9627): Remove this target. 
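Note: the GLES_SILENCE_DEPRECATION defines added to the OpenGL targets above suppress Apple's OpenGL ES deprecation warnings until the TODO(bugs.webrtc.org/12937) cleanup lands. A hedged sketch of the effect on a translation unit; iOS-only, and the GL calls below are an example of affected code, not part of the patch.

// Effectively what `defines = [ "GLES_SILENCE_DEPRECATION" ]` injects for each
// source file in the target; without it, GLES calls warn under
// -Wdeprecated-declarations on recent iOS SDKs.
#define GLES_SILENCE_DEPRECATION 1
#include <OpenGLES/ES3/gl.h>

void ClearViewportSketch() {
  glClearColor(0.f, 0.f, 0.f, 1.f);
  glClear(GL_COLOR_BUFFER_BIT);
}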
@@ -600,7 +610,6 @@ if (is_ios || is_mac) { deps = [ ":base_objc", ":helpers_objc", - ":video_objc", ":videoframebuffer_objc", "../rtc_base/system:gcd_helpers", ] @@ -989,7 +998,6 @@ if (is_ios || is_mac) { ":mediasource_objc", ":native_api", ":native_video", - ":video_objc", ":videoframebuffer_objc", ":videorendereradapter_objc", ":videosource_objc", @@ -1043,6 +1051,7 @@ if (is_ios || is_mac) { "objc/unittests/RTCEncodedImage_xctest.mm", "objc/unittests/RTCFileVideoCapturer_xctest.mm", "objc/unittests/RTCH264ProfileLevelId_xctest.m", + "objc/unittests/RTCMTLVideoView_xctest.m", "objc/unittests/RTCNV12TextureCache_xctest.m", "objc/unittests/RTCPeerConnectionFactory_xctest.m", "objc/unittests/frame_buffer_helpers.h", @@ -1050,6 +1059,10 @@ if (is_ios || is_mac) { "objc/unittests/nalu_rewriter_xctest.mm", ] + # TODO(bugs.webrtc.org/12937): Remove OpenGL deprecation warning + # workaround. + defines = [ "GLES_SILENCE_DEPRECATION" ] + # TODO(peterhanspers): Reenable these tests on simulator. # See bugs.webrtc.org/7812 if (target_environment != "simulator") { @@ -1067,11 +1080,11 @@ if (is_ios || is_mac) { ":callback_logger_objc", ":framework_objc", ":mediaconstraints_objc", + ":metal_objc", ":native_api", ":native_api_audio_device_module", ":native_video", ":peerconnectionfactory_base_objc", - ":video_objc", ":video_toolbox_cc", ":videocapture_objc", ":videocodec_objc", @@ -1090,9 +1103,8 @@ if (is_ios || is_mac) { "//third_party/libyuv", ] - if (rtc_use_metal_rendering) { - sources += [ "objc/unittests/RTCMTLVideoView_xctest.m" ] - deps += [ ":metal_objc" ] + if (rtc_ios_macos_use_opengl_rendering) { + deps += [ ":opengl_objc" ] } public_deps = [ @@ -1186,7 +1198,6 @@ if (is_ios || is_mac) { ":native_api", ":native_video", ":peerconnectionfactory_base_objc", - ":video_objc", ":videocapture_objc", ":videocodec_objc", ":videoframebuffer_objc", @@ -1332,14 +1343,13 @@ if (is_ios || is_mac) { ":native_api", ":native_video", ":peerconnectionfactory_base_objc", - ":ui_objc", ":videocapture_objc", ":videocodec_objc", ":videotoolbox_objc", "../rtc_base:rtc_base_approved", ] - if (rtc_use_metal_rendering) { - deps += [ ":metal_objc" ] + if (rtc_ios_macos_use_opengl_rendering) { + deps += [ ":opengl_ui_objc" ] } if (!build_with_chromium) { deps += [ @@ -1352,7 +1362,6 @@ if (is_ios || is_mac) { "AVFoundation.framework", "CoreGraphics.framework", "CoreMedia.framework", - "GLKit.framework", ] configs += [ @@ -1468,16 +1477,13 @@ if (is_ios || is_mac) { ":default_codec_factory_objc", ":native_api", ":native_video", + ":opengl_ui_objc", ":peerconnectionfactory_base_objc", - ":ui_objc", ":videocapture_objc", ":videocodec_objc", ":videotoolbox_objc", "../rtc_base:rtc_base_approved", ] - if (rtc_use_metal_rendering) { - deps += [ ":metal_objc" ] - } if (!build_with_chromium) { deps += [ ":callback_logger_objc", diff --git a/sdk/android/BUILD.gn b/sdk/android/BUILD.gn index ceebb4c65e..29574e9ab4 100644 --- a/sdk/android/BUILD.gn +++ b/sdk/android/BUILD.gn @@ -234,6 +234,7 @@ if (is_android) { "src/java/org/webrtc/GlGenericDrawer.java", "src/java/org/webrtc/H264Utils.java", "src/java/org/webrtc/NV21Buffer.java", + "src/java/org/webrtc/VideoCodecMimeType.java", "src/java/org/webrtc/VideoDecoderWrapper.java", "src/java/org/webrtc/VideoEncoderWrapper.java", "src/java/org/webrtc/WrappedNativeI420Buffer.java", @@ -403,7 +404,6 @@ if (is_android) { "src/java/org/webrtc/MediaCodecWrapperFactory.java", "src/java/org/webrtc/MediaCodecWrapperFactoryImpl.java", "src/java/org/webrtc/NV12Buffer.java", - 
"src/java/org/webrtc/VideoCodecMimeType.java", ] deps = [ diff --git a/sdk/android/api/org/webrtc/Camera2Enumerator.java b/sdk/android/api/org/webrtc/Camera2Enumerator.java index b32b3ad302..2c6bb57b68 100644 --- a/sdk/android/api/org/webrtc/Camera2Enumerator.java +++ b/sdk/android/api/org/webrtc/Camera2Enumerator.java @@ -55,7 +55,7 @@ public String[] getDeviceNames() { // catch statement with an Exception from a newer API, even if the code is never executed. // https://code.google.com/p/android/issues/detail?id=209129 } catch (/* CameraAccessException */ AndroidException e) { - Logging.e(TAG, "Camera access exception: " + e); + Logging.e(TAG, "Camera access exception", e); return new String[] {}; } } @@ -97,7 +97,7 @@ public CameraVideoCapturer createCapturer( // catch statement with an Exception from a newer API, even if the code is never executed. // https://code.google.com/p/android/issues/detail?id=209129 } catch (/* CameraAccessException */ AndroidException e) { - Logging.e(TAG, "Camera access exception: " + e); + Logging.e(TAG, "Camera access exception", e); return null; } } @@ -123,8 +123,8 @@ public static boolean isSupported(Context context) { // On Android OS pre 4.4.2, a class will not load because of VerifyError if it contains a // catch statement with an Exception from a newer API, even if the code is never executed. // https://code.google.com/p/android/issues/detail?id=209129 - } catch (/* CameraAccessException */ AndroidException e) { - Logging.e(TAG, "Camera access exception: " + e); + } catch (/* CameraAccessException */ AndroidException | RuntimeException e) { + Logging.e(TAG, "Failed to check if camera2 is supported", e); return false; } return true; @@ -186,7 +186,7 @@ static List getSupportedFormats(CameraManager cameraManager, Stri try { cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId); } catch (Exception ex) { - Logging.e(TAG, "getCameraCharacteristics(): " + ex); + Logging.e(TAG, "getCameraCharacteristics()", ex); return new ArrayList(); } diff --git a/sdk/android/api/org/webrtc/HardwareVideoEncoderFactory.java b/sdk/android/api/org/webrtc/HardwareVideoEncoderFactory.java index 17ba76ab30..c9831c1843 100644 --- a/sdk/android/api/org/webrtc/HardwareVideoEncoderFactory.java +++ b/sdk/android/api/org/webrtc/HardwareVideoEncoderFactory.java @@ -94,7 +94,7 @@ public VideoEncoder createEncoder(VideoCodecInfo input) { return null; } - VideoCodecMimeType type = VideoCodecMimeType.valueOf(input.name); + VideoCodecMimeType type = VideoCodecMimeType.fromSdpCodecName(input.getName()); MediaCodecInfo info = findCodecForType(type); if (info == null) { @@ -142,7 +142,7 @@ public VideoCodecInfo[] getSupportedCodecs() { VideoCodecMimeType.VP9, VideoCodecMimeType.H264, VideoCodecMimeType.AV1}) { MediaCodecInfo codec = findCodecForType(type); if (codec != null) { - String name = type.name(); + String name = type.toSdpCodecName(); // TODO(sakal): Always add H264 HP once WebRTC correctly removes codecs that are not // supported by the decoder. 
if (type == VideoCodecMimeType.H264 && isH264HighProfileSupported(codec)) { diff --git a/sdk/android/api/org/webrtc/RTCStats.java b/sdk/android/api/org/webrtc/RTCStats.java index 7ad7634c82..573d95300f 100644 --- a/sdk/android/api/org/webrtc/RTCStats.java +++ b/sdk/android/api/org/webrtc/RTCStats.java @@ -62,6 +62,7 @@ public String getId() { * - Double * - String * - The array form of any of the above (e.g., Integer[]) + * - Map of String keys to BigInteger / Double values */ public Map getMembers() { return members; diff --git a/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java b/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java index da11e87ec3..c59db3b47b 100644 --- a/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java +++ b/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java @@ -16,23 +16,20 @@ import java.util.List; public class SoftwareVideoDecoderFactory implements VideoDecoderFactory { - @Deprecated @Nullable @Override - public VideoDecoder createDecoder(String codecType) { - return createDecoder(new VideoCodecInfo(codecType, new HashMap<>())); - } + public VideoDecoder createDecoder(VideoCodecInfo codecInfo) { + String codecName = codecInfo.getName(); - @Nullable - @Override - public VideoDecoder createDecoder(VideoCodecInfo codecType) { - if (codecType.getName().equalsIgnoreCase("VP8")) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP8.toSdpCodecName())) { return new LibvpxVp8Decoder(); } - if (codecType.getName().equalsIgnoreCase("VP9") && LibvpxVp9Decoder.nativeIsSupported()) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP9.toSdpCodecName()) + && LibvpxVp9Decoder.nativeIsSupported()) { return new LibvpxVp9Decoder(); } - if (codecType.getName().equalsIgnoreCase("AV1") && LibaomAv1Decoder.nativeIsSupported()) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.AV1.toSdpCodecName()) + && LibaomAv1Decoder.nativeIsSupported()) { return new LibaomAv1Decoder(); } @@ -47,12 +44,12 @@ public VideoCodecInfo[] getSupportedCodecs() { static VideoCodecInfo[] supportedCodecs() { List codecs = new ArrayList(); - codecs.add(new VideoCodecInfo("VP8", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP8.toSdpCodecName(), new HashMap<>())); if (LibvpxVp9Decoder.nativeIsSupported()) { - codecs.add(new VideoCodecInfo("VP9", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP9.toSdpCodecName(), new HashMap<>())); } if (LibaomAv1Decoder.nativeIsSupported()) { - codecs.add(new VideoCodecInfo("AV1", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.AV1.toSdpCodecName(), new HashMap<>())); } return codecs.toArray(new VideoCodecInfo[codecs.size()]); diff --git a/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java b/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java index 528adab98c..4de39dcdba 100644 --- a/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java +++ b/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java @@ -18,14 +18,18 @@ public class SoftwareVideoEncoderFactory implements VideoEncoderFactory { @Nullable @Override - public VideoEncoder createEncoder(VideoCodecInfo info) { - if (info.name.equalsIgnoreCase("VP8")) { + public VideoEncoder createEncoder(VideoCodecInfo codecInfo) { + String codecName = codecInfo.getName(); + + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP8.toSdpCodecName())) { return new LibvpxVp8Encoder(); } - if (info.name.equalsIgnoreCase("VP9") && LibvpxVp9Encoder.nativeIsSupported()) { + if 
(codecName.equalsIgnoreCase(VideoCodecMimeType.VP9.toSdpCodecName()) + && LibvpxVp9Encoder.nativeIsSupported()) { return new LibvpxVp9Encoder(); } - if (info.name.equalsIgnoreCase("AV1") && LibaomAv1Encoder.nativeIsSupported()) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.AV1.toSdpCodecName()) + && LibaomAv1Encoder.nativeIsSupported()) { return new LibaomAv1Encoder(); } @@ -40,12 +44,12 @@ public VideoCodecInfo[] getSupportedCodecs() { static VideoCodecInfo[] supportedCodecs() { List codecs = new ArrayList(); - codecs.add(new VideoCodecInfo("VP8", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP8.toSdpCodecName(), new HashMap<>())); if (LibvpxVp9Encoder.nativeIsSupported()) { - codecs.add(new VideoCodecInfo("VP9", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP9.toSdpCodecName(), new HashMap<>())); } if (LibaomAv1Encoder.nativeIsSupported()) { - codecs.add(new VideoCodecInfo("AV1", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.AV1.toSdpCodecName(), new HashMap<>())); } return codecs.toArray(new VideoCodecInfo[codecs.size()]); diff --git a/sdk/android/api/org/webrtc/VideoCodecInfo.java b/sdk/android/api/org/webrtc/VideoCodecInfo.java index 8dd9295fd7..e11782dedd 100644 --- a/sdk/android/api/org/webrtc/VideoCodecInfo.java +++ b/sdk/android/api/org/webrtc/VideoCodecInfo.java @@ -69,6 +69,11 @@ public int hashCode() { return Arrays.hashCode(values); } + @Override + public String toString() { + return "VideoCodec{" + name + " " + params + "}"; + } + @CalledByNative String getName() { return name; diff --git a/sdk/android/api/org/webrtc/VideoDecoderFactory.java b/sdk/android/api/org/webrtc/VideoDecoderFactory.java index 2dd09670bd..3f0168f23e 100644 --- a/sdk/android/api/org/webrtc/VideoDecoderFactory.java +++ b/sdk/android/api/org/webrtc/VideoDecoderFactory.java @@ -18,18 +18,7 @@ public interface VideoDecoderFactory { * Creates a VideoDecoder for the given codec. Supports the same codecs supported by * VideoEncoderFactory. */ - @Deprecated - @Nullable - default VideoDecoder createDecoder(String codecType) { - throw new UnsupportedOperationException("Deprecated and not implemented."); - } - - /** Creates a decoder for the given video codec. */ - @Nullable - @CalledByNative - default VideoDecoder createDecoder(VideoCodecInfo info) { - return createDecoder(info.getName()); - } + @Nullable @CalledByNative VideoDecoder createDecoder(VideoCodecInfo info); /** * Enumerates the list of supported video codecs. diff --git a/sdk/android/api/org/webrtc/YuvConverter.java b/sdk/android/api/org/webrtc/YuvConverter.java index 0e2d5055f7..9c00678900 100644 --- a/sdk/android/api/org/webrtc/YuvConverter.java +++ b/sdk/android/api/org/webrtc/YuvConverter.java @@ -12,6 +12,8 @@ import android.graphics.Matrix; import android.opengl.GLES20; +import android.opengl.GLException; +import android.support.annotation.Nullable; import java.nio.ByteBuffer; import org.webrtc.VideoFrame.I420Buffer; import org.webrtc.VideoFrame.TextureBuffer; @@ -20,7 +22,9 @@ * Class for converting OES textures to a YUV ByteBuffer. It can be constructed on any thread, but * should only be operated from a single thread with an active EGL context. */ -public class YuvConverter { +public final class YuvConverter { + private static final String TAG = "YuvConverter"; + private static final String FRAGMENT_SHADER = // Difference in texture coordinate corresponding to one // sub-pixel in the x direction. 
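Note: the Java factories above now resolve SDP codec names through VideoCodecMimeType.fromSdpCodecName()/toSdpCodecName() instead of hard-coded strings, which also carries the temporary AV1 to "AV1X" translation (previously done on the native side in video_codec_info.cc and removed later in this patch). A minimal C++ sketch of that mapping, for illustration only:

#include <string>

// Mirrors the translation VideoCodecMimeType performs until the AV1 SDP name
// is standardized: the SDP/native layer uses "AV1X", the codec enum uses AV1.
std::string ToSdpCodecName(const std::string& codec_name) {
  return codec_name == "AV1" ? "AV1X" : codec_name;
}

std::string FromSdpCodecName(const std::string& sdp_codec_name) {
  return sdp_codec_name == "AV1X" ? "AV1" : sdp_codec_name;
}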
@@ -122,9 +126,17 @@ public YuvConverter(VideoFrameDrawer videoFrameDrawer) { } /** Converts the texture buffer to I420. */ + @Nullable public I420Buffer convert(TextureBuffer inputTextureBuffer) { - threadChecker.checkIsOnValidThread(); + try { + return convertInternal(inputTextureBuffer); + } catch (GLException e) { + Logging.w(TAG, "Failed to convert TextureBuffer", e); + } + return null; + } + private I420Buffer convertInternal(TextureBuffer inputTextureBuffer) { TextureBuffer preparedBuffer = (TextureBuffer) videoFrameDrawer.prepareBufferForViewportSize( inputTextureBuffer, inputTextureBuffer.getWidth(), inputTextureBuffer.getHeight()); diff --git a/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java b/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java index 8ffacbe788..8135e80eaf 100644 --- a/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java +++ b/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java @@ -73,7 +73,7 @@ public void testGetSupportedCodecsWithHardwareH264HighProfile() { assertEquals(5, videoCodecs.length); assertEquals("VP8", videoCodecs[0].name); assertEquals("VP9", videoCodecs[1].name); - assertEquals("AV1", videoCodecs[2].name); + assertEquals("AV1X", videoCodecs[2].name); assertEquals("H264", videoCodecs[3].name); assertEquals("42e01f", videoCodecs[3].params.get("profile-level-id")); assertEquals("H264", videoCodecs[4].name); @@ -89,7 +89,7 @@ public void testGetSupportedCodecsWithoutHardwareH264HighProfile() { assertEquals(4, videoCodecs.length); assertEquals("VP8", videoCodecs[0].name); assertEquals("VP9", videoCodecs[1].name); - assertEquals("AV1", videoCodecs[2].name); + assertEquals("AV1X", videoCodecs[2].name); assertEquals("H264", videoCodecs[3].name); assertEquals("42e01f", videoCodecs[3].params.get("profile-level-id")); } @@ -103,7 +103,7 @@ public void testGetSupportedCodecsWithoutHardwareVP8() { assertEquals(5, videoCodecs.length); assertEquals("VP8", videoCodecs[0].name); assertEquals("VP9", videoCodecs[1].name); - assertEquals("AV1", videoCodecs[2].name); + assertEquals("AV1X", videoCodecs[2].name); assertEquals("H264", videoCodecs[3].name); assertEquals("42e01f", videoCodecs[3].params.get("profile-level-id")); assertEquals("H264", videoCodecs[4].name); diff --git a/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java b/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java index bd446fb741..5a1d63e1c5 100644 --- a/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java +++ b/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java @@ -46,7 +46,7 @@ public MediaCodecVideoDecoderFactory(@Nullable EglBase.Context sharedContext, @Nullable @Override public VideoDecoder createDecoder(VideoCodecInfo codecType) { - VideoCodecMimeType type = VideoCodecMimeType.valueOf(codecType.getName()); + VideoCodecMimeType type = VideoCodecMimeType.fromSdpCodecName(codecType.getName()); MediaCodecInfo info = findCodecForType(type); if (info == null) { @@ -68,7 +68,7 @@ public VideoCodecInfo[] getSupportedCodecs() { VideoCodecMimeType.VP9, VideoCodecMimeType.H264, VideoCodecMimeType.AV1}) { MediaCodecInfo codec = findCodecForType(type); if (codec != null) { - String name = type.name(); + String name = type.toSdpCodecName(); if (type == VideoCodecMimeType.H264 && isH264HighProfileSupported(codec)) { supportedCodecInfos.add(new VideoCodecInfo( name, MediaCodecUtils.getCodecProperties(type, /* highProfile= 
*/ true))); diff --git a/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java b/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java index 26a030919d..93a9286165 100644 --- a/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java +++ b/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java @@ -26,4 +26,12 @@ private VideoCodecMimeType(String mimeType) { String mimeType() { return mimeType; } + + static VideoCodecMimeType fromSdpCodecName(String codecName) { + return codecName.equals("AV1X") ? AV1 : valueOf(codecName); + } + + String toSdpCodecName() { + return this == AV1 ? "AV1X" : name(); + } } diff --git a/sdk/android/src/jni/pc/peer_connection.cc b/sdk/android/src/jni/pc/peer_connection.cc index ca9e25cfad..09b8f33edb 100644 --- a/sdk/android/src/jni/pc/peer_connection.cc +++ b/sdk/android/src/jni/pc/peer_connection.cc @@ -548,10 +548,12 @@ static ScopedJavaLocalRef JNI_PeerConnection_CreateDataChannel( const JavaParamRef& j_label, const JavaParamRef& j_init) { DataChannelInit init = JavaToNativeDataChannelInit(jni, j_init); - rtc::scoped_refptr channel( - ExtractNativePC(jni, j_pc)->CreateDataChannel( - JavaToNativeString(jni, j_label), &init)); - return WrapNativeDataChannel(jni, channel); + auto result = ExtractNativePC(jni, j_pc)->CreateDataChannelOrError( + JavaToNativeString(jni, j_label), &init); + if (!result.ok()) { + return WrapNativeDataChannel(jni, nullptr); + } + return WrapNativeDataChannel(jni, result.MoveValue()); } static void JNI_PeerConnection_CreateOffer( diff --git a/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc b/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc index b334bb4a72..baa3f276e7 100644 --- a/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc +++ b/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc @@ -94,6 +94,23 @@ ScopedJavaLocalRef MemberToJava( case RTCStatsMemberInterface::kSequenceString: return NativeToJavaStringArray( env, *member.cast_to>>()); + + case RTCStatsMemberInterface::kMapStringUint64: + return NativeToJavaMap( + env, + *member.cast_to>>(), + [](JNIEnv* env, const auto& entry) { + return std::make_pair(NativeToJavaString(env, entry.first), + NativeToJavaBigInteger(env, entry.second)); + }); + + case RTCStatsMemberInterface::kMapStringDouble: + return NativeToJavaMap( + env, *member.cast_to>>(), + [](JNIEnv* env, const auto& entry) { + return std::make_pair(NativeToJavaString(env, entry.first), + NativeToJavaDouble(env, entry.second)); + }); } RTC_NOTREACHED(); return nullptr; diff --git a/sdk/android/src/jni/video_codec_info.cc b/sdk/android/src/jni/video_codec_info.cc index 8c86b7c376..a218a1d23f 100644 --- a/sdk/android/src/jni/video_codec_info.cc +++ b/sdk/android/src/jni/video_codec_info.cc @@ -19,33 +19,18 @@ namespace jni { SdpVideoFormat VideoCodecInfoToSdpVideoFormat(JNIEnv* jni, const JavaRef& j_info) { - std::string codecName = - JavaToNativeString(jni, Java_VideoCodecInfo_getName(jni, j_info)); - std::string sdpCodecName; - if (codecName == "AV1") { - // TODO(yyaroshevich): Undo mapping once AV1 sdp name is standardized - sdpCodecName = "AV1X"; - } else { - sdpCodecName = codecName; - } return SdpVideoFormat( - sdpCodecName, + JavaToNativeString(jni, Java_VideoCodecInfo_getName(jni, j_info)), JavaToNativeStringMap(jni, Java_VideoCodecInfo_getParams(jni, j_info))); } ScopedJavaLocalRef SdpVideoFormatToVideoCodecInfo( JNIEnv* jni, const SdpVideoFormat& format) { - std::string codecName; - if (format.name == "AV1X" || format.name == "AV1") { - codecName = "AV1"; - } 
else { - codecName = format.name; - } ScopedJavaLocalRef j_params = NativeToJavaStringMap(jni, format.parameters); return Java_VideoCodecInfo_Constructor( - jni, NativeToJavaString(jni, codecName), j_params); + jni, NativeToJavaString(jni, format.name), j_params); } } // namespace jni diff --git a/sdk/android/src/jni/video_frame.cc b/sdk/android/src/jni/video_frame.cc index dd3562f540..98728032e8 100644 --- a/sdk/android/src/jni/video_frame.cc +++ b/sdk/android/src/jni/video_frame.cc @@ -179,6 +179,10 @@ rtc::scoped_refptr AndroidVideoBuffer::ToI420() { JNIEnv* jni = AttachCurrentThreadIfNeeded(); ScopedJavaLocalRef j_i420_buffer = Java_Buffer_toI420(jni, j_video_frame_buffer_); + // In case I420 conversion fails, we propagate the nullptr. + if (j_i420_buffer.is_null()) { + return nullptr; + } // We don't need to retain the buffer because toI420 returns a new object that // we are assumed to take the ownership of. diff --git a/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm b/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm index 1ded45d670..cb75f061d8 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm @@ -23,14 +23,12 @@ @implementation RTC_OBJC_TYPE (RTCPeerConnection) std::string labelString = [NSString stdStringForString:label]; const webrtc::DataChannelInit nativeInit = configuration.nativeDataChannelInit; - rtc::scoped_refptr dataChannel = - self.nativePeerConnection->CreateDataChannel(labelString, - &nativeInit); - if (!dataChannel) { + auto result = self.nativePeerConnection->CreateDataChannelOrError(labelString, &nativeInit); + if (!result.ok()) { return nil; } return [[RTC_OBJC_TYPE(RTCDataChannel) alloc] initWithFactory:self.factory - nativeDataChannel:dataChannel]; + nativeDataChannel:result.MoveValue()]; } @end diff --git a/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h b/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h index afa2fd5fe2..6135223720 100644 --- a/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h +++ b/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h @@ -31,7 +31,7 @@ RTC_EXTERN const NSString *const kRTCVp8CodecName; RTC_EXTERN const NSString *const kRTCVp9CodecName; RTC_EXTERN const NSString *const kRTCH264CodecName; -/** Defined in http://w3c.github.io/webrtc-pc/#idl-def-RTC_OBJC_TYPE(RTCRtpCodecParameters) */ +/** Defined in https://www.w3.org/TR/webrtc/#idl-def-rtcrtpcodecparameters */ RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE (RTCRtpCodecParameters) : NSObject diff --git a/sdk/objc/api/peerconnection/RTCStatisticsReport.h b/sdk/objc/api/peerconnection/RTCStatisticsReport.h index 38d93e8771..06dbf48d88 100644 --- a/sdk/objc/api/peerconnection/RTCStatisticsReport.h +++ b/sdk/objc/api/peerconnection/RTCStatisticsReport.h @@ -44,8 +44,8 @@ RTC_OBJC_EXPORT @property(nonatomic, readonly) NSString *type; /** The keys and values of the subreport, e.g. "totalFramesDuration = 5.551". - The values are either NSNumbers or NSStrings, or NSArrays encapsulating NSNumbers - or NSStrings. */ + The values are either NSNumbers or NSStrings or NSArrays encapsulating NSNumbers + or NSStrings, or NSDictionary of NSString keys to NSNumber values. 
*/ @property(nonatomic, readonly) NSDictionary *values; - (instancetype)init NS_UNAVAILABLE; diff --git a/sdk/objc/api/peerconnection/RTCStatisticsReport.mm b/sdk/objc/api/peerconnection/RTCStatisticsReport.mm index 1dd72772ed..967683fc91 100644 --- a/sdk/objc/api/peerconnection/RTCStatisticsReport.mm +++ b/sdk/objc/api/peerconnection/RTCStatisticsReport.mm @@ -16,7 +16,7 @@ namespace webrtc { /** Converts a single value to a suitable NSNumber, NSString or NSArray containing NSNumbers - or NSStrings.*/ + or NSStrings, or NSDictionary of NSString keys to NSNumber values.*/ NSObject *ValueFromStatsMember(const RTCStatsMemberInterface *member) { if (member->is_defined()) { switch (member->type()) { @@ -91,6 +91,26 @@ } return [array copy]; } + case RTCStatsMemberInterface::kMapStringUint64: { + std::map map = + *member->cast_to>>(); + NSMutableDictionary *dictionary = + [NSMutableDictionary dictionaryWithCapacity:map.size()]; + for (const auto &item : map) { + dictionary[[NSString stringForStdString:item.first]] = @(item.second); + } + return [dictionary copy]; + } + case RTCStatsMemberInterface::kMapStringDouble: { + std::map map = + *member->cast_to>>(); + NSMutableDictionary *dictionary = + [NSMutableDictionary dictionaryWithCapacity:map.size()]; + for (const auto &item : map) { + dictionary[[NSString stringForStdString:item.first]] = @(item.second); + } + return [dictionary copy]; + } default: RTC_NOTREACHED(); } diff --git a/sdk/objc/api/peerconnection/RTCVideoSource.mm b/sdk/objc/api/peerconnection/RTCVideoSource.mm index 15b0d6f1be..3a1ea6a322 100644 --- a/sdk/objc/api/peerconnection/RTCVideoSource.mm +++ b/sdk/objc/api/peerconnection/RTCVideoSource.mm @@ -10,7 +10,7 @@ #import "RTCVideoSource+Private.h" -#include "api/video_track_source_proxy.h" +#include "pc/video_track_source_proxy.h" #include "rtc_base/checks.h" #include "sdk/objc/native/src/objc_video_track_source.h" diff --git a/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm b/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm index 843b6ad001..ea2a459360 100644 --- a/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm +++ b/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm @@ -69,4 +69,14 @@ - (NSString *)implementationName { return nil; } +- (NSInteger)resolutionAlignment { + RTC_NOTREACHED(); + return 1; +} + +- (BOOL)applyAlignmentToAllSimulcastLayers { + RTC_NOTREACHED(); + return NO; +} + @end diff --git a/sdk/objc/base/RTCVideoEncoder.h b/sdk/objc/base/RTCVideoEncoder.h index 29e8a89901..26cf4ec03f 100644 --- a/sdk/objc/base/RTCVideoEncoder.h +++ b/sdk/objc/base/RTCVideoEncoder.h @@ -28,7 +28,7 @@ RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE (RTCVideoEncoder) - - (void)setCallback : (RTCVideoEncoderCallback)callback; +- (void)setCallback:(nullable RTCVideoEncoderCallback)callback; - (NSInteger)startEncodeWithSettings:(RTC_OBJC_TYPE(RTCVideoEncoderSettings) *)settings numberOfCores:(int)numberOfCores; - (NSInteger)releaseEncoder; @@ -43,6 +43,13 @@ RTC_OBJC_EXPORT * disables quality scaling. */ - (nullable RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) *)scalingSettings; +/** Resolutions should be aligned to this value. */ +@property(nonatomic, readonly) NSInteger resolutionAlignment; + +/** If enabled, resolution alignment is applied to all simulcast layers simultaneously so that when + scaled, all resolutions comply with 'resolutionAlignment'. 
*/ +@property(nonatomic, readonly) BOOL applyAlignmentToAllSimulcastLayers; + @end NS_ASSUME_NONNULL_END diff --git a/sdk/objc/components/audio/RTCAudioSession.h b/sdk/objc/components/audio/RTCAudioSession.h index 1eb0ee2b64..521b57a231 100644 --- a/sdk/objc/components/audio/RTCAudioSession.h +++ b/sdk/objc/components/audio/RTCAudioSession.h @@ -140,8 +140,6 @@ RTC_OBJC_EXPORT * AVAudioSession. */ @property(nonatomic, readonly) BOOL isActive; -/** Whether RTCAudioSession is currently locked for configuration. */ -@property(nonatomic, readonly) BOOL isLocked; /** If YES, WebRTC will not initialize the audio unit automatically when an * audio track is ready for playout or recording. Instead, applications should diff --git a/sdk/objc/components/renderer/metal/RTCMTLVideoView.h b/sdk/objc/components/renderer/metal/RTCMTLVideoView.h index 5678112ade..3320d12076 100644 --- a/sdk/objc/components/renderer/metal/RTCMTLVideoView.h +++ b/sdk/objc/components/renderer/metal/RTCMTLVideoView.h @@ -21,8 +21,6 @@ NS_ASSUME_NONNULL_BEGIN * * It has id property that renders video frames in the view's * bounds using Metal. - * NOTE: always check if metal is available on the running device via - * RTC_SUPPORTS_METAL macro before initializing this class. */ NS_CLASS_AVAILABLE_IOS(9) diff --git a/sdk/objc/components/renderer/metal/RTCMTLVideoView.m b/sdk/objc/components/renderer/metal/RTCMTLVideoView.m index f5be7c061c..4c50bcf9c1 100644 --- a/sdk/objc/components/renderer/metal/RTCMTLVideoView.m +++ b/sdk/objc/components/renderer/metal/RTCMTLVideoView.m @@ -86,11 +86,7 @@ - (void)setVideoContentMode:(UIViewContentMode)mode { #pragma mark - Private + (BOOL)isMetalAvailable { -#if defined(RTC_SUPPORTS_METAL) return MTLCreateSystemDefaultDevice() != nil; -#else - return NO; -#endif } + (MTKView *)createMetalView:(CGRect)frame { diff --git a/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm b/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm index 667553002c..06cfb741d8 100644 --- a/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm +++ b/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm @@ -209,7 +209,9 @@ - (int)resetDecompressionSession { #endif CFTypeRef keys[attributesSize] = { -#if defined(WEBRTC_IOS) +#if defined(WEBRTC_IOS) && TARGET_OS_MACCATALYST + kCVPixelBufferMetalCompatibilityKey, +#elif defined(WEBRTC_IOS) kCVPixelBufferOpenGLESCompatibilityKey, #elif defined(WEBRTC_MAC) kCVPixelBufferOpenGLCompatibilityKey, diff --git a/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm b/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm index 6c958327e0..4064da2ec2 100644 --- a/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm +++ b/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm @@ -529,6 +529,14 @@ - (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate { return WEBRTC_VIDEO_CODEC_OK; } +- (NSInteger)resolutionAlignment { + return 1; +} + +- (BOOL)applyAlignmentToAllSimulcastLayers { + return NO; +} + #pragma mark - Private - (NSInteger)releaseEncoder { @@ -595,14 +603,15 @@ - (int)resetCompressionSessionWithPixelFormat:(OSType)framePixelFormat { // buffers retrieved from the encoder's pixel buffer pool. 
const size_t attributesSize = 3; CFTypeRef keys[attributesSize] = { -#if defined(WEBRTC_IOS) - kCVPixelBufferOpenGLESCompatibilityKey, +#if defined(WEBRTC_IOS) && TARGET_OS_MACCATALYST + kCVPixelBufferMetalCompatibilityKey, +#elif defined(WEBRTC_IOS) + kCVPixelBufferOpenGLESCompatibilityKey, #elif defined(WEBRTC_MAC) - kCVPixelBufferOpenGLCompatibilityKey, + kCVPixelBufferOpenGLCompatibilityKey, #endif - kCVPixelBufferIOSurfacePropertiesKey, - kCVPixelBufferPixelFormatTypeKey - }; + kCVPixelBufferIOSurfacePropertiesKey, + kCVPixelBufferPixelFormatTypeKey}; CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0); int64_t pixelFormatType = framePixelFormat; CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &pixelFormatType); @@ -760,6 +769,10 @@ - (void)frameWasEncoded:(OSStatus)status renderTimeMs:(int64_t)renderTimeMs timestamp:(uint32_t)timestamp rotation:(RTCVideoRotation)rotation { + RTCVideoEncoderCallback callback = _callback; + if (!callback) { + return; + } if (status != noErr) { RTC_LOG(LS_ERROR) << "H264 encode failed with code: " << status; return; @@ -806,7 +819,7 @@ - (void)frameWasEncoded:(OSStatus)status _h264BitstreamParser.ParseBitstream(*buffer); frame.qp = @(_h264BitstreamParser.GetLastSliceQp().value_or(-1)); - BOOL res = _callback(frame, codecSpecificInfo); + BOOL res = callback(frame, codecSpecificInfo); if (!res) { RTC_LOG(LS_ERROR) << "Encode callback failed"; return; diff --git a/sdk/objc/native/api/video_capturer.mm b/sdk/objc/native/api/video_capturer.mm index 6dd0edbcd9..cae7a50318 100644 --- a/sdk/objc/native/api/video_capturer.mm +++ b/sdk/objc/native/api/video_capturer.mm @@ -11,7 +11,8 @@ #include "sdk/objc/native/api/video_capturer.h" #include "absl/memory/memory.h" -#include "api/video_track_source_proxy.h" +#include "api/video_track_source_proxy_factory.h" +#include "rtc_base/ref_counted_object.h" #include "sdk/objc/native/src/objc_video_track_source.h" namespace webrtc { @@ -24,8 +25,7 @@ rtc::scoped_refptr objc_video_track_source( new rtc::RefCountedObject(adapter)); rtc::scoped_refptr video_source = - webrtc::VideoTrackSourceProxy::Create( - signaling_thread, worker_thread, objc_video_track_source); + webrtc::CreateVideoTrackSourceProxy(signaling_thread, worker_thread, objc_video_track_source); objc_video_capturer.delegate = adapter; diff --git a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm index b0e8a65114..a931308799 100644 --- a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm +++ b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm @@ -200,7 +200,7 @@ static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) { [session.category isEqualToString: AVAudioSessionCategoryRecord]) { enable_input = 1; } - RTCLog(@"Initializing AudioUnit, category=%@, enable_input=%d", session.category, enable_input); + RTCLog(@"Initializing AudioUnit, category=%@, enable_input=%s", session.category, (enable_input == 1) ? 
"true" : "false"); // LOGI() << "Initialize" << session.category << ", enable_input=" << enable_input; result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &enable_input, diff --git a/sdk/objc/native/src/objc_video_encoder_factory.mm b/sdk/objc/native/src/objc_video_encoder_factory.mm index e51fc9d319..b66554b1a4 100644 --- a/sdk/objc/native/src/objc_video_encoder_factory.mm +++ b/sdk/objc/native/src/objc_video_encoder_factory.mm @@ -48,21 +48,24 @@ int32_t InitEncode(const VideoCodec *codec_settings, const Settings &encoder_set } int32_t RegisterEncodeCompleteCallback(EncodedImageCallback *callback) override { - [encoder_ setCallback:^BOOL(RTC_OBJC_TYPE(RTCEncodedImage) * _Nonnull frame, - id _Nonnull info) { - EncodedImage encodedImage = [frame nativeEncodedImage]; - - // Handle types that can be converted into one of CodecSpecificInfo's hard coded cases. - CodecSpecificInfo codecSpecificInfo; - if ([info isKindOfClass:[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) class]]) { - codecSpecificInfo = - [(RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *)info nativeCodecSpecificInfo]; - } - - EncodedImageCallback::Result res = callback->OnEncodedImage(encodedImage, &codecSpecificInfo); - return res.error == EncodedImageCallback::Result::OK; - }]; - + if (callback) { + [encoder_ setCallback:^BOOL(RTC_OBJC_TYPE(RTCEncodedImage) * _Nonnull frame, + id _Nonnull info) { + EncodedImage encodedImage = [frame nativeEncodedImage]; + + // Handle types that can be converted into one of CodecSpecificInfo's hard coded cases. + CodecSpecificInfo codecSpecificInfo; + if ([info isKindOfClass:[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) class]]) { + codecSpecificInfo = + [(RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *)info nativeCodecSpecificInfo]; + } + + EncodedImageCallback::Result res = callback->OnEncodedImage(encodedImage, &codecSpecificInfo); + return res.error == EncodedImageCallback::Result::OK; + }]; + } else { + [encoder_ setCallback:nil]; + } return WEBRTC_VIDEO_CODEC_OK; } @@ -95,6 +98,8 @@ void SetRates(const RateControlParameters ¶meters) override { info.scaling_settings = qp_thresholds ? 
ScalingSettings(qp_thresholds.low, qp_thresholds.high) : ScalingSettings::kOff; + info.requested_resolution_alignment = encoder_.resolutionAlignment > 0 ?: 1; + info.apply_alignment_to_all_simulcast_layers = encoder_.applyAlignmentToAllSimulcastLayers; info.is_hardware_accelerated = true; info.has_internal_source = false; return info; diff --git a/sdk/objc/unittests/RTCAudioSessionTest.mm b/sdk/objc/unittests/RTCAudioSessionTest.mm index 805f601bdd..e2c26634b0 100644 --- a/sdk/objc/unittests/RTCAudioSessionTest.mm +++ b/sdk/objc/unittests/RTCAudioSessionTest.mm @@ -230,7 +230,7 @@ - (void)testConfigureWebRTCSession { __autoreleasing NSError **retError; [invocation getArgument:&retError atIndex:4]; *retError = [NSError errorWithDomain:@"AVAudioSession" - code:AVAudioSessionErrorInsufficientPriority + code:AVAudioSessionErrorCodeCannotInterruptOthers userInfo:nil]; BOOL failure = NO; [invocation setReturnValue:&failure]; diff --git a/stats/rtc_stats.cc b/stats/rtc_stats.cc index 59de664c0e..4895edc738 100644 --- a/stats/rtc_stats.cc +++ b/stats/rtc_stats.cc @@ -64,6 +64,20 @@ std::string VectorOfStringsToString(const std::vector& strings) { return sb.Release(); } +template +std::string MapToString(const std::map& map) { + rtc::StringBuilder sb; + sb << "{"; + const char* separator = ""; + for (const auto& element : map) { + sb << separator << rtc::ToString(element.first) << ":" + << rtc::ToString(element.second); + separator = ","; + } + sb << "}"; + return sb.Release(); +} + template std::string ToStringAsDouble(const T value) { // JSON represents numbers as floating point numbers with about 15 decimal @@ -88,6 +102,20 @@ std::string VectorToStringAsDouble(const std::vector& vector) { return sb.Release(); } +template +std::string MapToStringAsDouble(const std::map& map) { + rtc::StringBuilder sb; + sb << "{"; + const char* separator = ""; + for (const auto& element : map) { + sb << separator << "\"" << rtc::ToString(element.first) + << "\":" << ToStringAsDouble(element.second); + separator = ","; + } + sb << "}"; + return sb.Release(); +} + } // namespace bool RTCStats::operator==(const RTCStats& other) const { @@ -248,6 +276,18 @@ WEBRTC_DEFINE_RTCSTATSMEMBER(std::vector, false, VectorOfStringsToString(value_), VectorOfStringsToString(value_)); +WEBRTC_DEFINE_RTCSTATSMEMBER(rtc_stats_internal::MapStringUint64, + kMapStringUint64, + false, + false, + MapToString(value_), + MapToStringAsDouble(value_)); +WEBRTC_DEFINE_RTCSTATSMEMBER(rtc_stats_internal::MapStringDouble, + kMapStringDouble, + false, + false, + MapToString(value_), + MapToStringAsDouble(value_)); template class RTC_EXPORT_TEMPLATE_DEFINE(RTC_EXPORT) RTCNonStandardStatsMember; diff --git a/stats/rtc_stats_unittest.cc b/stats/rtc_stats_unittest.cc index b159977858..2cad90d02b 100644 --- a/stats/rtc_stats_unittest.cc +++ b/stats/rtc_stats_unittest.cc @@ -71,7 +71,7 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { EXPECT_EQ(stats.id(), "testId"); EXPECT_EQ(stats.timestamp_us(), static_cast(42)); std::vector members = stats.Members(); - EXPECT_EQ(members.size(), static_cast(14)); + EXPECT_EQ(members.size(), static_cast(16)); for (const RTCStatsMemberInterface* member : members) { EXPECT_FALSE(member->is_defined()); } @@ -98,6 +98,9 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { std::vector sequence_string; sequence_string.push_back(std::string("six")); + std::map map_string_uint64{{"seven", 8}}; + std::map map_string_double{{"nine", 10.0}}; + stats.m_sequence_bool = sequence_bool; stats.m_sequence_int32 = sequence_int32; 
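Note: the stats changes above introduce map-valued members (kMapStringUint64, kMapStringDouble) and serialize them with the new MapToString()/MapToStringAsDouble() helpers. As a standalone illustration of the string form those helpers produce, using std::ostringstream instead of rtc::StringBuilder so it compiles outside the tree:

#include <cstdint>
#include <map>
#include <sstream>
#include <string>

// Analogous to the MapToString() helper added to stats/rtc_stats.cc above:
// renders a map member as "{key:value,key:value}".
std::string MapToStringSketch(const std::map<std::string, uint64_t>& map) {
  std::ostringstream sb;
  sb << "{";
  const char* separator = "";
  for (const auto& element : map) {
    sb << separator << element.first << ":" << element.second;
    separator = ",";
  }
  sb << "}";
  return sb.str();
}

// For the unittest's {"seven", 8} this yields "{seven:8}"; the JSON variant
// additionally quotes keys and prints values as doubles.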
stats.m_sequence_uint32 = sequence_uint32; @@ -106,6 +109,8 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { stats.m_sequence_uint64 = sequence_uint64; stats.m_sequence_double = sequence_double; stats.m_sequence_string = sequence_string; + stats.m_map_string_uint64 = map_string_uint64; + stats.m_map_string_double = map_string_double; for (const RTCStatsMemberInterface* member : members) { EXPECT_TRUE(member->is_defined()); } @@ -123,6 +128,8 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { EXPECT_EQ(*stats.m_sequence_uint64, sequence_uint64); EXPECT_EQ(*stats.m_sequence_double, sequence_double); EXPECT_EQ(*stats.m_sequence_string, sequence_string); + EXPECT_EQ(*stats.m_map_string_uint64, map_string_uint64); + EXPECT_EQ(*stats.m_map_string_double, map_string_double); int32_t numbers[] = {4, 8, 15, 16, 23, 42}; std::vector numbers_sequence(&numbers[0], &numbers[6]); @@ -152,6 +159,8 @@ TEST(RTCStatsTest, EqualityOperator) { stats_with_all_values.m_sequence_uint64 = std::vector(); stats_with_all_values.m_sequence_double = std::vector(); stats_with_all_values.m_sequence_string = std::vector(); + stats_with_all_values.m_map_string_uint64 = std::map(); + stats_with_all_values.m_map_string_double = std::map(); EXPECT_NE(stats_with_all_values, empty_stats); EXPECT_EQ(stats_with_all_values, stats_with_all_values); EXPECT_NE(stats_with_all_values.m_int32, stats_with_all_values.m_uint32); @@ -180,6 +189,8 @@ TEST(RTCStatsTest, EqualityOperator) { one_member_different[11].m_sequence_uint64->push_back(321); one_member_different[12].m_sequence_double->push_back(321.0); one_member_different[13].m_sequence_string->push_back("321"); + (*one_member_different[13].m_map_string_uint64)["321"] = 321; + (*one_member_different[13].m_map_string_double)["321"] = 321.0; for (size_t i = 0; i < 14; ++i) { EXPECT_NE(stats_with_all_values, one_member_different[i]); } @@ -238,6 +249,11 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { std::vector sequence_string; sequence_string.push_back(std::string("four")); + std::map map_string_uint64{ + {"long", static_cast(1234567890123456499L)}}; + std::map map_string_double{ + {"three", 123.4567890123456499}, {"thirteen", 123.4567890123456499}}; + RTCTestStats stats(id, timestamp); stats.m_bool = m_bool; stats.m_int32 = m_int32; @@ -249,9 +265,16 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { stats.m_sequence_int64 = sequence_int64; stats.m_sequence_double = sequence_double; stats.m_sequence_string = sequence_string; + stats.m_map_string_uint64 = map_string_uint64; + stats.m_map_string_double = map_string_double; + std::string json_stats = stats.ToJson(); + Json::CharReaderBuilder builder; Json::Value json_output; - EXPECT_TRUE(Json::Reader().parse(stats.ToJson(), json_output)); + std::unique_ptr json_reader(builder.newCharReader()); + EXPECT_TRUE(json_reader->parse(json_stats.c_str(), + json_stats.c_str() + json_stats.size(), + &json_output, nullptr)); EXPECT_TRUE(rtc::GetStringFromJsonObject(json_output, "id", &id)); EXPECT_TRUE(rtc::GetIntFromJsonObject(json_output, "timestamp", ×tamp)); @@ -278,6 +301,16 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { rtc::GetValueFromJsonObject(json_output, "mSequenceString", &json_array)); EXPECT_TRUE(rtc::JsonArrayToStringVector(json_array, &sequence_string)); + Json::Value json_map; + EXPECT_TRUE( + rtc::GetValueFromJsonObject(json_output, "mMapStringDouble", &json_map)); + for (const auto& entry : map_string_double) { + double double_output = 0.0; + EXPECT_TRUE( + rtc::GetDoubleFromJsonObject(json_map, entry.first, &double_output)); + 
EXPECT_NEAR(double_output, entry.second, GetExpectedError(entry.second)); + } + EXPECT_EQ(id, stats.id()); EXPECT_EQ(timestamp, stats.timestamp_us()); EXPECT_EQ(m_bool, *stats.m_bool); @@ -286,6 +319,7 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { EXPECT_EQ(sequence_bool, *stats.m_sequence_bool); EXPECT_EQ(sequence_int32, *stats.m_sequence_int32); EXPECT_EQ(sequence_string, *stats.m_sequence_string); + EXPECT_EQ(map_string_double, *stats.m_map_string_double); EXPECT_NEAR(m_double, *stats.m_double, GetExpectedError(*stats.m_double)); @@ -295,6 +329,13 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { GetExpectedError(stats.m_sequence_double->at(i))); } + EXPECT_EQ(map_string_double.size(), stats.m_map_string_double->size()); + for (const auto& entry : map_string_double) { + auto it = stats.m_map_string_double->find(entry.first); + EXPECT_NE(it, stats.m_map_string_double->end()); + EXPECT_NEAR(entry.second, it->second, GetExpectedError(it->second)); + } + // We read mInt64 as double since JSON stores all numbers as doubles, so there // is not enough precision to represent large numbers. double m_int64_as_double; @@ -320,6 +361,19 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { GetExpectedError(stats_value_as_double)); } + // Similarly, read Uint64 as double + EXPECT_TRUE( + rtc::GetValueFromJsonObject(json_output, "mMapStringUint64", &json_map)); + for (const auto& entry : map_string_uint64) { + const double stats_value_as_double = + static_cast((*stats.m_map_string_uint64)[entry.first]); + double double_output = 0.0; + EXPECT_TRUE( + rtc::GetDoubleFromJsonObject(json_map, entry.first, &double_output)); + EXPECT_NEAR(double_output, stats_value_as_double, + GetExpectedError(stats_value_as_double)); + } + // Neither stats.m_uint32 nor stats.m_uint64 are defined, so "mUint64" and // "mUint32" should not be part of the generated JSON object. 
int m_uint32; diff --git a/stats/rtcstats_objects.cc b/stats/rtcstats_objects.cc index dcd2aeb776..a2d7aa0b07 100644 --- a/stats/rtcstats_objects.cc +++ b/stats/rtcstats_objects.cc @@ -811,6 +811,7 @@ WEBRTC_RTCSTATS_IMPL( &huge_frames_sent, &total_packet_send_delay, &quality_limitation_reason, + &quality_limitation_durations, &quality_limitation_resolution_changes, &content_type, &encoder_implementation, @@ -847,6 +848,7 @@ RTCOutboundRTPStreamStats::RTCOutboundRTPStreamStats(std::string&& id, huge_frames_sent("hugeFramesSent"), total_packet_send_delay("totalPacketSendDelay"), quality_limitation_reason("qualityLimitationReason"), + quality_limitation_durations("qualityLimitationDurations"), quality_limitation_resolution_changes( "qualityLimitationResolutionChanges"), content_type("contentType"), @@ -879,6 +881,7 @@ RTCOutboundRTPStreamStats::RTCOutboundRTPStreamStats( huge_frames_sent(other.huge_frames_sent), total_packet_send_delay(other.total_packet_send_delay), quality_limitation_reason(other.quality_limitation_reason), + quality_limitation_durations(other.quality_limitation_durations), quality_limitation_resolution_changes( other.quality_limitation_resolution_changes), content_type(other.content_type), @@ -984,7 +987,9 @@ RTCMediaSourceStats::~RTCMediaSourceStats() {} WEBRTC_RTCSTATS_IMPL(RTCAudioSourceStats, RTCMediaSourceStats, "media-source", &audio_level, &total_audio_energy, - &total_samples_duration) + &total_samples_duration, + &echo_return_loss, + &echo_return_loss_enhancement) // clang-format on RTCAudioSourceStats::RTCAudioSourceStats(const std::string& id, @@ -995,13 +1000,17 @@ RTCAudioSourceStats::RTCAudioSourceStats(std::string&& id, int64_t timestamp_us) : RTCMediaSourceStats(std::move(id), timestamp_us), audio_level("audioLevel"), total_audio_energy("totalAudioEnergy"), - total_samples_duration("totalSamplesDuration") {} + total_samples_duration("totalSamplesDuration"), + echo_return_loss("echoReturnLoss"), + echo_return_loss_enhancement("echoReturnLossEnhancement") {} RTCAudioSourceStats::RTCAudioSourceStats(const RTCAudioSourceStats& other) : RTCMediaSourceStats(other), audio_level(other.audio_level), total_audio_energy(other.total_audio_energy), - total_samples_duration(other.total_samples_duration) {} + total_samples_duration(other.total_samples_duration), + echo_return_loss(other.echo_return_loss), + echo_return_loss_enhancement(other.echo_return_loss_enhancement) {} RTCAudioSourceStats::~RTCAudioSourceStats() {} diff --git a/stats/test/rtc_test_stats.cc b/stats/test/rtc_test_stats.cc index d8bcbb19eb..e73da76fa9 100644 --- a/stats/test/rtc_test_stats.cc +++ b/stats/test/rtc_test_stats.cc @@ -30,7 +30,9 @@ WEBRTC_RTCSTATS_IMPL(RTCTestStats, &m_sequence_int64, &m_sequence_uint64, &m_sequence_double, - &m_sequence_string) + &m_sequence_string, + &m_map_string_uint64, + &m_map_string_double) RTCTestStats::RTCTestStats(const std::string& id, int64_t timestamp_us) : RTCStats(id, timestamp_us), @@ -47,7 +49,9 @@ RTCTestStats::RTCTestStats(const std::string& id, int64_t timestamp_us) m_sequence_int64("mSequenceInt64"), m_sequence_uint64("mSequenceUint64"), m_sequence_double("mSequenceDouble"), - m_sequence_string("mSequenceString") {} + m_sequence_string("mSequenceString"), + m_map_string_uint64("mMapStringUint64"), + m_map_string_double("mMapStringDouble") {} RTCTestStats::RTCTestStats(const RTCTestStats& other) : RTCStats(other.id(), other.timestamp_us()), @@ -64,7 +68,9 @@ RTCTestStats::RTCTestStats(const RTCTestStats& other) m_sequence_int64(other.m_sequence_int64), 
m_sequence_uint64(other.m_sequence_uint64), m_sequence_double(other.m_sequence_double), - m_sequence_string(other.m_sequence_string) {} + m_sequence_string(other.m_sequence_string), + m_map_string_uint64(other.m_map_string_uint64), + m_map_string_double(other.m_map_string_double) {} RTCTestStats::~RTCTestStats() {} diff --git a/stats/test/rtc_test_stats.h b/stats/test/rtc_test_stats.h index 1db32c25c1..0feb07e78e 100644 --- a/stats/test/rtc_test_stats.h +++ b/stats/test/rtc_test_stats.h @@ -12,6 +12,7 @@ #define STATS_TEST_RTC_TEST_STATS_H_ #include +#include #include #include @@ -42,6 +43,8 @@ class RTC_EXPORT RTCTestStats : public RTCStats { RTCStatsMember> m_sequence_uint64; RTCStatsMember> m_sequence_double; RTCStatsMember> m_sequence_string; + RTCStatsMember> m_map_string_uint64; + RTCStatsMember> m_map_string_double; }; } // namespace webrtc diff --git a/system_wrappers/include/clock.h b/system_wrappers/include/clock.h index bcb7feaa7d..271291c214 100644 --- a/system_wrappers/include/clock.h +++ b/system_wrappers/include/clock.h @@ -34,22 +34,23 @@ class RTC_EXPORT Clock { virtual ~Clock() {} // Return a timestamp relative to an unspecified epoch. - // TODO(bugs.webrtc.org/11327): Make this a pure virtual function. - virtual Timestamp CurrentTime() { - return Timestamp::Micros(TimeInMicroseconds()); - } - - // TODO(bugs.webrtc.org/11327): Make the following two methods non-virtual - // or completely remove them. - virtual int64_t TimeInMilliseconds() { return CurrentTime().ms(); } - virtual int64_t TimeInMicroseconds() { return CurrentTime().us(); } + virtual Timestamp CurrentTime() = 0; + int64_t TimeInMilliseconds() { return CurrentTime().ms(); } + int64_t TimeInMicroseconds() { return CurrentTime().us(); } // Retrieve an NTP absolute timestamp (with an epoch of Jan 1, 1900). - virtual NtpTime CurrentNtpTime() = 0; + // TODO(bugs.webrtc.org/11327): Make this non-virtual once + // "WebRTC-SystemIndependentNtpTimeKillSwitch" is removed. + virtual NtpTime CurrentNtpTime() { + return ConvertTimestampToNtpTime(CurrentTime()); + } + int64_t CurrentNtpInMilliseconds() { return CurrentNtpTime().ToMs(); } - // TODO(bugs.webrtc.org/11327): Make the following method non-virtual - // or completely remove it. - virtual int64_t CurrentNtpInMilliseconds() { return CurrentNtpTime().ToMs(); } + // Converts between a relative timestamp returned by this clock, to NTP time. + virtual NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) = 0; + int64_t ConvertTimestampToNtpTimeInMilliseconds(int64_t timestamp_ms) { + return ConvertTimestampToNtpTime(Timestamp::Millis(timestamp_ms)).ToMs(); + } // Returns an instance of the real-time system clock implementation. static Clock* GetRealTimeClock(); @@ -65,7 +66,7 @@ class SimulatedClock : public Clock { // Return a timestamp with an epoch of Jan 1, 1970. Timestamp CurrentTime() override; - NtpTime CurrentNtpTime() override; + NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) override; // Advance the simulated clock with a given number of milliseconds or // microseconds. diff --git a/system_wrappers/source/clock.cc b/system_wrappers/source/clock.cc index 2c3981a5a4..77c1d36327 100644 --- a/system_wrappers/source/clock.cc +++ b/system_wrappers/source/clock.cc @@ -93,6 +93,12 @@ class RealTimeClock : public Clock { : SystemDependentNtpTime(); } + NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) override { + // This method does not check |use_system_independent_ntp_time_| because + // all callers never used the old behavior of |CurrentNtpTime|. 
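Note: clock.h above makes CurrentTime() pure virtual and moves NTP handling onto ConvertTimestampToNtpTime(), with CurrentNtpTime() defaulting to converting CurrentTime(). The arithmetic used by SimulatedClock for that conversion boils down to the following standalone sketch, assuming kNtpJan1970 == 2208988800 and kMagicNtpFractionalUnit == 2^32, which is what the formula below implies:

#include <cstdint>

// Standalone restatement of the microseconds-to-NTP conversion in
// SimulatedClock::ConvertTimestampToNtpTime(); kNtpJan1970 is the offset in
// seconds from the NTP epoch (1900) to the Unix epoch (1970).
void MicrosToNtpSketch(int64_t time_us, uint32_t* seconds, uint32_t* fractions) {
  constexpr int64_t kNtpJan1970 = 2208988800;
  constexpr int64_t kFractionsPerSecond = int64_t{1} << 32;
  *seconds = static_cast<uint32_t>(time_us / 1'000'000 + kNtpJan1970);
  *fractions = static_cast<uint32_t>((time_us % 1'000'000) *
                                     kFractionsPerSecond / 1'000'000);
}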
+ return TimeMicrosToNtp(timestamp.us()); + } + protected: virtual timeval CurrentTimeVal() = 0; @@ -276,11 +282,11 @@ Timestamp SimulatedClock::CurrentTime() { return Timestamp::Micros(time_us_.load(std::memory_order_relaxed)); } -NtpTime SimulatedClock::CurrentNtpTime() { - int64_t now_ms = TimeInMilliseconds(); - uint32_t seconds = (now_ms / 1000) + kNtpJan1970; - uint32_t fractions = - static_cast((now_ms % 1000) * kMagicNtpFractionalUnit / 1000); +NtpTime SimulatedClock::ConvertTimestampToNtpTime(Timestamp timestamp) { + int64_t now_us = timestamp.us(); + uint32_t seconds = (now_us / 1'000'000) + kNtpJan1970; + uint32_t fractions = static_cast( + (now_us % 1'000'000) * kMagicNtpFractionalUnit / 1'000'000); return NtpTime(seconds, fractions); } diff --git a/system_wrappers/source/field_trial.cc b/system_wrappers/source/field_trial.cc index f1dccc987b..d10b5cff3f 100644 --- a/system_wrappers/source/field_trial.cc +++ b/system_wrappers/source/field_trial.cc @@ -85,7 +85,7 @@ void InsertOrReplaceFieldTrialStringsInMap( (*fieldtrial_map)[tokens[idx]] = tokens[idx + 1]; } } else { - RTC_DCHECK(false) << "Invalid field trials string:" << trials_string; + RTC_NOTREACHED() << "Invalid field trials string:" << trials_string; } } diff --git a/test/BUILD.gn b/test/BUILD.gn index 988d15fd30..82d0b9ea28 100644 --- a/test/BUILD.gn +++ b/test/BUILD.gn @@ -212,6 +212,7 @@ rtc_library("rtp_test_utils") { "../rtc_base/synchronization:mutex", "../rtc_base/system:arch", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("field_trial") { @@ -735,7 +736,6 @@ rtc_library("direct_transport") { "direct_transport.h", ] deps = [ - ":rtp_test_utils", "../api:sequence_checker", "../api:simulated_network_api", "../api:transport_api", @@ -743,6 +743,7 @@ rtc_library("direct_transport") { "../api/units:time_delta", "../call:call_interfaces", "../call:simulated_packet_receiver", + "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:macromagic", "../rtc_base:timeutils", "../rtc_base/synchronization:mutex", @@ -847,9 +848,9 @@ rtc_library("test_common") { ":fake_video_codecs", ":fileutils", ":mock_transport", - ":rtp_test_utils", ":test_support", ":video_test_common", + "../api:array_view", "../api:create_frame_generator", "../api:frame_generator_api", "../api:rtp_headers", diff --git a/test/call_test.cc b/test/call_test.cc index 0ba947ce08..11230dae2f 100644 --- a/test/call_test.cc +++ b/test/call_test.cc @@ -409,7 +409,7 @@ void CallTest::CreateMatchingAudioAndFecConfigs( if (num_flexfec_streams_ == 1) { CreateMatchingFecConfig(rtcp_send_transport, *GetVideoSendConfig()); for (const RtpExtension& extension : GetVideoSendConfig()->rtp.extensions) - GetFlexFecConfig()->rtp_header_extensions.push_back(extension); + GetFlexFecConfig()->rtp.extensions.push_back(extension); } } @@ -444,9 +444,9 @@ void CallTest::CreateMatchingFecConfig( const VideoSendStream::Config& send_config) { FlexfecReceiveStream::Config config(transport); config.payload_type = send_config.rtp.flexfec.payload_type; - config.remote_ssrc = send_config.rtp.flexfec.ssrc; + config.rtp.remote_ssrc = send_config.rtp.flexfec.ssrc; config.protected_media_ssrcs = send_config.rtp.flexfec.protected_media_ssrcs; - config.local_ssrc = kReceiverLocalVideoSsrc; + config.rtp.local_ssrc = kReceiverLocalVideoSsrc; if (!video_receive_configs_.empty()) { video_receive_configs_[0].rtp.protected_by_flexfec = true; video_receive_configs_[0].rtp.packet_sink_ = this; diff --git a/test/direct_transport.cc b/test/direct_transport.cc index 
9c7a8f88d0..7e9c5aefeb 100644 --- a/test/direct_transport.cc +++ b/test/direct_transport.cc @@ -14,9 +14,9 @@ #include "api/units/time_delta.h" #include "call/call.h" #include "call/fake_network_pipe.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/time_utils.h" -#include "test/rtp_header_parser.h" namespace webrtc { namespace test { @@ -26,7 +26,7 @@ Demuxer::Demuxer(const std::map& payload_type_map) MediaType Demuxer::GetMediaType(const uint8_t* packet_data, const size_t packet_length) const { - if (!RtpHeaderParser::IsRtcp(packet_data, packet_length)) { + if (IsRtpPacket(rtc::MakeArrayView(packet_data, packet_length))) { RTC_CHECK_GE(packet_length, 2); const uint8_t payload_type = packet_data[1] & 0x7f; std::map::const_iterator it = diff --git a/test/direct_transport_unittest.cc b/test/direct_transport_unittest.cc index 66ab5bcac1..ab00971089 100644 --- a/test/direct_transport_unittest.cc +++ b/test/direct_transport_unittest.cc @@ -18,12 +18,13 @@ namespace test { TEST(DemuxerTest, Demuxing) { constexpr uint8_t kVideoPayloadType = 100; constexpr uint8_t kAudioPayloadType = 101; - constexpr size_t kPacketSize = 10; + constexpr size_t kPacketSize = 12; Demuxer demuxer({{kVideoPayloadType, MediaType::VIDEO}, {kAudioPayloadType, MediaType::AUDIO}}); uint8_t data[kPacketSize]; memset(data, 0, kPacketSize); + data[0] = 0x80; data[1] = kVideoPayloadType; EXPECT_EQ(demuxer.GetMediaType(data, kPacketSize), MediaType::VIDEO); data[1] = kAudioPayloadType; diff --git a/test/drifting_clock.cc b/test/drifting_clock.cc index 1a5154557e..47c8e56916 100644 --- a/test/drifting_clock.cc +++ b/test/drifting_clock.cc @@ -28,22 +28,18 @@ TimeDelta DriftingClock::Drift() const { return (now - start_time_) * drift_; } -Timestamp DriftingClock::CurrentTime() { - return clock_->CurrentTime() + Drift() / 1000.; +Timestamp DriftingClock::Drift(Timestamp timestamp) const { + return timestamp + Drift() / 1000.; } -NtpTime DriftingClock::CurrentNtpTime() { +NtpTime DriftingClock::Drift(NtpTime ntp_time) const { // NTP precision is 1/2^32 seconds, i.e. 2^32 ntp fractions = 1 second. 
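Expanding on the precision note above: an NTP timestamp packs 32 bits of whole seconds and 32 bits of binary fractions, so one second corresponds to 2^32 fractions and one microsecond to 2^32 / 10^6 = 4294.967296 fractions, which is exactly the constant defined next. A self-contained sketch of the microsecond-exact conversion that SimulatedClock::ConvertTimestampToNtpTime now performs, with a stand-in struct instead of webrtc::NtpTime:

    #include <cstdint>

    struct SimpleNtpTime {
      uint32_t seconds;
      uint32_t fractions;
    };

    // Seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01).
    constexpr uint32_t kNtpJan1970 = 2208988800u;
    constexpr int64_t kNtpFractionsPerSecond = 4294967296;  // 2^32.

    // Convert microseconds since the Unix epoch to NTP time without first
    // rounding to milliseconds, so sub-millisecond precision is preserved.
    SimpleNtpTime MicrosToNtp(int64_t time_us) {
      const uint32_t seconds =
          static_cast<uint32_t>(time_us / 1'000'000) + kNtpJan1970;
      const uint32_t fractions = static_cast<uint32_t>(
          (time_us % 1'000'000) * kNtpFractionsPerSecond / 1'000'000);
      return {seconds, fractions};
    }

The drifting clock applies the same relationship in the other direction, adding Drift().us() * 4294.967296 fractions directly onto the wrapped clock's NTP reading.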
const double kNtpFracPerMicroSecond = 4294.967296; // = 2^32 / 10^6 - NtpTime ntp = clock_->CurrentNtpTime(); - uint64_t total_fractions = static_cast(ntp); + uint64_t total_fractions = static_cast(ntp_time); total_fractions += Drift().us() * kNtpFracPerMicroSecond; return NtpTime(total_fractions); } -int64_t DriftingClock::CurrentNtpInMilliseconds() { - return clock_->CurrentNtpInMilliseconds() + Drift().ms(); -} } // namespace test } // namespace webrtc diff --git a/test/drifting_clock.h b/test/drifting_clock.h index 2539b61786..3471c008a1 100644 --- a/test/drifting_clock.h +++ b/test/drifting_clock.h @@ -30,12 +30,16 @@ class DriftingClock : public Clock { return 1.0f - percent / 100.0f; } - Timestamp CurrentTime() override; - NtpTime CurrentNtpTime() override; - int64_t CurrentNtpInMilliseconds() override; + Timestamp CurrentTime() override { return Drift(clock_->CurrentTime()); } + NtpTime CurrentNtpTime() override { return Drift(clock_->CurrentNtpTime()); } + NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) override { + return Drift(clock_->ConvertTimestampToNtpTime(timestamp)); + } private: TimeDelta Drift() const; + Timestamp Drift(Timestamp timestamp) const; + NtpTime Drift(NtpTime ntp_time) const; Clock* const clock_; const float drift_; diff --git a/test/frame_generator_unittest.cc b/test/frame_generator_unittest.cc index 12d5111bff..8e5cde8c5f 100644 --- a/test/frame_generator_unittest.cc +++ b/test/frame_generator_unittest.cc @@ -54,7 +54,7 @@ class FrameGeneratorTest : public ::testing::Test { protected: void WriteYuvFile(FILE* file, uint8_t y, uint8_t u, uint8_t v) { - assert(file); + RTC_DCHECK(file); std::unique_ptr plane_buffer(new uint8_t[y_size]); memset(plane_buffer.get(), y, y_size); fwrite(plane_buffer.get(), 1, y_size, file); diff --git a/test/fuzzers/BUILD.gn b/test/fuzzers/BUILD.gn index 23ad728dba..9824bebb5f 100644 --- a/test/fuzzers/BUILD.gn +++ b/test/fuzzers/BUILD.gn @@ -622,8 +622,12 @@ webrtc_fuzzer_test("rtp_header_parser_fuzzer") { } webrtc_fuzzer_test("ssl_certificate_fuzzer") { - sources = [ "rtp_header_parser_fuzzer.cc" ] - deps = [ "../:rtp_test_utils" ] + sources = [ "ssl_certificate_fuzzer.cc" ] + deps = [ + "../:rtp_test_utils", + "../../rtc_base", + "../../rtc_base:stringutils", + ] } webrtc_fuzzer_test("vp8_replay_fuzzer") { diff --git a/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc b/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc index aeeb5c03a4..fdb4aa5f3c 100644 --- a/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc +++ b/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc @@ -12,9 +12,7 @@ #include "api/rtp_packet_infos.h" #include "modules/video_coding/frame_object.h" -#include "modules/video_coding/packet_buffer.h" #include "modules/video_coding/rtp_frame_reference_finder.h" -#include "system_wrappers/include/clock.h" namespace webrtc { @@ -58,10 +56,6 @@ class DataReader { size_t offset_ = 0; }; -class NullCallback : public OnCompleteFrameCallback { - void OnCompleteFrame(std::unique_ptr frame) override {} -}; - absl::optional GenerateGenericFrameDependencies(DataReader* reader) { absl::optional result; @@ -91,8 +85,7 @@ GenerateGenericFrameDependencies(DataReader* reader) { void FuzzOneInput(const uint8_t* data, size_t size) { DataReader reader(data, size); - NullCallback cb; - RtpFrameReferenceFinder reference_finder(&cb); + RtpFrameReferenceFinder reference_finder; auto codec = static_cast(reader.GetNum() % 5); diff --git a/test/fuzzers/rtp_header_parser_fuzzer.cc b/test/fuzzers/rtp_header_parser_fuzzer.cc index 
d6af5ca3ce..435c64bbb4 100644 --- a/test/fuzzers/rtp_header_parser_fuzzer.cc +++ b/test/fuzzers/rtp_header_parser_fuzzer.cc @@ -20,29 +20,7 @@ namespace webrtc { void FuzzOneInput(const uint8_t* data, size_t size) { - RtpHeaderParser::IsRtcp(data, size); RtpHeaderParser::GetSsrc(data, size); - RTPHeader rtp_header; - - std::unique_ptr rtp_header_parser( - RtpHeaderParser::CreateForTest()); - - rtp_header_parser->Parse(data, size, &rtp_header); - for (int i = 1; i < kRtpExtensionNumberOfExtensions; ++i) { - if (size > 0 && i >= data[size - 1]) { - RTPExtensionType add_extension = static_cast(i); - rtp_header_parser->RegisterRtpHeaderExtension(add_extension, i); - } - } - rtp_header_parser->Parse(data, size, &rtp_header); - - for (int i = 1; i < kRtpExtensionNumberOfExtensions; ++i) { - if (size > 1 && i >= data[size - 2]) { - RTPExtensionType remove_extension = static_cast(i); - rtp_header_parser->DeregisterRtpHeaderExtension(remove_extension); - } - } - rtp_header_parser->Parse(data, size, &rtp_header); } } // namespace webrtc diff --git a/test/fuzzers/rtp_packet_fuzzer.cc b/test/fuzzers/rtp_packet_fuzzer.cc index 9e8fd6f4c1..3f2fc5e668 100644 --- a/test/fuzzers/rtp_packet_fuzzer.cc +++ b/test/fuzzers/rtp_packet_fuzzer.cc @@ -9,6 +9,7 @@ */ #include +#include #include "absl/types/optional.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" @@ -76,6 +77,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) { uint8_t audio_level; packet.GetExtension(&voice_activity, &audio_level); break; + case kRtpExtensionCsrcAudioLevel: { + std::vector audio_levels; + packet.GetExtension(&audio_levels); + break; + } case kRtpExtensionAbsoluteSendTime: uint32_t sendtime; packet.GetExtension(&sendtime); @@ -109,10 +115,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) { VideoContentType content_type; packet.GetExtension(&content_type); break; - case kRtpExtensionVideoTiming: + case kRtpExtensionVideoTiming: { VideoSendTiming timing; packet.GetExtension(&timing); break; + } case kRtpExtensionRtpStreamId: { std::string rsid; packet.GetExtension(&rsid); diff --git a/test/fuzzers/ssl_certificate_fuzzer.cc b/test/fuzzers/ssl_certificate_fuzzer.cc index 7ab59b51dd..4bab5c8f02 100644 --- a/test/fuzzers/ssl_certificate_fuzzer.cc +++ b/test/fuzzers/ssl_certificate_fuzzer.cc @@ -13,6 +13,7 @@ #include +#include "rtc_base/message_digest.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/string_encode.h" @@ -34,7 +35,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { cert->CertificateExpirationTime(); std::string algorithm; - cert->GetSignatureDigestAlgorithm(algorithm); + cert->GetSignatureDigestAlgorithm(&algorithm); unsigned char digest[rtc::MessageDigest::kMaxSize]; size_t digest_len; diff --git a/test/fuzzers/utils/BUILD.gn b/test/fuzzers/utils/BUILD.gn index 6249156058..3e0782f39d 100644 --- a/test/fuzzers/utils/BUILD.gn +++ b/test/fuzzers/utils/BUILD.gn @@ -24,6 +24,7 @@ rtc_library("rtp_replayer") { "../../../call:call_interfaces", "../../../common_video", "../../../media:rtc_internal_video_codecs", + "../../../modules/rtp_rtcp:rtp_rtcp_format", "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", "../../../rtc_base:rtc_base_tests_utils", diff --git a/test/fuzzers/utils/rtp_replayer.cc b/test/fuzzers/utils/rtp_replayer.cc index a664adb31d..43b1fc2ea4 100644 --- a/test/fuzzers/utils/rtp_replayer.cc +++ b/test/fuzzers/utils/rtp_replayer.cc @@ -17,13 +17,13 @@ #include "api/task_queue/default_task_queue_factory.h" #include 
"api/transport/field_trial_based_config.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "rtc_base/strings/json.h" #include "system_wrappers/include/clock.h" #include "test/call_config_utils.h" #include "test/encoder_settings.h" #include "test/fake_decoder.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" #include "test/run_loop.h" namespace webrtc { @@ -164,37 +164,32 @@ void RtpReplayer::ReplayPackets(rtc::FakeClock* clock, std::min(deliver_in_ms, static_cast(100)))); } + rtc::CopyOnWriteBuffer packet_buffer(packet.data, packet.length); ++num_packets; - switch (call->Receiver()->DeliverPacket( - webrtc::MediaType::VIDEO, - rtc::CopyOnWriteBuffer(packet.data, packet.length), - /* packet_time_us */ -1)) { + switch (call->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, + packet_buffer, + /* packet_time_us */ -1)) { case PacketReceiver::DELIVERY_OK: break; case PacketReceiver::DELIVERY_UNKNOWN_SSRC: { - RTPHeader header; - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - - parser->Parse(packet.data, packet.length, &header); - if (unknown_packets[header.ssrc] == 0) { - RTC_LOG(LS_ERROR) << "Unknown SSRC: " << header.ssrc; + webrtc::RtpPacket header; + header.Parse(packet_buffer); + if (unknown_packets[header.Ssrc()] == 0) { + RTC_LOG(LS_ERROR) << "Unknown SSRC: " << header.Ssrc(); } - ++unknown_packets[header.ssrc]; + ++unknown_packets[header.Ssrc()]; break; } case PacketReceiver::DELIVERY_PACKET_ERROR: { RTC_LOG(LS_ERROR) << "Packet error, corrupt packets or incorrect setup?"; - RTPHeader header; - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - parser->Parse(packet.data, packet.length, &header); + webrtc::RtpPacket header; + header.Parse(packet_buffer); RTC_LOG(LS_ERROR) << "Packet packet_length=" << packet.length - << " payload_type=" << header.payloadType - << " sequence_number=" << header.sequenceNumber - << " time_stamp=" << header.timestamp - << " ssrc=" << header.ssrc; + << " payload_type=" << header.PayloadType() + << " sequence_number=" << header.SequenceNumber() + << " time_stamp=" << header.Timestamp() + << " ssrc=" << header.Ssrc(); break; } } diff --git a/test/fuzzers/vp9_encoder_references_fuzzer.cc b/test/fuzzers/vp9_encoder_references_fuzzer.cc index 805c34f75b..9c793ae9aa 100644 --- a/test/fuzzers/vp9_encoder_references_fuzzer.cc +++ b/test/fuzzers/vp9_encoder_references_fuzzer.cc @@ -49,19 +49,26 @@ class FrameValidator : public EncodedImageCallback { LayerFrame& layer_frame = frames_[frame_id % kMaxFrameHistorySize]; layer_frame.picture_id = picture_id_; layer_frame.spatial_id = encoded_image.SpatialIndex().value_or(0); - layer_frame.info = *codec_specific_info; layer_frame.frame_id = frame_id; - CheckVp9References(layer_frame); + layer_frame.temporal_id = + codec_specific_info->codecSpecific.VP9.temporal_idx; + if (layer_frame.temporal_id == kNoTemporalIdx) { + layer_frame.temporal_id = 0; + } + layer_frame.vp9_non_ref_for_inter_layer_pred = + codec_specific_info->codecSpecific.VP9.non_ref_for_inter_layer_pred; + CheckVp9References(layer_frame, codec_specific_info->codecSpecific.VP9); - if (layer_frame.info.generic_frame_info.has_value()) { - layer_frame.frame_dependencies = + if (codec_specific_info->generic_frame_info.has_value()) { + absl::InlinedVector frame_dependencies = dependencies_calculator_.FromBuffersUsage( - frame_id, layer_frame.info.generic_frame_info->encoder_buffers); + frame_id, + codec_specific_info->generic_frame_info->encoder_buffers); - CheckGenericReferences(layer_frame); - 
CheckGenericAndCodecSpecificReferencesAreConsistent(layer_frame); - } else { - layer_frame.frame_dependencies = {}; + CheckGenericReferences(frame_dependencies, + *codec_specific_info->generic_frame_info); + CheckGenericAndCodecSpecificReferencesAreConsistent( + frame_dependencies, *codec_specific_info, layer_frame); } return Result(Result::OK); @@ -72,25 +79,21 @@ class FrameValidator : public EncodedImageCallback { // to keep 32 last frames to validate dependencies. static constexpr size_t kMaxFrameHistorySize = 32; struct LayerFrame { - const CodecSpecificInfoVP9& vp9() const { return info.codecSpecific.VP9; } - int temporal_id() const { - return vp9().temporal_idx == kNoTemporalIdx ? 0 : vp9().temporal_idx; - } - int64_t frame_id; int64_t picture_id; int spatial_id; - absl::InlinedVector frame_dependencies; - CodecSpecificInfo info; + int temporal_id; + bool vp9_non_ref_for_inter_layer_pred; }; - void CheckVp9References(const LayerFrame& layer_frame) { + void CheckVp9References(const LayerFrame& layer_frame, + const CodecSpecificInfoVP9& vp9_info) { if (layer_frame.frame_id == 0) { - RTC_CHECK(!layer_frame.vp9().inter_layer_predicted); + RTC_CHECK(!vp9_info.inter_layer_predicted); } else { const LayerFrame& previous_frame = Frame(layer_frame.frame_id - 1); - if (layer_frame.vp9().inter_layer_predicted) { - RTC_CHECK(!previous_frame.vp9().non_ref_for_inter_layer_pred); + if (vp9_info.inter_layer_predicted) { + RTC_CHECK(!previous_frame.vp9_non_ref_for_inter_layer_pred); RTC_CHECK_EQ(layer_frame.picture_id, previous_frame.picture_id); } if (previous_frame.picture_id == layer_frame.picture_id) { @@ -98,51 +101,51 @@ class FrameValidator : public EncodedImageCallback { // The check below would fail for temporal shift structures. Remove it // or move it to !flexible_mode section when vp9 encoder starts // supporting such structures. 
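For context on these temporal-layer checks: the validator now stores a plain temporal id on each LayerFrame, treating the codec-specific kNoTemporalIdx sentinel as layer 0, and in non-flexible mode it additionally requires that id to match the entry the group-of-frames (GOF) template prescribes for the frame's gof_idx. A small illustrative sketch of that resolution and lookup, using a plain vector as the GOF table rather than WebRTC's GofInfoVP9:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sentinel meaning "no temporal index reported"; resolved to the base layer.
    constexpr uint8_t kNoTemporalIdxSentinel = 0xFF;

    int EffectiveTemporalId(uint8_t reported_temporal_idx) {
      return reported_temporal_idx == kNoTemporalIdxSentinel
                 ? 0
                 : reported_temporal_idx;
    }

    // In non-flexible mode the temporal layer of each frame is dictated by the
    // GOF template, indexed by the frame's position in the group.
    bool TemporalIdMatchesGof(const std::vector<int>& gof_temporal_idx,
                              size_t gof_idx,
                              uint8_t reported_temporal_idx) {
      return gof_idx < gof_temporal_idx.size() &&
             gof_temporal_idx[gof_idx] ==
                 EffectiveTemporalId(reported_temporal_idx);
    }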
- RTC_CHECK_EQ(layer_frame.vp9().temporal_idx, - previous_frame.vp9().temporal_idx); + RTC_CHECK_EQ(layer_frame.temporal_id, previous_frame.temporal_id); } } - if (!layer_frame.vp9().flexible_mode) { - if (layer_frame.vp9().gof.num_frames_in_gof > 0) { - gof_.CopyGofInfoVP9(layer_frame.vp9().gof); + if (!vp9_info.flexible_mode) { + if (vp9_info.gof.num_frames_in_gof > 0) { + gof_.CopyGofInfoVP9(vp9_info.gof); } - RTC_CHECK_EQ(gof_.temporal_idx[layer_frame.vp9().gof_idx], - layer_frame.temporal_id()); + RTC_CHECK_EQ(gof_.temporal_idx[vp9_info.gof_idx], + layer_frame.temporal_id); } } - void CheckGenericReferences(const LayerFrame& layer_frame) const { - const GenericFrameInfo& generic_info = *layer_frame.info.generic_frame_info; - for (int64_t dependency_frame_id : layer_frame.frame_dependencies) { + void CheckGenericReferences(rtc::ArrayView frame_dependencies, + const GenericFrameInfo& generic_info) const { + for (int64_t dependency_frame_id : frame_dependencies) { RTC_CHECK_GE(dependency_frame_id, 0); const LayerFrame& dependency = Frame(dependency_frame_id); - RTC_CHECK(dependency.info.generic_frame_info.has_value()); - RTC_CHECK_GE(generic_info.spatial_id, - dependency.info.generic_frame_info->spatial_id); - RTC_CHECK_GE(generic_info.temporal_id, - dependency.info.generic_frame_info->temporal_id); + RTC_CHECK_GE(generic_info.spatial_id, dependency.spatial_id); + RTC_CHECK_GE(generic_info.temporal_id, dependency.temporal_id); } } void CheckGenericAndCodecSpecificReferencesAreConsistent( + rtc::ArrayView frame_dependencies, + const CodecSpecificInfo& info, const LayerFrame& layer_frame) const { - const GenericFrameInfo& generic_info = *layer_frame.info.generic_frame_info; + const CodecSpecificInfoVP9& vp9_info = info.codecSpecific.VP9; + const GenericFrameInfo& generic_info = *info.generic_frame_info; + RTC_CHECK_EQ(generic_info.spatial_id, layer_frame.spatial_id); - RTC_CHECK_EQ(generic_info.temporal_id, layer_frame.temporal_id()); - auto picture_id_diffs = rtc::MakeArrayView(layer_frame.vp9().p_diff, - layer_frame.vp9().num_ref_pics); - RTC_CHECK_EQ(layer_frame.frame_dependencies.size(), - picture_id_diffs.size() + - (layer_frame.vp9().inter_layer_predicted ? 1 : 0)); - for (int64_t dependency_frame_id : layer_frame.frame_dependencies) { + RTC_CHECK_EQ(generic_info.temporal_id, layer_frame.temporal_id); + auto picture_id_diffs = + rtc::MakeArrayView(vp9_info.p_diff, vp9_info.num_ref_pics); + RTC_CHECK_EQ( + frame_dependencies.size(), + picture_id_diffs.size() + (vp9_info.inter_layer_predicted ? 
1 : 0)); + for (int64_t dependency_frame_id : frame_dependencies) { RTC_CHECK_GE(dependency_frame_id, 0); const LayerFrame& dependency = Frame(dependency_frame_id); if (dependency.spatial_id != layer_frame.spatial_id) { - RTC_CHECK(layer_frame.vp9().inter_layer_predicted); + RTC_CHECK(vp9_info.inter_layer_predicted); RTC_CHECK_EQ(layer_frame.picture_id, dependency.picture_id); RTC_CHECK_GT(layer_frame.spatial_id, dependency.spatial_id); } else { - RTC_CHECK(layer_frame.vp9().inter_pic_predicted); + RTC_CHECK(vp9_info.inter_pic_predicted); RTC_CHECK_EQ(layer_frame.spatial_id, dependency.spatial_id); RTC_CHECK(absl::c_linear_search( picture_id_diffs, layer_frame.picture_id - dependency.picture_id)); diff --git a/test/linux/glx_renderer.cc b/test/linux/glx_renderer.cc index 50f2a06a8e..04d482c88b 100644 --- a/test/linux/glx_renderer.cc +++ b/test/linux/glx_renderer.cc @@ -20,8 +20,8 @@ namespace test { GlxRenderer::GlxRenderer(size_t width, size_t height) : width_(width), height_(height), display_(NULL), context_(NULL) { - assert(width > 0); - assert(height > 0); + RTC_DCHECK_GT(width, 0); + RTC_DCHECK_GT(height, 0); } GlxRenderer::~GlxRenderer() { diff --git a/test/pc/e2e/BUILD.gn b/test/pc/e2e/BUILD.gn index 95b0a2a31c..9e9d5c2db5 100644 --- a/test/pc/e2e/BUILD.gn +++ b/test/pc/e2e/BUILD.gn @@ -289,6 +289,7 @@ if (!build_with_chromium) { "../../../api:peer_connection_quality_test_fixture_api", "../../../api/video:video_frame", "../../../pc:peerconnection", + "../../../pc:session_description", "../../../pc:video_track_source", ] absl_deps = [ "//third_party/abseil-cpp/absl/types:variant" ] @@ -737,6 +738,8 @@ if (!build_with_chromium) { "../../../p2p:rtc_p2p", "../../../pc:peerconnection", "../../../pc:rtc_pc_base", + "../../../pc:session_description", + "../../../pc:simulcast_description", "../../../rtc_base:stringutils", ] absl_deps = [ diff --git a/test/peer_scenario/BUILD.gn b/test/peer_scenario/BUILD.gn index 2034c9ad99..033ef4115a 100644 --- a/test/peer_scenario/BUILD.gn +++ b/test/peer_scenario/BUILD.gn @@ -47,6 +47,7 @@ if (rtc_include_tests) { "../../p2p:rtc_p2p", "../../pc:pc_test_utils", "../../pc:rtc_pc_base", + "../../pc:session_description", "../../rtc_base", "../../rtc_base:null_socket_server", "../../rtc_base:stringutils", diff --git a/test/peer_scenario/tests/BUILD.gn b/test/peer_scenario/tests/BUILD.gn index 0cf7cf3472..a8b9c2563e 100644 --- a/test/peer_scenario/tests/BUILD.gn +++ b/test/peer_scenario/tests/BUILD.gn @@ -25,6 +25,7 @@ if (rtc_include_tests) { "../../../modules/rtp_rtcp:rtp_rtcp", "../../../modules/rtp_rtcp:rtp_rtcp_format", "../../../pc:rtc_pc_base", + "../../../pc:session_description", ] } } diff --git a/test/peer_scenario/tests/remote_estimate_test.cc b/test/peer_scenario/tests/remote_estimate_test.cc index b882ad9dc2..f1d8345fde 100644 --- a/test/peer_scenario/tests/remote_estimate_test.cc +++ b/test/peer_scenario/tests/remote_estimate_test.cc @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "pc/media_session.h" #include "pc/session_description.h" @@ -29,7 +30,7 @@ absl::optional GetRtpPacketExtensions( const rtc::ArrayView packet, const RtpHeaderExtensionMap& extension_map) { RtpUtility::RtpHeaderParser rtp_parser(packet.data(), packet.size()); - if (!rtp_parser.RTCP()) { + if (IsRtpPacket(packet)) { RTPHeader header; if (rtp_parser.Parse(&header, &extension_map, true)) { return header.extension; diff --git a/test/peer_scenario/tests/unsignaled_stream_test.cc b/test/peer_scenario/tests/unsignaled_stream_test.cc index 95510a24bd..e0fe02edcf 100644 --- a/test/peer_scenario/tests/unsignaled_stream_test.cc +++ b/test/peer_scenario/tests/unsignaled_stream_test.cc @@ -10,20 +10,44 @@ #include "media/base/stream_params.h" #include "modules/rtp_rtcp/source/byte_io.h" - +#include "modules/rtp_rtcp/source/rtp_util.h" #include "pc/media_session.h" #include "pc/session_description.h" #include "test/field_trial.h" -#include "test/peer_scenario/peer_scenario.h" -#include "test/rtp_header_parser.h" - #include "test/gmock.h" #include "test/gtest.h" +#include "test/peer_scenario/peer_scenario.h" +#include "test/rtp_header_parser.h" namespace webrtc { namespace test { namespace { +enum class MidTestConfiguration { + // Legacy endpoint setup where PT demuxing is used. + kMidNotNegotiated, + // MID is negotiated but missing from packets. PT demuxing is disabled, so + // SSRCs have to be added to the SDP for WebRTC to forward packets correctly. + // Happens when client is spec compliant but the SFU isn't. Popular legacy. + kMidNegotiatedButMissingFromPackets, + // Fully spec-compliant: MID is present so we can safely drop packets with + // unknown MIDs. + kMidNegotiatedAndPresentInPackets, +}; + +// Gives the parameterized test a readable suffix. +std::string TestParametersMidTestConfigurationToString( + testing::TestParamInfo info) { + switch (info.param) { + case MidTestConfiguration::kMidNotNegotiated: + return "MidNotNegotiated"; + case MidTestConfiguration::kMidNegotiatedButMissingFromPackets: + return "MidNegotiatedButMissingFromPackets"; + case MidTestConfiguration::kMidNegotiatedAndPresentInPackets: + return "MidNegotiatedAndPresentInPackets"; + } +} + class FrameObserver : public rtc::VideoSinkInterface { public: FrameObserver() : frame_observed_(false) {} @@ -53,19 +77,24 @@ void set_ssrc(SessionDescriptionInterface* offer, size_t index, uint32_t ssrc) { } // namespace -TEST(UnsignaledStreamTest, ReplacesUnsignaledStreamOnCompletedSignaling) { +class UnsignaledStreamTest + : public ::testing::Test, + public ::testing::WithParamInterface {}; + +TEST_P(UnsignaledStreamTest, ReplacesUnsignaledStreamOnCompletedSignaling) { // This test covers a scenario that might occur if a remote client starts - // sending media packets before negotiation has completed. These packets will - // trigger an unsignalled default stream to be created, and connects that to - // a default video sink. - // In some edge cases using unified plan, the default stream is create in a - // different transceiver to where the media SSRC will actually be used. - // This test verifies that the default stream is removed properly, and that - // packets are demuxed and video frames reach the desired sink. + // sending media packets before negotiation has completed. 
Depending on setup, + // these packets either get dropped or trigger an unsignalled default stream + // to be created and connected to a default video sink. + // In some edge cases using Unified Plan and PT demuxing, the default stream + // is created in a different transceiver to where the media SSRC will actually + // be used. This test verifies that the default stream is removed properly, + // and that packets are demuxed and video frames reach the desired sink. + const MidTestConfiguration kMidTestConfiguration = GetParam(); // Defined before PeerScenario so it gets destructed after, to avoid use after // free. - PeerScenario s(*test_info_); + PeerScenario s(*::testing::UnitTest::GetInstance()->current_test_info()); PeerScenarioClient::Config config = PeerScenarioClient::Config(); // Disable encryption so that we can inject a fake early media packet without @@ -93,34 +122,109 @@ TEST(UnsignaledStreamTest, ReplacesUnsignaledStreamOnCompletedSignaling) { std::atomic got_unsignaled_packet(false); // We will capture the media ssrc of the first added stream, and preemptively - // inject a new media packet using a different ssrc. - // This will create "default stream" for the second ssrc and connected it to - // the default video sink (not set in this test). + // inject a new media packet using a different ssrc. What happens depends on + // the test configuration. + // + // MidTestConfiguration::kMidNotNegotiated: + // - MID is not negotiated which means PT-based demuxing is enabled. Because + // the packets have no MID, the second ssrc packet gets forwarded to the + // first m= section. This will create a "default stream" for the second ssrc + // and connect it to the default video sink (not set in this test). The test + // verifies we can recover from this when we later get packets for the first + // ssrc. + // + // MidTestConfiguration::kMidNegotiatedButMissingFromPackets: + // - MID is negotiated which means PT-based demuxing is disabled. Because we + // modify the packets not to contain the MID anyway (simulating a legacy SFU + // that does not negotiate properly), unknown SSRCs are dropped but do not + // otherwise cause any issues. + // + // MidTestConfiguration::kMidNegotiatedAndPresentInPackets: + // - MID is negotiated which means PT-based demuxing is disabled. In this case + // the packets have the MID so they either get forwarded or dropped + // depending on whether the MID is known. The spec-compliant way is also the most + // straightforward one. + uint32_t first_ssrc = 0; uint32_t second_ssrc = 0; + absl::optional mid_header_extension_id = absl::nullopt; signaling.NegotiateSdp( - /* munge_sdp = */ {}, + /* munge_sdp = */ + [&](SessionDescriptionInterface* offer) { + // Obtain the MID header extension ID and if we want the + // MidTestConfiguration::kMidNotNegotiated setup then we remove the MID + // header extension through SDP munging (otherwise SDP is not modified). + for (cricket::ContentInfo& content_info : + offer->description()->contents()) { + std::vector header_extensions = + content_info.media_description()->rtp_header_extensions(); + for (auto it = header_extensions.begin(); + it != header_extensions.end(); ++it) { + if (it->uri == RtpExtension::kMidUri) { + // MID header extension found! + mid_header_extension_id = it->id; + if (kMidTestConfiguration == + MidTestConfiguration::kMidNotNegotiated) { + // Munge away the extension.
+ header_extensions.erase(it); + } + break; + } + } + content_info.media_description()->set_rtp_header_extensions( + std::move(header_extensions)); + } + ASSERT_TRUE(mid_header_extension_id.has_value()); + }, /* modify_sdp = */ [&](SessionDescriptionInterface* offer) { first_ssrc = get_ssrc(offer, 0); second_ssrc = first_ssrc + 1; send_node->router()->SetWatcher([&](const EmulatedIpPacket& packet) { - if (packet.size() > 1 && packet.cdata()[0] >> 6 == 2 && - !RtpHeaderParser::IsRtcp(packet.data.cdata(), - packet.data.size())) { - if (ByteReader::ReadBigEndian(&(packet.cdata()[8])) == - first_ssrc && - !got_unsignaled_packet) { - rtc::CopyOnWriteBuffer updated_buffer = packet.data; - ByteWriter::WriteBigEndian( - updated_buffer.MutableData() + 8, second_ssrc); - EmulatedIpPacket updated_packet( - packet.from, packet.to, updated_buffer, packet.arrival_time); - send_node->OnPacketReceived(std::move(updated_packet)); - got_unsignaled_packet = true; + if (IsRtpPacket(packet.data) && + ByteReader::ReadBigEndian(&(packet.cdata()[8])) == + first_ssrc && + !got_unsignaled_packet) { + // Parse packet and modify the SSRC to simulate a second m= + // section that has not been negotiated yet. + std::vector extensions; + extensions.emplace_back(RtpExtension::kMidUri, + mid_header_extension_id.value()); + RtpHeaderExtensionMap extensions_map(extensions); + RtpPacket parsed_packet; + parsed_packet.IdentifyExtensions(extensions_map); + ASSERT_TRUE(parsed_packet.Parse(packet.data)); + parsed_packet.SetSsrc(second_ssrc); + // The MID extension is present if and only if it was negotiated. + // If present, we either want to remove it or modify it depending + // on setup. + switch (kMidTestConfiguration) { + case MidTestConfiguration::kMidNotNegotiated: + EXPECT_FALSE(parsed_packet.HasExtension()); + break; + case MidTestConfiguration::kMidNegotiatedButMissingFromPackets: + EXPECT_TRUE(parsed_packet.HasExtension()); + ASSERT_TRUE(parsed_packet.RemoveExtension(RtpMid::kId)); + break; + case MidTestConfiguration::kMidNegotiatedAndPresentInPackets: + EXPECT_TRUE(parsed_packet.HasExtension()); + // The simulated second m= section would have a different MID. + // If we don't modify it here then |second_ssrc| would end up + // being mapped to the first m= section which would cause SSRC + // conflicts if we later add the same SSRC to a second m= + // section. Hidden assumption: first m= section does not use + // MID:1. + ASSERT_TRUE(parsed_packet.SetExtension("1")); + break; } + // Inject the modified packet. 
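The watcher above rewrites the captured packet through RtpPacket so that header extensions such as MID stay consistent with the new SSRC, whereas the removed code patched the bytes directly. The byte-level view is still useful for understanding it: in the RTP fixed header the version is the top two bits of byte 0 and the SSRC occupies bytes 8-11 in network byte order. A standalone sketch of that raw rewrite (illustrative only, not what the test does anymore):

    #include <cstddef>
    #include <cstdint>

    // Overwrite the SSRC (bytes 8..11, big endian) of a raw RTP packet in place.
    // Returns false if the buffer is too short or does not carry RTP version 2.
    bool RewriteRtpSsrc(uint8_t* packet, size_t length, uint32_t new_ssrc) {
      if (length < 12 || (packet[0] >> 6) != 2) {
        return false;
      }
      packet[8] = static_cast<uint8_t>(new_ssrc >> 24);
      packet[9] = static_cast<uint8_t>(new_ssrc >> 16);
      packet[10] = static_cast<uint8_t>(new_ssrc >> 8);
      packet[11] = static_cast<uint8_t>(new_ssrc);
      return true;
    }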
+ rtc::CopyOnWriteBuffer updated_buffer = parsed_packet.Buffer(); + EmulatedIpPacket updated_packet( + packet.from, packet.to, updated_buffer, packet.arrival_time); + send_node->OnPacketReceived(std::move(updated_packet)); + got_unsignaled_packet = true; } }); }, @@ -153,5 +257,13 @@ TEST(UnsignaledStreamTest, ReplacesUnsignaledStreamOnCompletedSignaling) { EXPECT_TRUE(s.WaitAndProcess(&second_sink.frame_observed_)); } +INSTANTIATE_TEST_SUITE_P( + All, + UnsignaledStreamTest, + ::testing::Values(MidTestConfiguration::kMidNotNegotiated, + MidTestConfiguration::kMidNegotiatedButMissingFromPackets, + MidTestConfiguration::kMidNegotiatedAndPresentInPackets), + TestParametersMidTestConfigurationToString); + } // namespace test } // namespace webrtc diff --git a/test/rtp_file_reader.cc b/test/rtp_file_reader.cc index cc5f6f78a2..a09d5a66e4 100644 --- a/test/rtp_file_reader.cc +++ b/test/rtp_file_reader.cc @@ -17,6 +17,7 @@ #include #include +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" @@ -82,7 +83,7 @@ class InterleavedRtpFileReader : public RtpFileReaderImpl { } bool NextPacket(RtpPacket* packet) override { - assert(file_ != nullptr); + RTC_DCHECK(file_); packet->length = RtpPacket::kMaxPacketBufferSize; uint32_t len = 0; TRY(ReadUint32(&len, file_)); @@ -275,7 +276,7 @@ class PcapReader : public RtpFileReaderImpl { if (result == kResultFail) { break; } else if (result == kResultSuccess && packets_.size() == 1) { - assert(stream_start_ms == 0); + RTC_DCHECK_EQ(stream_start_ms, 0); PacketIterator it = packets_.begin(); stream_start_ms = it->time_offset_ms; it->time_offset_ms = 0; @@ -329,9 +330,9 @@ class PcapReader : public RtpFileReaderImpl { } virtual int NextPcap(uint8_t* data, uint32_t* length, uint32_t* time_ms) { - assert(data); - assert(length); - assert(time_ms); + RTC_DCHECK(data); + RTC_DCHECK(length); + RTC_DCHECK(time_ms); if (next_packet_it_ == packets_.end()) { return -1; @@ -408,7 +409,7 @@ class PcapReader : public RtpFileReaderImpl { uint32_t stream_start_ms, uint32_t number, const std::set& ssrc_filter) { - assert(next_packet_pos); + RTC_DCHECK(next_packet_pos); uint32_t ts_sec; // Timestamp seconds. uint32_t ts_usec; // Timestamp microseconds. @@ -434,7 +435,7 @@ class PcapReader : public RtpFileReaderImpl { TRY_PCAP(Read(read_buffer_, marker.payload_length)); RtpUtility::RtpHeaderParser rtp_parser(read_buffer_, marker.payload_length); - if (rtp_parser.RTCP()) { + if (IsRtcpPacket(rtc::MakeArrayView(read_buffer_, marker.payload_length))) { rtp_parser.ParseRtcp(&marker.rtp_header); packets_.push_back(marker); } else { @@ -503,7 +504,7 @@ class PcapReader : public RtpFileReaderImpl { } int ReadXxpIpHeader(RtpPacketMarker* marker) { - assert(marker); + RTC_DCHECK(marker); uint16_t version; uint16_t length; @@ -533,7 +534,7 @@ class PcapReader : public RtpFileReaderImpl { // Skip remaining fields of IP header. 
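On the IsRtpPacket/IsRtcpPacket helpers that replace RtpHeaderParser::IsRtcp here and in direct_transport.cc and rtp_rtcp_observer.h: both packet families start with version 2 in the top bits of the first byte, and RFC 5761 keeps them apart on a shared port by reserving payload-type values 64-95 (the low seven bits of the second byte) for RTCP. A hedged sketch of that classification rule over raw bytes, not a copy of the WebRTC helpers:

    #include <cstddef>
    #include <cstdint>

    // RFC 5761, section 4: on a muxed port, a packet whose payload-type field
    // falls in [64, 95] is RTCP; valid RTP payload types avoid that range.
    bool LooksLikeRtcp(const uint8_t* packet, size_t length) {
      if (length < 4 || (packet[0] >> 6) != 2) {
        return false;  // Too short, or not version 2.
      }
      const uint8_t payload_type = packet[1] & 0x7F;
      return payload_type >= 64 && payload_type < 96;
    }

    bool LooksLikeRtp(const uint8_t* packet, size_t length) {
      if (length < 12 || (packet[0] >> 6) != 2) {
        return false;  // An RTP fixed header is at least 12 bytes.
      }
      const uint8_t payload_type = packet[1] & 0x7F;
      return payload_type < 64 || payload_type >= 96;
    }

This is also why the demuxer unit test earlier in this patch grows its buffer to 12 bytes and sets 0x80 in the first byte: that is the smallest packet that passes an RTP sanity check.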
uint16_t header_length = (version & 0x0f00) >> (8 - 2); - assert(header_length >= kMinIpHeaderLength); + RTC_DCHECK_GE(header_length, kMinIpHeaderLength); TRY_PCAP(Skip(header_length - kMinIpHeaderLength)); protocol = protocol & 0x00ff; diff --git a/test/rtp_header_parser.cc b/test/rtp_header_parser.cc index 45686acb4c..48e493ddeb 100644 --- a/test/rtp_header_parser.cc +++ b/test/rtp_header_parser.cc @@ -9,46 +9,10 @@ */ #include "test/rtp_header_parser.h" -#include - -#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/rtp_utility.h" -#include "rtc_base/synchronization/mutex.h" -#include "rtc_base/thread_annotations.h" namespace webrtc { -class RtpHeaderParserImpl : public RtpHeaderParser { - public: - RtpHeaderParserImpl(); - ~RtpHeaderParserImpl() override = default; - - bool Parse(const uint8_t* packet, - size_t length, - RTPHeader* header) const override; - - bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id) override; - bool RegisterRtpHeaderExtension(RtpExtension extension) override; - - bool DeregisterRtpHeaderExtension(RTPExtensionType type) override; - bool DeregisterRtpHeaderExtension(RtpExtension extension) override; - - private: - mutable Mutex mutex_; - RtpHeaderExtensionMap rtp_header_extension_map_ RTC_GUARDED_BY(mutex_); -}; - -std::unique_ptr RtpHeaderParser::CreateForTest() { - return std::make_unique(); -} - -RtpHeaderParserImpl::RtpHeaderParserImpl() {} - -bool RtpHeaderParser::IsRtcp(const uint8_t* packet, size_t length) { - RtpUtility::RtpHeaderParser rtp_parser(packet, length); - return rtp_parser.RTCP(); -} - absl::optional RtpHeaderParser::GetSsrc(const uint8_t* packet, size_t length) { RtpUtility::RtpHeaderParser rtp_parser(packet, length); @@ -59,43 +23,4 @@ absl::optional RtpHeaderParser::GetSsrc(const uint8_t* packet, return absl::nullopt; } -bool RtpHeaderParserImpl::Parse(const uint8_t* packet, - size_t length, - RTPHeader* header) const { - RtpUtility::RtpHeaderParser rtp_parser(packet, length); - *header = RTPHeader(); - - RtpHeaderExtensionMap map; - { - MutexLock lock(&mutex_); - map = rtp_header_extension_map_; - } - - const bool valid_rtpheader = rtp_parser.Parse(header, &map); - if (!valid_rtpheader) { - return false; - } - return true; -} -bool RtpHeaderParserImpl::RegisterRtpHeaderExtension(RtpExtension extension) { - MutexLock lock(&mutex_); - return rtp_header_extension_map_.RegisterByUri(extension.id, extension.uri); -} - -bool RtpHeaderParserImpl::RegisterRtpHeaderExtension(RTPExtensionType type, - uint8_t id) { - MutexLock lock(&mutex_); - return rtp_header_extension_map_.RegisterByType(id, type); -} - -bool RtpHeaderParserImpl::DeregisterRtpHeaderExtension(RtpExtension extension) { - MutexLock lock(&mutex_); - return rtp_header_extension_map_.Deregister( - rtp_header_extension_map_.GetType(extension.id)); -} - -bool RtpHeaderParserImpl::DeregisterRtpHeaderExtension(RTPExtensionType type) { - MutexLock lock(&mutex_); - return rtp_header_extension_map_.Deregister(type) == 0; -} } // namespace webrtc diff --git a/test/rtp_header_parser.h b/test/rtp_header_parser.h index 851ccf3bc2..f6ed74c043 100644 --- a/test/rtp_header_parser.h +++ b/test/rtp_header_parser.h @@ -10,44 +10,16 @@ #ifndef TEST_RTP_HEADER_PARSER_H_ #define TEST_RTP_HEADER_PARSER_H_ -#include +#include +#include -#include "api/rtp_parameters.h" -#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "absl/types/optional.h" namespace webrtc { -struct RTPHeader; - class RtpHeaderParser { public: - static 
std::unique_ptr CreateForTest(); - virtual ~RtpHeaderParser() {} - - // Returns true if the packet is an RTCP packet, false otherwise. - static bool IsRtcp(const uint8_t* packet, size_t length); static absl::optional GetSsrc(const uint8_t* packet, size_t length); - - // Parses the packet and stores the parsed packet in |header|. Returns true on - // success, false otherwise. - // This method is thread-safe in the sense that it can parse multiple packets - // at once. - virtual bool Parse(const uint8_t* packet, - size_t length, - RTPHeader* header) const = 0; - - // Registers an RTP header extension and binds it to |id|. - virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, - uint8_t id) = 0; - - // Registers an RTP header extension. - virtual bool RegisterRtpHeaderExtension(RtpExtension extension) = 0; - - // De-registers an RTP header extension. - virtual bool DeregisterRtpHeaderExtension(RTPExtensionType type) = 0; - - // De-registers an RTP header extension. - virtual bool DeregisterRtpHeaderExtension(RtpExtension extension) = 0; }; } // namespace webrtc #endif // TEST_RTP_HEADER_PARSER_H_ diff --git a/test/rtp_rtcp_observer.h b/test/rtp_rtcp_observer.h index 036f5cdc20..f17560f021 100644 --- a/test/rtp_rtcp_observer.h +++ b/test/rtp_rtcp_observer.h @@ -15,14 +15,15 @@ #include #include +#include "api/array_view.h" #include "api/test/simulated_network.h" #include "call/simulated_packet_receiver.h" #include "call/video_send_stream.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/event.h" #include "system_wrappers/include/field_trial.h" #include "test/direct_transport.h" #include "test/gtest.h" -#include "test/rtp_header_parser.h" namespace { const int kShortTimeoutMs = 500; @@ -98,7 +99,7 @@ class PacketTransport : public test::DirectTransport { bool SendRtp(const uint8_t* packet, size_t length, const PacketOptions& options) override { - EXPECT_FALSE(RtpHeaderParser::IsRtcp(packet, length)); + EXPECT_TRUE(IsRtpPacket(rtc::MakeArrayView(packet, length))); RtpRtcpObserver::Action action; { if (transport_type_ == kSender) { @@ -118,7 +119,7 @@ class PacketTransport : public test::DirectTransport { } bool SendRtcp(const uint8_t* packet, size_t length) override { - EXPECT_TRUE(RtpHeaderParser::IsRtcp(packet, length)); + EXPECT_TRUE(IsRtcpPacket(rtc::MakeArrayView(packet, length))); RtpRtcpObserver::Action action; { if (transport_type_ == kSender) { diff --git a/test/scenario/audio_stream.cc b/test/scenario/audio_stream.cc index f3cb8320aa..63f78c8f71 100644 --- a/test/scenario/audio_stream.cc +++ b/test/scenario/audio_stream.cc @@ -185,7 +185,6 @@ ReceiveAudioStream::ReceiveAudioStream( recv_config.rtp.extensions = {{RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberExtensionId}}; } - receiver_->AddExtensions(recv_config.rtp.extensions); recv_config.decoder_factory = decoder_factory; recv_config.decoder_map = { {CallTest::kAudioSendPayloadType, {"opus", 48000, 2}}}; diff --git a/test/scenario/call_client.cc b/test/scenario/call_client.cc index f7cd47c36e..be8d39f2a5 100644 --- a/test/scenario/call_client.cc +++ b/test/scenario/call_client.cc @@ -17,6 +17,8 @@ #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/transport/network_types.h" #include "modules/audio_mixer/audio_mixer_impl.h" +#include "modules/rtp_rtcp/source/rtp_util.h" +#include "test/rtp_header_parser.h" namespace webrtc { namespace test { @@ -213,7 +215,6 @@ CallClient::CallClient( clock_(time_controller->GetClock()), 
log_writer_factory_(std::move(log_writer_factory)), network_controller_factory_(log_writer_factory_.get(), config.transport), - header_parser_(RtpHeaderParser::CreateForTest()), task_queue_(time_controller->GetTaskQueueFactory()->CreateTaskQueue( "CallClient", TaskQueueFactory::Priority::NORMAL)) { @@ -293,7 +294,7 @@ void CallClient::UpdateBitrateConstraints( void CallClient::OnPacketReceived(EmulatedIpPacket packet) { MediaType media_type = MediaType::ANY; - if (!RtpHeaderParser::IsRtcp(packet.cdata(), packet.data.size())) { + if (IsRtpPacket(packet.data)) { auto ssrc = RtpHeaderParser::GetSsrc(packet.cdata(), packet.data.size()); RTC_CHECK(ssrc.has_value()); media_type = ssrc_media_types_[*ssrc]; @@ -338,11 +339,6 @@ uint32_t CallClient::GetNextRtxSsrc() { return kSendRtxSsrcs[next_rtx_ssrc_index_++]; } -void CallClient::AddExtensions(std::vector extensions) { - for (const auto& extension : extensions) - header_parser_->RegisterRtpHeaderExtension(extension); -} - void CallClient::SendTask(std::function task) { task_queue_.SendTask(std::move(task), RTC_FROM_HERE); } diff --git a/test/scenario/call_client.h b/test/scenario/call_client.h index 27ec9fa39c..08b0131350 100644 --- a/test/scenario/call_client.h +++ b/test/scenario/call_client.h @@ -26,7 +26,6 @@ #include "rtc_base/task_queue_for_test.h" #include "test/logging/log_writer.h" #include "test/network/network_emulation.h" -#include "test/rtp_header_parser.h" #include "test/scenario/column_printer.h" #include "test/scenario/network_node.h" #include "test/scenario/scenario_config.h" @@ -137,7 +136,6 @@ class CallClient : public EmulatedNetworkReceiverInterface { uint32_t GetNextAudioSsrc(); uint32_t GetNextAudioLocalSsrc(); uint32_t GetNextRtxSsrc(); - void AddExtensions(std::vector extensions); int16_t Bind(EmulatedEndpoint* endpoint); void UnBind(); @@ -149,7 +147,6 @@ class CallClient : public EmulatedNetworkReceiverInterface { CallClientFakeAudio fake_audio_setup_; std::unique_ptr call_; std::unique_ptr transport_; - std::unique_ptr const header_parser_; std::vector> endpoints_; int next_video_ssrc_index_ = 0; diff --git a/test/scenario/video_stream.cc b/test/scenario/video_stream.cc index 709f0b71b5..96f6f5bc59 100644 --- a/test/scenario/video_stream.cc +++ b/test/scenario/video_stream.cc @@ -571,10 +571,10 @@ ReceiveVideoStream::ReceiveVideoStream(CallClient* receiver, RTC_DCHECK(num_streams == 1); FlexfecReceiveStream::Config flexfec(feedback_transport); flexfec.payload_type = CallTest::kFlexfecPayloadType; - flexfec.remote_ssrc = CallTest::kFlexfecSendSsrc; + flexfec.rtp.remote_ssrc = CallTest::kFlexfecSendSsrc; flexfec.protected_media_ssrcs = send_stream->rtx_ssrcs_; - flexfec.local_ssrc = recv_config.rtp.local_ssrc; - receiver_->ssrc_media_types_[flexfec.remote_ssrc] = MediaType::VIDEO; + flexfec.rtp.local_ssrc = recv_config.rtp.local_ssrc; + receiver_->ssrc_media_types_[flexfec.rtp.remote_ssrc] = MediaType::VIDEO; receiver_->SendTask([this, &flexfec] { flecfec_stream_ = receiver_->call_->CreateFlexfecReceiveStream(flexfec); diff --git a/test/testsupport/file_utils.cc b/test/testsupport/file_utils.cc index 0b4ffa446c..1f829d320b 100644 --- a/test/testsupport/file_utils.cc +++ b/test/testsupport/file_utils.cc @@ -107,7 +107,7 @@ std::string TempFilename(const std::string& dir, const std::string& prefix) { if (::GetTempFileNameW(rtc::ToUtf16(dir).c_str(), rtc::ToUtf16(prefix).c_str(), 0, filename) != 0) return rtc::ToUtf8(filename); - assert(false); + RTC_NOTREACHED(); return ""; #else int len = dir.size() + prefix.size() + 2 + 
6; @@ -116,7 +116,7 @@ std::string TempFilename(const std::string& dir, const std::string& prefix) { snprintf(tempname.get(), len, "%s/%sXXXXXX", dir.c_str(), prefix.c_str()); int fd = ::mkstemp(tempname.get()); if (fd == -1) { - assert(false); + RTC_NOTREACHED(); return ""; } else { ::close(fd); diff --git a/test/testsupport/ivf_video_frame_generator_unittest.cc b/test/testsupport/ivf_video_frame_generator_unittest.cc index bea9cd2489..126f7203b8 100644 --- a/test/testsupport/ivf_video_frame_generator_unittest.cc +++ b/test/testsupport/ivf_video_frame_generator_unittest.cc @@ -48,7 +48,7 @@ constexpr int kMaxFramerate = 30; constexpr int kMaxFrameEncodeWaitTimeoutMs = 2000; static const VideoEncoder::Capabilities kCapabilities(false); -#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) +#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM64) constexpr double kExpectedMinPsnr = 35; #else constexpr double kExpectedMinPsnr = 39; diff --git a/test/time_controller/simulated_time_controller.cc b/test/time_controller/simulated_time_controller.cc index aba8c6600e..a34abe8ced 100644 --- a/test/time_controller/simulated_time_controller.cc +++ b/test/time_controller/simulated_time_controller.cc @@ -226,4 +226,14 @@ void GlobalSimulatedTimeController::AdvanceTime(TimeDelta duration) { impl_.RunReadyRunners(); } +void GlobalSimulatedTimeController::Register( + sim_time_impl::SimulatedSequenceRunner* runner) { + impl_.Register(runner); +} + +void GlobalSimulatedTimeController::Unregister( + sim_time_impl::SimulatedSequenceRunner* runner) { + impl_.Unregister(runner); +} + } // namespace webrtc diff --git a/test/time_controller/simulated_time_controller.h b/test/time_controller/simulated_time_controller.h index 0ff3c2f894..9ded4689de 100644 --- a/test/time_controller/simulated_time_controller.h +++ b/test/time_controller/simulated_time_controller.h @@ -140,6 +140,17 @@ class GlobalSimulatedTimeController : public TimeController { void AdvanceTime(TimeDelta duration) override; + // Makes the simulated time controller aware of a custom + // SimulatedSequenceRunner. + // TODO(bugs.webrtc.org/11581): remove method once the ModuleRtpRtcpImpl2 unit + // test stops using it. + void Register(sim_time_impl::SimulatedSequenceRunner* runner); + // Removes a previously installed custom SimulatedSequenceRunner from the + // simulated time controller. + // TODO(bugs.webrtc.org/11581): remove method once the ModuleRtpRtcpImpl2 unit + // test stops using it. + void Unregister(sim_time_impl::SimulatedSequenceRunner* runner); + private: rtc::ScopedBaseFakeClock global_clock_; // Provides simulated CurrentNtpInMilliseconds() diff --git a/test/time_controller/time_controller_conformance_test.cc b/test/time_controller/time_controller_conformance_test.cc index 10f0e1d724..3d582cad8e 100644 --- a/test/time_controller/time_controller_conformance_test.cc +++ b/test/time_controller/time_controller_conformance_test.cc @@ -92,6 +92,9 @@ TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostOrderTest) { thread->PostTask(RTC_FROM_HERE, [&]() { execution_order.Executed(2); }); time_controller->AdvanceTime(TimeDelta::Millis(100)); EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. 
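The explicit teardown added above addresses a hazard that is easy to hit with lambdas capturing locals by reference: if the thread or task queue outlives the captured objects, a task that is still queued (or still running) can touch destroyed state. Destroying the worker first forces its queue to be stopped and drained while the captured locals are still alive. A condensed illustration of the ordering rule, with a hypothetical Worker type standing in for rtc::Thread or a task queue:

    #include <functional>
    #include <memory>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for a thread / task queue; real implementations
    // stop and drain their queue when destroyed.
    class Worker {
     public:
      void Post(std::function<void()> task) {
        pending_.push_back(std::move(task));
      }
      ~Worker() {
        for (auto& task : pending_) task();  // Drain before going away.
      }

     private:
      std::vector<std::function<void()>> pending_;
    };

    void CorrectTeardownOrder() {
      std::vector<int> execution_order;  // Captured by reference below.
      auto worker = std::make_unique<Worker>();
      worker->Post([&] { execution_order.push_back(1); });
      // Destroy the worker (running or dropping its pending tasks) while
      // |execution_order| is still alive; the reverse order would let the
      // drained task write into a destroyed vector.
      worker = nullptr;
    }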
+ thread = nullptr; } TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostDelayedOrderTest) { @@ -105,6 +108,9 @@ TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostDelayedOrderTest) { thread->PostTask(ToQueuedTask([&]() { execution_order.Executed(1); })); time_controller->AdvanceTime(TimeDelta::Millis(600)); EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; } TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostInvokeOrderTest) { @@ -119,6 +125,9 @@ TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostInvokeOrderTest) { thread->Invoke(RTC_FROM_HERE, [&]() { execution_order.Executed(2); }); time_controller->AdvanceTime(TimeDelta::Millis(100)); EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; } TEST_P(SimulatedRealTimeControllerConformanceTest, @@ -136,6 +145,9 @@ TEST_P(SimulatedRealTimeControllerConformanceTest, }); time_controller->AdvanceTime(TimeDelta::Millis(100)); EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; } TEST_P(SimulatedRealTimeControllerConformanceTest, @@ -158,6 +170,9 @@ TEST_P(SimulatedRealTimeControllerConformanceTest, /*warn_after_ms=*/10'000)); time_controller->AdvanceTime(TimeDelta::Millis(100)); EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `task_queue` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. 
+ task_queue = nullptr; } INSTANTIATE_TEST_SUITE_P(ConformanceTest, diff --git a/tools_webrtc/autoroller/roll_deps.py b/tools_webrtc/autoroller/roll_deps.py index f1a1235f20..286c3c4cda 100755 --- a/tools_webrtc/autoroller/roll_deps.py +++ b/tools_webrtc/autoroller/roll_deps.py @@ -568,16 +568,16 @@ def _IsTreeClean(): return False -def _EnsureUpdatedMasterBranch(dry_run): +def _EnsureUpdatedMainBranch(dry_run): current_branch = _RunCommand(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].splitlines()[0] - if current_branch != 'master': + if current_branch != 'main': logging.error( - 'Please checkout the master branch and re-run this script.') + 'Please checkout the main branch and re-run this script.') if not dry_run: sys.exit(-1) - logging.info('Updating master branch...') + logging.info('Updating main branch...') _RunCommand(['git', 'pull']) @@ -590,7 +590,7 @@ def _CreateRollBranch(dry_run): def _RemovePreviousRollBranch(dry_run): active_branch, branches = _GetBranches() if active_branch == ROLL_BRANCH_NAME: - active_branch = 'master' + active_branch = 'main' if ROLL_BRANCH_NAME in branches: logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME) if not dry_run: @@ -672,7 +672,7 @@ def main(): '--ignore-unclean-workdir', action='store_true', default=False, - help=('Ignore if the current branch is not master or if there ' + help=('Ignore if the current branch is not main or if there ' 'are uncommitted changes (default: %(default)s).')) grp = p.add_mutually_exclusive_group() grp.add_argument( @@ -705,7 +705,7 @@ def main(): _RemovePreviousRollBranch(opts.dry_run) if not opts.ignore_unclean_workdir: - _EnsureUpdatedMasterBranch(opts.dry_run) + _EnsureUpdatedMainBranch(opts.dry_run) deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS') webrtc_deps = ParseLocalDepsFile(deps_filename) diff --git a/tools_webrtc/ios/build_ios_libs.py b/tools_webrtc/ios/build_ios_libs.py index 3be0eb1975..c931853229 100755 --- a/tools_webrtc/ios/build_ios_libs.py +++ b/tools_webrtc/ios/build_ios_libs.py @@ -7,7 +7,7 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. -"""WebRTC iOS FAT libraries build script. +"""WebRTC iOS XCFramework build script. Each architecture is compiled separately before being merged together. By default, the library is created in out_ios_libs/. (Change with -o.) 
""" @@ -29,9 +29,17 @@ SDK_OUTPUT_DIR = os.path.join(SRC_DIR, 'out_ios_libs') SDK_FRAMEWORK_NAME = 'WebRTC.framework' - -DEFAULT_ARCHS = ENABLED_ARCHS = ['arm64', 'arm', 'x64', 'x86'] -IOS_DEPLOYMENT_TARGET = '10.0' +SDK_DSYM_NAME = 'WebRTC.dSYM' +SDK_XCFRAMEWORK_NAME = 'WebRTC.xcframework' + +ENABLED_ARCHS = [ + 'device:arm64', 'simulator:arm64', 'simulator:x64', + 'arm64', 'x64' +] +DEFAULT_ARCHS = [ + 'device:arm64', 'simulator:arm64', 'simulator:x64' +] +IOS_DEPLOYMENT_TARGET = '12.0' LIBVPX_BUILD_VP9 = False sys.path.append(os.path.join(SCRIPT_DIR, '..', 'libs')) @@ -114,15 +122,37 @@ def _CleanTemporary(output_dir, architectures): if os.path.isdir(output_dir): logging.info('Removing temporary build files.') for arch in architectures: - arch_lib_path = os.path.join(output_dir, arch + '_libs') + arch_lib_path = os.path.join(output_dir, arch) if os.path.isdir(arch_lib_path): shutil.rmtree(arch_lib_path) -def BuildWebRTC(output_dir, target_arch, flavor, gn_target_name, - ios_deployment_target, libvpx_build_vp9, use_bitcode, use_goma, - extra_gn_args): - output_dir = os.path.join(output_dir, target_arch + '_libs') +def _ParseArchitecture(architectures): + result = dict() + for arch in architectures: + if ":" in arch: + target_environment, target_cpu = arch.split(":") + else: + logging.warning('The environment for build is not specified.') + logging.warning('It is assumed based on cpu type.') + logging.warning('See crbug.com/1138425 for more details.') + if arch == "x64": + target_environment = "simulator" + else: + target_environment = "device" + target_cpu = arch + archs = result.get(target_environment) + if archs is None: + result[target_environment] = {target_cpu} + else: + archs.add(target_cpu) + + return result + + +def BuildWebRTC(output_dir, target_environment, target_arch, flavor, + gn_target_name, ios_deployment_target, libvpx_build_vp9, + use_bitcode, use_goma, extra_gn_args): gn_args = [ 'target_os="ios"', 'ios_enable_code_signing=false', 'use_xcode_clang=true', 'is_component_build=false', @@ -137,6 +167,8 @@ def BuildWebRTC(output_dir, target_arch, flavor, gn_target_name, else: raise ValueError('Unexpected flavor type: %s' % flavor) + gn_args.append('target_environment="%s"' % target_environment) + gn_args.append('target_cpu="%s"' % target_arch) gn_args.append('ios_deployment_target="%s"' % ios_deployment_target) @@ -182,11 +214,14 @@ def main(): _CleanArtifacts(args.output_dir) return 0 - architectures = list(args.arch) + # architectures is typed as Dict[str, Set[str]], + # where key is for the environment (device or simulator) + # and value is for the cpu type. + architectures = _ParseArchitecture(args.arch) gn_args = args.extra_gn_args if args.purify: - _CleanTemporary(args.output_dir, architectures) + _CleanTemporary(args.output_dir, architectures.keys()) return 0 gn_target_name = 'framework_objc' @@ -195,78 +230,101 @@ def main(): gn_args.append('enable_stripping=true') # Build all architectures. - for arch in architectures: - BuildWebRTC(args.output_dir, arch, args.build_config, gn_target_name, - IOS_DEPLOYMENT_TARGET, LIBVPX_BUILD_VP9, args.bitcode, - args.use_goma, gn_args) - - # Create FAT archive. - lib_paths = [ - os.path.join(args.output_dir, arch + '_libs') for arch in architectures - ] - - # Combine the slices. - dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC') - # Dylibs will be combined, all other files are the same across archs. - # Use distutils instead of shutil to support merging folders. 
- distutils.dir_util.copy_tree( - os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME), - os.path.join(args.output_dir, SDK_FRAMEWORK_NAME)) - logging.info('Merging framework slices.') - dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths] - out_dylib_path = os.path.join(args.output_dir, dylib_path) - try: - os.remove(out_dylib_path) - except OSError: - pass - cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path] - _RunCommand(cmd) - - # Merge the dSYM slices. - lib_dsym_dir_path = os.path.join(lib_paths[0], 'WebRTC.dSYM') - if os.path.isdir(lib_dsym_dir_path): + framework_paths = [] + all_lib_paths = [] + for (environment, archs) in architectures.items(): + framework_path = os.path.join(args.output_dir, environment) + framework_paths.append(framework_path) + lib_paths = [] + for arch in archs: + lib_path = os.path.join(framework_path, arch + '_libs') + lib_paths.append(lib_path) + BuildWebRTC(lib_path, environment, arch, args.build_config, + gn_target_name, IOS_DEPLOYMENT_TARGET, + LIBVPX_BUILD_VP9, args.bitcode, args.use_goma, gn_args) + all_lib_paths.extend(lib_paths) + + # Combine the slices. + dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC') + # Dylibs will be combined, all other files are the same across archs. + # Use distutils instead of shutil to support merging folders. distutils.dir_util.copy_tree( - lib_dsym_dir_path, os.path.join(args.output_dir, 'WebRTC.dSYM')) - logging.info('Merging dSYM slices.') - dsym_path = os.path.join('WebRTC.dSYM', 'Contents', 'Resources', - 'DWARF', 'WebRTC') - lib_dsym_paths = [os.path.join(path, dsym_path) for path in lib_paths] - out_dsym_path = os.path.join(args.output_dir, dsym_path) + os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME), + os.path.join(framework_path, SDK_FRAMEWORK_NAME)) + logging.info('Merging framework slices for %s.', environment) + dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths] + out_dylib_path = os.path.join(framework_path, dylib_path) try: - os.remove(out_dsym_path) + os.remove(out_dylib_path) except OSError: pass - cmd = ['lipo'] + lib_dsym_paths + ['-create', '-output', out_dsym_path] + cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path] _RunCommand(cmd) - # Generate the license file. - ninja_dirs = [ - os.path.join(args.output_dir, arch + '_libs') - for arch in architectures - ] - gn_target_full_name = '//sdk:' + gn_target_name - builder = LicenseBuilder(ninja_dirs, [gn_target_full_name]) - builder.GenerateLicenseText( - os.path.join(args.output_dir, SDK_FRAMEWORK_NAME)) - - # Modify the version number. - # Format should be ... - # e.g. 55.0.14986 means branch cut 55, no hotfixes, and revision 14986. - infoplist_path = os.path.join(args.output_dir, SDK_FRAMEWORK_NAME, - 'Info.plist') - cmd = [ - 'PlistBuddy', '-c', 'Print :CFBundleShortVersionString', - infoplist_path + # Merge the dSYM slices. + lib_dsym_dir_path = os.path.join(lib_paths[0], SDK_DSYM_NAME) + if os.path.isdir(lib_dsym_dir_path): + distutils.dir_util.copy_tree( + lib_dsym_dir_path, os.path.join(framework_path, SDK_DSYM_NAME)) + logging.info('Merging dSYM slices.') + dsym_path = os.path.join(SDK_DSYM_NAME, 'Contents', 'Resources', + 'DWARF', 'WebRTC') + lib_dsym_paths = [ + os.path.join(path, dsym_path) for path in lib_paths + ] + out_dsym_path = os.path.join(framework_path, dsym_path) + try: + os.remove(out_dsym_path) + except OSError: + pass + cmd = ['lipo' + ] + lib_dsym_paths + ['-create', '-output', out_dsym_path] + _RunCommand(cmd) + + # Modify the version number. 
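A condensed sketch of the per-environment merge loop above, assuming the per-arch build directories already exist; `subprocess.check_call` stands in for the script's `_RunCommand`, and only the dylib merge is shown:

import os
import subprocess


def merge_framework_slices(arch_lib_paths, framework_dir):
    # All non-binary files are identical across architectures, so only the
    # WebRTC dylib slices need to be combined with lipo.
    dylib_rel = os.path.join('WebRTC.framework', 'WebRTC')
    out_dylib = os.path.join(framework_dir, dylib_rel)
    slices = [os.path.join(path, dylib_rel) for path in arch_lib_paths]
    if os.path.exists(out_dylib):
        os.remove(out_dylib)
    subprocess.check_call(
        ['lipo'] + slices + ['-create', '-output', out_dylib])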
+ # Format should be ... + # e.g. 55.0.14986 means + # branch cut 55, no hotfixes, and revision 14986. + infoplist_path = os.path.join(framework_path, SDK_FRAMEWORK_NAME, + 'Info.plist') + cmd = [ + 'PlistBuddy', '-c', 'Print :CFBundleShortVersionString', + infoplist_path + ] + major_minor = subprocess.check_output(cmd).strip() + version_number = '%s.%s' % (major_minor, args.revision) + logging.info('Substituting revision number: %s', version_number) + cmd = [ + 'PlistBuddy', '-c', 'Set :CFBundleVersion ' + version_number, + infoplist_path + ] + _RunCommand(cmd) + _RunCommand(['plutil', '-convert', 'binary1', infoplist_path]) + + xcframework_dir = os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME) + if os.path.isdir(xcframework_dir): + shutil.rmtree(xcframework_dir) + + logging.info('Creating xcframework.') + cmd = ['xcodebuild', '-create-xcframework', '-output', xcframework_dir] + + # Apparently, xcodebuild needs absolute paths for input arguments + for framework_path in framework_paths: + cmd += [ + '-framework', + os.path.abspath(os.path.join(framework_path, SDK_FRAMEWORK_NAME)), + '-debug-symbols', + os.path.abspath(os.path.join(framework_path, SDK_DSYM_NAME)) ] - major_minor = subprocess.check_output(cmd).strip() - version_number = '%s.%s' % (major_minor, args.revision) - logging.info('Substituting revision number: %s', version_number) - cmd = [ - 'PlistBuddy', '-c', 'Set :CFBundleVersion ' + version_number, - infoplist_path - ] - _RunCommand(cmd) - _RunCommand(['plutil', '-convert', 'binary1', infoplist_path]) + + _RunCommand(cmd) + + # Generate the license file. + logging.info('Generate license file.') + gn_target_full_name = '//sdk:' + gn_target_name + builder = LicenseBuilder(all_lib_paths, [gn_target_full_name]) + builder.GenerateLicenseText( + os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME)) logging.info('Done.') return 0 diff --git a/tools_webrtc/libs/generate_licenses.py b/tools_webrtc/libs/generate_licenses.py index a23123e760..cbb1514d3c 100755 --- a/tools_webrtc/libs/generate_licenses.py +++ b/tools_webrtc/libs/generate_licenses.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2016 The WebRTC project authors. All Rights Reserved. # @@ -23,12 +23,16 @@ import sys import argparse -import cgi import json import logging import os import re import subprocess +try: + # python 3.2+ + from html import escape +except ImportError: + from cgi import escape # Third_party library to licences mapping. 
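The xcframework step above is the part that replaces the old FAT-library output. A hedged sketch of the same command assembly, assuming one merged WebRTC.framework and WebRTC.dSYM per environment directory; xcodebuild requires absolute paths for its inputs:

import os
import subprocess


def create_xcframework(framework_paths, output_dir):
    xcframework_dir = os.path.join(output_dir, 'WebRTC.xcframework')
    cmd = ['xcodebuild', '-create-xcframework', '-output', xcframework_dir]
    for path in framework_paths:
        cmd += [
            '-framework',
            os.path.abspath(os.path.join(path, 'WebRTC.framework')),
            '-debug-symbols',
            os.path.abspath(os.path.join(path, 'WebRTC.dSYM')),
        ]
    subprocess.check_call(cmd)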
Keys are names of the libraries # (right after the `third_party/` prefix) @@ -182,7 +186,7 @@ def _RunGN(buildfile_dir, target): target, ] logging.debug('Running: %r', cmd) - output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT) + output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT).decode('UTF-8') logging.debug('Output: %s', output_json) return output_json @@ -209,7 +213,7 @@ def GenerateLicenseText(self, output_dir): self.common_licenses_dict.keys()) if missing_licenses: error_msg = 'Missing licenses for following third_party targets: %s' % \ - ', '.join(missing_licenses) + ', '.join(sorted(missing_licenses)) logging.error(error_msg) raise Exception(error_msg) @@ -234,7 +238,7 @@ def GenerateLicenseText(self, output_dir): for path in self.common_licenses_dict[license_lib]: license_path = os.path.join(WEBRTC_ROOT, path) with open(license_path, 'r') as license_file: - license_text = cgi.escape(license_file.read(), quote=True) + license_text = escape(license_file.read(), quote=True) output_license_file.write(license_text) output_license_file.write('\n') output_license_file.write('```\n\n') diff --git a/tools_webrtc/libs/generate_licenses_test.py b/tools_webrtc/libs/generate_licenses_test.py index 51acb89881..ebef78e132 100755 --- a/tools_webrtc/libs/generate_licenses_test.py +++ b/tools_webrtc/libs/generate_licenses_test.py @@ -10,7 +10,12 @@ # be found in the AUTHORS file in the root of the source tree. import unittest -import mock +try: + # python 3.3+ + from unittest.mock import patch +except ImportError: + # From site-package + from mock import patch from generate_licenses import LicenseBuilder @@ -32,21 +37,21 @@ def _FakeRunGN(buildfile_dir, target): """ def testParseLibraryName(self): - self.assertEquals( + self.assertEqual( LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'), 'libname1') - self.assertEquals( + self.assertEqual( LicenseBuilder._ParseLibraryName( '//a/b/third_party/libname2:c(d)'), 'libname2') - self.assertEquals( + self.assertEqual( LicenseBuilder._ParseLibraryName( '//a/b/third_party/libname3/c:d(e)'), 'libname3') - self.assertEquals( + self.assertEqual( LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None) def testParseLibrarySimpleMatch(self): builder = LicenseBuilder([], [], {}, {}) - self.assertEquals(builder._ParseLibrary('//a/b/third_party/libname:c'), + self.assertEqual(builder._ParseLibrary('//a/b/third_party/libname:c'), 'libname') def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): @@ -54,7 +59,7 @@ def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): 'libname:foo.*': ['path/to/LICENSE'], } builder = LicenseBuilder([], [], lib_dict, {}) - self.assertEquals( + self.assertEqual( builder._ParseLibrary('//a/b/third_party/libname:bar_java'), 'libname') @@ -63,7 +68,7 @@ def testParseLibraryRegExMatch(self): 'libname:foo.*': ['path/to/LICENSE'], } builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( + self.assertEqual( builder._ParseLibrary('//a/b/third_party/libname:foo_bar_java'), 'libname:foo.*') @@ -72,7 +77,7 @@ def testParseLibraryRegExMatchWithSubDirectory(self): 'libname/foo:bar.*': ['path/to/LICENSE'], } builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( + self.assertEqual( builder._ParseLibrary('//a/b/third_party/libname/foo:bar_java'), 'libname/foo:bar.*') @@ -81,29 +86,29 @@ def testParseLibraryRegExMatchWithStarInside(self): 'libname/foo.*bar.*': ['path/to/LICENSE'], } builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( + 
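The generate_licenses.py hunks above are Python 3 porting changes. A small self-contained illustration of the two patterns, with `['echo', ...]` standing in for the real `gn desc` invocation:

import subprocess

try:
    # Python 3.2+
    from html import escape
except ImportError:
    # Python 2 fallback
    from cgi import escape

print(escape('<LICENSE> "text" & more', quote=True))
# Under Python 3, check_output returns bytes, hence the explicit decode.
print(subprocess.check_output(['echo', 'fake gn output']).decode('UTF-8'))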
self.assertEqual( builder._ParseLibrary( '//a/b/third_party/libname/fooHAHA:bar_java'), 'libname/foo.*bar.*') - @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) def testGetThirdPartyLibrariesWithoutRegex(self): builder = LicenseBuilder([], [], {}, {}) - self.assertEquals( + self.assertEqual( builder._GetThirdPartyLibraries('out/arm', 'target1'), set(['libname1', 'libname2', 'libname3'])) - @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) def testGetThirdPartyLibrariesWithRegex(self): lib_regex_dict = { 'libname2:c.*': ['path/to/LICENSE'], } builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( + self.assertEqual( builder._GetThirdPartyLibraries('out/arm', 'target1'), set(['libname1', 'libname2:c.*', 'libname3'])) - @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) def testGenerateLicenseTextFailIfUnknownLibrary(self): lib_dict = { 'simple_library': ['path/to/LICENSE'], @@ -113,8 +118,8 @@ def testGenerateLicenseTextFailIfUnknownLibrary(self): with self.assertRaises(Exception) as context: builder.GenerateLicenseText('dummy/dir') - self.assertEquals( - context.exception.message, + self.assertEqual( + context.exception.args[0], 'Missing licenses for following third_party targets: ' 'libname1, libname2, libname3') diff --git a/tools_webrtc/mb/mb_config.pyl b/tools_webrtc/mb/mb_config.pyl index 13159a3d94..253a57acc5 100644 --- a/tools_webrtc/mb/mb_config.pyl +++ b/tools_webrtc/mb/mb_config.pyl @@ -20,8 +20,6 @@ 'builder_groups': { 'client.webrtc': { # iOS - 'iOS32 Debug': 'ios_debug_bot_arm', - 'iOS32 Release': 'ios_release_bot_arm', 'iOS64 Debug': 'ios_debug_bot_arm64', 'iOS64 Release': 'ios_release_bot_arm64', 'iOS64 Sim Debug (iOS 12)': 'ios_debug_bot_x64', @@ -33,6 +31,7 @@ 'Mac64 Release': 'release_bot_x64', 'Mac64 Builder': 'pure_release_bot_x64', 'Mac Asan': 'mac_asan_clang_release_bot_x64', + 'MacARM64 M1 Release': 'release_bot_arm64', # Linux 'Linux32 Debug': 'no_h264_debug_bot_x86', @@ -91,7 +90,6 @@ 'Win64 Debug (Clang)': 'win_clang_debug_bot_x64', 'Win64 Release (Clang)': 'win_clang_release_bot_x64', 'Win64 ASan': 'win_asan_clang_release_bot_x64', - 'Win64 UWP': 'win_uwp_release_bot_x64', 'Win (more configs)': { 'bwe_test_logging': 'bwe_test_logging_x86', @@ -112,6 +110,7 @@ 'Perf Android64 (M Nexus5X)': 'release_bot_x64', 'Perf Android64 (O Pixel2)': 'release_bot_x64', 'Perf Linux Trusty': 'release_bot_x64', + 'Perf Linux Bionic': 'release_bot_x64', 'Perf Mac 10.11': 'release_bot_x64', 'Perf Win7': 'release_bot_x64', }, @@ -148,8 +147,6 @@ }, 'tryserver.webrtc': { # iOS - 'ios_compile_arm_dbg': 'ios_debug_bot_arm', - 'ios_compile_arm_rel': 'ios_release_bot_arm', 'ios_compile_arm64_dbg': 'ios_debug_bot_arm64', 'ios_compile_arm64_rel': 'ios_release_bot_arm64', 'ios_sim_x64_dbg_ios12': 'ios_debug_bot_x64', @@ -233,7 +230,6 @@ 'win_asan': 'win_asan_clang_release_bot_x64', 'win_x64_clang_dbg_win8': 'win_clang_debug_bot_x64', 'win_x64_clang_dbg_win10': 'win_clang_debug_bot_x64', - 'win_x64_uwp': 'win_uwp_release_bot_x64', 'win_x86_more_configs': { 'bwe_test_logging': 'bwe_test_logging_x86', @@ -340,10 +336,6 @@ 'asan', 'clang', 'full_symbols', 'openh264', 'release_bot', 'x64', 'win_fastlink', ], - 'win_uwp_release_bot_x64': [ - # UWP passes compiler flags that are not supported by goma. 
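The test changes above track the same Python 3 migration: `unittest.mock` when available, `assertEqual` instead of the deprecated `assertEquals`, and `exception.args[0]` because `Exception.message` no longer exists in Python 3. A standalone sketch of those three patterns (the patched target here is `os.getcwd`, chosen purely for illustration):

import os
import unittest

try:
    # Python 3.3+
    from unittest.mock import patch
except ImportError:
    # Python 2: third-party `mock` package
    from mock import patch


class PortabilityPatternsTest(unittest.TestCase):
    @patch('os.getcwd', return_value='/fake/dir')
    def test_patch_decorator(self, mock_getcwd):
        self.assertEqual(os.getcwd(), '/fake/dir')

    def test_exception_text(self):
        with self.assertRaises(Exception) as context:
            raise Exception('Missing licenses for following third_party targets: x')
        self.assertEqual(
            context.exception.args[0],
            'Missing licenses for following third_party targets: x')


if __name__ == '__main__':
    unittest.main()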
- 'no_clang', 'openh264', 'x64', 'winuwp', 'release_bot_no_goma' - ], # Mac 'mac_asan_clang_release_bot_x64': [ @@ -386,14 +378,6 @@ ], # iOS - 'ios_debug_bot_arm': [ - 'ios', 'debug_bot', 'arm', 'no_ios_code_signing', 'ios_use_goma_rbe', - 'xctest', - ], - 'ios_release_bot_arm': [ - 'ios', 'release_bot', 'arm', 'no_ios_code_signing', 'ios_use_goma_rbe', - 'xctest', - ], 'ios_debug_bot_arm64': [ 'ios', 'debug_bot', 'arm64', 'no_ios_code_signing', 'ios_use_goma_rbe', 'xctest', @@ -615,10 +599,6 @@ 'gn_args': 'rtc_enable_sctp=false', }, - 'winuwp': { - 'gn_args': 'target_os="winuwp"', - }, - 'win_undef_unicode': { 'gn_args': 'rtc_win_undef_unicode=true', }, diff --git a/tools_webrtc/msan/suppressions.txt b/tools_webrtc/msan/suppressions.txt index ce8b14292e..47a0dff16f 100644 --- a/tools_webrtc/msan/suppressions.txt +++ b/tools_webrtc/msan/suppressions.txt @@ -4,8 +4,8 @@ # # Please think twice before you add or remove these rules. -# This is a stripped down copy of Chromium's blacklist.txt, to enable -# adding WebRTC-specific blacklist entries. +# This is a stripped down copy of Chromium's ignorelist.txt, to enable +# adding WebRTC-specific ignorelist entries. # Uninit in zlib. http://crbug.com/116277 fun:*MOZ_Z_deflate* diff --git a/tools_webrtc/perf/catapult_uploader.py b/tools_webrtc/perf/catapult_uploader.py index de7bd81c73..a10dd84cb5 100644 --- a/tools_webrtc/perf/catapult_uploader.py +++ b/tools_webrtc/perf/catapult_uploader.py @@ -145,13 +145,13 @@ def _CheckFullUploadInfo(url, upload_token, '?additional_info=measurements', method='GET', headers=headers) - print 'Full upload info: %r.' % content - if response.status != 200: print 'Failed to reach the dashboard to get full upload info.' return False resp_json = json.loads(content) + print 'Full upload info: %s.' % json.dumps(resp_json, indent=4) + if 'measurements' in resp_json: measurements_cnt = len(resp_json['measurements']) not_completed_state_cnt = len([ @@ -247,10 +247,13 @@ def UploadToDashboard(options): print 'Upload completed.' return 0 - if response.status != 200 or resp_json['state'] == 'FAILED': - print('Upload failed with %d: %s\n\n%s' % (response.status, - response.reason, - str(resp_json))) + if response.status != 200: + print('Upload status poll failed with %d: %s' % (response.status, + response.reason)) + return 1 + + if resp_json['state'] == 'FAILED': + print 'Upload failed.' return 1 print('Upload wasn\'t completed in a given time: %d seconds.' % diff --git a/tools_webrtc/perf/webrtc_dashboard_upload.py b/tools_webrtc/perf/webrtc_dashboard_upload.py index a709af5dcd..19db0250cf 100644 --- a/tools_webrtc/perf/webrtc_dashboard_upload.py +++ b/tools_webrtc/perf/webrtc_dashboard_upload.py @@ -50,7 +50,8 @@ def _CreateParser(): help='Which dashboard to use.') parser.add_argument('--input-results-file', type=argparse.FileType(), required=True, - help='A JSON file with output from WebRTC tests.') + help='A HistogramSet proto file with output from ' + 'WebRTC tests.') parser.add_argument('--output-json-file', type=argparse.FileType('w'), help='Where to write the output (for debugging).') parser.add_argument('--outdir', required=True, diff --git a/tools_webrtc/ubsan/suppressions.txt b/tools_webrtc/ubsan/suppressions.txt index 50b66e915a..dc76f38c20 100644 --- a/tools_webrtc/ubsan/suppressions.txt +++ b/tools_webrtc/ubsan/suppressions.txt @@ -1,7 +1,7 @@ ############################################################################# -# UBSan blacklist. +# UBSan ignorelist. 
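catapult_uploader.py above now distinguishes a failed status poll from a dashboard-side failure. A rough standalone restatement of that flow, written as Python 3 for readability even though the uploader itself still uses Python 2 print statements; `response_status`, `response_reason` and `content` stand in for the httplib2-style response pair:

import json


def check_upload_state(response_status, response_reason, content):
    # An HTTP error means the poll itself failed.
    if response_status != 200:
        print('Upload status poll failed with %d: %s' %
              (response_status, response_reason))
        return 1
    # Otherwise the dashboard reports its own state in the JSON body.
    resp_json = json.loads(content)
    if resp_json.get('state') == 'FAILED':
        print('Upload failed.')
        return 1
    return 0


print(check_upload_state(200, 'OK', '{"state": "COMPLETED"}'))  # 0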
# -# This is a WebRTC-specific replacement of Chromium's blacklist.txt. +# This is a WebRTC-specific replacement of Chromium's ignorelist.txt. # Only exceptions for third party libraries go here. WebRTC's code should use # the RTC_NO_SANITIZE macro. Please think twice before adding new exceptions. diff --git a/tools_webrtc/ubsan/vptr_suppressions.txt b/tools_webrtc/ubsan/vptr_suppressions.txt index 739de36659..617ba88f98 100644 --- a/tools_webrtc/ubsan/vptr_suppressions.txt +++ b/tools_webrtc/ubsan/vptr_suppressions.txt @@ -1,5 +1,5 @@ ############################################################################# -# UBSan vptr blacklist. +# UBSan vptr ignorelist. # Function and type based blacklisting use a mangled name, and it is especially # tricky to represent C++ types. For now, any possible changes by name manglings # are simply represented as wildcard expressions of regexp, and thus it might be @@ -8,7 +8,7 @@ # Please think twice before you add or remove these rules. # # This is a stripped down copy of Chromium's vptr_blacklist.txt, to enable -# adding WebRTC-specific blacklist entries. +# adding WebRTC-specific ignorelist entries. ############################################################################# # Using raw pointer values. diff --git a/video/BUILD.gn b/video/BUILD.gn index c524212353..7743aba944 100644 --- a/video/BUILD.gn +++ b/video/BUILD.gn @@ -67,6 +67,7 @@ rtc_library("video") { "../api/crypto:options", "../api/rtc_event_log", "../api/task_queue", + "../api/units:time_delta", "../api/units:timestamp", "../api/video:encoded_image", "../api/video:recordable_encoded_frame", @@ -701,6 +702,7 @@ if (rtc_include_tests) { "../modules/video_coding:webrtc_multiplex", "../modules/video_coding:webrtc_vp8", "../modules/video_coding:webrtc_vp9", + "../modules/video_coding:webrtc_vp9_helpers", "../modules/video_coding/codecs/av1:libaom_av1_encoder", "../rtc_base", "../rtc_base:checks", diff --git a/video/adaptation/balanced_constraint.cc b/video/adaptation/balanced_constraint.cc index 3de81e72e0..ec0b8e41d5 100644 --- a/video/adaptation/balanced_constraint.cc +++ b/video/adaptation/balanced_constraint.cc @@ -41,16 +41,16 @@ bool BalancedConstraint::IsAdaptationUpAllowed( // exceed bitrate constraints. if (degradation_preference_provider_->degradation_preference() == DegradationPreference::BALANCED) { + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); if (!balanced_settings_.CanAdaptUp( - input_state.video_codec_type(), - input_state.frame_size_pixels().value(), + input_state.video_codec_type(), frame_size_pixels, encoder_target_bitrate_bps_.value_or(0))) { return false; } if (DidIncreaseResolution(restrictions_before, restrictions_after) && !balanced_settings_.CanAdaptUpResolution( - input_state.video_codec_type(), - input_state.frame_size_pixels().value(), + input_state.video_codec_type(), frame_size_pixels, encoder_target_bitrate_bps_.value_or(0))) { return false; } diff --git a/video/adaptation/overuse_frame_detector_unittest.cc b/video/adaptation/overuse_frame_detector_unittest.cc index d4bf910faa..37ad974a4c 100644 --- a/video/adaptation/overuse_frame_detector_unittest.cc +++ b/video/adaptation/overuse_frame_detector_unittest.cc @@ -455,6 +455,8 @@ TEST_F(OveruseFrameDetectorTest, RunOnTqNormalUsage) { EXPECT_TRUE(event.Wait(10000)); } +// TODO(crbug.com/webrtc/12846): investigate why the test fails on MAC bots. 
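BalancedConstraint above (and LastFrameSizeOrDefault further down) now prefer the pixel count of the single active stream over the full input frame size. A tiny Python restatement of that fallback order; the QCIF-sized default is an assumption made here only to keep the sketch self-contained:

DEFAULT_PIXELS = 176 * 144  # stands in for kDefaultInputPixels{Width,Height}


def frame_size_or_default(single_active_stream_pixels, frame_size_pixels):
    if single_active_stream_pixels is not None:
        return single_active_stream_pixels
    if frame_size_pixels is not None:
        return frame_size_pixels
    return DEFAULT_PIXELS


print(frame_size_or_default(None, 1280 * 720))        # 921600
print(frame_size_or_default(640 * 360, 1280 * 720))   # 230400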
+#if !defined(WEBRTC_MAC) TEST_F(OveruseFrameDetectorTest, MaxIntervalScalesWithFramerate) { const int kCapturerMaxFrameRate = 30; const int kEncodeMaxFrameRate = 20; // Maximum fps the encoder can sustain. @@ -490,6 +492,7 @@ TEST_F(OveruseFrameDetectorTest, MaxIntervalScalesWithFramerate) { processing_time_us); overuse_detector_->CheckForOveruse(observer_); } +#endif TEST_F(OveruseFrameDetectorTest, RespectsMinFramerate) { const int kMinFrameRate = 7; // Minimum fps allowed by current detector impl. @@ -835,7 +838,7 @@ TEST_F(OveruseFrameDetectorTest2, ConvergesSlowly) { // Should have started to approach correct load of 15%, but not very far. EXPECT_LT(UsagePercent(), InitialUsage()); - EXPECT_GT(UsagePercent(), (InitialUsage() * 3 + 15) / 4); + EXPECT_GT(UsagePercent(), (InitialUsage() * 3 + 8) / 4); // Run for roughly 10s more, should now be closer. InsertAndSendFramesWithInterval(300, kFrameIntervalUs, kWidth, kHeight, diff --git a/video/adaptation/video_stream_encoder_resource_manager.cc b/video/adaptation/video_stream_encoder_resource_manager.cc index 1c2e5839f2..2705bf9af7 100644 --- a/video/adaptation/video_stream_encoder_resource_manager.cc +++ b/video/adaptation/video_stream_encoder_resource_manager.cc @@ -32,6 +32,7 @@ #include "rtc_base/ref_counted_object.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" #include "video/adaptation/quality_scaler_resource.h" @@ -257,6 +258,9 @@ VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager( quality_rampup_experiment_( QualityRampUpExperimentHelper::CreateIfEnabled(this, clock_)), encoder_settings_(absl::nullopt) { + TRACE_EVENT0( + "webrtc", + "VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager"); RTC_CHECK(degradation_preference_provider_); RTC_CHECK(encoder_stats_observer_); } @@ -494,7 +498,7 @@ void VideoStreamEncoderResourceManager::OnMaybeEncodeFrame() { quality_scaler_resource_, bandwidth, DataRate::BitsPerSec(encoder_target_bitrate_bps_.value_or(0)), DataRate::KilobitsPerSec(encoder_settings_->video_codec().maxBitrate), - LastInputFrameSizeOrDefault()); + LastFrameSizeOrDefault()); } } @@ -551,7 +555,7 @@ void VideoStreamEncoderResourceManager::ConfigureQualityScaler( absl::optional thresholds = balanced_settings_.GetQpThresholds( GetVideoCodecTypeOrGeneric(encoder_settings_), - LastInputFrameSizeOrDefault()); + LastFrameSizeOrDefault()); if (thresholds) { quality_scaler_resource_->SetQpThresholds(*thresholds); } @@ -591,10 +595,13 @@ CpuOveruseOptions VideoStreamEncoderResourceManager::GetCpuOveruseOptions() return options; } -int VideoStreamEncoderResourceManager::LastInputFrameSizeOrDefault() const { +int VideoStreamEncoderResourceManager::LastFrameSizeOrDefault() const { RTC_DCHECK_RUN_ON(encoder_queue_); - return input_state_provider_->InputState().frame_size_pixels().value_or( - kDefaultInputPixelsWidth * kDefaultInputPixelsHeight); + return input_state_provider_->InputState() + .single_active_stream_pixels() + .value_or( + input_state_provider_->InputState().frame_size_pixels().value_or( + kDefaultInputPixelsWidth * kDefaultInputPixelsHeight)); } void VideoStreamEncoderResourceManager::OnVideoSourceRestrictionsUpdated( diff --git a/video/adaptation/video_stream_encoder_resource_manager.h b/video/adaptation/video_stream_encoder_resource_manager.h index 2e7060c604..e7174d2344 100644 --- a/video/adaptation/video_stream_encoder_resource_manager.h +++ 
b/video/adaptation/video_stream_encoder_resource_manager.h @@ -66,7 +66,7 @@ extern const int kDefaultInputPixelsHeight; // resources. // // The manager is also involved with various mitigations not part of the -// ResourceAdaptationProcessor code such as the inital frame dropping. +// ResourceAdaptationProcessor code such as the initial frame dropping. class VideoStreamEncoderResourceManager : public VideoSourceRestrictionsListener, public ResourceLimitationsListener, @@ -156,7 +156,7 @@ class VideoStreamEncoderResourceManager rtc::scoped_refptr resource) const; CpuOveruseOptions GetCpuOveruseOptions() const; - int LastInputFrameSizeOrDefault() const; + int LastFrameSizeOrDefault() const; // Calculates an up-to-date value of the target frame rate and informs the // |encode_usage_resource_| of the new value. diff --git a/video/encoder_rtcp_feedback.cc b/video/encoder_rtcp_feedback.cc index b81ff6120f..17095a0a0c 100644 --- a/video/encoder_rtcp_feedback.cc +++ b/video/encoder_rtcp_feedback.cc @@ -10,6 +10,9 @@ #include "video/encoder_rtcp_feedback.h" +#include +#include + #include "absl/types/optional.h" #include "api/video_codecs/video_encoder.h" #include "rtc_base/checks.h" @@ -21,47 +24,36 @@ namespace { constexpr int kMinKeyframeSendIntervalMs = 300; } // namespace -EncoderRtcpFeedback::EncoderRtcpFeedback(Clock* clock, - const std::vector& ssrcs, - VideoStreamEncoderInterface* encoder) +EncoderRtcpFeedback::EncoderRtcpFeedback( + Clock* clock, + const std::vector& ssrcs, + VideoStreamEncoderInterface* encoder, + std::function( + uint32_t ssrc, + const std::vector& seq_nums)> get_packet_infos) : clock_(clock), ssrcs_(ssrcs), - rtp_video_sender_(nullptr), + get_packet_infos_(std::move(get_packet_infos)), video_stream_encoder_(encoder), - time_last_intra_request_ms_(-1), - min_keyframe_send_interval_ms_( - KeyframeIntervalSettings::ParseFromFieldTrials() - .MinKeyframeSendIntervalMs() - .value_or(kMinKeyframeSendIntervalMs)) { + time_last_packet_delivery_queue_(Timestamp::Millis(0)), + min_keyframe_send_interval_( + TimeDelta::Millis(KeyframeIntervalSettings::ParseFromFieldTrials() + .MinKeyframeSendIntervalMs() + .value_or(kMinKeyframeSendIntervalMs))) { RTC_DCHECK(!ssrcs.empty()); + packet_delivery_queue_.Detach(); } -void EncoderRtcpFeedback::SetRtpVideoSender( - const RtpVideoSenderInterface* rtp_video_sender) { - RTC_DCHECK(rtp_video_sender); - RTC_DCHECK(!rtp_video_sender_); - rtp_video_sender_ = rtp_video_sender; -} +// Called via Call::DeliverRtcp. +void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) { + RTC_DCHECK_RUN_ON(&packet_delivery_queue_); + RTC_DCHECK(std::find(ssrcs_.begin(), ssrcs_.end(), ssrc) != ssrcs_.end()); -bool EncoderRtcpFeedback::HasSsrc(uint32_t ssrc) { - for (uint32_t registered_ssrc : ssrcs_) { - if (registered_ssrc == ssrc) { - return true; - } - } - return false; -} + const Timestamp now = clock_->CurrentTime(); + if (time_last_packet_delivery_queue_ + min_keyframe_send_interval_ > now) + return; -void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) { - RTC_DCHECK(HasSsrc(ssrc)); - { - int64_t now_ms = clock_->TimeInMilliseconds(); - MutexLock lock(&mutex_); - if (time_last_intra_request_ms_ + min_keyframe_send_interval_ms_ > now_ms) { - return; - } - time_last_intra_request_ms_ = now_ms; - } + time_last_packet_delivery_queue_ = now; // Always produce key frame for all streams. 
video_stream_encoder_->SendKeyFrame(); @@ -72,12 +64,12 @@ void EncoderRtcpFeedback::OnReceivedLossNotification( uint16_t seq_num_of_last_decodable, uint16_t seq_num_of_last_received, bool decodability_flag) { - RTC_DCHECK(rtp_video_sender_) << "Object initialization incomplete."; + RTC_DCHECK(get_packet_infos_) << "Object initialization incomplete."; const std::vector seq_nums = {seq_num_of_last_decodable, seq_num_of_last_received}; const std::vector infos = - rtp_video_sender_->GetSentRtpPacketInfos(ssrc, seq_nums); + get_packet_infos_(ssrc, seq_nums); if (infos.empty()) { return; } diff --git a/video/encoder_rtcp_feedback.h b/video/encoder_rtcp_feedback.h index 3bd1cb91f0..2aadcc34e7 100644 --- a/video/encoder_rtcp_feedback.h +++ b/video/encoder_rtcp_feedback.h @@ -10,12 +10,16 @@ #ifndef VIDEO_ENCODER_RTCP_FEEDBACK_H_ #define VIDEO_ENCODER_RTCP_FEEDBACK_H_ +#include #include +#include "api/sequence_checker.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "api/video/video_stream_encoder_interface.h" #include "call/rtp_video_sender_interface.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -27,13 +31,15 @@ class VideoStreamEncoderInterface; class EncoderRtcpFeedback : public RtcpIntraFrameObserver, public RtcpLossNotificationObserver { public: - EncoderRtcpFeedback(Clock* clock, - const std::vector& ssrcs, - VideoStreamEncoderInterface* encoder); + EncoderRtcpFeedback( + Clock* clock, + const std::vector& ssrcs, + VideoStreamEncoderInterface* encoder, + std::function( + uint32_t ssrc, + const std::vector& seq_nums)> get_packet_infos); ~EncoderRtcpFeedback() override = default; - void SetRtpVideoSender(const RtpVideoSenderInterface* rtp_video_sender); - void OnReceivedIntraFrameRequest(uint32_t ssrc) override; // Implements RtcpLossNotificationObserver. 
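The EncoderRtcpFeedback rewrite above swaps the RtpVideoSender pointer for an injected packet-info getter and replaces the mutex with a sequence-checked timestamp. A language-neutral sketch of that shape, written in Python with illustrative names; the 300 ms minimum interval mirrors kMinKeyframeSendIntervalMs:

import time

MIN_KEYFRAME_SEND_INTERVAL = 0.3  # seconds


class EncoderRtcpFeedbackSketch:
    def __init__(self, ssrcs, send_key_frame, get_packet_infos):
        self._ssrcs = set(ssrcs)
        self._send_key_frame = send_key_frame        # injected, not looked up
        self._get_packet_infos = get_packet_infos    # replaces the sender pointer
        self._last_keyframe_request = float('-inf')

    def on_received_intra_frame_request(self, ssrc):
        # All calls are assumed to arrive on one packet-delivery sequence,
        # so no lock is needed around the timestamp.
        assert ssrc in self._ssrcs
        now = time.monotonic()
        if self._last_keyframe_request + MIN_KEYFRAME_SEND_INTERVAL > now:
            return  # Rate limit key frame requests.
        self._last_keyframe_request = now
        self._send_key_frame()

    def on_received_loss_notification(self, ssrc, last_decodable, last_received):
        infos = self._get_packet_infos(ssrc, [last_decodable, last_received])
        if not infos:
            return
        # ...forward the resolved packet infos to the encoder.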
@@ -43,17 +49,19 @@ class EncoderRtcpFeedback : public RtcpIntraFrameObserver, bool decodability_flag) override; private: - bool HasSsrc(uint32_t ssrc); - Clock* const clock_; const std::vector ssrcs_; - const RtpVideoSenderInterface* rtp_video_sender_; + const std::function( + uint32_t ssrc, + const std::vector& seq_nums)> + get_packet_infos_; VideoStreamEncoderInterface* const video_stream_encoder_; - Mutex mutex_; - int64_t time_last_intra_request_ms_ RTC_GUARDED_BY(mutex_); + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_delivery_queue_; + Timestamp time_last_packet_delivery_queue_ + RTC_GUARDED_BY(packet_delivery_queue_); - const int min_keyframe_send_interval_ms_; + const TimeDelta min_keyframe_send_interval_; }; } // namespace webrtc diff --git a/video/encoder_rtcp_feedback_unittest.cc b/video/encoder_rtcp_feedback_unittest.cc index 81ac22b6c6..4cbb747e51 100644 --- a/video/encoder_rtcp_feedback_unittest.cc +++ b/video/encoder_rtcp_feedback_unittest.cc @@ -26,7 +26,8 @@ class VieKeyRequestTest : public ::testing::Test { encoder_rtcp_feedback_( &simulated_clock_, std::vector(1, VieKeyRequestTest::kSsrc), - &encoder_) {} + &encoder_, + nullptr) {} protected: const uint32_t kSsrc = 1234; diff --git a/video/end_to_end_tests/config_tests.cc b/video/end_to_end_tests/config_tests.cc index bf63e2a51f..1bd897cb34 100644 --- a/video/end_to_end_tests/config_tests.cc +++ b/video/end_to_end_tests/config_tests.cc @@ -104,7 +104,7 @@ TEST_F(ConfigEndToEndTest, VerifyDefaultFlexfecReceiveConfigParameters) { FlexfecReceiveStream::Config default_receive_config(&rtcp_send_transport); EXPECT_EQ(-1, default_receive_config.payload_type) << "Enabling FlexFEC requires rtpmap: flexfec negotiation."; - EXPECT_EQ(0U, default_receive_config.remote_ssrc) + EXPECT_EQ(0U, default_receive_config.rtp.remote_ssrc) << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation."; EXPECT_TRUE(default_receive_config.protected_media_ssrcs.empty()) << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation."; diff --git a/video/end_to_end_tests/fec_tests.cc b/video/end_to_end_tests/fec_tests.cc index 0d4ddac5a4..77ad9eb666 100644 --- a/video/end_to_end_tests/fec_tests.cc +++ b/video/end_to_end_tests/fec_tests.cc @@ -314,7 +314,7 @@ class FlexfecRenderObserver : public test::EndToEndTest, void ModifyFlexfecConfigs( std::vector* receive_configs) override { - (*receive_configs)[0].local_ssrc = kFlexfecLocalSsrc; + (*receive_configs)[0].rtp.local_ssrc = kFlexfecLocalSsrc; } void PerformTest() override { diff --git a/video/end_to_end_tests/network_state_tests.cc b/video/end_to_end_tests/network_state_tests.cc index 9abde3bb32..4e0e86f987 100644 --- a/video/end_to_end_tests/network_state_tests.cc +++ b/video/end_to_end_tests/network_state_tests.cc @@ -10,13 +10,19 @@ #include +#include "api/media_types.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "api/task_queue/task_queue_base.h" +#include "api/task_queue/task_queue_factory.h" #include "api/test/simulated_network.h" #include "api/video_codecs/video_encoder.h" #include "call/fake_network_pipe.h" #include "call/simulated_network.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "rtc_base/location.h" #include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "system_wrappers/include/sleep.h" #include "test/call_test.h" #include "test/fake_encoder.h" @@ -166,7 +172,10 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { explicit 
NetworkStateTest(TaskQueueBase* task_queue) : EndToEndTest(kDefaultTimeoutMs), FakeEncoder(Clock::GetRealTimeClock()), - task_queue_(task_queue), + e2e_test_task_queue_(task_queue), + task_queue_(CreateDefaultTaskQueueFactory()->CreateTaskQueue( + "NetworkStateTest", + TaskQueueFactory::Priority::NORMAL)), sender_call_(nullptr), receiver_call_(nullptr), encoder_factory_(this), @@ -219,26 +228,36 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { send_config->encoder_settings.encoder_factory = &encoder_factory_; } + void SignalChannelNetworkState(Call* call, + MediaType media_type, + NetworkState network_state) { + SendTask(RTC_FROM_HERE, e2e_test_task_queue_, + [call, media_type, network_state] { + call->SignalChannelNetworkState(media_type, network_state); + }); + } + void PerformTest() override { EXPECT_TRUE(encoded_frames_.Wait(kDefaultTimeoutMs)) << "No frames received by the encoder."; - SendTask(RTC_FROM_HERE, task_queue_, [this]() { + SendTask(RTC_FROM_HERE, task_queue_.get(), [this]() { // Wait for packets from both sender/receiver. WaitForPacketsOrSilence(false, false); // Sender-side network down for audio; there should be no effect on // video - sender_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkDown); + SignalChannelNetworkState(sender_call_, MediaType::AUDIO, kNetworkDown); + WaitForPacketsOrSilence(false, false); // Receiver-side network down for audio; no change expected - receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, - kNetworkDown); + SignalChannelNetworkState(receiver_call_, MediaType::AUDIO, + kNetworkDown); WaitForPacketsOrSilence(false, false); // Sender-side network down. - sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkDown); + SignalChannelNetworkState(sender_call_, MediaType::VIDEO, kNetworkDown); { MutexLock lock(&test_mutex_); // After network goes down we shouldn't be encoding more frames. @@ -248,14 +267,14 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { WaitForPacketsOrSilence(true, false); // Receiver-side network down. - receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, - kNetworkDown); + SignalChannelNetworkState(receiver_call_, MediaType::VIDEO, + kNetworkDown); WaitForPacketsOrSilence(true, true); // Network up for audio for both sides; video is still not expected to // start - sender_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp); - receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp); + SignalChannelNetworkState(sender_call_, MediaType::AUDIO, kNetworkUp); + SignalChannelNetworkState(receiver_call_, MediaType::AUDIO, kNetworkUp); WaitForPacketsOrSilence(true, true); // Network back up again for both. @@ -265,8 +284,8 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { // network. 
sender_state_ = kNetworkUp; } - sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp); - receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp); + SignalChannelNetworkState(sender_call_, MediaType::VIDEO, kNetworkUp); + SignalChannelNetworkState(receiver_call_, MediaType::VIDEO, kNetworkUp); WaitForPacketsOrSilence(false, false); // TODO(skvlad): add tests to verify that the audio streams are stopped @@ -340,7 +359,8 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { } } - TaskQueueBase* const task_queue_; + TaskQueueBase* const e2e_test_task_queue_; + std::unique_ptr task_queue_; Mutex test_mutex_; rtc::Event encoded_frames_; rtc::Event packet_event_; diff --git a/video/end_to_end_tests/rtp_rtcp_tests.cc b/video/end_to_end_tests/rtp_rtcp_tests.cc index d76a7f0ced..a698328dad 100644 --- a/video/end_to_end_tests/rtp_rtcp_tests.cc +++ b/video/end_to_end_tests/rtp_rtcp_tests.cc @@ -537,12 +537,13 @@ TEST_F(RtpRtcpEndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) { receive_transport.get()); flexfec_receive_config.payload_type = GetVideoSendConfig()->rtp.flexfec.payload_type; - flexfec_receive_config.remote_ssrc = GetVideoSendConfig()->rtp.flexfec.ssrc; + flexfec_receive_config.rtp.remote_ssrc = + GetVideoSendConfig()->rtp.flexfec.ssrc; flexfec_receive_config.protected_media_ssrcs = GetVideoSendConfig()->rtp.flexfec.protected_media_ssrcs; - flexfec_receive_config.local_ssrc = kReceiverLocalVideoSsrc; - flexfec_receive_config.transport_cc = true; - flexfec_receive_config.rtp_header_extensions.emplace_back( + flexfec_receive_config.rtp.local_ssrc = kReceiverLocalVideoSsrc; + flexfec_receive_config.rtp.transport_cc = true; + flexfec_receive_config.rtp.extensions.emplace_back( RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberExtensionId); flexfec_receive_configs_.push_back(flexfec_receive_config); diff --git a/video/end_to_end_tests/ssrc_tests.cc b/video/end_to_end_tests/ssrc_tests.cc index 0c26311e92..bdca05d647 100644 --- a/video/end_to_end_tests/ssrc_tests.cc +++ b/video/end_to_end_tests/ssrc_tests.cc @@ -14,6 +14,7 @@ #include "call/fake_network_pipe.h" #include "call/simulated_network.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/task_queue_for_test.h" #include "test/call_test.h" #include "test/gtest.h" @@ -60,7 +61,7 @@ TEST_F(SsrcEndToEndTest, UnknownRtpPacketGivesUnknownSsrcReturnCode) { DeliveryStatus DeliverPacket(MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) override { - if (RtpHeaderParser::IsRtcp(packet.cdata(), packet.size())) { + if (IsRtcpPacket(packet)) { return receiver_->DeliverPacket(media_type, std::move(packet), packet_time_us); } diff --git a/video/end_to_end_tests/stats_tests.cc b/video/end_to_end_tests/stats_tests.cc index bb613a544e..54e7bcff1c 100644 --- a/video/end_to_end_tests/stats_tests.cc +++ b/video/end_to_end_tests/stats_tests.cc @@ -17,7 +17,7 @@ #include "api/test/video/function_video_encoder_factory.h" #include "call/fake_network_pipe.h" #include "call/simulated_network.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/video_coding/include/video_coding_defines.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/synchronization/mutex.h" @@ -71,12 +71,11 @@ TEST_F(StatsEndToEndTest, GetStats) { Action OnSendRtp(const uint8_t* packet, size_t length) override { // Drop every 25th packet => 4% loss. 
static const int kPacketLossFrac = 25; - RTPHeader header; - RtpUtility::RtpHeaderParser parser(packet, length); - if (parser.Parse(&header) && - expected_send_ssrcs_.find(header.ssrc) != + RtpPacket header; + if (header.Parse(packet, length) && + expected_send_ssrcs_.find(header.Ssrc()) != expected_send_ssrcs_.end() && - header.sequenceNumber % kPacketLossFrac == 0) { + header.SequenceNumber() % kPacketLossFrac == 0) { return DROP_PACKET; } check_stats_event_.Set(); @@ -143,8 +142,8 @@ TEST_F(StatsEndToEndTest, GetStats) { stats.rtcp_packet_type_counts.nack_requests != 0 || stats.rtcp_packet_type_counts.unique_nack_requests != 0; - assert(stats.current_payload_type == -1 || - stats.current_payload_type == kFakeVideoSendPayloadType); + RTC_DCHECK(stats.current_payload_type == -1 || + stats.current_payload_type == kFakeVideoSendPayloadType); receive_stats_filled_["IncomingPayloadType"] |= stats.current_payload_type == kFakeVideoSendPayloadType; } @@ -613,11 +612,9 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) { Action OnSendRtp(const uint8_t* packet, size_t length) override { MutexLock lock(&mutex_); if (++sent_rtp_packets_ == kPacketNumberToDrop) { - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - RTPHeader header; - EXPECT_TRUE(parser->Parse(packet, length, &header)); - dropped_rtp_packet_ = header.sequenceNumber; + RtpPacket header; + EXPECT_TRUE(header.Parse(packet, length)); + dropped_rtp_packet_ = header.SequenceNumber(); return DROP_PACKET; } task_queue_->PostTask(std::unique_ptr(this)); diff --git a/video/quality_scaling_tests.cc b/video/quality_scaling_tests.cc index 833b7758ba..9837517b78 100644 --- a/video/quality_scaling_tests.cc +++ b/video/quality_scaling_tests.cc @@ -91,11 +91,19 @@ class ScalingObserver : public test::SendTest { automatic_resize_(automatic_resize), expect_scaling_(expect_scaling) {} + DegradationPreference degradation_preference_ = + DegradationPreference::MAINTAIN_FRAMERATE; + private: void ModifySenderBitrateConfig(BitrateConstraints* bitrate_config) override { bitrate_config->start_bitrate_bps = start_bps_; } + void ModifyVideoDegradationPreference( + DegradationPreference* degradation_preference) override { + *degradation_preference = degradation_preference_; + } + size_t GetNumVideoStreams() const override { return (payload_name_ == "VP9") ? 
1 : streams_active_.size(); } @@ -183,6 +191,10 @@ class UpscalingObserver automatic_resize, expect_upscale) {} + void SetDegradationPreference(DegradationPreference preference) { + degradation_preference_ = preference; + } + private: void OnFrameGeneratorCapturerCreated( test::FrameGeneratorCapturer* frame_generator_capturer) override { @@ -206,7 +218,7 @@ TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp8) { // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true}, kHighStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true}, kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -216,7 +228,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForHighQpIfScalingOff_Vp8) { // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true}, kHighStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true}, kHighStartBps, /*automatic_resize=*/false, /*expect_downscale=*/false); RunBaseTest(&test); @@ -226,7 +238,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForNormalQp_Vp8) { // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true}, kHighStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true}, kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/false); RunBaseTest(&test); @@ -236,7 +248,7 @@ TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp8) { // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true}, kLowStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true}, kLowStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -244,10 +256,42 @@ TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp8) { TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrateAndThenUp) { // qp_low:127, qp_high:127 -> kLowQp - test::ScopedFieldTrials field_trials(kPrefix + "127,127,0,0,0,0" + kEnd); + test::ScopedFieldTrials field_trials( + kPrefix + "127,127,0,0,0,0" + kEnd + + "WebRTC-Video-BalancedDegradationSettings/" + "pixels:230400|921600,fps:20|30,kbps:300|500/"); // should not affect + + UpscalingObserver test("VP8", /*streams_active=*/{true}, + kDefaultVgaMinStartBps - 1, + /*automatic_resize=*/true, /*expect_upscale=*/true); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownAndThenUpWithBalanced) { + // qp_low:127, qp_high:127 -> kLowQp + test::ScopedFieldTrials field_trials( + kPrefix + "127,127,0,0,0,0" + kEnd + + "WebRTC-Video-BalancedDegradationSettings/" + "pixels:230400|921600,fps:20|30,kbps:300|499/"); - UpscalingObserver test("VP8", {true}, kDefaultVgaMinStartBps - 1, + UpscalingObserver test("VP8", /*streams_active=*/{true}, + kDefaultVgaMinStartBps - 1, /*automatic_resize=*/true, /*expect_upscale=*/true); + test.SetDegradationPreference(DegradationPreference::BALANCED); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownButNotUpWithBalancedIfBitrateNotEnough) { + // qp_low:127, qp_high:127 -> kLowQp + test::ScopedFieldTrials field_trials( + kPrefix + "127,127,0,0,0,0" + kEnd + + "WebRTC-Video-BalancedDegradationSettings/" + "pixels:230400|921600,fps:20|30,kbps:300|500/"); + + UpscalingObserver test("VP8", /*streams_active=*/{true}, + kDefaultVgaMinStartBps - 1, + /*automatic_resize=*/true, 
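The new balanced-degradation tests above configure WebRTC-Video-BalancedDegradationSettings with strings such as "pixels:230400|921600,fps:20|30,kbps:300|500". A rough helper, written only to make those strings easier to read (it is not the parser WebRTC itself uses), that splits them into per-resolution-level settings:

def split_balanced_settings(config):
    # config e.g. 'pixels:230400|921600,fps:20|30,kbps:300|500'
    fields = {}
    for part in config.split(','):
        key, values = part.split(':')
        fields[key] = [int(v) for v in values.split('|')]
    levels = len(next(iter(fields.values())))
    return [{key: values[i] for key, values in fields.items()}
            for i in range(levels)]


print(split_balanced_settings('pixels:230400|921600,fps:20|30,kbps:300|500'))
# [{'pixels': 230400, 'fps': 20, 'kbps': 300},
#  {'pixels': 921600, 'fps': 30, 'kbps': 500}]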
/*expect_upscale=*/false); + test.SetDegradationPreference(DegradationPreference::BALANCED); RunBaseTest(&test); } @@ -255,7 +299,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrate_Simulcast) { // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true, true}, kLowStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true, true}, kLowStartBps, /*automatic_resize=*/false, /*expect_downscale=*/false); RunBaseTest(&test); @@ -265,7 +309,8 @@ TEST_F(QualityScalingTest, AdaptsDownForHighQp_HighestStreamActive_Vp8) { // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {false, false, true}, kHighStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, + kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -276,7 +321,7 @@ TEST_F(QualityScalingTest, // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {false, false, true}, + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, /*automatic_resize=*/true, /*expect_downscale=*/true); @@ -287,7 +332,7 @@ TEST_F(QualityScalingTest, AdaptsDownButNotUpWithMinStartBitrateLimit) { // qp_low:127, qp_high:127 -> kLowQp test::ScopedFieldTrials field_trials(kPrefix + "127,127,0,0,0,0" + kEnd); - UpscalingObserver test("VP8", {false, true}, + UpscalingObserver test("VP8", /*streams_active=*/{false, true}, kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, /*automatic_resize=*/true, /*expect_upscale=*/false); RunBaseTest(&test); @@ -297,7 +342,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfBitrateEnough_Vp8) { // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {false, false, true}, + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, kSinglecastLimits720pVp8->min_start_bitrate_bps, /*automatic_resize=*/true, /*expect_downscale=*/false); @@ -311,7 +356,7 @@ TEST_F(QualityScalingTest, kPrefix + "1,127,0,0,0,0" + kEnd + "WebRTC-DefaultBitrateLimitsKillSwitch/Enabled/"); - DownscalingObserver test("VP8", {false, false, true}, + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, /*automatic_resize=*/true, /*expect_downscale=*/false); @@ -323,9 +368,10 @@ TEST_F(QualityScalingTest, // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test( - "VP8", {true}, kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, - /*automatic_resize=*/true, /*expect_downscale=*/false); + DownscalingObserver test("VP8", /*streams_active=*/{true}, + kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, + /*automatic_resize=*/true, + /*expect_downscale=*/false); RunBaseTest(&test); } @@ -333,7 +379,8 @@ TEST_F(QualityScalingTest, NoAdaptDownForHighQp_LowestStreamActive_Vp8) { // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true, false, false}, kHighStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true, false, false}, + kHighStartBps, /*automatic_resize=*/true, 
/*expect_downscale=*/false); RunBaseTest(&test); @@ -344,7 +391,8 @@ TEST_F(QualityScalingTest, // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true, false, false}, kLowStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true, false, false}, + kLowStartBps, /*automatic_resize=*/true, /*expect_downscale=*/false); RunBaseTest(&test); @@ -354,7 +402,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfScalingOff_Vp8) { // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - DownscalingObserver test("VP8", {true}, kLowStartBps, + DownscalingObserver test("VP8", /*streams_active=*/{true}, kLowStartBps, /*automatic_resize=*/false, /*expect_downscale=*/false); RunBaseTest(&test); @@ -365,7 +413,7 @@ TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp9) { test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {true}, kHighStartBps, + DownscalingObserver test("VP9", /*streams_active=*/{true}, kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -376,7 +424,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForHighQpIfScalingOff_Vp9) { test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + "WebRTC-VP9QualityScaler/Disabled/"); - DownscalingObserver test("VP9", {true}, kHighStartBps, + DownscalingObserver test("VP9", /*streams_active=*/{true}, kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/false); RunBaseTest(&test); @@ -387,7 +435,7 @@ TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp9) { test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {true}, kLowStartBps, + DownscalingObserver test("VP9", /*streams_active=*/{true}, kLowStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -398,7 +446,8 @@ TEST_F(QualityScalingTest, NoAdaptDownForHighQp_LowestStreamActive_Vp9) { test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {true, false, false}, kHighStartBps, + DownscalingObserver test("VP9", /*streams_active=*/{true, false, false}, + kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/false); RunBaseTest(&test); @@ -410,7 +459,8 @@ TEST_F(QualityScalingTest, test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {true, false, false}, kLowStartBps, + DownscalingObserver test("VP9", /*streams_active=*/{true, false, false}, + kLowStartBps, /*automatic_resize=*/true, /*expect_downscale=*/false); RunBaseTest(&test); @@ -421,7 +471,8 @@ TEST_F(QualityScalingTest, AdaptsDownForHighQp_MiddleStreamActive_Vp9) { test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {false, true, false}, kHighStartBps, + DownscalingObserver test("VP9", /*streams_active=*/{false, true, false}, + kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -433,7 +484,7 @@ TEST_F(QualityScalingTest, test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {false, true, false}, + DownscalingObserver 
test("VP9", /*streams_active=*/{false, true, false}, kSinglecastLimits360pVp9->min_start_bitrate_bps - 1, /*automatic_resize=*/true, /*expect_downscale=*/true); @@ -445,7 +496,7 @@ TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfBitrateEnough_Vp9) { test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + "WebRTC-VP9QualityScaler/Enabled/"); - DownscalingObserver test("VP9", {false, true, false}, + DownscalingObserver test("VP9", /*streams_active=*/{false, true, false}, kSinglecastLimits360pVp9->min_start_bitrate_bps, /*automatic_resize=*/true, /*expect_downscale=*/false); @@ -457,7 +508,7 @@ TEST_F(QualityScalingTest, AdaptsDownForHighQp_H264) { // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "0,0,0,0,1,1" + kEnd); - DownscalingObserver test("H264", {true}, kHighStartBps, + DownscalingObserver test("H264", /*streams_active=*/{true}, kHighStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); @@ -467,7 +518,7 @@ TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_H264) { // qp_low:1, qp_high:51 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "0,0,0,0,1,51" + kEnd); - DownscalingObserver test("H264", {true}, kLowStartBps, + DownscalingObserver test("H264", /*streams_active=*/{true}, kLowStartBps, /*automatic_resize=*/true, /*expect_downscale=*/true); RunBaseTest(&test); diff --git a/video/receive_statistics_proxy2.cc b/video/receive_statistics_proxy2.cc index 3cce3c8ea4..af3cd221e7 100644 --- a/video/receive_statistics_proxy2.cc +++ b/video/receive_statistics_proxy2.cc @@ -946,26 +946,21 @@ void ReceiveStatisticsProxy::OnRenderedFrame( void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t video_playout_ntp_ms, int64_t sync_offset_ms, double estimated_freq_khz) { - RTC_DCHECK_RUN_ON(&incoming_render_queue_); - int64_t now_ms = clock_->TimeInMilliseconds(); - worker_thread_->PostTask( - ToQueuedTask(task_safety_, [video_playout_ntp_ms, sync_offset_ms, - estimated_freq_khz, now_ms, this]() { - RTC_DCHECK_RUN_ON(&main_thread_); - sync_offset_counter_.Add(std::abs(sync_offset_ms)); - stats_.sync_offset_ms = sync_offset_ms; - last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms; - last_estimated_playout_time_ms_ = now_ms; - - const double kMaxFreqKhz = 10000.0; - int offset_khz = kMaxFreqKhz; - // Should not be zero or negative. If so, report max. - if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0) - offset_khz = - static_cast(std::fabs(estimated_freq_khz - 90.0) + 0.5); - - freq_offset_counter_.Add(offset_khz); - })); + RTC_DCHECK_RUN_ON(&main_thread_); + + const int64_t now_ms = clock_->TimeInMilliseconds(); + sync_offset_counter_.Add(std::abs(sync_offset_ms)); + stats_.sync_offset_ms = sync_offset_ms; + last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms; + last_estimated_playout_time_ms_ = now_ms; + + const double kMaxFreqKhz = 10000.0; + int offset_khz = kMaxFreqKhz; + // Should not be zero or negative. If so, report max. 
+ if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0) + offset_khz = static_cast(std::fabs(estimated_freq_khz - 90.0) + 0.5); + + freq_offset_counter_.Add(offset_khz); } void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe, diff --git a/video/rtp_video_stream_receiver.cc b/video/rtp_video_stream_receiver.cc index 7286a3ba4f..a0520cd350 100644 --- a/video/rtp_video_stream_receiver.cc +++ b/video/rtp_video_stream_receiver.cc @@ -272,6 +272,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( // directly with |rtp_rtcp_|. rtcp_feedback_buffer_(this, nack_sender, this), packet_buffer_(kPacketBufferStartSize, PacketBufferMaxSize()), + reference_finder_(std::make_unique()), has_received_frame_(false), frames_decryptable_(false), absolute_capture_time_interpolator_(clock) { @@ -321,8 +322,6 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE); } - reference_finder_ = std::make_unique(this); - // Only construct the encrypted receiver if frame encryption is enabled. if (config_.crypto_options.sframe.require_frame_encryption) { buffered_frame_decryptor_ = @@ -886,7 +885,6 @@ void RtpVideoStreamReceiver::OnAssembledFrame( // start from the |last_completed_picture_id_| and add an offset in // case of reordering. reference_finder_ = std::make_unique( - this, last_completed_picture_id_ + std::numeric_limits::max()); current_codec_ = frame->codec_type(); } else { @@ -908,26 +906,30 @@ void RtpVideoStreamReceiver::OnAssembledFrame( } else if (frame_transformer_delegate_) { frame_transformer_delegate_->TransformFrame(std::move(frame)); } else { - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } } -void RtpVideoStreamReceiver::OnCompleteFrame( - std::unique_ptr frame) { +void RtpVideoStreamReceiver::OnCompleteFrames( + RtpFrameReferenceFinder::ReturnVector frames) { { MutexLock lock(&last_seq_num_mutex_); - RtpFrameObject* rtp_frame = static_cast(frame.get()); - last_seq_num_for_pic_id_[rtp_frame->Id()] = rtp_frame->last_seq_num(); + for (const auto& frame : frames) { + RtpFrameObject* rtp_frame = static_cast(frame.get()); + last_seq_num_for_pic_id_[rtp_frame->Id()] = rtp_frame->last_seq_num(); + } + } + for (auto& frame : frames) { + last_completed_picture_id_ = + std::max(last_completed_picture_id_, frame->Id()); + complete_frame_callback_->OnCompleteFrame(std::move(frame)); } - last_completed_picture_id_ = - std::max(last_completed_picture_id_, frame->Id()); - complete_frame_callback_->OnCompleteFrame(std::move(frame)); } void RtpVideoStreamReceiver::OnDecryptedFrame( std::unique_ptr frame) { MutexLock lock(&reference_finder_lock_); - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void RtpVideoStreamReceiver::OnDecryptionStatusChange( @@ -1003,7 +1005,7 @@ void RtpVideoStreamReceiver::RemoveSecondarySink( void RtpVideoStreamReceiver::ManageFrame( std::unique_ptr frame) { MutexLock lock(&reference_finder_lock_); - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) { @@ -1058,7 +1060,7 @@ void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader( void RtpVideoStreamReceiver::NotifyReceiverOfEmptyPacket(uint16_t seq_num) { { MutexLock lock(&reference_finder_lock_); - reference_finder_->PaddingReceived(seq_num); + 
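The inlined OnSyncOffsetUpdated body above keeps the same frequency-offset arithmetic it had before moving onto the main thread. A small numeric restatement, assuming the 90 refers to the nominal 90 kHz RTP video clock:

def freq_offset_khz(estimated_freq_khz, max_freq_khz=10000.0):
    # Zero, negative or implausibly large estimates are reported as the max.
    if not 0.0 < estimated_freq_khz < max_freq_khz:
        return int(max_freq_khz)
    # Otherwise report the offset from 90 kHz, rounded to the nearest kHz.
    return int(abs(estimated_freq_khz - 90.0) + 0.5)


print(freq_offset_khz(90.4))   # 0
print(freq_offset_khz(92.7))   # 3
print(freq_offset_khz(-1.0))   # 10000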
OnCompleteFrames(reference_finder_->PaddingReceived(seq_num)); } video_coding::PacketBuffer::InsertResult insert_result; diff --git a/video/rtp_video_stream_receiver.h b/video/rtp_video_stream_receiver.h index a302e1e77f..b3d62f34a4 100644 --- a/video/rtp_video_stream_receiver.h +++ b/video/rtp_video_stream_receiver.h @@ -69,11 +69,18 @@ class RtpVideoStreamReceiver : public LossNotificationSender, public RecoveredPacketReceiver, public RtpPacketSinkInterface, public KeyFrameRequestSender, - public OnCompleteFrameCallback, public OnDecryptedFrameCallback, public OnDecryptionStatusChangeCallback, public RtpVideoFrameReceiver { public: + // A complete frame is a frame which has received all its packets and all its + // references are known. + class OnCompleteFrameCallback { + public: + virtual ~OnCompleteFrameCallback() {} + virtual void OnCompleteFrame(std::unique_ptr frame) = 0; + }; + // DEPRECATED due to dependency on ReceiveStatisticsProxy. RtpVideoStreamReceiver( Clock* clock, @@ -173,8 +180,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender, // Don't use, still experimental. void RequestPacketRetransmit(const std::vector& sequence_numbers); - // Implements OnCompleteFrameCallback. - void OnCompleteFrame(std::unique_ptr frame) override; + void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames); // Implements OnDecryptedFrameCallback. void OnDecryptedFrame(std::unique_ptr frame) override; diff --git a/video/rtp_video_stream_receiver2.cc b/video/rtp_video_stream_receiver2.cc index a45dbeb2ff..4b43247b18 100644 --- a/video/rtp_video_stream_receiver2.cc +++ b/video/rtp_video_stream_receiver2.cc @@ -36,7 +36,6 @@ #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h" -#include "modules/utility/include/process_thread.h" #include "modules/video_coding/frame_object.h" #include "modules/video_coding/h264_sprop_parameter_sets.h" #include "modules/video_coding/h264_sps_pps_tracker.h" @@ -49,7 +48,6 @@ #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" #include "system_wrappers/include/ntp_time.h" -#include "video/receive_statistics_proxy2.h" namespace webrtc { @@ -134,17 +132,18 @@ RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RtcpFeedbackBuffer( RTC_DCHECK(key_frame_request_sender_); RTC_DCHECK(nack_sender_); RTC_DCHECK(loss_notification_sender_); + packet_sequence_checker_.Detach(); } void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RequestKeyFrame() { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); request_key_frame_ = true; } void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendNack( const std::vector& sequence_numbers, bool buffering_allowed) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); RTC_DCHECK(!sequence_numbers.empty()); nack_sequence_numbers_.insert(nack_sequence_numbers_.end(), sequence_numbers.cbegin(), @@ -161,7 +160,7 @@ void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification( uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); RTC_DCHECK(buffering_allowed); RTC_DCHECK(!lntf_state_) << "SendLossNotification() called twice in a row with no call to " @@ -171,7 +170,7 @@ void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification( } void 
RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); bool request_key_frame = false; std::vector nack_sequence_numbers; @@ -211,7 +210,6 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( ReceiveStatistics* rtp_receive_statistics, RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer, RtcpCnameCallback* rtcp_cname_callback, - ProcessThread* process_thread, NackSender* nack_sender, KeyFrameRequestSender* keyframe_request_sender, OnCompleteFrameCallback* complete_frame_callback, @@ -220,7 +218,6 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( : clock_(clock), config_(*config), packet_router_(packet_router), - process_thread_(process_thread), ntp_estimator_(clock), rtp_header_extensions_(config_.rtp.extensions), forced_playout_delay_max_ms_("max_ms", absl::nullopt), @@ -251,9 +248,11 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( &rtcp_feedback_buffer_, &rtcp_feedback_buffer_)), packet_buffer_(kPacketBufferStartSize, PacketBufferMaxSize()), + reference_finder_(std::make_unique()), has_received_frame_(false), frames_decryptable_(false), absolute_capture_time_interpolator_(clock) { + packet_sequence_checker_.Detach(); constexpr bool remb_candidate = true; if (packet_router_) packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate); @@ -287,16 +286,12 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_}, field_trial::FindFullName("WebRTC-ForcePlayoutDelay")); - process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); - if (config_.rtp.lntf.enabled) { loss_notification_controller_ = std::make_unique(&rtcp_feedback_buffer_, &rtcp_feedback_buffer_); } - reference_finder_ = std::make_unique(this); - // Only construct the encrypted receiver if frame encryption is enabled. 
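The renames from worker_task_checker_ to packet_sequence_checker_ above all follow one pattern: the checker is detached in the constructor and then bound by the first RTC_DCHECK_RUN_ON that runs on the packet delivery sequence. A minimal sketch of that pattern follows; it assumes the header locations in this tree (api/sequence_checker.h, rtc_base/system/no_unique_address.h, rtc_base/thread_annotations.h), and the class and member names are illustrative, not part of the patch.

#include "api/sequence_checker.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"

class PacketSequenceBound {
 public:
  PacketSequenceBound() {
    // Constructed on the worker thread; detach so that the first
    // RTC_DCHECK_RUN_ON binds the checker to the packet delivery sequence.
    packet_sequence_checker_.Detach();
  }

  void OnRtpPacket() {
    RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
    ++packets_received_;
  }

 private:
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker packet_sequence_checker_;
  int packets_received_ RTC_GUARDED_BY(packet_sequence_checker_) = 0;
};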
if (config_.crypto_options.sframe.require_frame_encryption) { buffered_frame_decryptor_ = @@ -316,8 +311,6 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( } RtpVideoStreamReceiver2::~RtpVideoStreamReceiver2() { - process_thread_->DeRegisterModule(rtp_rtcp_.get()); - if (packet_router_) packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get()); UpdateHistograms(); @@ -330,7 +323,7 @@ void RtpVideoStreamReceiver2::AddReceiveCodec( const VideoCodec& video_codec, const std::map& codec_params, bool raw_payload) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (codec_params.count(cricket::kH264FmtpSpsPpsIdrInKeyframe) || field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) { packet_buffer_.ForceSpsPpsIdrIsH264Keyframe(); @@ -343,7 +336,7 @@ void RtpVideoStreamReceiver2::AddReceiveCodec( } absl::optional RtpVideoStreamReceiver2::GetSyncInfo() const { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); Syncable::Info info; if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs, &info.capture_time_ntp_frac, @@ -363,6 +356,7 @@ absl::optional RtpVideoStreamReceiver2::GetSyncInfo() const { return info; } +// RTC_RUN_ON(packet_sequence_checker_) RtpVideoStreamReceiver2::ParseGenericDependenciesResult RtpVideoStreamReceiver2::ParseGenericDependenciesExtension( const RtpPacketReceived& rtp_packet, @@ -472,7 +466,7 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData( rtc::CopyOnWriteBuffer codec_payload, const RtpPacketReceived& rtp_packet, const RTPVideoHeader& video) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); auto packet = std::make_unique(rtp_packet, video); @@ -630,6 +624,8 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData( void RtpVideoStreamReceiver2::OnRecoveredPacket(const uint8_t* rtp_packet, size_t rtp_packet_length) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RtpPacketReceived packet; if (!packet.Parse(rtp_packet, rtp_packet_length)) return; @@ -652,11 +648,10 @@ void RtpVideoStreamReceiver2::OnRecoveredPacket(const uint8_t* rtp_packet, // This method handles both regular RTP packets and packets recovered // via FlexFEC. void RtpVideoStreamReceiver2::OnRtpPacket(const RtpPacketReceived& packet) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); - if (!receiving_) { + if (!receiving_) return; - } ReceivePacket(packet); @@ -713,6 +708,7 @@ bool RtpVideoStreamReceiver2::IsDecryptable() const { return frames_decryptable_; } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::OnInsertedPacket( video_coding::PacketBuffer::InsertResult result) { RTC_DCHECK_RUN_ON(&worker_task_checker_); @@ -790,9 +786,9 @@ void RtpVideoStreamReceiver2::OnInsertedPacket( } } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::OnAssembledFrame( std::unique_ptr frame) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); RTC_DCHECK(frame); const absl::optional& descriptor = @@ -832,7 +828,6 @@ void RtpVideoStreamReceiver2::OnAssembledFrame( // start from the |last_completed_picture_id_| and add an offset in case // of reordering. 
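The substance of the receiver changes above is that RtpFrameReferenceFinder no longer takes a callback: ManageFrame() and PaddingReceived() return the frames whose references could be resolved, and the receiver forwards them itself through OnCompleteFrames(). A rough sketch of the resulting call pattern, reusing the names from the patch inside a hypothetical Receiver class whose members (reference_finder_, complete_frame_callback_, last_completed_picture_id_) are assumed to exist as they do above; include paths follow this tree.

#include <algorithm>
#include <memory>
#include <utility>

#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/rtp_frame_reference_finder.h"

void Receiver::HandleAssembledFrame(std::unique_ptr<RtpFrameObject> frame) {
  // The finder hands back zero or more frames that just became complete.
  OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
}

void Receiver::OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames) {
  for (auto& frame : frames) {
    last_completed_picture_id_ =
        std::max(last_completed_picture_id_, frame->Id());
    complete_frame_callback_->OnCompleteFrame(std::move(frame));
  }
}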
reference_finder_ = std::make_unique( - this, last_completed_picture_id_ + std::numeric_limits::max()); current_codec_ = frame->codec_type(); } else { @@ -854,25 +849,27 @@ void RtpVideoStreamReceiver2::OnAssembledFrame( } else if (frame_transformer_delegate_) { frame_transformer_delegate_->TransformFrame(std::move(frame)); } else { - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } } -void RtpVideoStreamReceiver2::OnCompleteFrame( - std::unique_ptr frame) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - RtpFrameObject* rtp_frame = static_cast(frame.get()); - last_seq_num_for_pic_id_[rtp_frame->Id()] = rtp_frame->last_seq_num(); +// RTC_RUN_ON(packet_sequence_checker_) +void RtpVideoStreamReceiver2::OnCompleteFrames( + RtpFrameReferenceFinder::ReturnVector frames) { + for (auto& frame : frames) { + RtpFrameObject* rtp_frame = static_cast(frame.get()); + last_seq_num_for_pic_id_[rtp_frame->Id()] = rtp_frame->last_seq_num(); - last_completed_picture_id_ = - std::max(last_completed_picture_id_, frame->Id()); - complete_frame_callback_->OnCompleteFrame(std::move(frame)); + last_completed_picture_id_ = + std::max(last_completed_picture_id_, frame->Id()); + complete_frame_callback_->OnCompleteFrame(std::move(frame)); + } } void RtpVideoStreamReceiver2::OnDecryptedFrame( std::unique_ptr frame) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - reference_finder_->ManageFrame(std::move(frame)); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void RtpVideoStreamReceiver2::OnDecryptionStatusChange( @@ -886,7 +883,9 @@ void RtpVideoStreamReceiver2::OnDecryptionStatusChange( void RtpVideoStreamReceiver2::SetFrameDecryptor( rtc::scoped_refptr frame_decryptor) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + // TODO(bugs.webrtc.org/11993): Update callers or post the operation over to + // the network thread. 
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (buffered_frame_decryptor_ == nullptr) { buffered_frame_decryptor_ = std::make_unique(this, this); @@ -911,7 +910,7 @@ void RtpVideoStreamReceiver2::UpdateRtt(int64_t max_rtt_ms) { } absl::optional RtpVideoStreamReceiver2::LastReceivedPacketMs() const { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (last_received_rtp_system_time_) { return absl::optional(last_received_rtp_system_time_->ms()); } @@ -920,7 +919,7 @@ absl::optional RtpVideoStreamReceiver2::LastReceivedPacketMs() const { absl::optional RtpVideoStreamReceiver2::LastReceivedKeyframePacketMs() const { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (last_received_keyframe_rtp_system_time_) { return absl::optional( last_received_keyframe_rtp_system_time_->ms()); @@ -930,10 +929,11 @@ absl::optional RtpVideoStreamReceiver2::LastReceivedKeyframePacketMs() void RtpVideoStreamReceiver2::ManageFrame( std::unique_ptr frame) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - reference_finder_->ManageFrame(std::move(frame)); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) { RTC_DCHECK_RUN_ON(&worker_task_checker_); if (packet.payload_size() == 0) { @@ -963,9 +963,9 @@ void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) { parsed_payload->video_header); } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader( const RtpPacketReceived& packet) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); if (packet.PayloadType() == config_.rtp.red_payload_type && packet.payload_size() > 0) { if (packet.payload()[0] == config_.rtp.ulpfec_payload_type) { @@ -984,10 +984,11 @@ void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader( // In the case of a video stream without picture ids and no rtx the // RtpFrameReferenceFinder will need to know about padding to // correctly calculate frame references. +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) { RTC_DCHECK_RUN_ON(&worker_task_checker_); - reference_finder_->PaddingReceived(seq_num); + OnCompleteFrames(reference_finder_->PaddingReceived(seq_num)); OnInsertedPacket(packet_buffer_.InsertPadding(seq_num)); if (nack_module_) { @@ -1003,7 +1004,7 @@ void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) { bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (!receiving_) { return false; @@ -1045,7 +1046,7 @@ bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet, } void RtpVideoStreamReceiver2::FrameContinuous(int64_t picture_id) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (!nack_module_) return; @@ -1058,8 +1059,7 @@ void RtpVideoStreamReceiver2::FrameContinuous(int64_t picture_id) { } void RtpVideoStreamReceiver2::FrameDecoded(int64_t picture_id) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - // Running on the decoder thread. 
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_); int seq_num = -1; auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); if (seq_num_it != last_seq_num_for_pic_id_.end()) { @@ -1084,12 +1084,12 @@ void RtpVideoStreamReceiver2::SignalNetworkState(NetworkState state) { } void RtpVideoStreamReceiver2::StartReceive() { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); receiving_ = true; } void RtpVideoStreamReceiver2::StopReceive() { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); receiving_ = false; } @@ -1120,6 +1120,7 @@ void RtpVideoStreamReceiver2::UpdateHistograms() { } } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::InsertSpsPpsIntoTracker(uint8_t payload_type) { RTC_DCHECK_RUN_ON(&worker_task_checker_); diff --git a/video/rtp_video_stream_receiver2.h b/video/rtp_video_stream_receiver2.h index f8d5a26b65..ddff26b3bd 100644 --- a/video/rtp_video_stream_receiver2.h +++ b/video/rtp_video_stream_receiver2.h @@ -54,7 +54,6 @@ namespace webrtc { class NackModule2; class PacketRouter; -class ProcessThread; class ReceiveStatistics; class RtcpRttStats; class RtpPacketReceived; @@ -65,11 +64,18 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, public RecoveredPacketReceiver, public RtpPacketSinkInterface, public KeyFrameRequestSender, - public OnCompleteFrameCallback, public OnDecryptedFrameCallback, public OnDecryptionStatusChangeCallback, public RtpVideoFrameReceiver { public: + // A complete frame is a frame which has received all its packets and all its + // references are known. + class OnCompleteFrameCallback { + public: + virtual ~OnCompleteFrameCallback() {} + virtual void OnCompleteFrame(std::unique_ptr frame) = 0; + }; + RtpVideoStreamReceiver2( TaskQueueBase* current_queue, Clock* clock, @@ -83,7 +89,6 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, ReceiveStatistics* rtp_receive_statistics, RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer, RtcpCnameCallback* rtcp_cname_callback, - ProcessThread* process_thread, NackSender* nack_sender, // The KeyFrameRequestSender is optional; if not provided, key frame // requests are sent via the internal RtpRtcp module. @@ -114,7 +119,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Returns number of different frames seen. int GetUniqueFramesSeen() const { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); return frame_counter_.GetUniqueSeen(); } @@ -146,12 +151,12 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Decryption not SRTP. bool IsDecryptable() const; - // Don't use, still experimental. + // Request packet retransmits via NACK. Called via + // VideoReceiveStream2::SendNack, which gets called when + // RtpVideoStreamReceiver2::RtcpFeedbackBuffer's SendNack and + // SendBufferedRtcpFeedback methods (see `rtcp_feedback_buffer_` below). void RequestPacketRetransmit(const std::vector& sequence_numbers); - // Implements OnCompleteFrameCallback. - void OnCompleteFrame(std::unique_ptr frame) override; - // Implements OnDecryptedFrameCallback. void OnDecryptedFrame(std::unique_ptr frame) override; @@ -179,6 +184,9 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Implements RtpVideoFrameReceiver. 
void ManageFrame(std::unique_ptr frame) override; + void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frame) + RTC_RUN_ON(packet_sequence_checker_); + // Used for buffering RTCP feedback messages and sending them all together. // Note: // 1. Key frame requests and NACKs are mutually exclusive, with the @@ -226,20 +234,20 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, bool decodability_flag; }; - RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_task_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; KeyFrameRequestSender* const key_frame_request_sender_; NackSender* const nack_sender_; LossNotificationSender* const loss_notification_sender_; // Key-frame-request-related state. - bool request_key_frame_ RTC_GUARDED_BY(worker_task_checker_); + bool request_key_frame_ RTC_GUARDED_BY(packet_sequence_checker_); // NACK-related state. std::vector nack_sequence_numbers_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); absl::optional lntf_state_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); }; enum ParseGenericDependenciesResult { kDropPacket, @@ -249,28 +257,34 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Entry point doing non-stats work for a received packet. Called // for the same packet both before and after RED decapsulation. - void ReceivePacket(const RtpPacketReceived& packet); + void ReceivePacket(const RtpPacketReceived& packet) + RTC_RUN_ON(packet_sequence_checker_); + // Parses and handles RED headers. // This function assumes that it's being called from only one thread. - void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet); - void NotifyReceiverOfEmptyPacket(uint16_t seq_num); + void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet) + RTC_RUN_ON(packet_sequence_checker_); + void NotifyReceiverOfEmptyPacket(uint16_t seq_num) + RTC_RUN_ON(packet_sequence_checker_); void UpdateHistograms(); bool IsRedEnabled() const; - void InsertSpsPpsIntoTracker(uint8_t payload_type); - void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result); + void InsertSpsPpsIntoTracker(uint8_t payload_type) + RTC_RUN_ON(packet_sequence_checker_); + void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result) + RTC_RUN_ON(packet_sequence_checker_); ParseGenericDependenciesResult ParseGenericDependenciesExtension( const RtpPacketReceived& rtp_packet, - RTPVideoHeader* video_header) RTC_RUN_ON(worker_task_checker_); - void OnAssembledFrame(std::unique_ptr frame); + RTPVideoHeader* video_header) RTC_RUN_ON(packet_sequence_checker_); + void OnAssembledFrame(std::unique_ptr frame) + RTC_RUN_ON(packet_sequence_checker_); void UpdatePacketReceiveTimestamps(const RtpPacketReceived& packet, bool is_keyframe) - RTC_RUN_ON(worker_task_checker_); + RTC_RUN_ON(packet_sequence_checker_); Clock* const clock_; // Ownership of this object lies with VideoReceiveStream, which owns |this|. 
const VideoReceiveStream::Config& config_; PacketRouter* const packet_router_; - ProcessThread* const process_thread_; RemoteNtpTimeEstimator ntp_estimator_; @@ -283,8 +297,16 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, std::unique_ptr ulpfec_receiver_; RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_task_checker_; - bool receiving_ RTC_GUARDED_BY(worker_task_checker_); - int64_t last_packet_log_ms_ RTC_GUARDED_BY(worker_task_checker_); + // TODO(bugs.webrtc.org/11993): This checker conceptually represents + // operations that belong to the network thread. The Call class is currently + // moving towards handling network packets on the network thread and while + // that work is ongoing, this checker may in practice represent the worker + // thread, but still serves as a mechanism of grouping together concepts + // that belong to the network thread. Once the packets are fully delivered + // on the network thread, this comment will be deleted. + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; + bool receiving_ RTC_GUARDED_BY(packet_sequence_checker_); + int64_t last_packet_log_ms_ RTC_GUARDED_BY(packet_sequence_checker_); const std::unique_ptr rtp_rtcp_; @@ -296,66 +318,68 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, std::unique_ptr loss_notification_controller_; video_coding::PacketBuffer packet_buffer_ - RTC_GUARDED_BY(worker_task_checker_); - UniqueTimestampCounter frame_counter_ RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); + UniqueTimestampCounter frame_counter_ + RTC_GUARDED_BY(packet_sequence_checker_); SeqNumUnwrapper frame_id_unwrapper_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); // Video structure provided in the dependency descriptor in a first packet // of a key frame. It is required to parse dependency descriptor in the // following delta packets. std::unique_ptr video_structure_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); // Frame id of the last frame with the attached video structure. // absl::nullopt when `video_structure_ == nullptr`; absl::optional video_structure_frame_id_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); std::unique_ptr reference_finder_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); absl::optional current_codec_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); uint32_t last_assembled_frame_rtp_timestamp_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); std::map last_seq_num_for_pic_id_ - RTC_GUARDED_BY(worker_task_checker_); - video_coding::H264SpsPpsTracker tracker_ RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); + video_coding::H264SpsPpsTracker tracker_ + RTC_GUARDED_BY(packet_sequence_checker_); // Maps payload id to the depacketizer. std::map> payload_type_map_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); // TODO(johan): Remove pt_codec_params_ once // https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved. // Maps a payload type to a map of out-of-band supplied codec parameters. 
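The member-by-member RTC_GUARDED_BY churn above is what lets clang's -Wthread-safety analysis enforce the new sequence at compile time, while the "// RTC_RUN_ON(packet_sequence_checker_)" comments in the .cc mark out-of-line definitions whose attribute is carried by the declaration in the header. A small illustrative sketch of how the two macros combine; the include paths reflect my reading of this tree and may need adjusting, and the class is hypothetical.

#include "api/sequence_checker.h"
#include "rtc_base/thread_annotations.h"

class Annotated {
 public:
  // The attribute sits on the declaration; the out-of-line definition then
  // only carries a "// RTC_RUN_ON(packet_sequence_checker_)" reminder.
  void HandlePacket() RTC_RUN_ON(packet_sequence_checker_);

 private:
  webrtc::SequenceChecker packet_sequence_checker_;
  // Touching this outside an RTC_RUN_ON(packet_sequence_checker_) function
  // produces a compile-time warning under -Wthread-safety.
  int last_seq_num_ RTC_GUARDED_BY(packet_sequence_checker_) = 0;
};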
std::map> pt_codec_params_ - RTC_GUARDED_BY(worker_task_checker_); - int16_t last_payload_type_ RTC_GUARDED_BY(worker_task_checker_) = -1; + RTC_GUARDED_BY(packet_sequence_checker_); + int16_t last_payload_type_ RTC_GUARDED_BY(packet_sequence_checker_) = -1; - bool has_received_frame_ RTC_GUARDED_BY(worker_task_checker_); + bool has_received_frame_ RTC_GUARDED_BY(packet_sequence_checker_); absl::optional last_received_rtp_timestamp_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); absl::optional last_received_keyframe_rtp_timestamp_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); absl::optional last_received_rtp_system_time_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); absl::optional last_received_keyframe_rtp_system_time_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); // Handles incoming encrypted frames and forwards them to the // rtp_reference_finder if they are decryptable. std::unique_ptr buffered_frame_decryptor_ - RTC_PT_GUARDED_BY(worker_task_checker_); + RTC_PT_GUARDED_BY(packet_sequence_checker_); bool frames_decryptable_ RTC_GUARDED_BY(worker_task_checker_); absl::optional last_color_space_; AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); CaptureClockOffsetUpdater capture_clock_offset_updater_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); int64_t last_completed_picture_id_ = 0; @@ -363,9 +387,9 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, frame_transformer_delegate_; SeqNumUnwrapper rtp_seq_num_unwrapper_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); std::map packet_infos_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); }; } // namespace webrtc diff --git a/video/rtp_video_stream_receiver2_unittest.cc b/video/rtp_video_stream_receiver2_unittest.cc index 9ade57d81b..7ccf0a5faa 100644 --- a/video/rtp_video_stream_receiver2_unittest.cc +++ b/video/rtp_video_stream_receiver2_unittest.cc @@ -13,6 +13,7 @@ #include #include +#include "api/task_queue/task_queue_base.h" #include "api/video/video_codec_type.h" #include "api/video/video_frame_type.h" #include "common_video/h264/h264_common.h" @@ -38,6 +39,7 @@ #include "test/gtest.h" #include "test/mock_frame_transformer.h" #include "test/time_controller/simulated_task_queue.h" +#include "test/time_controller/simulated_time_controller.h" using ::testing::_; using ::testing::ElementsAre; @@ -94,7 +96,8 @@ class MockKeyFrameRequestSender : public KeyFrameRequestSender { MOCK_METHOD(void, RequestKeyFrame, (), (override)); }; -class MockOnCompleteFrameCallback : public OnCompleteFrameCallback { +class MockOnCompleteFrameCallback + : public RtpVideoStreamReceiver2::OnCompleteFrameCallback { public: MOCK_METHOD(void, DoOnCompleteFrame, (EncodedFrame*), ()); MOCK_METHOD(void, DoOnCompleteFrameFailNullptr, (EncodedFrame*), ()); @@ -157,17 +160,20 @@ class RtpVideoStreamReceiver2Test : public ::testing::Test, public: RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {} explicit RtpVideoStreamReceiver2Test(std::string field_trials) - : override_field_trials_(field_trials), - config_(CreateConfig()), - process_thread_(ProcessThread::Create("TestThread")) { + : time_controller_(Timestamp::Millis(100)), + 
task_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue( + "RtpVideoStreamReceiver2Test", + TaskQueueFactory::Priority::NORMAL)), + task_queue_setter_(task_queue_.get()), + override_field_trials_(field_trials), + config_(CreateConfig()) { rtp_receive_statistics_ = ReceiveStatistics::Create(Clock::GetRealTimeClock()); rtp_video_stream_receiver_ = std::make_unique( TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr, - nullptr, process_thread_.get(), &mock_nack_sender_, - &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_, - nullptr, nullptr); + nullptr, &mock_nack_sender_, &mock_key_frame_request_sender_, + &mock_on_complete_frame_callback_, nullptr, nullptr); VideoCodec codec; codec.codecType = kVideoCodecGeneric; rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {}, @@ -232,8 +238,9 @@ class RtpVideoStreamReceiver2Test : public ::testing::Test, return config; } - TokenTaskQueue task_queue_; - TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_{&task_queue_}; + GlobalSimulatedTimeController time_controller_; + std::unique_ptr task_queue_; + TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_; const webrtc::test::ScopedFieldTrials override_field_trials_; VideoReceiveStream::Config config_; @@ -241,7 +248,6 @@ class RtpVideoStreamReceiver2Test : public ::testing::Test, MockKeyFrameRequestSender mock_key_frame_request_sender_; MockTransport mock_transport_; MockOnCompleteFrameCallback mock_on_complete_frame_callback_; - std::unique_ptr process_thread_; std::unique_ptr rtp_receive_statistics_; std::unique_ptr rtp_video_stream_receiver_; RtpPacketSinkInterface* test_packet_sink_ = nullptr; @@ -1126,8 +1132,8 @@ TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) { auto receiver = std::make_unique( TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr, - nullptr, process_thread_.get(), &mock_nack_sender_, nullptr, - &mock_on_complete_frame_callback_, nullptr, mock_frame_transformer); + nullptr, &mock_nack_sender_, nullptr, &mock_on_complete_frame_callback_, + nullptr, mock_frame_transformer); VideoCodec video_codec; video_codec.codecType = kVideoCodecGeneric; receiver->AddReceiveCodec(kPayloadType, video_codec, {}, diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc index 5a79b2a986..765e1e1716 100644 --- a/video/rtp_video_stream_receiver_unittest.cc +++ b/video/rtp_video_stream_receiver_unittest.cc @@ -93,7 +93,8 @@ class MockKeyFrameRequestSender : public KeyFrameRequestSender { MOCK_METHOD(void, RequestKeyFrame, (), (override)); }; -class MockOnCompleteFrameCallback : public OnCompleteFrameCallback { +class MockOnCompleteFrameCallback + : public RtpVideoStreamReceiver::OnCompleteFrameCallback { public: MOCK_METHOD(void, DoOnCompleteFrame, (EncodedFrame*), ()); MOCK_METHOD(void, DoOnCompleteFrameFailNullptr, (EncodedFrame*), ()); diff --git a/video/video_analyzer.cc b/video/video_analyzer.cc index 81dcf055b8..b90ba2973a 100644 --- a/video/video_analyzer.cc +++ b/video/video_analyzer.cc @@ -18,6 +18,7 @@ #include "common_video/libyuv/include/webrtc_libyuv.h" #include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/cpu_time.h" #include "rtc_base/format_macros.h" #include "rtc_base/memory_usage.h" @@ 
-212,7 +213,7 @@ PacketReceiver::DeliveryStatus VideoAnalyzer::DeliverPacket( int64_t packet_time_us) { // Ignore timestamps of RTCP packets. They're not synchronized with // RTP packet timestamps and so they would confuse wrap_handler_. - if (RtpHeaderParser::IsRtcp(packet.cdata(), packet.size())) { + if (IsRtcpPacket(packet)) { return receiver_->DeliverPacket(media_type, std::move(packet), packet_time_us); } @@ -600,7 +601,7 @@ bool VideoAnalyzer::AllFramesRecordedLocked() { bool VideoAnalyzer::FrameProcessed() { MutexLock lock(&comparison_lock_); ++frames_processed_; - assert(frames_processed_ <= frames_to_process_); + RTC_DCHECK_LE(frames_processed_, frames_to_process_); return frames_processed_ == frames_to_process_ || (clock_->CurrentTime() > test_end_ && comparisons_.empty()); } diff --git a/video/video_quality_test.cc b/video/video_quality_test.cc index b87957f1c6..b77a4759a2 100644 --- a/video/video_quality_test.cc +++ b/video/video_quality_test.cc @@ -925,13 +925,13 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, } CreateMatchingFecConfig(recv_transport, *GetVideoSendConfig()); - GetFlexFecConfig()->transport_cc = params_.call.send_side_bwe; + GetFlexFecConfig()->rtp.transport_cc = params_.call.send_side_bwe; if (params_.call.send_side_bwe) { - GetFlexFecConfig()->rtp_header_extensions.push_back( + GetFlexFecConfig()->rtp.extensions.push_back( RtpExtension(RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberExtensionId)); } else { - GetFlexFecConfig()->rtp_header_extensions.push_back( + GetFlexFecConfig()->rtp.extensions.push_back( RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId)); } } diff --git a/video/video_receive_stream.cc b/video/video_receive_stream.cc index dbdba388aa..da8eb7de60 100644 --- a/video/video_receive_stream.cc +++ b/video/video_receive_stream.cc @@ -762,7 +762,6 @@ VideoReceiveStream::RecordingState VideoReceiveStream::SetAndGetRecordingState( RTC_DCHECK_RUN_ON(&decode_queue_); // Save old state. old_state.callback = std::move(encoded_frame_buffer_function_); - old_state.keyframe_needed = keyframe_generation_requested_; old_state.last_keyframe_request_ms = last_keyframe_request_ms_; // Set new state. @@ -771,7 +770,7 @@ VideoReceiveStream::RecordingState VideoReceiveStream::SetAndGetRecordingState( RequestKeyFrame(clock_->TimeInMilliseconds()); keyframe_generation_requested_ = true; } else { - keyframe_generation_requested_ = state.keyframe_needed; + keyframe_generation_requested_ = false; last_keyframe_request_ms_ = state.last_keyframe_request_ms.value_or(0); } event.Set(); diff --git a/video/video_receive_stream.h b/video/video_receive_stream.h index f3b51892ff..c778d74558 100644 --- a/video/video_receive_stream.h +++ b/video/video_receive_stream.h @@ -45,12 +45,13 @@ class VCMTiming; namespace internal { -class VideoReceiveStream : public webrtc::DEPRECATED_VideoReceiveStream, - public rtc::VideoSinkInterface, - public NackSender, - public OnCompleteFrameCallback, - public Syncable, - public CallStatsObserver { +class VideoReceiveStream + : public webrtc::DEPRECATED_VideoReceiveStream, + public rtc::VideoSinkInterface, + public NackSender, + public RtpVideoStreamReceiver::OnCompleteFrameCallback, + public Syncable, + public CallStatsObserver { public: // The default number of milliseconds to pass before re-requesting a key frame // to be sent. 
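In the video_quality_test.cc hunk above, the FlexFEC receive configuration moved its transport feedback flag and header extensions under an rtp sub-struct. A short illustrative sketch of filling in the config the new way; the transport pointer and the extension id are placeholders, not values from the patch.

// Illustrative values only; mirrors the field moves shown above.
webrtc::FlexfecReceiveStream::Config flexfec_config(&rtcp_send_transport);
flexfec_config.rtp.transport_cc = true;
flexfec_config.rtp.extensions.push_back(webrtc::RtpExtension(
    webrtc::RtpExtension::kTransportSequenceNumberUri, /*id=*/1));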
@@ -86,6 +87,8 @@ class VideoReceiveStream : public webrtc::DEPRECATED_VideoReceiveStream, void Start() override; void Stop() override; + const RtpConfig& rtp_config() const override { return config_.rtp; } + webrtc::VideoReceiveStream::Stats GetStats() const override; void AddSecondarySink(RtpPacketSinkInterface* sink) override; @@ -111,7 +114,7 @@ class VideoReceiveStream : public webrtc::DEPRECATED_VideoReceiveStream, void SendNack(const std::vector& sequence_numbers, bool buffering_allowed) override; - // Implements OnCompleteFrameCallback. + // Implements RtpVideoStreamReceiver::OnCompleteFrameCallback. void OnCompleteFrame(std::unique_ptr frame) override; // Implements CallStatsObserver::OnRttUpdate diff --git a/video/video_receive_stream2.cc b/video/video_receive_stream2.cc index c9ec9e0123..72257f01cc 100644 --- a/video/video_receive_stream2.cc +++ b/video/video_receive_stream2.cc @@ -211,30 +211,27 @@ int DetermineMaxWaitForFrame(const VideoReceiveStream::Config& config, : kMaxWaitForFrameMs; } -VideoReceiveStream2::VideoReceiveStream2( - TaskQueueFactory* task_queue_factory, - TaskQueueBase* current_queue, - RtpStreamReceiverControllerInterface* receiver_controller, - int num_cpu_cores, - PacketRouter* packet_router, - VideoReceiveStream::Config config, - ProcessThread* process_thread, - CallStats* call_stats, - Clock* clock, - VCMTiming* timing) +VideoReceiveStream2::VideoReceiveStream2(TaskQueueFactory* task_queue_factory, + Call* call, + int num_cpu_cores, + PacketRouter* packet_router, + VideoReceiveStream::Config config, + CallStats* call_stats, + Clock* clock, + VCMTiming* timing) : task_queue_factory_(task_queue_factory), transport_adapter_(config.rtcp_send_transport), config_(std::move(config)), num_cpu_cores_(num_cpu_cores), - worker_thread_(current_queue), + call_(call), clock_(clock), call_stats_(call_stats), source_tracker_(clock_), - stats_proxy_(&config_, clock_, worker_thread_), + stats_proxy_(&config_, clock_, call->worker_thread()), rtp_receive_statistics_(ReceiveStatistics::Create(clock_)), timing_(timing), video_receiver_(clock_, timing_.get()), - rtp_video_stream_receiver_(worker_thread_, + rtp_video_stream_receiver_(call->worker_thread(), clock_, &transport_adapter_, call_stats->AsRtcpRttStats(), @@ -243,13 +240,12 @@ VideoReceiveStream2::VideoReceiveStream2( rtp_receive_statistics_.get(), &stats_proxy_, &stats_proxy_, - process_thread, this, // NackSender nullptr, // Use default KeyFrameRequestSender this, // OnCompleteFrameCallback config_.frame_decryptor, config_.frame_transformer), - rtp_stream_sync_(current_queue, this), + rtp_stream_sync_(call->worker_thread(), this), max_wait_for_keyframe_ms_(DetermineMaxWaitForFrame(config, true)), max_wait_for_frame_ms_(DetermineMaxWaitForFrame(config, false)), low_latency_renderer_enabled_("enabled", true), @@ -261,10 +257,10 @@ VideoReceiveStream2::VideoReceiveStream2( TaskQueueFactory::Priority::HIGH)) { RTC_LOG(LS_INFO) << "VideoReceiveStream2: " << config_.ToString(); - RTC_DCHECK(worker_thread_); + RTC_DCHECK(call_->worker_thread()); RTC_DCHECK(config_.renderer); RTC_DCHECK(call_stats_); - module_process_sequence_checker_.Detach(); + packet_sequence_checker_.Detach(); RTC_DCHECK(!config_.decoders.empty()); RTC_CHECK(config_.decoder_factory); @@ -282,15 +278,10 @@ VideoReceiveStream2::VideoReceiveStream2( frame_buffer_.reset( new video_coding::FrameBuffer(clock_, timing_.get(), &stats_proxy_)); - // Register with RtpStreamReceiverController. 
- media_receiver_ = receiver_controller->CreateReceiver( - config_.rtp.remote_ssrc, &rtp_video_stream_receiver_); if (config_.rtp.rtx_ssrc) { rtx_receive_stream_ = std::make_unique( &rtp_video_stream_receiver_, config.rtp.rtx_associated_payload_types, config_.rtp.remote_ssrc, rtp_receive_statistics_.get()); - rtx_receiver_ = receiver_controller->CreateReceiver( - config_.rtp.rtx_ssrc, rtx_receive_stream_.get()); } else { rtp_receive_statistics_->EnableRetransmitDetection(config.rtp.remote_ssrc, true); @@ -309,20 +300,45 @@ VideoReceiveStream2::VideoReceiveStream2( VideoReceiveStream2::~VideoReceiveStream2() { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); RTC_LOG(LS_INFO) << "~VideoReceiveStream2: " << config_.ToString(); + RTC_DCHECK(!media_receiver_); + RTC_DCHECK(!rtx_receiver_); Stop(); } +void VideoReceiveStream2::RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK(!media_receiver_); + RTC_DCHECK(!rtx_receiver_); + + // Register with RtpStreamReceiverController. + media_receiver_ = receiver_controller->CreateReceiver( + config_.rtp.remote_ssrc, &rtp_video_stream_receiver_); + if (config_.rtp.rtx_ssrc) { + RTC_DCHECK(rtx_receive_stream_); + rtx_receiver_ = receiver_controller->CreateReceiver( + config_.rtp.rtx_ssrc, rtx_receive_stream_.get()); + } +} + +void VideoReceiveStream2::UnregisterFromTransport() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + media_receiver_.reset(); + rtx_receiver_.reset(); +} + void VideoReceiveStream2::SignalNetworkState(NetworkState state) { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); rtp_video_stream_receiver_.SignalNetworkState(state); } bool VideoReceiveStream2::DeliverRtcp(const uint8_t* packet, size_t length) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); return rtp_video_stream_receiver_.DeliverRtcp(packet, length); } void VideoReceiveStream2::SetSync(Syncable* audio_syncable) { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); rtp_stream_sync_.ConfigureSync(audio_syncable); } @@ -365,9 +381,13 @@ void VideoReceiveStream2::Start() { const bool raw_payload = config_.rtp.raw_payload_types.count(decoder.payload_type) > 0; - rtp_video_stream_receiver_.AddReceiveCodec(decoder.payload_type, codec, - decoder.video_format.parameters, - raw_payload); + { + // TODO(bugs.webrtc.org/11993): Make this call on the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.AddReceiveCodec( + decoder.payload_type, codec, decoder.video_format.parameters, + raw_payload); + } RTC_CHECK_EQ(VCM_OK, video_receiver_.RegisterReceiveCodec( decoder.payload_type, &codec, num_cpu_cores_)); } @@ -389,12 +409,23 @@ void VideoReceiveStream2::Start() { StartNextDecode(); }); decoder_running_ = true; - rtp_video_stream_receiver_.StartReceive(); + + { + // TODO(bugs.webrtc.org/11993): Make this call on the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.StartReceive(); + } } void VideoReceiveStream2::Stop() { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); - rtp_video_stream_receiver_.StopReceive(); + { + // TODO(bugs.webrtc.org/11993): Make this call on the network thread. + // Also call `GetUniqueFramesSeen()` at the same time (since it's a counter + // that's updated on the network thread). 
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.StopReceive(); + } stats_proxy_.OnUniqueFramesCounted( rtp_video_stream_receiver_.GetUniqueFramesSeen()); @@ -432,6 +463,8 @@ void VideoReceiveStream2::Stop() { void VideoReceiveStream2::CreateAndRegisterExternalDecoder( const Decoder& decoder) { + TRACE_EVENT0("webrtc", + "VideoReceiveStream2::CreateAndRegisterExternalDecoder"); std::unique_ptr video_decoder = config_.decoder_factory->CreateVideoDecoder(decoder.video_format); // If we still have no valid decoder, we have to create a "Null" decoder @@ -529,7 +562,7 @@ void VideoReceiveStream2::OnFrame(const VideoFrame& video_frame) { // TODO(bugs.webrtc.org/10739): we should set local capture clock offset for // |video_frame.packet_infos|. But VideoFrame is const qualified here. - worker_thread_->PostTask( + call_->worker_thread()->PostTask( ToQueuedTask(task_safety_, [frame_meta, this]() { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); int64_t video_playout_ntp_ms; @@ -617,8 +650,13 @@ void VideoReceiveStream2::OnCompleteFrame(std::unique_ptr frame) { } int64_t last_continuous_pid = frame_buffer_->InsertFrame(std::move(frame)); - if (last_continuous_pid != -1) - rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid); + if (last_continuous_pid != -1) { + { + // TODO(bugs.webrtc.org/11993): Call on the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid); + } + } } void VideoReceiveStream2::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) { @@ -634,7 +672,7 @@ uint32_t VideoReceiveStream2::id() const { } absl::optional VideoReceiveStream2::GetInfo() const { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); absl::optional info = rtp_video_stream_receiver_.GetSyncInfo(); @@ -685,9 +723,10 @@ void VideoReceiveStream2::StartNextDecode() { HandleEncodedFrame(std::move(frame)); } else { int64_t now_ms = clock_->TimeInMilliseconds(); - worker_thread_->PostTask(ToQueuedTask( + // TODO(bugs.webrtc.org/11993): PostTask to the network thread. + call_->worker_thread()->PostTask(ToQueuedTask( task_safety_, [this, now_ms, wait_ms = GetMaxWaitMs()]() { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); HandleFrameBufferTimeout(now_ms, wait_ms); })); } @@ -746,19 +785,22 @@ void VideoReceiveStream2::HandleEncodedFrame( force_request_key_frame = true; } - worker_thread_->PostTask(ToQueuedTask( - task_safety_, - [this, now_ms, received_frame_is_keyframe, force_request_key_frame, - decoded_frame_picture_id, keyframe_request_is_due]() { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + { + // TODO(bugs.webrtc.org/11993): Make this PostTask to the network thread. 
+ call_->worker_thread()->PostTask(ToQueuedTask( + task_safety_, + [this, now_ms, received_frame_is_keyframe, force_request_key_frame, + decoded_frame_picture_id, keyframe_request_is_due]() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); - if (decoded_frame_picture_id != -1) - rtp_video_stream_receiver_.FrameDecoded(decoded_frame_picture_id); + if (decoded_frame_picture_id != -1) + rtp_video_stream_receiver_.FrameDecoded(decoded_frame_picture_id); - HandleKeyFrameGeneration(received_frame_is_keyframe, now_ms, - force_request_key_frame, - keyframe_request_is_due); - })); + HandleKeyFrameGeneration(received_frame_is_keyframe, now_ms, + force_request_key_frame, + keyframe_request_is_due); + })); + } } int VideoReceiveStream2::DecodeAndMaybeDispatchEncodedFrame( @@ -821,13 +863,12 @@ int VideoReceiveStream2::DecodeAndMaybeDispatchEncodedFrame( return decode_result; } +// RTC_RUN_ON(packet_sequence_checker_) void VideoReceiveStream2::HandleKeyFrameGeneration( bool received_frame_is_keyframe, int64_t now_ms, bool always_request_key_frame, bool keyframe_request_is_due) { - // Running on worker_sequence_checker_. - bool request_key_frame = always_request_key_frame; // Repeat sending keyframe requests if we've requested a keyframe. @@ -851,9 +892,9 @@ void VideoReceiveStream2::HandleKeyFrameGeneration( } } +// RTC_RUN_ON(packet_sequence_checker_) void VideoReceiveStream2::HandleFrameBufferTimeout(int64_t now_ms, int64_t wait_ms) { - // Running on |worker_sequence_checker_|. absl::optional last_packet_ms = rtp_video_stream_receiver_.LastReceivedPacketMs(); @@ -873,8 +914,8 @@ void VideoReceiveStream2::HandleFrameBufferTimeout(int64_t now_ms, } } +// RTC_RUN_ON(packet_sequence_checker_) bool VideoReceiveStream2::IsReceivingKeyFrame(int64_t timestamp_ms) const { - // Running on worker_sequence_checker_. absl::optional last_keyframe_packet_ms = rtp_video_stream_receiver_.LastReceivedKeyframePacketMs(); @@ -946,13 +987,13 @@ VideoReceiveStream2::SetAndGetRecordingState(RecordingState state, event.Set(); }); - old_state.keyframe_needed = keyframe_generation_requested_; - if (generate_key_frame) { rtp_video_stream_receiver_.RequestKeyFrame(); - keyframe_generation_requested_ = true; - } else { - keyframe_generation_requested_ = state.keyframe_needed; + { + // TODO(bugs.webrtc.org/11993): Post this to the network thread. 
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + keyframe_generation_requested_ = true; + } } event.Wait(rtc::Event::kForever); @@ -960,7 +1001,7 @@ VideoReceiveStream2::SetAndGetRecordingState(RecordingState state, } void VideoReceiveStream2::GenerateKeyFrame() { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); RequestKeyFrame(clock_->TimeInMilliseconds()); keyframe_generation_requested_ = true; } diff --git a/video/video_receive_stream2.h b/video/video_receive_stream2.h index c22ce1c027..9557044277 100644 --- a/video/video_receive_stream2.h +++ b/video/video_receive_stream2.h @@ -18,6 +18,7 @@ #include "api/task_queue/task_queue_factory.h" #include "api/units/timestamp.h" #include "api/video/recordable_encoded_frame.h" +#include "call/call.h" #include "call/rtp_packet_sink_interface.h" #include "call/syncable.h" #include "call/video_receive_stream.h" @@ -38,7 +39,6 @@ namespace webrtc { -class ProcessThread; class RtpStreamReceiverInterface; class RtpStreamReceiverControllerInterface; class RtxReceiveStream; @@ -75,12 +75,13 @@ struct VideoFrameMetaData { const Timestamp decode_timestamp; }; -class VideoReceiveStream2 : public webrtc::VideoReceiveStream, - public rtc::VideoSinkInterface, - public NackSender, - public OnCompleteFrameCallback, - public Syncable, - public CallStatsObserver { +class VideoReceiveStream2 + : public webrtc::VideoReceiveStream, + public rtc::VideoSinkInterface, + public NackSender, + public RtpVideoStreamReceiver2::OnCompleteFrameCallback, + public Syncable, + public CallStatsObserver { public: // The default number of milliseconds to pass before re-requesting a key frame // to be sent. @@ -90,17 +91,29 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, static constexpr size_t kBufferedEncodedFramesMaxSize = 60; VideoReceiveStream2(TaskQueueFactory* task_queue_factory, - TaskQueueBase* current_queue, - RtpStreamReceiverControllerInterface* receiver_controller, + Call* call, int num_cpu_cores, PacketRouter* packet_router, VideoReceiveStream::Config config, - ProcessThread* process_thread, CallStats* call_stats, Clock* clock, VCMTiming* timing); + // Destruction happens on the worker thread. Prior to destruction the caller + // must ensure that a registration with the transport has been cleared. See + // `RegisterWithTransport` for details. + // TODO(tommi): As a further improvement to this, performing the full + // destruction on the network thread could be made the default. ~VideoReceiveStream2() override; + // Called on `packet_sequence_checker_` to register/unregister with the + // network transport. + void RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller); + // If registration has previously been done (via `RegisterWithTransport`) then + // `UnregisterFromTransport` must be called prior to destruction, on the + // network thread. 
+ void UnregisterFromTransport(); + const Config& config() const { return config_; } void SignalNetworkState(NetworkState state); @@ -112,6 +125,8 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, void Start() override; void Stop() override; + const RtpConfig& rtp_config() const override { return config_.rtp; } + webrtc::VideoReceiveStream::Stats GetStats() const override; // SetBaseMinimumPlayoutDelayMs and GetBaseMinimumPlayoutDelayMs are called @@ -134,7 +149,7 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, void SendNack(const std::vector& sequence_numbers, bool buffering_allowed) override; - // Implements OnCompleteFrameCallback. + // Implements RtpVideoStreamReceiver2::OnCompleteFrameCallback. void OnCompleteFrame(std::unique_ptr frame) override; // Implements CallStatsObserver::OnRttUpdate @@ -164,32 +179,39 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, void HandleEncodedFrame(std::unique_ptr frame) RTC_RUN_ON(decode_queue_); void HandleFrameBufferTimeout(int64_t now_ms, int64_t wait_ms) - RTC_RUN_ON(worker_sequence_checker_); + RTC_RUN_ON(packet_sequence_checker_); void UpdatePlayoutDelays() const RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_sequence_checker_); void RequestKeyFrame(int64_t timestamp_ms) - RTC_RUN_ON(worker_sequence_checker_); + RTC_RUN_ON(packet_sequence_checker_); void HandleKeyFrameGeneration(bool received_frame_is_keyframe, int64_t now_ms, bool always_request_key_frame, bool keyframe_request_is_due) - RTC_RUN_ON(worker_sequence_checker_); + RTC_RUN_ON(packet_sequence_checker_); bool IsReceivingKeyFrame(int64_t timestamp_ms) const - RTC_RUN_ON(worker_sequence_checker_); + RTC_RUN_ON(packet_sequence_checker_); int DecodeAndMaybeDispatchEncodedFrame(std::unique_ptr frame) RTC_RUN_ON(decode_queue_); void UpdateHistograms(); RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_; - RTC_NO_UNIQUE_ADDRESS SequenceChecker module_process_sequence_checker_; + // TODO(bugs.webrtc.org/11993): This checker conceptually represents + // operations that belong to the network thread. The Call class is currently + // moving towards handling network packets on the network thread and while + // that work is ongoing, this checker may in practice represent the worker + // thread, but still serves as a mechanism of grouping together concepts + // that belong to the network thread. Once the packets are fully delivered + // on the network thread, this comment will be deleted. + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; TaskQueueFactory* const task_queue_factory_; TransportAdapter transport_adapter_; const VideoReceiveStream::Config config_; const int num_cpu_cores_; - TaskQueueBase* const worker_thread_; + Call* const call_; Clock* const clock_; CallStats* const call_stats_; @@ -217,9 +239,12 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, // Members for the new jitter buffer experiment. std::unique_ptr frame_buffer_; - std::unique_ptr media_receiver_; - std::unique_ptr rtx_receive_stream_; - std::unique_ptr rtx_receiver_; + std::unique_ptr media_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); + std::unique_ptr rtx_receive_stream_ + RTC_GUARDED_BY(packet_sequence_checker_); + std::unique_ptr rtx_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); // Whenever we are in an undecodable state (stream has just started or due to // a decoding error) we require a keyframe to restart the stream. 
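The constructor and destructor changes above split transport registration out of the VideoReceiveStream2 lifetime. The intended ordering, reconstructed as a sketch from the updated unit tests further down (the object names are illustrative, and construction arguments follow the new eight-parameter constructor):

// Construction can happen on the worker thread.
auto receive_stream = std::make_unique<webrtc::internal::VideoReceiveStream2>(
    task_queue_factory, call, /*num_cpu_cores=*/2, &packet_router,
    config.Copy(), &call_stats, clock, timing);

// On the packet/network sequence: attach to the transport, then start.
receive_stream->RegisterWithTransport(&rtp_stream_receiver_controller);
receive_stream->Start();

// ... receive ...

receive_stream->Stop();
// Must be undone on the same sequence before destruction.
receive_stream->UnregisterFromTransport();
receive_stream.reset();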
@@ -258,7 +283,7 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, std::function encoded_frame_buffer_function_ RTC_GUARDED_BY(decode_queue_); // Set to true while we're requesting keyframes but not yet received one. - bool keyframe_generation_requested_ RTC_GUARDED_BY(worker_sequence_checker_) = + bool keyframe_generation_requested_ RTC_GUARDED_BY(packet_sequence_checker_) = false; // Lock to avoid unnecessary per-frame idle wakeups in the code. webrtc::Mutex pending_resolution_mutex_; diff --git a/video/video_receive_stream2_unittest.cc b/video/video_receive_stream2_unittest.cc index 3bbc0704b9..850fd0dbb5 100644 --- a/video/video_receive_stream2_unittest.cc +++ b/video/video_receive_stream2_unittest.cc @@ -23,6 +23,7 @@ #include "call/rtp_stream_receiver_controller.h" #include "common_video/test/utilities.h" #include "media/base/fake_video_renderer.h" +#include "media/engine/fake_webrtc_call.h" #include "modules/pacing/packet_router.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/utility/include/process_thread.h" @@ -108,18 +109,20 @@ class FrameObjectFake : public EncodedFrame { class VideoReceiveStream2Test : public ::testing::Test { public: VideoReceiveStream2Test() - : process_thread_(ProcessThread::Create("TestThread")), - task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), - call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()), - h264_decoder_factory_(&mock_h264_video_decoder_) {} + : task_queue_factory_(CreateDefaultTaskQueueFactory()), + h264_decoder_factory_(&mock_h264_video_decoder_), + config_(&mock_transport_, &h264_decoder_factory_), + call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {} + ~VideoReceiveStream2Test() override { + if (video_receive_stream_) + video_receive_stream_->UnregisterFromTransport(); + } - void SetUp() { + void SetUp() override { constexpr int kDefaultNumCpuCores = 2; config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; config_.renderer = &fake_renderer_; - config_.decoder_factory = &h264_decoder_factory_; VideoReceiveStream::Decoder h264_decoder; h264_decoder.payload_type = 99; h264_decoder.video_format = SdpVideoFormat("H264"); @@ -133,21 +136,21 @@ class VideoReceiveStream2Test : public ::testing::Test { video_receive_stream_ = std::make_unique( - task_queue_factory_.get(), loop_.task_queue(), - &rtp_stream_receiver_controller_, kDefaultNumCpuCores, - &packet_router_, config_.Copy(), process_thread_.get(), - &call_stats_, clock_, timing_); + task_queue_factory_.get(), &fake_call_, kDefaultNumCpuCores, + &packet_router_, config_.Copy(), &call_stats_, clock_, timing_); + video_receive_stream_->RegisterWithTransport( + &rtp_stream_receiver_controller_); } protected: test::RunLoop loop_; - std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; + test::VideoDecoderProxyFactory h264_decoder_factory_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; MockVideoDecoder mock_h264_video_decoder_; - test::VideoDecoderProxyFactory h264_decoder_factory_; cricket::FakeVideoRenderer fake_renderer_; + cricket::FakeCall fake_call_; MockTransport mock_transport_; PacketRouter packet_router_; RtpStreamReceiverController rtp_stream_receiver_controller_; @@ -284,16 +287,18 @@ class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test { VideoReceiveStream2TestWithFakeDecoder() : fake_decoder_factory_( []() { return std::make_unique(); }), - process_thread_(ProcessThread::Create("TestThread")), 
task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), + config_(&mock_transport_, &fake_decoder_factory_), call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {} + ~VideoReceiveStream2TestWithFakeDecoder() override { + if (video_receive_stream_) + video_receive_stream_->UnregisterFromTransport(); + } - void SetUp() { + void SetUp() override { config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; config_.renderer = &fake_renderer_; - config_.decoder_factory = &fake_decoder_factory_; VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); @@ -304,19 +309,22 @@ class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test { void ReCreateReceiveStream(VideoReceiveStream::RecordingState state) { constexpr int kDefaultNumCpuCores = 2; - video_receive_stream_ = nullptr; + if (video_receive_stream_) { + video_receive_stream_->UnregisterFromTransport(); + video_receive_stream_ = nullptr; + } timing_ = new VCMTiming(clock_); video_receive_stream_.reset(new webrtc::internal::VideoReceiveStream2( - task_queue_factory_.get(), loop_.task_queue(), - &rtp_stream_receiver_controller_, kDefaultNumCpuCores, &packet_router_, - config_.Copy(), process_thread_.get(), &call_stats_, clock_, timing_)); + task_queue_factory_.get(), &fake_call_, kDefaultNumCpuCores, + &packet_router_, config_.Copy(), &call_stats_, clock_, timing_)); + video_receive_stream_->RegisterWithTransport( + &rtp_stream_receiver_controller_); video_receive_stream_->SetAndGetRecordingState(std::move(state), false); } protected: test::RunLoop loop_; test::FunctionVideoDecoderFactory fake_decoder_factory_; - std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; @@ -324,6 +332,7 @@ class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test { MockTransport mock_transport_; PacketRouter packet_router_; RtpStreamReceiverController rtp_stream_receiver_controller_; + cricket::FakeCall fake_call_; std::unique_ptr video_receive_stream_; Clock* clock_; VCMTiming* timing_; @@ -544,12 +553,11 @@ class VideoReceiveStream2TestWithSimulatedClock Transport* transport, VideoDecoderFactory* decoder_factory, rtc::VideoSinkInterface* renderer) { - VideoReceiveStream::Config config(transport); + VideoReceiveStream::Config config(transport, decoder_factory); config.rtp.remote_ssrc = 1111; config.rtp.local_ssrc = 2222; config.rtp.nack.rtp_history_ms = GetParam(); // rtx-time. 
config.renderer = renderer; - config.decoder_factory = decoder_factory; VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); @@ -562,24 +570,27 @@ class VideoReceiveStream2TestWithSimulatedClock fake_decoder_factory_([this] { return std::make_unique([this] { OnFrameDecoded(); }); }), - process_thread_(time_controller_.CreateProcessThread("ProcessThread")), config_(GetConfig(&mock_transport_, &fake_decoder_factory_, &fake_renderer_)), call_stats_(time_controller_.GetClock(), loop_.task_queue()), video_receive_stream_(time_controller_.GetTaskQueueFactory(), - loop_.task_queue(), - &rtp_stream_receiver_controller_, + &fake_call_, /*num_cores=*/2, &packet_router_, config_.Copy(), - process_thread_.get(), &call_stats_, time_controller_.GetClock(), new VCMTiming(time_controller_.GetClock())) { + video_receive_stream_.RegisterWithTransport( + &rtp_stream_receiver_controller_); video_receive_stream_.Start(); } + ~VideoReceiveStream2TestWithSimulatedClock() override { + video_receive_stream_.UnregisterFromTransport(); + } + void OnFrameDecoded() { event_->Set(); } void PassEncodedFrameAndWait(std::unique_ptr frame) { @@ -594,9 +605,9 @@ class VideoReceiveStream2TestWithSimulatedClock GlobalSimulatedTimeController time_controller_; test::RunLoop loop_; test::FunctionVideoDecoderFactory fake_decoder_factory_; - std::unique_ptr process_thread_; MockTransport mock_transport_; FakeRenderer fake_renderer_; + cricket::FakeCall fake_call_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; PacketRouter packet_router_; @@ -709,19 +720,21 @@ INSTANTIATE_TEST_SUITE_P( class VideoReceiveStream2TestWithLazyDecoderCreation : public ::testing::Test { public: VideoReceiveStream2TestWithLazyDecoderCreation() - : process_thread_(ProcessThread::Create("TestThread")), - task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), + : task_queue_factory_(CreateDefaultTaskQueueFactory()), + config_(&mock_transport_, &mock_h264_decoder_factory_), call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {} - void SetUp() { + ~VideoReceiveStream2TestWithLazyDecoderCreation() override { + video_receive_stream_->UnregisterFromTransport(); + } + + void SetUp() override { webrtc::test::ScopedFieldTrials field_trials( "WebRTC-PreStreamDecoders/max:0/"); constexpr int kDefaultNumCpuCores = 2; config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; config_.renderer = &fake_renderer_; - config_.decoder_factory = &mock_h264_decoder_factory_; VideoReceiveStream::Decoder h264_decoder; h264_decoder.payload_type = 99; h264_decoder.video_format = SdpVideoFormat("H264"); @@ -735,21 +748,21 @@ class VideoReceiveStream2TestWithLazyDecoderCreation : public ::testing::Test { video_receive_stream_ = std::make_unique( - task_queue_factory_.get(), loop_.task_queue(), - &rtp_stream_receiver_controller_, kDefaultNumCpuCores, - &packet_router_, config_.Copy(), process_thread_.get(), - &call_stats_, clock_, timing_); + task_queue_factory_.get(), &fake_call_, kDefaultNumCpuCores, + &packet_router_, config_.Copy(), &call_stats_, clock_, timing_); + video_receive_stream_->RegisterWithTransport( + &rtp_stream_receiver_controller_); } protected: test::RunLoop loop_; - std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; + MockVideoDecoderFactory mock_h264_decoder_factory_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; MockVideoDecoder mock_h264_video_decoder_; - MockVideoDecoderFactory 
mock_h264_decoder_factory_; cricket::FakeVideoRenderer fake_renderer_; + cricket::FakeCall fake_call_; MockTransport mock_transport_; PacketRouter packet_router_; RtpStreamReceiverController rtp_stream_receiver_controller_; diff --git a/video/video_receive_stream_unittest.cc b/video/video_receive_stream_unittest.cc index c320bfa569..cb14f7dc06 100644 --- a/video/video_receive_stream_unittest.cc +++ b/video/video_receive_stream_unittest.cc @@ -96,16 +96,15 @@ class VideoReceiveStreamTest : public ::testing::Test { VideoReceiveStreamTest() : process_thread_(ProcessThread::Create("TestThread")), task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), - call_stats_(Clock::GetRealTimeClock(), process_thread_.get()), - h264_decoder_factory_(&mock_h264_video_decoder_) {} + h264_decoder_factory_(&mock_h264_video_decoder_), + config_(&mock_transport_, &h264_decoder_factory_), + call_stats_(Clock::GetRealTimeClock(), process_thread_.get()) {} void SetUp() { constexpr int kDefaultNumCpuCores = 2; config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; config_.renderer = &fake_renderer_; - config_.decoder_factory = &h264_decoder_factory_; VideoReceiveStream::Decoder h264_decoder; h264_decoder.payload_type = 99; h264_decoder.video_format = SdpVideoFormat("H264"); @@ -126,10 +125,10 @@ class VideoReceiveStreamTest : public ::testing::Test { protected: std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; + test::VideoDecoderProxyFactory h264_decoder_factory_; VideoReceiveStream::Config config_; CallStats call_stats_; MockVideoDecoder mock_h264_video_decoder_; - test::VideoDecoderProxyFactory h264_decoder_factory_; cricket::FakeVideoRenderer fake_renderer_; MockTransport mock_transport_; PacketRouter packet_router_; @@ -235,14 +234,13 @@ class VideoReceiveStreamTestWithFakeDecoder : public ::testing::Test { []() { return std::make_unique(); }), process_thread_(ProcessThread::Create("TestThread")), task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), + config_(&mock_transport_, &fake_decoder_factory_), call_stats_(Clock::GetRealTimeClock(), process_thread_.get()) {} void SetUp() { config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; config_.renderer = &fake_renderer_; - config_.decoder_factory = &fake_decoder_factory_; VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); diff --git a/video/video_send_stream.cc b/video/video_send_stream.cc index 295e56bdf5..8c0f8f6f72 100644 --- a/video/video_send_stream.cc +++ b/video/video_send_stream.cc @@ -23,7 +23,6 @@ #include "system_wrappers/include/clock.h" #include "system_wrappers/include/field_trial.h" #include "video/adaptation/overuse_frame_detector.h" -#include "video/video_send_stream_impl.h" #include "video/video_stream_encoder.h" namespace webrtc { @@ -65,7 +64,10 @@ VideoStreamEncoder::BitrateAllocationCallbackType GetBitrateAllocationCallbackType(const VideoSendStream::Config& config) { if (webrtc::RtpExtension::FindHeaderExtensionByUri( config.rtp.extensions, - webrtc::RtpExtension::kVideoLayersAllocationUri)) { + webrtc::RtpExtension::kVideoLayersAllocationUri, + config.crypto_options.srtp.enable_encrypted_rtp_header_extensions + ? 
RtpExtension::Filter::kPreferEncryptedExtension + : RtpExtension::Filter::kDiscardEncryptedExtension)) { return VideoStreamEncoder::BitrateAllocationCallbackType:: kVideoLayersAllocation; } @@ -77,6 +79,32 @@ GetBitrateAllocationCallbackType(const VideoSendStream::Config& config) { kVideoBitrateAllocationWhenScreenSharing; } +RtpSenderFrameEncryptionConfig CreateFrameEncryptionConfig( + const VideoSendStream::Config* config) { + RtpSenderFrameEncryptionConfig frame_encryption_config; + frame_encryption_config.frame_encryptor = config->frame_encryptor; + frame_encryption_config.crypto_options = config->crypto_options; + return frame_encryption_config; +} + +RtpSenderObservers CreateObservers(RtcpRttStats* call_stats, + EncoderRtcpFeedback* encoder_feedback, + SendStatisticsProxy* stats_proxy, + SendDelayStats* send_delay_stats) { + RtpSenderObservers observers; + observers.rtcp_rtt_stats = call_stats; + observers.intra_frame_callback = encoder_feedback; + observers.rtcp_loss_notification_observer = encoder_feedback; + observers.report_block_data_observer = stats_proxy; + observers.rtp_stats = stats_proxy; + observers.bitrate_observer = stats_proxy; + observers.frame_count_observer = stats_proxy; + observers.rtcp_type_observer = stats_proxy; + observers.send_delay_observer = stats_proxy; + observers.send_packet_observer = send_delay_stats; + return observers; +} + } // namespace namespace internal { @@ -84,7 +112,6 @@ namespace internal { VideoSendStream::VideoSendStream( Clock* clock, int num_cpu_cores, - ProcessThread* module_process_thread, TaskQueueFactory* task_queue_factory, RtcpRttStats* call_stats, RtpTransportControllerSendInterface* transport, @@ -96,56 +123,79 @@ VideoSendStream::VideoSendStream( const std::map& suspended_ssrcs, const std::map& suspended_payload_states, std::unique_ptr fec_controller) - : worker_queue_(transport->GetWorkerQueue()), + : rtp_transport_queue_(transport->GetWorkerQueue()), + transport_(transport), stats_proxy_(clock, config, encoder_config.content_type), config_(std::move(config)), - content_type_(encoder_config.content_type) { + content_type_(encoder_config.content_type), + video_stream_encoder_(std::make_unique( + clock, + num_cpu_cores, + &stats_proxy_, + config_.encoder_settings, + std::make_unique(&stats_proxy_), + task_queue_factory, + GetBitrateAllocationCallbackType(config_))), + encoder_feedback_( + clock, + config_.rtp.ssrcs, + video_stream_encoder_.get(), + [this](uint32_t ssrc, const std::vector& seq_nums) { + return rtp_video_sender_->GetSentRtpPacketInfos(ssrc, seq_nums); + }), + rtp_video_sender_( + transport->CreateRtpVideoSender(suspended_ssrcs, + suspended_payload_states, + config_.rtp, + config_.rtcp_report_interval_ms, + config_.send_transport, + CreateObservers(call_stats, + &encoder_feedback_, + &stats_proxy_, + send_delay_stats), + event_log, + std::move(fec_controller), + CreateFrameEncryptionConfig(&config_), + config_.frame_transformer)), + send_stream_(clock, + &stats_proxy_, + rtp_transport_queue_, + transport, + bitrate_allocator, + video_stream_encoder_.get(), + &config_, + encoder_config.max_bitrate_bps, + encoder_config.bitrate_priority, + encoder_config.content_type, + rtp_video_sender_) { RTC_DCHECK(config_.encoder_settings.encoder_factory); RTC_DCHECK(config_.encoder_settings.bitrate_allocator_factory); - video_stream_encoder_ = std::make_unique( - clock, num_cpu_cores, &stats_proxy_, config_.encoder_settings, - std::make_unique(&stats_proxy_), task_queue_factory, - GetBitrateAllocationCallbackType(config_)); - - // 
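The extra Filter argument passed to RtpExtension::FindHeaderExtensionByUri at the top of this hunk decides whether encrypted header-extension entries may satisfy the lookup, based on whether encrypted RTP header extensions are enabled in the stream's crypto options. A small helper expressing that choice might look like the sketch below; the helper itself is ours, while the enum values and the srtp flag come from the change, and the include paths assume this WebRTC revision.

#include "api/crypto/crypto_options.h"
#include "api/rtp_parameters.h"

namespace {

// Hypothetical convenience wrapper around the inline ternary used in
// GetBitrateAllocationCallbackType() above.
webrtc::RtpExtension::Filter HeaderExtensionFilter(
    const webrtc::CryptoOptions& crypto_options) {
  return crypto_options.srtp.enable_encrypted_rtp_header_extensions
             ? webrtc::RtpExtension::Filter::kPreferEncryptedExtension
             : webrtc::RtpExtension::Filter::kDiscardEncryptedExtension;
}

}  // namespace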
TODO(srte): Initialization should not be done posted on a task queue. - // Note that the posted task must not outlive this scope since the closure - // references local variables. - worker_queue_->PostTask(ToQueuedTask( - [this, clock, call_stats, transport, bitrate_allocator, send_delay_stats, - event_log, &suspended_ssrcs, &encoder_config, &suspended_payload_states, - &fec_controller]() { - send_stream_.reset(new VideoSendStreamImpl( - clock, &stats_proxy_, worker_queue_, call_stats, transport, - bitrate_allocator, send_delay_stats, video_stream_encoder_.get(), - event_log, &config_, encoder_config.max_bitrate_bps, - encoder_config.bitrate_priority, suspended_ssrcs, - suspended_payload_states, encoder_config.content_type, - std::move(fec_controller))); - }, - [this]() { thread_sync_event_.Set(); })); - - // Wait for ConstructionTask to complete so that |send_stream_| can be used. - // |module_process_thread| must be registered and deregistered on the thread - // it was created on. - thread_sync_event_.Wait(rtc::Event::kForever); - send_stream_->RegisterProcessThread(module_process_thread); + video_stream_encoder_->SetFecControllerOverride(rtp_video_sender_); + ReconfigureVideoEncoder(std::move(encoder_config)); } VideoSendStream::~VideoSendStream() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_DCHECK(!send_stream_); + RTC_DCHECK(!running_); + transport_->DestroyRtpVideoSender(rtp_video_sender_); } void VideoSendStream::UpdateActiveSimulcastLayers( const std::vector active_layers) { RTC_DCHECK_RUN_ON(&thread_checker_); + // Keep our `running_` flag expected state in sync with active layers since + // the `send_stream_` will be implicitly stopped/started depending on the + // state of the layers. + bool running = false; + rtc::StringBuilder active_layers_string; active_layers_string << "{"; for (size_t i = 0; i < active_layers.size(); ++i) { if (active_layers[i]) { + running = true; active_layers_string << "1"; } else { active_layers_string << "0"; @@ -158,35 +208,53 @@ void VideoSendStream::UpdateActiveSimulcastLayers( RTC_LOG(LS_INFO) << "UpdateActiveSimulcastLayers: " << active_layers_string.str(); - VideoSendStreamImpl* send_stream = send_stream_.get(); - worker_queue_->PostTask([this, send_stream, active_layers] { - send_stream->UpdateActiveSimulcastLayers(active_layers); - thread_sync_event_.Set(); - }); + rtp_transport_queue_->PostTask( + ToQueuedTask(transport_queue_safety_, [this, active_layers] { + send_stream_.UpdateActiveSimulcastLayers(active_layers); + })); - thread_sync_event_.Wait(rtc::Event::kForever); + running_ = running; } void VideoSendStream::Start() { RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_INFO) << "VideoSendStream::Start"; - VideoSendStreamImpl* send_stream = send_stream_.get(); - worker_queue_->PostTask([this, send_stream] { - send_stream->Start(); + if (running_) + return; + + running_ = true; + + rtp_transport_queue_->PostTask(ToQueuedTask([this] { + transport_queue_safety_->SetAlive(); + send_stream_.Start(); thread_sync_event_.Set(); - }); + })); // It is expected that after VideoSendStream::Start has been called, incoming // frames are not dropped in VideoStreamEncoder. To ensure this, Start has to // be synchronized. + // TODO(tommi): ^^^ Validate if this still holds. 
thread_sync_event_.Wait(rtc::Event::kForever); } void VideoSendStream::Stop() { RTC_DCHECK_RUN_ON(&thread_checker_); + if (!running_) + return; RTC_DLOG(LS_INFO) << "VideoSendStream::Stop"; - VideoSendStreamImpl* send_stream = send_stream_.get(); - worker_queue_->PostTask([send_stream] { send_stream->Stop(); }); + running_ = false; + rtp_transport_queue_->PostTask(ToQueuedTask(transport_queue_safety_, [this] { + // As the stream can get re-used and implicitly restarted via changing + // the state of the active layers, we do not mark the + // `transport_queue_safety_` flag with `SetNotAlive()` here. That's only + // done when we stop permanently via `StopPermanentlyAndGetRtpStates()`. + send_stream_.Stop(); + })); +} + +bool VideoSendStream::started() { + RTC_DCHECK_RUN_ON(&thread_checker_); + return running_; } void VideoSendStream::AddAdaptationResource( @@ -224,7 +292,7 @@ VideoSendStream::Stats VideoSendStream::GetStats() { } absl::optional VideoSendStream::GetPacingFactorOverride() const { - return send_stream_->configured_pacing_factor_; + return send_stream_.configured_pacing_factor(); } void VideoSendStream::StopPermanentlyAndGetRtpStates( @@ -232,12 +300,16 @@ void VideoSendStream::StopPermanentlyAndGetRtpStates( VideoSendStream::RtpPayloadStateMap* payload_state_map) { RTC_DCHECK_RUN_ON(&thread_checker_); video_stream_encoder_->Stop(); - send_stream_->DeRegisterProcessThread(); - worker_queue_->PostTask([this, rtp_state_map, payload_state_map]() { - send_stream_->Stop(); - *rtp_state_map = send_stream_->GetRtpStates(); - *payload_state_map = send_stream_->GetRtpPayloadStates(); - send_stream_.reset(); + + running_ = false; + // Always run these cleanup steps regardless of whether running_ was set + // or not. This will unregister callbacks before destruction. + // See `VideoSendStreamImpl::StopVideoSendStream` for more. + rtp_transport_queue_->PostTask([this, rtp_state_map, payload_state_map]() { + transport_queue_safety_->SetNotAlive(); + send_stream_.Stop(); + *rtp_state_map = send_stream_.GetRtpStates(); + *payload_state_map = send_stream_.GetRtpPayloadStates(); thread_sync_event_.Set(); }); thread_sync_event_.Wait(rtc::Event::kForever); @@ -245,7 +317,7 @@ void VideoSendStream::StopPermanentlyAndGetRtpStates( void VideoSendStream::DeliverRtcp(const uint8_t* packet, size_t length) { // Called on a network thread. - send_stream_->DeliverRtcp(packet, length); + send_stream_.DeliverRtcp(packet, length); } } // namespace internal diff --git a/video/video_send_stream.h b/video/video_send_stream.h index e36f279ca6..0d132dd666 100644 --- a/video/video_send_stream.h +++ b/video/video_send_stream.h @@ -22,9 +22,13 @@ #include "call/video_receive_stream.h" #include "call/video_send_stream.h" #include "rtc_base/event.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "video/encoder_rtcp_feedback.h" #include "video/send_delay_stats.h" #include "video/send_statistics_proxy.h" +#include "video/video_send_stream_impl.h" namespace webrtc { namespace test { @@ -33,7 +37,6 @@ class VideoSendStreamPeer; class CallStats; class IvfFileWriter; -class ProcessThread; class RateLimiter; class RtpRtcp; class RtpTransportControllerSendInterface; @@ -45,8 +48,7 @@ class VideoSendStreamImpl; // VideoSendStream implements webrtc::VideoSendStream. // Internally, it delegates all public methods to VideoSendStreamImpl and / or -// VideoStreamEncoder. 
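Note how the safety-flag lifecycle is split across these methods: Start() posts an unguarded task that re-arms the flag with SetAlive(), Stop() deliberately leaves it alive so the stream can be implicitly restarted through the active-layer path, and only StopPermanentlyAndGetRtpStates() marks it not-alive. A minimal sketch of the PendingTaskSafetyFlag / ToQueuedTask idiom used here, with illustrative class and method names and include paths assumed for this revision:

#include "api/scoped_refptr.h"
#include "rtc_base/task_queue.h"
#include "rtc_base/task_utils/pending_task_safety_flag.h"
#include "rtc_base/task_utils/to_queued_task.h"

class QueueUser {
 public:
  explicit QueueUser(rtc::TaskQueue* queue) : queue_(queue) {}

  void Start() {
    // Unguarded post: this task must always run so it can (re)arm the flag.
    queue_->PostTask(webrtc::ToQueuedTask([this] {
      safety_->SetAlive();
      // ...start work on the queue...
    }));
  }

  void DoWork() {
    // Guarded post: becomes a no-op once the flag is marked not-alive.
    queue_->PostTask(webrtc::ToQueuedTask(safety_, [this] {
      // ...touch members safely on the queue...
    }));
  }

  void StopPermanently() {
    // The real code additionally waits on an event so that destruction
    // cannot race with this task.
    queue_->PostTask([this] { safety_->SetNotAlive(); });
  }

 private:
  rtc::TaskQueue* const queue_;
  rtc::scoped_refptr<webrtc::PendingTaskSafetyFlag> safety_ =
      webrtc::PendingTaskSafetyFlag::CreateDetached();
};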
VideoSendStreamInternal is created and deleted on -// |worker_queue|. +// VideoStreamEncoder. class VideoSendStream : public webrtc::VideoSendStream { public: using RtpStateMap = std::map; @@ -55,7 +57,6 @@ class VideoSendStream : public webrtc::VideoSendStream { VideoSendStream( Clock* clock, int num_cpu_cores, - ProcessThread* module_process_thread, TaskQueueFactory* task_queue_factory, RtcpRttStats* call_stats, RtpTransportControllerSendInterface* transport, @@ -77,6 +78,7 @@ class VideoSendStream : public webrtc::VideoSendStream { const std::vector active_layers) override; void Start() override; void Stop() override; + bool started() override; void AddAdaptationResource(rtc::scoped_refptr resource) override; std::vector> GetAdaptationResources() override; @@ -93,19 +95,23 @@ class VideoSendStream : public webrtc::VideoSendStream { private: friend class test::VideoSendStreamPeer; - class ConstructionTask; - absl::optional GetPacingFactorOverride() const; - SequenceChecker thread_checker_; - rtc::TaskQueue* const worker_queue_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_; + rtc::TaskQueue* const rtp_transport_queue_; + RtpTransportControllerSendInterface* const transport_; rtc::Event thread_sync_event_; + rtc::scoped_refptr transport_queue_safety_ = + PendingTaskSafetyFlag::CreateDetached(); SendStatisticsProxy stats_proxy_; const VideoSendStream::Config config_; const VideoEncoderConfig::ContentType content_type_; - std::unique_ptr send_stream_; std::unique_ptr video_stream_encoder_; + EncoderRtcpFeedback encoder_feedback_; + RtpVideoSenderInterface* const rtp_video_sender_; + VideoSendStreamImpl send_stream_; + bool running_ RTC_GUARDED_BY(thread_checker_) = false; }; } // namespace internal diff --git a/video/video_send_stream_impl.cc b/video/video_send_stream_impl.cc index ebd4445004..3fc6b676dc 100644 --- a/video/video_send_stream_impl.cc +++ b/video/video_send_stream_impl.cc @@ -33,6 +33,7 @@ #include "rtc_base/experiments/rate_control_settings.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/trace_event.h" #include "system_wrappers/include/clock.h" #include "system_wrappers/include/field_trial.h" @@ -130,32 +131,6 @@ int CalculateMaxPadBitrateBps(const std::vector& streams, return pad_up_to_bitrate_bps; } -RtpSenderFrameEncryptionConfig CreateFrameEncryptionConfig( - const VideoSendStream::Config* config) { - RtpSenderFrameEncryptionConfig frame_encryption_config; - frame_encryption_config.frame_encryptor = config->frame_encryptor; - frame_encryption_config.crypto_options = config->crypto_options; - return frame_encryption_config; -} - -RtpSenderObservers CreateObservers(RtcpRttStats* call_stats, - EncoderRtcpFeedback* encoder_feedback, - SendStatisticsProxy* stats_proxy, - SendDelayStats* send_delay_stats) { - RtpSenderObservers observers; - observers.rtcp_rtt_stats = call_stats; - observers.intra_frame_callback = encoder_feedback; - observers.rtcp_loss_notification_observer = encoder_feedback; - observers.report_block_data_observer = stats_proxy; - observers.rtp_stats = stats_proxy; - observers.bitrate_observer = stats_proxy; - observers.frame_count_observer = stats_proxy; - observers.rtcp_type_observer = stats_proxy; - observers.send_delay_observer = stats_proxy; - observers.send_packet_observer = send_delay_stats; - return observers; -} - absl::optional GetAlrSettings( VideoEncoderConfig::ContentType content_type) { if (content_type == 
VideoEncoderConfig::ContentType::kScreen) { @@ -177,6 +152,44 @@ bool SameStreamsEnabled(const VideoBitrateAllocation& lhs, } return true; } + +// Returns an optional that has value iff TransportSeqNumExtensionConfigured +// is `true` for the given video send stream config. +absl::optional GetConfiguredPacingFactor( + const VideoSendStream::Config& config, + VideoEncoderConfig::ContentType content_type, + const PacingConfig& default_pacing_config) { + if (!TransportSeqNumExtensionConfigured(config)) + return absl::nullopt; + + absl::optional alr_settings = + GetAlrSettings(content_type); + if (alr_settings) + return alr_settings->pacing_factor; + + RateControlSettings rate_control_settings = + RateControlSettings::ParseFromFieldTrials(); + return rate_control_settings.GetPacingFactor().value_or( + default_pacing_config.pacing_factor); +} + +uint32_t GetInitialEncoderMaxBitrate(int initial_encoder_max_bitrate) { + if (initial_encoder_max_bitrate > 0) + return rtc::dchecked_cast(initial_encoder_max_bitrate); + + // TODO(srte): Make sure max bitrate is not set to negative values. We don't + // have any way to handle unset values in downstream code, such as the + // bitrate allocator. Previously -1 was implicitly casted to UINT32_MAX, a + // behaviour that is not safe. Converting to 10 Mbps should be safe for + // reasonable use cases as it allows adding the max of multiple streams + // without wrappping around. + const int kFallbackMaxBitrateBps = 10000000; + RTC_DLOG(LS_ERROR) << "ERROR: Initial encoder max bitrate = " + << initial_encoder_max_bitrate << " which is <= 0!"; + RTC_DLOG(LS_INFO) << "Using default encoder max bitrate = 10 Mbps"; + return kFallbackMaxBitrateBps; +} + } // namespace PacingConfig::PacingConfig() @@ -192,162 +205,109 @@ PacingConfig::~PacingConfig() = default; VideoSendStreamImpl::VideoSendStreamImpl( Clock* clock, SendStatisticsProxy* stats_proxy, - rtc::TaskQueue* worker_queue, - RtcpRttStats* call_stats, + rtc::TaskQueue* rtp_transport_queue, RtpTransportControllerSendInterface* transport, BitrateAllocatorInterface* bitrate_allocator, - SendDelayStats* send_delay_stats, VideoStreamEncoderInterface* video_stream_encoder, - RtcEventLog* event_log, const VideoSendStream::Config* config, int initial_encoder_max_bitrate, double initial_encoder_bitrate_priority, - std::map suspended_ssrcs, - std::map suspended_payload_states, VideoEncoderConfig::ContentType content_type, - std::unique_ptr fec_controller) + RtpVideoSenderInterface* rtp_video_sender) : clock_(clock), has_alr_probing_(config->periodic_alr_bandwidth_probing || GetAlrSettings(content_type)), pacing_config_(PacingConfig()), stats_proxy_(stats_proxy), config_(config), - worker_queue_(worker_queue), + rtp_transport_queue_(rtp_transport_queue), timed_out_(false), transport_(transport), bitrate_allocator_(bitrate_allocator), disable_padding_(true), max_padding_bitrate_(0), encoder_min_bitrate_bps_(0), + encoder_max_bitrate_bps_( + GetInitialEncoderMaxBitrate(initial_encoder_max_bitrate)), encoder_target_rate_bps_(0), encoder_bitrate_priority_(initial_encoder_bitrate_priority), - has_packet_feedback_(false), video_stream_encoder_(video_stream_encoder), - encoder_feedback_(clock, config_->rtp.ssrcs, video_stream_encoder), bandwidth_observer_(transport->GetBandwidthObserver()), - rtp_video_sender_( - transport_->CreateRtpVideoSender(suspended_ssrcs, - suspended_payload_states, - config_->rtp, - config_->rtcp_report_interval_ms, - config_->send_transport, - CreateObservers(call_stats, - &encoder_feedback_, - 
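GetConfiguredPacingFactor() above resolves the override in a fixed order: no transport sequence-number extension means no override at all; otherwise the ALR experiment settings win, then the rate-control field trial, then the default from PacingConfig. The same precedence restated as a standalone function (the double value type is an assumption, since the optional's template argument did not survive formatting here):

#include <optional>

// Illustrative restatement of the precedence only; the real helper takes the
// VideoSendStream config, the content type and the PacingConfig shown above.
std::optional<double> ResolvePacingFactor(bool has_transport_seq_num_extension,
                                          std::optional<double> alr_factor,
                                          std::optional<double> field_trial_factor,
                                          double default_factor) {
  if (!has_transport_seq_num_extension)
    return std::nullopt;       // no send-side BWE feedback: leave pacing alone
  if (alr_factor)
    return alr_factor;         // ALR experiment settings take priority
  return field_trial_factor.value_or(default_factor);
}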
stats_proxy_, - send_delay_stats), - event_log, - std::move(fec_controller), - CreateFrameEncryptionConfig(config_), - config->frame_transformer)), - weak_ptr_factory_(this) { - video_stream_encoder->SetFecControllerOverride(rtp_video_sender_); - RTC_DCHECK_RUN_ON(worker_queue_); - RTC_LOG(LS_INFO) << "VideoSendStreamInternal: " << config_->ToString(); - weak_ptr_ = weak_ptr_factory_.GetWeakPtr(); - - encoder_feedback_.SetRtpVideoSender(rtp_video_sender_); - + rtp_video_sender_(rtp_video_sender), + configured_pacing_factor_( + GetConfiguredPacingFactor(*config_, content_type, pacing_config_)) { + RTC_DCHECK_GE(config_->rtp.payload_type, 0); + RTC_DCHECK_LE(config_->rtp.payload_type, 127); RTC_DCHECK(!config_->rtp.ssrcs.empty()); RTC_DCHECK(transport_); RTC_DCHECK_NE(initial_encoder_max_bitrate, 0); - - if (initial_encoder_max_bitrate > 0) { - encoder_max_bitrate_bps_ = - rtc::dchecked_cast(initial_encoder_max_bitrate); - } else { - // TODO(srte): Make sure max bitrate is not set to negative values. We don't - // have any way to handle unset values in downstream code, such as the - // bitrate allocator. Previously -1 was implicitly casted to UINT32_MAX, a - // behaviour that is not safe. Converting to 10 Mbps should be safe for - // reasonable use cases as it allows adding the max of multiple streams - // without wrappping around. - const int kFallbackMaxBitrateBps = 10000000; - RTC_DLOG(LS_ERROR) << "ERROR: Initial encoder max bitrate = " - << initial_encoder_max_bitrate << " which is <= 0!"; - RTC_DLOG(LS_INFO) << "Using default encoder max bitrate = 10 Mbps"; - encoder_max_bitrate_bps_ = kFallbackMaxBitrateBps; - } + RTC_LOG(LS_INFO) << "VideoSendStreamImpl: " << config_->ToString(); RTC_CHECK(AlrExperimentSettings::MaxOneFieldTrialEnabled()); + + // Only request rotation at the source when we positively know that the remote + // side doesn't support the rotation extension. This allows us to prepare the + // encoder in the expectation that rotation is supported - which is the common + // case. + bool rotation_applied = absl::c_none_of( + config_->rtp.extensions, [](const RtpExtension& extension) { + return extension.uri == RtpExtension::kVideoRotationUri; + }); + + video_stream_encoder_->SetSink(this, rotation_applied); + + absl::optional enable_alr_bw_probing; + // If send-side BWE is enabled, check if we should apply updated probing and // pacing settings. 
- if (TransportSeqNumExtensionConfigured(*config_)) { - has_packet_feedback_ = true; - + if (configured_pacing_factor_) { absl::optional alr_settings = GetAlrSettings(content_type); + int queue_time_limit_ms; if (alr_settings) { - transport->EnablePeriodicAlrProbing(true); - transport->SetPacingFactor(alr_settings->pacing_factor); - configured_pacing_factor_ = alr_settings->pacing_factor; - transport->SetQueueTimeLimit(alr_settings->max_paced_queue_time); + enable_alr_bw_probing = true; + queue_time_limit_ms = alr_settings->max_paced_queue_time; } else { RateControlSettings rate_control_settings = RateControlSettings::ParseFromFieldTrials(); - - transport->EnablePeriodicAlrProbing( - rate_control_settings.UseAlrProbing()); - const double pacing_factor = - rate_control_settings.GetPacingFactor().value_or( - pacing_config_.pacing_factor); - transport->SetPacingFactor(pacing_factor); - configured_pacing_factor_ = pacing_factor; - transport->SetQueueTimeLimit(pacing_config_.max_pacing_delay.Get().ms()); + enable_alr_bw_probing = rate_control_settings.UseAlrProbing(); + queue_time_limit_ms = pacing_config_.max_pacing_delay.Get().ms(); } + + transport->SetQueueTimeLimit(queue_time_limit_ms); } if (config_->periodic_alr_bandwidth_probing) { - transport->EnablePeriodicAlrProbing(true); + enable_alr_bw_probing = config_->periodic_alr_bandwidth_probing; } - RTC_DCHECK_GE(config_->rtp.payload_type, 0); - RTC_DCHECK_LE(config_->rtp.payload_type, 127); - - video_stream_encoder_->SetStartBitrate( - bitrate_allocator_->GetStartBitrate(this)); -} - -VideoSendStreamImpl::~VideoSendStreamImpl() { - RTC_DCHECK_RUN_ON(worker_queue_); - RTC_DCHECK(!rtp_video_sender_->IsActive()) - << "VideoSendStreamImpl::Stop not called"; - RTC_LOG(LS_INFO) << "~VideoSendStreamInternal: " << config_->ToString(); - transport_->DestroyRtpVideoSender(rtp_video_sender_); -} - -void VideoSendStreamImpl::RegisterProcessThread( - ProcessThread* module_process_thread) { - // Called on libjingle's worker thread (not worker_queue_), as part of the - // initialization steps. That's also the correct thread/queue for setting the - // state for |video_stream_encoder_|. - - // Only request rotation at the source when we positively know that the remote - // side doesn't support the rotation extension. This allows us to prepare the - // encoder in the expectation that rotation is supported - which is the common - // case. - bool rotation_applied = absl::c_none_of( - config_->rtp.extensions, [](const RtpExtension& extension) { - return extension.uri == RtpExtension::kVideoRotationUri; - }); + if (enable_alr_bw_probing) { + transport->EnablePeriodicAlrProbing(*enable_alr_bw_probing); + } - video_stream_encoder_->SetSink(this, rotation_applied); + rtp_transport_queue_->PostTask(ToQueuedTask(transport_queue_safety_, [this] { + if (configured_pacing_factor_) + transport_->SetPacingFactor(*configured_pacing_factor_); - rtp_video_sender_->RegisterProcessThread(module_process_thread); + video_stream_encoder_->SetStartBitrate( + bitrate_allocator_->GetStartBitrate(this)); + })); } -void VideoSendStreamImpl::DeRegisterProcessThread() { - rtp_video_sender_->DeRegisterProcessThread(); +VideoSendStreamImpl::~VideoSendStreamImpl() { + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_LOG(LS_INFO) << "~VideoSendStreamImpl: " << config_->ToString(); } void VideoSendStreamImpl::DeliverRtcp(const uint8_t* packet, size_t length) { // Runs on a network thread. 
- RTC_DCHECK(!worker_queue_->IsCurrent()); + RTC_DCHECK(!rtp_transport_queue_->IsCurrent()); rtp_video_sender_->DeliverRtcp(packet, length); } void VideoSendStreamImpl::UpdateActiveSimulcastLayers( const std::vector active_layers) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); bool previously_active = rtp_video_sender_->IsActive(); rtp_video_sender_->SetActiveModules(active_layers); if (!rtp_video_sender_->IsActive() && previously_active) { @@ -360,17 +320,21 @@ void VideoSendStreamImpl::UpdateActiveSimulcastLayers( } void VideoSendStreamImpl::Start() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); RTC_LOG(LS_INFO) << "VideoSendStream::Start"; if (rtp_video_sender_->IsActive()) return; + TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Start"); rtp_video_sender_->SetActive(true); StartupVideoSendStream(); } void VideoSendStreamImpl::StartupVideoSendStream() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + + transport_queue_safety_->SetAlive(); + bitrate_allocator_->AddObserver(this, GetAllocationConfig()); // Start monitoring encoder activity. { @@ -379,8 +343,8 @@ void VideoSendStreamImpl::StartupVideoSendStream() { activity_ = false; timed_out_ = false; check_encoder_activity_task_ = RepeatingTaskHandle::DelayedStart( - worker_queue_->Get(), kEncoderTimeOut, [this] { - RTC_DCHECK_RUN_ON(worker_queue_); + rtp_transport_queue_->Get(), kEncoderTimeOut, [this] { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); if (!activity_) { if (!timed_out_) { SignalEncoderTimedOut(); @@ -400,25 +364,29 @@ void VideoSendStreamImpl::StartupVideoSendStream() { } void VideoSendStreamImpl::Stop() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); RTC_LOG(LS_INFO) << "VideoSendStreamImpl::Stop"; if (!rtp_video_sender_->IsActive()) return; + + RTC_DCHECK(transport_queue_safety_->alive()); TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Stop"); rtp_video_sender_->SetActive(false); StopVideoSendStream(); } +// RTC_RUN_ON(rtp_transport_queue_) void VideoSendStreamImpl::StopVideoSendStream() { bitrate_allocator_->RemoveObserver(this); check_encoder_activity_task_.Stop(); video_stream_encoder_->OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(), DataRate::Zero(), 0, 0, 0); stats_proxy_->OnSetEncoderTargetRate(0); + transport_queue_safety_->SetNotAlive(); } void VideoSendStreamImpl::SignalEncoderTimedOut() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); // If the encoder has not produced anything the last kEncoderTimeOut and it // is supposed to, deregister as BitrateAllocatorObserver. This can happen // if a camera stops producing frames. 
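StartupVideoSendStream() above arms an encoder-activity watchdog with RepeatingTaskHandle::DelayedStart on the transport queue; each tick checks whether the encoder produced anything since the last check and clears the flag again. A self-contained sketch of that idiom follows, assuming the usual RepeatingTaskHandle contract that the closure returns the delay until its next run; the two-second interval is illustrative, not the value used above.

#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"
#include "rtc_base/task_utils/repeating_task.h"

class ActivityWatchdog {
 public:
  // Like the code above, assumed to be called on `queue` itself.
  void Start(webrtc::TaskQueueBase* queue) {
    const webrtc::TimeDelta kCheckInterval = webrtc::TimeDelta::Seconds(2);
    handle_ = webrtc::RepeatingTaskHandle::DelayedStart(
        queue, kCheckInterval, [this, kCheckInterval] {
          if (!activity_) {
            // e.g. SignalEncoderTimedOut() in the real code.
          }
          activity_ = false;      // require fresh activity before the next tick
          return kCheckInterval;  // delay until the next check
        });
  }

  void OnEncoderActivity() { activity_ = true; }
  void Stop() { handle_.Stop(); }  // must run on the same queue

 private:
  webrtc::RepeatingTaskHandle handle_;
  bool activity_ = false;
};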
@@ -430,17 +398,14 @@ void VideoSendStreamImpl::SignalEncoderTimedOut() { void VideoSendStreamImpl::OnBitrateAllocationUpdated( const VideoBitrateAllocation& allocation) { - if (!worker_queue_->IsCurrent()) { - auto ptr = weak_ptr_; - worker_queue_->PostTask([=] { - if (!ptr.get()) - return; - ptr->OnBitrateAllocationUpdated(allocation); - }); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask(ToQueuedTask(transport_queue_safety_, [=] { + OnBitrateAllocationUpdated(allocation); + })); return; } - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); int64_t now_ms = clock_->TimeInMilliseconds(); if (encoder_target_rate_bps_ != 0) { @@ -485,7 +450,7 @@ void VideoSendStreamImpl::OnVideoLayersAllocationUpdated( } void VideoSendStreamImpl::SignalEncoderActive() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); if (rtp_video_sender_->IsActive()) { RTC_LOG(LS_INFO) << "SignalEncoderActive, Encoder is active."; bitrate_allocator_->AddObserver(this, GetAllocationConfig()); @@ -507,21 +472,20 @@ void VideoSendStreamImpl::OnEncoderConfigurationChanged( bool is_svc, VideoEncoderConfig::ContentType content_type, int min_transmit_bitrate_bps) { - if (!worker_queue_->IsCurrent()) { - rtc::WeakPtr send_stream = weak_ptr_; - worker_queue_->PostTask([send_stream, streams, is_svc, content_type, - min_transmit_bitrate_bps]() mutable { - if (send_stream) { - send_stream->OnEncoderConfigurationChanged( - std::move(streams), is_svc, content_type, min_transmit_bitrate_bps); - } - }); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask(ToQueuedTask( + transport_queue_safety_, + [this, streams = std::move(streams), is_svc, content_type, + min_transmit_bitrate_bps]() mutable { + OnEncoderConfigurationChanged(std::move(streams), is_svc, + content_type, min_transmit_bitrate_bps); + })); return; } RTC_DCHECK_GE(config_->rtp.ssrcs.size(), streams.size()); TRACE_EVENT0("webrtc", "VideoSendStream::OnEncoderConfigurationChanged"); - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); const VideoCodecType codec_type = PayloadStringToCodecType(config_->rtp.payload_name); @@ -584,14 +548,15 @@ EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage( auto enable_padding_task = [this]() { if (disable_padding_) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); disable_padding_ = false; // To ensure that padding bitrate is propagated to the bitrate allocator. SignalEncoderActive(); } }; - if (!worker_queue_->IsCurrent()) { - worker_queue_->PostTask(enable_padding_task); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask( + ToQueuedTask(transport_queue_safety_, std::move(enable_padding_task))); } else { enable_padding_task(); } @@ -601,18 +566,16 @@ EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage( rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info); // Check if there's a throttled VideoBitrateAllocation that we should try // sending. 
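OnEncoderConfigurationChanged() above re-posts itself to the transport queue with `streams = std::move(streams)` and a mutable lambda, so the stream vector is moved across the thread hop instead of being copied twice. The capture pattern in isolation, with a trivial stand-in for the task queue:

#include <functional>
#include <string>
#include <utility>
#include <vector>

// Stand-in for PostTask; the real code posts to the transport queue guarded
// by the safety flag.
void Post(std::function<void()> task) { task(); }

void Reconfigure(std::vector<std::string> streams, bool on_queue) {
  if (!on_queue) {
    // Move the vector into the closure; `mutable` lets it be moved out again
    // when the task finally runs.
    Post([streams = std::move(streams)]() mutable {
      Reconfigure(std::move(streams), /*on_queue=*/true);
    });
    return;
  }
  // ...use `streams` on the queue...
}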
- rtc::WeakPtr send_stream = weak_ptr_; - auto update_task = [send_stream]() { - if (send_stream) { - RTC_DCHECK_RUN_ON(send_stream->worker_queue_); - auto& context = send_stream->video_bitrate_allocation_context_; - if (context && context->throttled_allocation) { - send_stream->OnBitrateAllocationUpdated(*context->throttled_allocation); - } + auto update_task = [this]() { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + auto& context = video_bitrate_allocation_context_; + if (context && context->throttled_allocation) { + OnBitrateAllocationUpdated(*context->throttled_allocation); } }; - if (!worker_queue_->IsCurrent()) { - worker_queue_->PostTask(update_task); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask( + ToQueuedTask(transport_queue_safety_, std::move(update_task))); } else { update_task(); } @@ -635,7 +598,7 @@ std::map VideoSendStreamImpl::GetRtpPayloadStates() } uint32_t VideoSendStreamImpl::OnBitrateUpdated(BitrateAllocationUpdate update) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); RTC_DCHECK(rtp_video_sender_->IsActive()) << "VideoSendStream::Start has not been called."; diff --git a/video/video_send_stream_impl.h b/video/video_send_stream_impl.h index 41a7859a77..babf1dcfe5 100644 --- a/video/video_send_stream_impl.h +++ b/video/video_send_stream_impl.h @@ -19,8 +19,6 @@ #include #include "absl/types/optional.h" -#include "api/fec_controller.h" -#include "api/rtc_event_log/rtc_event_log.h" #include "api/video/encoded_image.h" #include "api/video/video_bitrate_allocation.h" #include "api/video/video_bitrate_allocator.h" @@ -33,18 +31,14 @@ #include "call/rtp_video_sender_interface.h" #include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/utility/include/process_thread.h" #include "modules/video_coding/include/video_codec_interface.h" #include "rtc_base/experiments/field_trial_parser.h" -#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/weak_ptr.h" -#include "video/encoder_rtcp_feedback.h" -#include "video/send_delay_stats.h" #include "video/send_statistics_proxy.h" -#include "video/video_send_stream.h" namespace webrtc { namespace internal { @@ -60,42 +54,28 @@ struct PacingConfig { }; // VideoSendStreamImpl implements internal::VideoSendStream. -// It is created and destroyed on |worker_queue|. The intent is to decrease the -// need for locking and to ensure methods are called in sequence. -// Public methods except |DeliverRtcp| must be called on |worker_queue|. +// It is created and destroyed on `rtp_transport_queue`. The intent is to +// decrease the need for locking and to ensure methods are called in sequence. +// Public methods except `DeliverRtcp` must be called on `rtp_transport_queue`. // DeliverRtcp is called on the libjingle worker thread or a network thread. // An encoder may deliver frames through the EncodedImageCallback on an // arbitrary thread. 
class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, public VideoStreamEncoderInterface::EncoderSink { public: - VideoSendStreamImpl( - Clock* clock, - SendStatisticsProxy* stats_proxy, - rtc::TaskQueue* worker_queue, - RtcpRttStats* call_stats, - RtpTransportControllerSendInterface* transport, - BitrateAllocatorInterface* bitrate_allocator, - SendDelayStats* send_delay_stats, - VideoStreamEncoderInterface* video_stream_encoder, - RtcEventLog* event_log, - const VideoSendStream::Config* config, - int initial_encoder_max_bitrate, - double initial_encoder_bitrate_priority, - std::map suspended_ssrcs, - std::map suspended_payload_states, - VideoEncoderConfig::ContentType content_type, - std::unique_ptr fec_controller); + VideoSendStreamImpl(Clock* clock, + SendStatisticsProxy* stats_proxy, + rtc::TaskQueue* rtp_transport_queue, + RtpTransportControllerSendInterface* transport, + BitrateAllocatorInterface* bitrate_allocator, + VideoStreamEncoderInterface* video_stream_encoder, + const VideoSendStream::Config* config, + int initial_encoder_max_bitrate, + double initial_encoder_bitrate_priority, + VideoEncoderConfig::ContentType content_type, + RtpVideoSenderInterface* rtp_video_sender); ~VideoSendStreamImpl() override; - // RegisterProcessThread register |module_process_thread| with those objects - // that use it. Registration has to happen on the thread were - // |module_process_thread| was created (libjingle's worker thread). - // TODO(perkj): Replace the use of |module_process_thread| with a TaskQueue, - // maybe |worker_queue|. - void RegisterProcessThread(ProcessThread* module_process_thread); - void DeRegisterProcessThread(); - void DeliverRtcp(const uint8_t* packet, size_t length); void UpdateActiveSimulcastLayers(const std::vector active_layers); void Start(); @@ -106,7 +86,9 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, std::map GetRtpPayloadStates() const; - absl::optional configured_pacing_factor_; + const absl::optional& configured_pacing_factor() const { + return configured_pacing_factor_; + } private: // Implements BitrateAllocatorObserver. @@ -138,14 +120,16 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, void StartupVideoSendStream(); // Removes the bitrate observer, stops monitoring and notifies the video // encoder of the bitrate update. 
- void StopVideoSendStream() RTC_RUN_ON(worker_queue_); + void StopVideoSendStream() RTC_RUN_ON(rtp_transport_queue_); void ConfigureProtection(); void ConfigureSsrcs(); void SignalEncoderTimedOut(); void SignalEncoderActive(); MediaStreamAllocationConfig GetAllocationConfig() const - RTC_RUN_ON(worker_queue_); + RTC_RUN_ON(rtp_transport_queue_); + + RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_; Clock* const clock_; const bool has_alr_probing_; const PacingConfig pacing_config_; @@ -153,40 +137,31 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, SendStatisticsProxy* const stats_proxy_; const VideoSendStream::Config* const config_; - rtc::TaskQueue* const worker_queue_; + rtc::TaskQueue* const rtp_transport_queue_; RepeatingTaskHandle check_encoder_activity_task_ - RTC_GUARDED_BY(worker_queue_); + RTC_GUARDED_BY(rtp_transport_queue_); std::atomic_bool activity_; - bool timed_out_ RTC_GUARDED_BY(worker_queue_); + bool timed_out_ RTC_GUARDED_BY(rtp_transport_queue_); RtpTransportControllerSendInterface* const transport_; BitrateAllocatorInterface* const bitrate_allocator_; - Mutex ivf_writers_mutex_; - bool disable_padding_; int max_padding_bitrate_; int encoder_min_bitrate_bps_; uint32_t encoder_max_bitrate_bps_; uint32_t encoder_target_rate_bps_; double encoder_bitrate_priority_; - bool has_packet_feedback_; VideoStreamEncoderInterface* const video_stream_encoder_; - EncoderRtcpFeedback encoder_feedback_; RtcpBandwidthObserver* const bandwidth_observer_; RtpVideoSenderInterface* const rtp_video_sender_; - // |weak_ptr_| to our self. This is used since we can not call - // |weak_ptr_factory_.GetWeakPtr| from multiple sequences but it is ok to copy - // an existing WeakPtr. - rtc::WeakPtr weak_ptr_; - // |weak_ptr_factory_| must be declared last to make sure all WeakPtr's are - // invalidated before any other members are destroyed. - rtc::WeakPtrFactory weak_ptr_factory_; + rtc::scoped_refptr transport_queue_safety_ = + PendingTaskSafetyFlag::CreateDetached(); // Context for the most recent and last sent video bitrate allocation. Used to // throttle sending of similar bitrate allocations. 
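With the weak-pointer machinery gone, correctness now rests on the task-queue annotations shown above: members tagged RTC_GUARDED_BY(rtp_transport_queue_) and methods tagged RTC_RUN_ON(rtp_transport_queue_) are statically checked to be used only from that queue. A small hypothetical class showing the same annotation style:

#include "rtc_base/task_queue.h"
#include "rtc_base/thread_annotations.h"

// Illustrative only: the analyzer rejects touching `target_rate_bps_` from a
// method that is not annotated (or runtime-checked) to run on `queue_`.
class QueueOwnedState {
 public:
  explicit QueueOwnedState(rtc::TaskQueue* queue) : queue_(queue) {}

  void SetRate(int bitrate_bps) RTC_RUN_ON(queue_) {
    target_rate_bps_ = bitrate_bps;
  }

 private:
  rtc::TaskQueue* const queue_;
  int target_rate_bps_ RTC_GUARDED_BY(queue_) = 0;
};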
@@ -196,7 +171,8 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, int64_t last_send_time_ms; }; absl::optional video_bitrate_allocation_context_ - RTC_GUARDED_BY(worker_queue_); + RTC_GUARDED_BY(rtp_transport_queue_); + const absl::optional configured_pacing_factor_; }; } // namespace internal } // namespace webrtc diff --git a/video/video_send_stream_impl_unittest.cc b/video/video_send_stream_impl_unittest.cc index ee303b4eac..30a4aacd92 100644 --- a/video/video_send_stream_impl_unittest.cc +++ b/video/video_send_stream_impl_unittest.cc @@ -31,6 +31,7 @@ #include "test/mock_transport.h" #include "video/call_stats.h" #include "video/test/mock_video_stream_encoder.h" +#include "video/video_send_stream.h" namespace webrtc { @@ -61,8 +62,6 @@ std::string GetAlrProbingExperimentString() { } class MockRtpVideoSender : public RtpVideoSenderInterface { public: - MOCK_METHOD(void, RegisterProcessThread, (ProcessThread*), (override)); - MOCK_METHOD(void, DeRegisterProcessThread, (), (override)); MOCK_METHOD(void, SetActive, (bool), (override)); MOCK_METHOD(void, SetActiveModules, (const std::vector), (override)); MOCK_METHOD(bool, IsActive, (), (override)); @@ -145,17 +144,24 @@ class VideoSendStreamImplTest : public ::testing::Test { int initial_encoder_max_bitrate, double initial_encoder_bitrate_priority, VideoEncoderConfig::ContentType content_type) { + RTC_DCHECK(!test_queue_.IsCurrent()); + EXPECT_CALL(bitrate_allocator_, GetStartBitrate(_)) .WillOnce(Return(123000)); + std::map suspended_ssrcs; std::map suspended_payload_states; - return std::make_unique( - &clock_, &stats_proxy_, &test_queue_, &call_stats_, - &transport_controller_, &bitrate_allocator_, &send_delay_stats_, - &video_stream_encoder_, &event_log_, &config_, + auto ret = std::make_unique( + &clock_, &stats_proxy_, &test_queue_, &transport_controller_, + &bitrate_allocator_, &video_stream_encoder_, &config_, initial_encoder_max_bitrate, initial_encoder_bitrate_priority, - suspended_ssrcs, suspended_payload_states, content_type, - std::make_unique(&clock_)); + content_type, &rtp_video_sender_); + + // The call to GetStartBitrate() executes asynchronously on the tq. 
+ test_queue_.WaitForPreviouslyPostedTasks(); + testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); + + return ret; } protected: @@ -179,22 +185,22 @@ class VideoSendStreamImplTest : public ::testing::Test { }; TEST_F(VideoSendStreamImplTest, RegistersAsBitrateObserverOnStart) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) + .WillOnce(Invoke( + [&](BitrateAllocatorObserver*, MediaStreamAllocationConfig config) { + EXPECT_EQ(config.min_bitrate_bps, 0u); + EXPECT_EQ(config.max_bitrate_bps, kDefaultInitialBitrateBps); + EXPECT_EQ(config.pad_up_bitrate_bps, 0u); + EXPECT_EQ(config.enforce_min_bitrate, !kSuspend); + EXPECT_EQ(config.bitrate_priority, kDefaultBitratePriority); + })); test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); - EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) - .WillOnce(Invoke([&](BitrateAllocatorObserver*, - MediaStreamAllocationConfig config) { - EXPECT_EQ(config.min_bitrate_bps, 0u); - EXPECT_EQ(config.max_bitrate_bps, kDefaultInitialBitrateBps); - EXPECT_EQ(config.pad_up_bitrate_bps, 0u); - EXPECT_EQ(config.enforce_min_bitrate, !kSuspend); - EXPECT_EQ(config.bitrate_priority, kDefaultBitratePriority); - })); + [&] { vss_impl->Start(); EXPECT_CALL(bitrate_allocator_, RemoveObserver(vss_impl.get())) .Times(1); @@ -204,15 +210,16 @@ TEST_F(VideoSendStreamImplTest, RegistersAsBitrateObserverOnStart) { } TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChange) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + 1); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); + test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); // QVGA + VGA configuration matching defaults in @@ -269,16 +276,16 @@ TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChange) { } TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChangeWithAlr) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + 1); + config_.periodic_alr_bandwidth_probing = true; + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - config_.periodic_alr_bandwidth_probing = true; - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - 
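Because CreateVideoSendStreamImpl() now runs on the test thread, the helper has to let the asynchronous GetStartBitrate() post drain from the transport queue before clearing the mock expectations. A minimal sketch of that synchronization step, assuming test_queue_ is a TaskQueueForTest as these tests appear to use:

#include "rtc_base/checks.h"
#include "rtc_base/task_queue_for_test.h"

// Sketch only: the posted closure stands in for the asynchronous start-bitrate
// query; WaitForPreviouslyPostedTasks() blocks the calling (test) thread until
// everything already queued has executed.
void DrainThenVerify(webrtc::TaskQueueForTest& queue) {
  bool initialized = false;
  queue.PostTask([&initialized] { initialized = true; });
  queue.WaitForPreviouslyPostedTasks();
  RTC_CHECK(initialized);
}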
VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); // Simulcast screenshare. @@ -341,11 +348,12 @@ TEST_F(VideoSendStreamImplTest, test::ScopedFieldTrials hysteresis_experiment( "WebRTC-VideoRateControl/video_hysteresis:1.25/"); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); + test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); // 2-layer video simulcast. @@ -401,17 +409,17 @@ TEST_F(VideoSendStreamImplTest, TEST_F(VideoSendStreamImplTest, SetsScreensharePacingFactorWithFeedback) { test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString()); + constexpr int kId = 1; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + kId); + EXPECT_CALL(transport_controller_, + SetPacingFactor(kAlrProbingExperimentPaceMultiplier)) + .Times(1); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - constexpr int kId = 1; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, kId); - EXPECT_CALL(transport_controller_, - SetPacingFactor(kAlrProbingExperimentPaceMultiplier)) - .Times(1); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); vss_impl->Stop(); }, @@ -420,12 +428,12 @@ TEST_F(VideoSendStreamImplTest, SetsScreensharePacingFactorWithFeedback) { TEST_F(VideoSendStreamImplTest, DoesNotSetPacingFactorWithoutFeedback) { test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString()); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { + [&] { EXPECT_CALL(transport_controller_, SetPacingFactor(_)).Times(0); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); vss_impl->Start(); vss_impl->Stop(); }, @@ -433,12 +441,12 @@ TEST_F(VideoSendStreamImplTest, DoesNotSetPacingFactorWithoutFeedback) { } TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { + [&] { EXPECT_CALL(transport_controller_, SetPacingFactor(_)).Times(0); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); VideoStreamEncoderInterface::EncoderSink* const sink = static_cast( vss_impl.get()); @@ -483,11 +491,11 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { } TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); // Unpause encoder, to allows allocations 
to be passed through. const uint32_t kBitrateBps = 100000; @@ -529,8 +537,8 @@ TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { .Times(1); sink->OnBitrateAllocationUpdated(updated_alloc); - // This is now a decrease compared to last forward allocation, forward - // immediately. + // This is now a decrease compared to last forward allocation, + // forward immediately. updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps - 1); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc)) @@ -543,11 +551,11 @@ TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { } TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); // Unpause encoder, to allows allocations to be passed through. const uint32_t kBitrateBps = 100000; @@ -572,8 +580,8 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { .Times(1); sink->OnBitrateAllocationUpdated(alloc); - // Move some bitrate from one layer to a new one, but keep sum the same. - // Since layout has changed, immediately trigger forward. + // Move some bitrate from one layer to a new one, but keep sum the + // same. Since layout has changed, immediately trigger forward. VideoBitrateAllocation updated_alloc = alloc; updated_alloc.SetBitrate(2, 0, 10000); updated_alloc.SetBitrate(1, 1, alloc.GetBitrate(1, 1) - 10000); @@ -589,11 +597,11 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { } TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); const uint32_t kBitrateBps = 100000; // Unpause encoder, to allows allocations to be passed through. @@ -639,7 +647,8 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { clock_.AdvanceTimeMicroseconds(kMaxVbaThrottleTimeMs * 1000); { - // Sending similar allocation again after timeout, should forward. + // Sending similar allocation again after timeout, should + // forward. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); sink->OnBitrateAllocationUpdated(alloc); @@ -661,8 +670,8 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { } { - // Advance time and send encoded image, this should wake up and send - // cached bitrate allocation. + // Advance time and send encoded image, this should wake up and + // send cached bitrate allocation. clock_.AdvanceTimeMicroseconds(kMaxVbaThrottleTimeMs * 1000); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); @@ -671,8 +680,8 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { } { - // Advance time and send encoded image, there should be no cached - // allocation to send. + // Advance time and send encoded image, there should be no + // cached allocation to send. 
clock_.AdvanceTimeMicroseconds(kMaxVbaThrottleTimeMs * 1000); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); @@ -686,15 +695,15 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { } TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + 1); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); VideoStream qvga_stream; @@ -733,8 +742,8 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { static_cast(vss_impl.get()) ->OnBitrateUpdated(update); - // Test allocation where the link allocation is larger than the target, - // meaning we have some headroom on the link. + // Test allocation where the link allocation is larger than the + // target, meaning we have some headroom on the link. const DataRate qvga_max_bitrate = DataRate::BitsPerSec(qvga_stream.max_bitrate_bps); const DataRate headroom = DataRate::BitsPerSec(50000); @@ -750,8 +759,8 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { static_cast(vss_impl.get()) ->OnBitrateUpdated(update); - // Add protection bitrate to the mix, this should be subtracted from the - // headroom. + // Add protection bitrate to the mix, this should be subtracted + // from the headroom. const uint32_t protection_bitrate_bps = 10000; EXPECT_CALL(rtp_video_sender_, GetProtectionBitrateBps()) .WillOnce(Return(protection_bitrate_bps)); @@ -791,14 +800,11 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { int padding_bitrate = 0; - std::unique_ptr vss_impl; - + std::unique_ptr vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( [&] { - vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); - // Capture padding bitrate for testing. 
EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) .WillRepeatedly(Invoke([&](BitrateAllocatorObserver*, @@ -871,7 +877,6 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { EXPECT_EQ(0, padding_bitrate); testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); vss_impl->Stop(); - vss_impl.reset(); done.Set(); }, 5000); @@ -881,12 +886,11 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { } TEST_F(VideoSendStreamImplTest, KeepAliveOnDroppedFrame) { - std::unique_ptr vss_impl; + std::unique_ptr vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( [&] { - vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); vss_impl->Start(); const uint32_t kBitrateBps = 100000; EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps()) @@ -909,7 +913,6 @@ TEST_F(VideoSendStreamImplTest, KeepAliveOnDroppedFrame) { [&] { testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); vss_impl->Stop(); - vss_impl.reset(); done.Set(); }, 2000); @@ -933,18 +936,18 @@ TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvc) { } for (const TestConfig& test_config : test_variants) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back( + RtpExtension::kTransportSequenceNumberUri, 1); + config_.periodic_alr_bandwidth_probing = test_config.alr; + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + test_config.screenshare + ? VideoEncoderConfig::ContentType::kScreen + : VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( - [this, test_config] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - config_.periodic_alr_bandwidth_probing = test_config.alr; - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - test_config.screenshare - ? VideoEncoderConfig::ContentType::kScreen - : VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); // Svc diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc index 78265cc7dc..42963cb8ee 100644 --- a/video/video_send_stream_tests.cc +++ b/video/video_send_stream_tests.cc @@ -30,6 +30,7 @@ #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" @@ -57,7 +58,6 @@ #include "test/gtest.h" #include "test/null_transport.h" #include "test/rtcp_packet_parser.h" -#include "test/rtp_header_parser.h" #include "test/testsupport/perf_test.h" #include "test/video_encoder_proxy_factory.h" #include "video/send_statistics_proxy.h" @@ -90,6 +90,9 @@ enum : int { // The first valid value is 1. kVideoTimingExtensionId, }; +// Readability convenience enum for `WaitBitrateChanged()`. 
+enum class WaitUntil : bool { kZero = false, kNonZero = true }; + constexpr int64_t kRtcpIntervalMs = 1000; enum VideoFormat { @@ -948,10 +951,10 @@ void VideoSendStreamTest::TestNackRetransmission( non_padding_sequence_numbers_.end() - kNackedPacketsAtOnceCount, non_padding_sequence_numbers_.end()); - RtpRtcpInterface::Configuration config; + RTCPSender::Configuration config; config.clock = Clock::GetRealTimeClock(); config.outgoing_transport = transport_adapter_.get(); - config.rtcp_report_interval_ms = kRtcpIntervalMs; + config.rtcp_report_interval = TimeDelta::Millis(kRtcpIntervalMs); config.local_media_ssrc = kReceiverLocalVideoSsrc; RTCPSender rtcp_sender(config); @@ -1164,11 +1167,11 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format, kVideoSendSsrcs[0], rtp_packet.SequenceNumber(), packets_lost_, // Cumulative lost. loss_ratio); // Loss percent. - RtpRtcpInterface::Configuration config; + RTCPSender::Configuration config; config.clock = Clock::GetRealTimeClock(); config.receive_statistics = &lossy_receive_stats; config.outgoing_transport = transport_adapter_.get(); - config.rtcp_report_interval_ms = kRtcpIntervalMs; + config.rtcp_report_interval = TimeDelta::Millis(kRtcpIntervalMs); config.local_media_ssrc = kVideoSendSsrcs[0]; RTCPSender rtcp_sender(config); @@ -1467,7 +1470,7 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - if (RtpHeaderParser::IsRtcp(packet, length)) + if (IsRtcpPacket(rtc::MakeArrayView(packet, length))) return DROP_PACKET; RtpPacket rtp_packet; @@ -1486,7 +1489,6 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) { "bps", false); if (total_bitrate_bps > kHighBitrateBps) { rtp_rtcp_->SetRemb(kRembBitrateBps, {rtp_packet.Ssrc()}); - rtp_rtcp_->Process(); bitrate_capped_ = true; } else if (bitrate_capped_ && total_bitrate_bps < kRembRespectedBitrateBps) { @@ -2154,7 +2156,7 @@ class StartStopBitrateObserver : public test::FakeEncoder { return encoder_init_.Wait(VideoSendStreamTest::kDefaultTimeoutMs); } - bool WaitBitrateChanged(bool non_zero) { + bool WaitBitrateChanged(WaitUntil until) { do { absl::optional bitrate_kbps; { @@ -2164,8 +2166,8 @@ class StartStopBitrateObserver : public test::FakeEncoder { if (!bitrate_kbps) continue; - if ((non_zero && *bitrate_kbps > 0) || - (!non_zero && *bitrate_kbps == 0)) { + if ((until == WaitUntil::kNonZero && *bitrate_kbps > 0) || + (until == WaitUntil::kZero && *bitrate_kbps == 0)) { return true; } } while (bitrate_changed_.Wait(VideoSendStreamTest::kDefaultTimeoutMs)); @@ -2212,15 +2214,15 @@ TEST_F(VideoSendStreamTest, VideoSendStreamStopSetEncoderRateToZero) { SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->Start(); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->Stop(); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(false)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->Start(); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { DestroyStreams(); @@ -2252,6 +2254,8 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) { CreateVideoStreams(); + EXPECT_FALSE(GetVideoSendStream()->started()); + // Inject a frame, to 
force encoder creation. GetVideoSendStream()->Start(); GetVideoSendStream()->SetSource(&forwarder, @@ -2265,8 +2269,9 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) { // which in turn updates the VideoEncoder's bitrate. SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->UpdateActiveSimulcastLayers({true, true}); + EXPECT_TRUE(GetVideoSendStream()->started()); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); GetVideoEncoderConfig()->simulcast_layers[0].active = true; GetVideoEncoderConfig()->simulcast_layers[1].active = false; @@ -2274,15 +2279,40 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) { GetVideoSendStream()->ReconfigureVideoEncoder( GetVideoEncoderConfig()->Copy()); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); // Turning off both simulcast layers should trigger a bitrate change of 0. GetVideoEncoderConfig()->simulcast_layers[0].active = false; GetVideoEncoderConfig()->simulcast_layers[1].active = false; SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->UpdateActiveSimulcastLayers({false, false}); + EXPECT_FALSE(GetVideoSendStream()->started()); + }); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kZero)); + + // Re-activating a layer should resume sending and trigger a bitrate change. + GetVideoEncoderConfig()->simulcast_layers[0].active = true; + SendTask(RTC_FROM_HERE, task_queue(), [this]() { + GetVideoSendStream()->UpdateActiveSimulcastLayers({true, false}); + EXPECT_TRUE(GetVideoSendStream()->started()); + }); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); + + // Stop the stream and make sure the bit rate goes to zero again. + SendTask(RTC_FROM_HERE, task_queue(), [this]() { + GetVideoSendStream()->Stop(); + EXPECT_FALSE(GetVideoSendStream()->started()); + }); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kZero)); + + // One last test to verify that after `Stop()` we can still implicitly start + // the stream if needed. This is what will happen when a send stream gets + // re-used. See crbug.com/1241213. + SendTask(RTC_FROM_HERE, task_queue(), [this]() { + GetVideoSendStream()->UpdateActiveSimulcastLayers({true, true}); + EXPECT_TRUE(GetVideoSendStream()->started()); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(false)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { DestroyStreams(); diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc index 191918a591..107110987b 100644 --- a/video/video_stream_encoder.cc +++ b/video/video_stream_encoder.cc @@ -660,6 +660,7 @@ VideoStreamEncoder::VideoStreamEncoder( encoder_queue_(task_queue_factory->CreateTaskQueue( "EncoderQueue", TaskQueueFactory::Priority::NORMAL)) { + TRACE_EVENT0("webrtc", "VideoStreamEncoder::VideoStreamEncoder"); RTC_DCHECK(main_queue_); RTC_DCHECK(encoder_stats_observer); RTC_DCHECK_GE(number_of_cores, 1); @@ -742,11 +743,16 @@ void VideoStreamEncoder::SetFecControllerOverride( void VideoStreamEncoder::AddAdaptationResource( rtc::scoped_refptr resource) { RTC_DCHECK_RUN_ON(main_queue_); + TRACE_EVENT0("webrtc", "VideoStreamEncoder::AddAdaptationResource"); // Map any externally added resources as kCpu for the sake of stats reporting. // TODO(hbos): Make the manager map any unknown resources to kCpu and get rid // of this MapResourceToReason() call. 
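The tracing hunk that follows wraps the PostTask() in an async trace-event pair so the time a task spends waiting on the encoder queue shows up as one span in a trace viewer. The idiom, sketched with a hypothetical Widget class (the macros come from rtc_base/trace_event.h):

#include "rtc_base/task_queue.h"
#include "rtc_base/trace_event.h"

class Widget {
 public:
  explicit Widget(rtc::TaskQueue* queue) : queue_(queue) {}

  void DoAsyncWork() {
    // BEGIN on the posting thread, END inside the posted task; both use the
    // same name and id (`this`), so the tracer pairs them into a single span
    // that covers the queueing latency.
    TRACE_EVENT_ASYNC_BEGIN0("webrtc", "Widget::DoAsyncWork(latency)", this);
    queue_->PostTask([this] {
      TRACE_EVENT_ASYNC_END0("webrtc", "Widget::DoAsyncWork(latency)", this);
      // ... the actual work happens here ...
    });
  }

 private:
  rtc::TaskQueue* const queue_;
};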
+ TRACE_EVENT_ASYNC_BEGIN0( + "webrtc", "VideoStreamEncoder::AddAdaptationResource(latency)", this); rtc::Event map_resource_event; encoder_queue_.PostTask([this, resource, &map_resource_event] { + TRACE_EVENT_ASYNC_END0( + "webrtc", "VideoStreamEncoder::AddAdaptationResource(latency)", this); RTC_DCHECK_RUN_ON(&encoder_queue_); additional_resources_.push_back(resource); stream_resource_manager_.AddResource(resource, VideoAdaptationReason::kCpu); @@ -1610,6 +1616,12 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, if (encoder_failed_) return; + // It's possible that EncodeVideoFrame can be called after we've completed + // a Stop() operation. Check if the encoder_ is set before continuing. + // See: bugs.webrtc.org/12857 + if (!encoder_) + return; + TraceFrameDropEnd(); // Encoder metadata needs to be updated before encode complete callback. @@ -1768,6 +1780,9 @@ void VideoStreamEncoder::SendKeyFrame() { TRACE_EVENT0("webrtc", "OnKeyFrameRequest"); RTC_DCHECK(!next_frame_types_.empty()); + if (!encoder_) + return; // Shutting down. + // TODO(webrtc:10615): Map keyframe request to spatial layer. std::fill(next_frame_types_.begin(), next_frame_types_.end(), VideoFrameType::kVideoFrameKey); diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc index d63e1bc902..cbfd93e9e2 100644 --- a/video/video_stream_encoder_unittest.cc +++ b/video/video_stream_encoder_unittest.cc @@ -42,6 +42,7 @@ #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" +#include "modules/video_coding/codecs/vp9/svc_config.h" #include "modules/video_coding/utility/quality_scaler.h" #include "modules/video_coding/utility/simulcast_rate_allocator.h" #include "rtc_base/event.h" @@ -545,6 +546,15 @@ class AdaptingFrameForwarder : public test::FrameForwarder { } } + void OnOutputFormatRequest(int width, int height) { + absl::optional> target_aspect_ratio = + std::make_pair(width, height); + absl::optional max_pixel_count = width * height; + absl::optional max_fps; + adapter_.OnOutputFormatRequest(target_aspect_ratio, max_pixel_count, + max_fps); + } + void AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) override { MutexLock lock(&mutex_); @@ -746,9 +756,10 @@ class VideoStreamEncoderTest : public ::testing::Test { } VideoFrame CreateFrame(int64_t ntp_time_ms, int width, int height) const { + auto buffer = rtc::make_ref_counted(nullptr, width, height); + I420Buffer::SetBlack(buffer.get()); return VideoFrame::Builder() - .set_video_frame_buffer( - rtc::make_ref_counted(nullptr, width, height)) + .set_video_frame_buffer(std::move(buffer)) .set_ntp_time_ms(ntp_time_ms) .set_timestamp_ms(ntp_time_ms) .set_rotation(kVideoRotation_0) @@ -1305,13 +1316,15 @@ class VideoStreamEncoderTest : public ::testing::Test { uint32_t timestamp = encoded_image.Timestamp(); if (last_timestamp_ != timestamp) { num_received_layers_ = 1; + last_width_ = encoded_image._encodedWidth; + last_height_ = encoded_image._encodedHeight; } else { ++num_received_layers_; + last_width_ = std::max(encoded_image._encodedWidth, last_width_); + last_height_ = std::max(encoded_image._encodedHeight, last_height_); } last_timestamp_ = timestamp; last_capture_time_ms_ = encoded_image.capture_time_ms_; - last_width_ = encoded_image._encodedWidth; - last_height_ = encoded_image._encodedHeight; last_rotation_ = encoded_image.rotation_; if 
(num_received_layers_ == num_expected_layers_) { encoded_frame_event_.Set(); @@ -3339,6 +3352,257 @@ TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) { video_stream_encoder_->Stop(); } +TEST_F(VideoStreamEncoderTest, + FpsCountReturnsToZeroForFewerAdaptationsUpThanDown) { + const int kWidth = 640; + const int kHeight = 360; + const int64_t kFrameIntervalMs = 150; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable BALANCED preference, no initial limitation. + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); + video_stream_encoder_->SetSource(&source, + webrtc::DegradationPreference::BALANCED); + + int64_t timestamp_ms = kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(kWidth, kHeight); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (640x360@15fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), + FpsMatchesResolutionMax(Lt(kDefaultFramerate))); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests 270p, expect reduced resolution (480x270@15fps). + source.OnOutputFormatRequest(480, 270); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(480, 270); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (480x270@10fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests QVGA, expect reduced resolution (320x180@10fps). + source.OnOutputFormatRequest(320, 180); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(320, 180); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (320x180@7fps). 
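The source.OnOutputFormatRequest() calls used in this test (via the helper added to AdaptingFrameForwarder above) translate a plain width/height request into the cricket::VideoAdapter API: an aspect-ratio hint plus a pixel-count cap, with the frame rate left unrestricted. Roughly, as a standalone sketch mirroring that helper (adapter type and header as in media/base/video_adapter.h):

#include <utility>

#include "absl/types/optional.h"
#include "media/base/video_adapter.h"

void RequestOutputFormat(cricket::VideoAdapter& adapter, int width, int height) {
  absl::optional<std::pair<int, int>> target_aspect_ratio =
      std::make_pair(width, height);
  absl::optional<int> max_pixel_count = width * height;
  absl::optional<int> max_fps;  // Unset: do not restrict the frame rate.
  adapter.OnOutputFormatRequest(target_aspect_ratio, max_pixel_count, max_fps);
}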
+ video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests VGA, expect increased resolution (640x360@7fps). + source.OnOutputFormatRequest(640, 360); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@(max-2)fps). + video_stream_encoder_->TriggerQualityHigh(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@(max-1)fps). + video_stream_encoder_->TriggerQualityHigh(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@maxfps). + video_stream_encoder_->TriggerQualityHigh(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + FpsCountReturnsToZeroForFewerAdaptationsUpThanDownWithTwoResources) { + const int kWidth = 1280; + const int kHeight = 720; + const int64_t kFrameIntervalMs = 150; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable BALANCED preference, no initial limitation. 
+ AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); + video_stream_encoder_->SetSource(&source, + webrtc::DegradationPreference::BALANCED); + + int64_t timestamp_ms = kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(kWidth, kHeight); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect scaled down resolution (960x540@maxfps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect scaled down resolution (640x360@maxfps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (640x360@15fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests QVGA, expect reduced resolution (320x180@15fps). + source.OnOutputFormatRequest(320, 180); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(320, 180); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt down, expect reduced fps (320x180@7fps). + video_stream_encoder_->TriggerCpuOveruse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Source requests HD, expect increased resolution (640x360@7fps). 
+ source.OnOutputFormatRequest(1280, 720); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@(max-1)fps). + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@maxfps). + video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased resolution (960x570@maxfps). + video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased resolution (1280x720@maxfps). 
+ video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + video_stream_encoder_->Stop(); +} + TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_MaintainFramerateMode) { const int kWidth = 1280; @@ -6348,7 +6612,7 @@ TEST_F(VideoStreamEncoderTest, EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger adapt up, expect expect increased fps (320x180@10fps). + // Trigger adapt up, expect increased fps (320x180@10fps). video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); @@ -7831,10 +8095,13 @@ TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSimulcast) { } TEST_F(VideoStreamEncoderTest, QpPresent_QpKept) { - // Enable encoder source to force encoder reconfig. - encoder_factory_.SetHasInternalSource(true); ResetEncoder("VP8", 1, 1, 1, false); + // Force encoder reconfig. + video_source_.IncomingCapturedFrame( + CreateFrame(1, codec_width_, codec_height_)); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + // Set QP on encoded frame and pass the frame to encode complete callback. // Since QP is present QP parsing won't be triggered and the original value // should be kept. @@ -7851,10 +8118,13 @@ TEST_F(VideoStreamEncoderTest, QpPresent_QpKept) { } TEST_F(VideoStreamEncoderTest, QpAbsent_QpParsed) { - // Enable encoder source to force encoder reconfig. - encoder_factory_.SetHasInternalSource(true); ResetEncoder("VP8", 1, 1, 1, false); + // Force encoder reconfig. + video_source_.IncomingCapturedFrame( + CreateFrame(1, codec_width_, codec_height_)); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + // Pass an encoded frame without QP to encode complete callback. QP should be // parsed and set. EncodedImage encoded_image; @@ -7873,10 +8143,13 @@ TEST_F(VideoStreamEncoderTest, QpAbsentParsingDisabled_QpAbsent) { webrtc::test::ScopedFieldTrials field_trials( "WebRTC-QpParsingKillSwitch/Enabled/"); - // Enable encoder source to force encoder reconfig. - encoder_factory_.SetHasInternalSource(true); ResetEncoder("VP8", 1, 1, 1, false); + // Force encoder reconfig. + video_source_.IncomingCapturedFrame( + CreateFrame(1, codec_width_, codec_height_)); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EncodedImage encoded_image; encoded_image.qp_ = -1; encoded_image.SetEncodedData(EncodedImageBuffer::Create( @@ -8066,6 +8339,133 @@ TEST_P(VideoStreamEncoderWithRealEncoderTest, EncoderMapsNativeNV12) { } } +TEST_P(VideoStreamEncoderWithRealEncoderTest, HandlesLayerToggling) { + if (codec_type_ == kVideoCodecMultiplex) { + // Multiplex codec here uses wrapped mock codecs, ignore for this test. 
+ return; + } + + const size_t kNumSpatialLayers = 3u; + const float kDownscaleFactors[] = {4.0, 2.0, 1.0}; + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + const rtc::VideoSinkWants::FrameSize kLayer0Size( + kFrameWidth / kDownscaleFactors[0], kFrameHeight / kDownscaleFactors[0]); + const rtc::VideoSinkWants::FrameSize kLayer1Size( + kFrameWidth / kDownscaleFactors[1], kFrameHeight / kDownscaleFactors[1]); + const rtc::VideoSinkWants::FrameSize kLayer2Size( + kFrameWidth / kDownscaleFactors[2], kFrameHeight / kDownscaleFactors[2]); + + VideoEncoderConfig config; + if (codec_type_ == VideoCodecType::kVideoCodecVP9) { + test::FillEncoderConfiguration(codec_type_, 1, &config); + config.max_bitrate_bps = kSimulcastTargetBitrateBps; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = kNumSpatialLayers; + vp9_settings.numberOfTemporalLayers = 3; + vp9_settings.automaticResizeOn = false; + config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + config.spatial_layers = GetSvcConfig(kFrameWidth, kFrameHeight, + /*fps=*/30.0, + /*first_active_layer=*/0, + /*num_spatial_layers=*/3, + /*num_temporal_layers=*/3, + /*is_screenshare=*/false); + } else if (codec_type_ == VideoCodecType::kVideoCodecAV1) { + test::FillEncoderConfiguration(codec_type_, 1, &config); + config.max_bitrate_bps = kSimulcastTargetBitrateBps; + config.spatial_layers = GetSvcConfig(kFrameWidth, kFrameHeight, + /*fps=*/30.0, + /*first_active_layer=*/0, + /*num_spatial_layers=*/3, + /*num_temporal_layers=*/3, + /*is_screenshare=*/false); + config.simulcast_layers[0].scalability_mode = "L3T3_KEY"; + } else { + // Simulcast for VP8/H264. + test::FillEncoderConfiguration(codec_type_, kNumSpatialLayers, &config); + for (size_t i = 0; i < kNumSpatialLayers; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = + kDownscaleFactors[i]; + config.simulcast_layers[i].active = true; + } + if (codec_type_ == VideoCodecType::kVideoCodecH264) { + // Turn off frame dropping to prevent flakiness. + VideoCodecH264 h264_settings = VideoEncoder::GetDefaultH264Settings(); + h264_settings.frameDroppingOn = false; + config.encoder_specific_settings = rtc::make_ref_counted< + VideoEncoderConfig::H264EncoderSpecificSettings>(h264_settings); + } + } + + auto set_layer_active = [&](int layer_idx, bool active) { + if (codec_type_ == VideoCodecType::kVideoCodecVP9 || + codec_type_ == VideoCodecType::kVideoCodecAV1) { + config.spatial_layers[layer_idx].active = active; + } else { + config.simulcast_layers[layer_idx].active = active; + } + }; + + config.video_stream_factory = + rtc::make_ref_counted( + CodecTypeToPayloadString(codec_type_), /*max qp*/ 56, + /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); + + // Capture a frame with all layers active. + sink_.SetNumExpectedLayers(kNumSpatialLayers); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + int64_t timestamp_ms = kFrameIntervalMs; + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + + WaitForEncodedFrame(kLayer2Size.width, kLayer2Size.height); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // Capture a frame with one of the layers inactive. 
+  set_layer_active(2, false);
+  sink_.SetNumExpectedLayers(kNumSpatialLayers - 1);
+  video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+  timestamp_ms += kFrameIntervalMs;
+  video_source_.IncomingCapturedFrame(
+      CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+  WaitForEncodedFrame(kLayer1Size.width, kLayer1Size.height);
+
+  // New target bitrates signaled based on lower resolution.
+  DataRate kTwoLayerBitrate = DataRate::KilobitsPerSec(833);
+  video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+      kTwoLayerBitrate, kTwoLayerBitrate, kTwoLayerBitrate, 0, 0, 0);
+  video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  // Re-enable the top layer.
+  set_layer_active(2, true);
+  sink_.SetNumExpectedLayers(kNumSpatialLayers);
+  video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+  video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  // Bitrate target adjusted back up to enable HD layer...
+  video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+      DataRate::KilobitsPerSec(1800), DataRate::KilobitsPerSec(1800),
+      DataRate::KilobitsPerSec(1800), 0, 0, 0);
+  video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  // ...then add a new frame.
+  timestamp_ms += kFrameIntervalMs;
+  video_source_.IncomingCapturedFrame(
+      CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+  WaitForEncodedFrame(kLayer2Size.width, kLayer2Size.height);
+  video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  video_stream_encoder_->Stop();
+}
+
 std::string TestParametersVideoCodecAndAllowI420ConversionToString(
     testing::TestParamInfo<std::pair<VideoCodecType, bool>> info) {
   VideoCodecType codec_type = std::get<0>(info.param);
diff --git a/webrtc.gni b/webrtc.gni
index 559078db34..c0ff14fe51 100644
--- a/webrtc.gni
+++ b/webrtc.gni
@@ -183,8 +183,9 @@ declare_args() {
     rtc_apprtcmobile_broadcast_extension = false
   }
 
-  # Determines whether Metal is available on iOS/macOS.
-  rtc_use_metal_rendering = is_mac || (is_ios && current_cpu == "arm64")
+  # Determines whether OpenGL is available on iOS/macOS.
+  rtc_ios_macos_use_opengl_rendering =
+      !(is_ios && target_environment == "catalyst")
 
   # When set to false, builtin audio encoder/decoder factories and all the
   # audio codecs they depend on will not be included in libwebrtc.{a|lib}
@@ -995,10 +996,16 @@ if (is_ios) {
         deps = [ ":create_bracket_include_headers_$this_target_name" ]
       }
 
+      if (target_environment == "catalyst") {
+        # Catalyst frameworks use the same layout as regular Mac frameworks.
+        headers_dir = "Versions/A/Headers"
+      } else {
+        headers_dir = "Headers"
+      }
       copy("copy_umbrella_header_$target_name") {
         sources = [ umbrella_header_path ]
         outputs =
-            [ "$root_out_dir/$output_name.framework/Headers/$output_name.h" ]
+            [ "$root_out_dir/$output_name.framework/$headers_dir/$output_name.h" ]
 
         deps = [ ":umbrella_header_$target_name" ]
       }
diff --git a/whitespace.txt b/whitespace.txt
index daecb0a09d..42d622a4cb 100644
--- a/whitespace.txt
+++ b/whitespace.txt
@@ -4,4 +4,3 @@ Try to write something funny. And please don't add trailing whitespace.
 Once upon a time there was an elephant in Stockholm.
 Everyone knew about it, but nobody dared say anything.
 In the end it didn't make a difference since everyone was working from home.
-
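The set_layer_active lambda in the HandlesLayerToggling test above hides a detail worth spelling out: SVC codecs (VP9/AV1) toggle layers through VideoEncoderConfig::spatial_layers, while VP8/H264 simulcast uses VideoEncoderConfig::simulcast_layers. The same helper as a free function, sketched here (includes are whatever the test file already pulls in for VideoEncoderConfig and VideoCodecType):

void SetLayerActive(webrtc::VideoEncoderConfig& config,
                    webrtc::VideoCodecType codec_type,
                    int layer_idx,
                    bool active) {
  if (codec_type == webrtc::kVideoCodecVP9 ||
      codec_type == webrtc::kVideoCodecAV1) {
    // SVC: one encoder instance, several spatial layers.
    config.spatial_layers[layer_idx].active = active;
  } else {
    // Simulcast: one encoded stream per layer.
    config.simulcast_layers[layer_idx].active = active;
  }
}

Called as SetLayerActive(config, codec_type_, 2, false), it does exactly what the test's lambda does before each ConfigureEncoder(config.Copy(), kMaxPayloadLength) call.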