author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-04-08 16:02:03 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-04-08 16:02:03 +0000
commit     34c40490440c7fcec1a4c944c1fbf18b1fee57a6 (patch)
tree       b207ce2b6cb08f3ba9b14ea81e060885101abb7a
parent     e7a9fb61a353807cb59c4ea9e0ff328e2ea18f8c (diff)
parent     ec976846efd4d2d8b1b00652b69e3c2f526980f3 (diff)
download   v4l2_codec2-android12-mainline-tzdata2-release.tar.gz
Change-Id: I640c9ccb6dfc3cdc336453090a7931a5dc5cb6c9
-rw-r--r--  Android.bp  44
-rw-r--r--  METADATA  3
-rw-r--r--  README.md  9
-rw-r--r--  accel/.clang-format  4
-rw-r--r--  accel/Android.bp  64
-rw-r--r--  accel/accelerated_video_decoder.h  68
-rw-r--r--  accel/bit_reader.cc  49
-rw-r--r--  accel/bit_reader.h  70
-rw-r--r--  accel/bit_reader_core.cc  191
-rw-r--r--  accel/bit_reader_core.h  125
-rw-r--r--  accel/bitstream_buffer.cc  28
-rw-r--r--  accel/bitstream_buffer.h  77
-rw-r--r--  accel/color_plane_layout.cc  31
-rw-r--r--  accel/color_plane_layout.h  41
-rw-r--r--  accel/fourcc.cc  283
-rw-r--r--  accel/fourcc.h  161
-rw-r--r--  accel/generic_v4l2_device.cc  352
-rw-r--r--  accel/generic_v4l2_device.h  113
-rw-r--r--  accel/h264_bit_reader.cc  123
-rw-r--r--  accel/h264_bit_reader.h  81
-rw-r--r--  accel/h264_decoder.cc  1459
-rw-r--r--  accel/h264_decoder.h  284
-rw-r--r--  accel/h264_dpb.cc  171
-rw-r--r--  accel/h264_dpb.h  181
-rw-r--r--  accel/h264_parser.cc  1612
-rw-r--r--  accel/h264_parser.h  563
-rw-r--r--  accel/macros.h  14
-rw-r--r--  accel/media_limits.h  91
-rw-r--r--  accel/native_pixmap_handle.cc  29
-rw-r--r--  accel/native_pixmap_handle.h  57
-rw-r--r--  accel/picture.cc  37
-rw-r--r--  accel/picture.h  81
-rw-r--r--  accel/ranges.cc  16
-rw-r--r--  accel/ranges.h  163
-rw-r--r--  accel/rect.h  148
-rw-r--r--  accel/shared_memory_region.cc  42
-rw-r--r--  accel/shared_memory_region.h  57
-rw-r--r--  accel/size.h  73
-rw-r--r--  accel/subsample_entry.h  32
-rw-r--r--  accel/v4l2_device.cc  1858
-rw-r--r--  accel/v4l2_device.h  587
-rw-r--r--  accel/v4l2_device_poller.cc  140
-rw-r--r--  accel/v4l2_device_poller.h  97
-rw-r--r--  accel/v4l2_video_decode_accelerator.cc  1922
-rw-r--r--  accel/v4l2_video_decode_accelerator.h  497
-rw-r--r--  accel/video_codecs.cc  80
-rw-r--r--  accel/video_codecs.h  99
-rw-r--r--  accel/video_decode_accelerator.cc  82
-rw-r--r--  accel/video_decode_accelerator.h  348
-rw-r--r--  accel/video_encode_accelerator.cc  46
-rw-r--r--  accel/video_encode_accelerator.h  87
-rw-r--r--  accel/video_frame.cc  821
-rw-r--r--  accel/video_frame.h  425
-rw-r--r--  accel/video_frame_layout.cc  179
-rw-r--r--  accel/video_frame_layout.h  160
-rw-r--r--  accel/video_frame_metadata.cc  49
-rw-r--r--  accel/video_frame_metadata.h  195
-rw-r--r--  accel/video_pixel_format.cc  134
-rw-r--r--  accel/video_pixel_format.h  95
-rw-r--r--  accel/vp8_bool_decoder.cc  209
-rw-r--r--  accel/vp8_bool_decoder.h  135
-rw-r--r--  accel/vp8_decoder.cc  197
-rw-r--r--  accel/vp8_decoder.h  114
-rw-r--r--  accel/vp8_parser.cc  877
-rw-r--r--  accel/vp8_parser.h  199
-rw-r--r--  accel/vp8_picture.cc  18
-rw-r--r--  accel/vp8_picture.h  35
-rw-r--r--  accel/vp9_bool_decoder.cc  165
-rw-r--r--  accel/vp9_bool_decoder.h  73
-rw-r--r--  accel/vp9_compressed_header_parser.cc  294
-rw-r--r--  accel/vp9_compressed_header_parser.h  52
-rw-r--r--  accel/vp9_decoder.cc  227
-rw-r--r--  accel/vp9_decoder.h  154
-rw-r--r--  accel/vp9_parser.cc  676
-rw-r--r--  accel/vp9_parser.h  448
-rw-r--r--  accel/vp9_picture.cc  18
-rw-r--r--  accel/vp9_picture.h  42
-rw-r--r--  accel/vp9_raw_bits_reader.cc  62
-rw-r--r--  accel/vp9_raw_bits_reader.h  67
-rw-r--r--  accel/vp9_uncompressed_header_parser.cc  1103
-rw-r--r--  accel/vp9_uncompressed_header_parser.h  49
-rw-r--r--  common/Android.bp  17
-rw-r--r--  common/Common.cpp  35
-rw-r--r--  common/EncodeHelpers.cpp  118
-rw-r--r--  common/FormatConverter.cpp  118
-rw-r--r--  common/Fourcc.cpp  280
-rw-r--r--  common/NalParser.cpp  217
-rw-r--r--  common/V4L2ComponentCommon.cpp  10
-rw-r--r--  common/V4L2Device.cpp  2010
-rw-r--r--  common/V4L2DevicePoller.cpp  130
-rw-r--r--  common/VideoPixelFormat.cpp  371
-rw-r--r--  common/VideoTypes.cpp  59
-rw-r--r--  common/include/v4l2_codec2/common/Common.h  39
-rw-r--r--  common/include/v4l2_codec2/common/EncodeHelpers.h  51
-rw-r--r--  common/include/v4l2_codec2/common/FormatConverter.h  20
-rw-r--r--  common/include/v4l2_codec2/common/Fourcc.h  157
-rw-r--r--  common/include/v4l2_codec2/common/NalParser.h  68
-rw-r--r--  common/include/v4l2_codec2/common/V4L2ComponentCommon.h  2
-rw-r--r--  common/include/v4l2_codec2/common/V4L2Device.h  518
-rw-r--r--  common/include/v4l2_codec2/common/V4L2DevicePoller.h  88
-rw-r--r--  common/include/v4l2_codec2/common/VideoPixelFormat.h  90
-rw-r--r--  common/include/v4l2_codec2/common/VideoTypes.h  6
-rw-r--r--  components/Android.bp  21
-rw-r--r--  components/V4L2ComponentFactory.cpp  87
-rw-r--r--  components/V4L2DecodeComponent.cpp  296
-rw-r--r--  components/V4L2DecodeInterface.cpp  41
-rw-r--r--  components/V4L2Decoder.cpp  236
-rw-r--r--  components/V4L2EncodeComponent.cpp  1300
-rw-r--r--  components/V4L2EncodeInterface.cpp  203
-rw-r--r--  components/V4L2Encoder.cpp  1016
-rw-r--r--  components/VideoEncoder.cpp  18
-rw-r--r--  components/VideoFrame.cpp  4
-rw-r--r--  components/VideoFramePool.cpp  41
-rw-r--r--  components/include/v4l2_codec2/components/BitstreamBuffer.h  2
-rw-r--r--  components/include/v4l2_codec2/components/V4L2ComponentFactory.h  39
-rw-r--r--  components/include/v4l2_codec2/components/V4L2DecodeComponent.h  17
-rw-r--r--  components/include/v4l2_codec2/components/V4L2DecodeInterface.h  10
-rw-r--r--  components/include/v4l2_codec2/components/V4L2Decoder.h  20
-rw-r--r--  components/include/v4l2_codec2/components/V4L2EncodeComponent.h  136
-rw-r--r--  components/include/v4l2_codec2/components/V4L2EncodeInterface.h  22
-rw-r--r--  components/include/v4l2_codec2/components/V4L2Encoder.h  189
-rw-r--r--  components/include/v4l2_codec2/components/VideoDecoder.h  5
-rw-r--r--  components/include/v4l2_codec2/components/VideoEncoder.h  81
-rw-r--r--  components/include/v4l2_codec2/components/VideoFrame.h  10
-rw-r--r--  components/include/v4l2_codec2/components/VideoFramePool.h  24
-rw-r--r--  plugin_store/Android.bp  14
-rw-r--r--  plugin_store/C2VdaBqBlockPool.cpp  1441
-rw-r--r--  plugin_store/C2VdaPooledBlockPool.cpp  1
-rw-r--r--  plugin_store/DrmGrallocHelpers.cpp  73
-rw-r--r--  plugin_store/H2BGraphicBufferProducer.cpp  287
-rw-r--r--  plugin_store/V4L2PluginStore.cpp  2
-rw-r--r--  plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h  60
-rw-r--r--  plugin_store/include/v4l2_codec2/plugin_store/DrmGrallocHelpers.h  19
-rw-r--r--  plugin_store/include/v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h  48
-rw-r--r--  service/Android.bp  28
-rw-r--r--  service/android.hardware.media.c2@1.0-service-v4l2-64.rc  7
-rw-r--r--  service/android.hardware.media.c2@1.0-service-v4l2.rc (renamed from service/android.hardware.media.c2@1.0-service-v4l2-32.rc)  2
-rw-r--r--  service/service.cpp  4
-rw-r--r--  store/Android.bp  30
-rw-r--r--  store/V4L2ComponentStore.cpp (renamed from components/V4L2ComponentStore.cpp)  115
-rw-r--r--  store/include/v4l2_codec2/store/V4L2ComponentStore.h (renamed from components/include/v4l2_codec2/components/V4L2ComponentStore.h)  24
-rw-r--r--  tests/c2_comp_intf/Android.bp  11
-rw-r--r--  tests/c2_e2e_test/Android.mk  3
-rw-r--r--  tests/c2_e2e_test/jni/Android.mk  8
-rw-r--r--  tests/c2_e2e_test/jni/common.cpp  116
-rw-r--r--  tests/c2_e2e_test/jni/common.h  47
-rw-r--r--  tests/c2_e2e_test/jni/e2e_test_jni.cpp  12
-rw-r--r--  tests/c2_e2e_test/jni/e2e_test_jni.h  6
-rw-r--r--  tests/c2_e2e_test/jni/encoded_data_helper.cpp  4
-rw-r--r--  tests/c2_e2e_test/jni/mediacodec_decoder.cpp  95
-rw-r--r--  tests/c2_e2e_test/jni/mediacodec_decoder.h  17
-rw-r--r--  tests/c2_e2e_test/jni/mediacodec_encoder.cpp  69
-rw-r--r--  tests/c2_e2e_test/jni/mediacodec_encoder.h  10
-rw-r--r--  tests/c2_e2e_test/jni/video_decoder_e2e_test.cpp  29
-rw-r--r--  tests/c2_e2e_test/jni/video_encoder_e2e_test.cpp  68
-rw-r--r--  tests/c2_e2e_test/jni/video_frame.cpp  2
-rw-r--r--  tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java  32
157 files changed, 22964 insertions(+), 8084 deletions(-)
diff --git a/Android.bp b/Android.bp
deleted file mode 100644
index 7f53aec..0000000
--- a/Android.bp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright (C) 2021 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package {
- default_applicable_licenses: ["external_v4l2_codec2_license"],
-}
-
-// Added automatically by a large-scale-change that took the approach of
-// 'apply every license found to every target'. While this makes sure we respect
-// every license restriction, it may not be entirely correct.
-//
-// e.g. GPL in an MIT project might only apply to the contrib/ directory.
-//
-// Please consider splitting the single license below into multiple licenses,
-// taking care not to lose any license_kind information, and overriding the
-// default license using the 'licenses: [...]' property on targets as needed.
-//
-// For unused files, consider creating a 'fileGroup' with "//visibility:private"
-// to attach the license to, and including a comment whether the files may be
-// used in the current project.
-// See: http://go/android-license-faq
-license {
- name: "external_v4l2_codec2_license",
- visibility: [":__subpackages__"],
- license_kinds: [
- "SPDX-license-identifier-BSD",
- "legacy_unencumbered",
- ],
- license_text: [
- "NOTICE",
- ],
-}
diff --git a/METADATA b/METADATA
deleted file mode 100644
index d97975c..0000000
--- a/METADATA
+++ /dev/null
@@ -1,3 +0,0 @@
-third_party {
- license_type: NOTICE
-}
diff --git a/README.md b/README.md
index e7ce74b..44c88bb 100644
--- a/README.md
+++ b/README.md
@@ -6,11 +6,14 @@
Core V4L2 API and codec utilities, ported from Chromium project.
* common/
-Common helper classes for components.
+Common helper classes for both components/ and store/.
* components/
-The C2Component implementations based on V4L2 API, and the implementation of
-C2ComponentStore for creating all the C2Components.
+The C2Component implementations based on V4L2 API.
+
+* store/
+The implementation of C2ComponentStore. It is used to create all the
+C2Components implemented in the components/ folder.
* service/
The Codec2's V4L2 IComponentStore service. The service initiates the component
diff --git a/accel/.clang-format b/accel/.clang-format
new file mode 100644
index 0000000..d174c9d
--- /dev/null
+++ b/accel/.clang-format
@@ -0,0 +1,4 @@
+# The code in this directory is ported from the Chromium project.
+# It is therefore based on the Chromium coding style and should not
+# be formatted according to the Android coding style.
+BasedOnStyle: Chromium
diff --git a/accel/Android.bp b/accel/Android.bp
new file mode 100644
index 0000000..1bf4805
--- /dev/null
+++ b/accel/Android.bp
@@ -0,0 +1,64 @@
+cc_library {
+ name: "libv4l2_codec2_accel",
+ vendor: true,
+
+ srcs: [
+ "bit_reader.cc",
+ "bit_reader_core.cc",
+ "bitstream_buffer.cc",
+ "color_plane_layout.cc",
+ "fourcc.cc",
+ "h264_bit_reader.cc",
+ "h264_decoder.cc",
+ "h264_dpb.cc",
+ "h264_parser.cc",
+ "generic_v4l2_device.cc",
+ "native_pixmap_handle.cc",
+ "picture.cc",
+ "ranges.cc",
+ "shared_memory_region.cc",
+ "v4l2_device.cc",
+ "v4l2_device_poller.cc",
+ "v4l2_video_decode_accelerator.cc",
+ "video_codecs.cc",
+ "video_decode_accelerator.cc",
+ "video_encode_accelerator.cc",
+ "video_frame.cc",
+ "video_frame_layout.cc",
+ "video_frame_metadata.cc",
+ "video_pixel_format.cc",
+ "vp8_bool_decoder.cc",
+ "vp8_decoder.cc",
+ "vp8_parser.cc",
+ "vp8_picture.cc",
+ "vp9_bool_decoder.cc",
+ "vp9_compressed_header_parser.cc",
+ "vp9_decoder.cc",
+ "vp9_parser.cc",
+ "vp9_picture.cc",
+ "vp9_raw_bits_reader.cc",
+ "vp9_uncompressed_header_parser.cc",
+ ],
+
+ shared_libs: ["libchrome"],
+    // -Wno-unused-parameter is needed for libchrome/base code
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wno-unused-parameter",
+ "-Wno-implicit-fallthrough", // at h264_decoder.cc:1374
+ ],
+ clang: true,
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+
+ ldflags: [
+ "-Wl",
+ "-Bsymbolic",
+ ],
+ export_include_dirs: ["."],
+}
diff --git a/accel/accelerated_video_decoder.h b/accel/accelerated_video_decoder.h
new file mode 100644
index 0000000..238e34d
--- /dev/null
+++ b/accel/accelerated_video_decoder.h
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 77118c9
+
+#ifndef ACCELERATED_VIDEO_DECODER_H_
+#define ACCELERATED_VIDEO_DECODER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "size.h"
+
+namespace media {
+
+// An AcceleratedVideoDecoder is a video decoder that requires support from an
+// external accelerator (typically a hardware accelerator) to partially offload
+// the decode process; the decoder itself parses stream headers and performs
+// reference frame and state management.
+class AcceleratedVideoDecoder {
+ public:
+ AcceleratedVideoDecoder() {}
+ virtual ~AcceleratedVideoDecoder() {}
+
+ virtual void SetStream(const uint8_t* ptr, size_t size) = 0;
+
+ // Have the decoder flush its state and trigger output of all previously
+ // decoded surfaces. Return false on failure.
+ virtual bool Flush() WARN_UNUSED_RESULT = 0;
+
+ // Stop (pause) decoding, discarding all remaining inputs and outputs,
+ // but do not flush decoder state, so that playback can be resumed later,
+ // possibly from a different location.
+ // To be called during decoding.
+ virtual void Reset() = 0;
+
+ enum DecodeResult {
+ kDecodeError, // Error while decoding.
+ // TODO(posciak): unsupported streams are currently treated as error
+ // in decoding; in future it could perhaps be possible to fall back
+ // to software decoding instead.
+ // kStreamError, // Error in stream.
+ kAllocateNewSurfaces, // Need a new set of surfaces to be allocated.
+ kRanOutOfStreamData, // Need more stream data to proceed.
+ kRanOutOfSurfaces, // Waiting for the client to free up output surfaces.
+ kNeedContextUpdate, // Waiting for the client to update decoding context
+ // with data acquired from the accelerator.
+ };
+
+ // Try to decode more of the stream, returning decoded frames asynchronously.
+ // Return when more stream is needed, when we run out of free surfaces, when
+ // we need a new set of them, or when an error occurs.
+ virtual DecodeResult Decode() WARN_UNUSED_RESULT = 0;
+
+ // Return dimensions/required number of output surfaces that client should
+ // be ready to provide for the decoder to function properly.
+ // To be used after Decode() returns kAllocateNewSurfaces.
+ virtual Size GetPicSize() const = 0;
+ virtual size_t GetRequiredNumOfPictures() const = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AcceleratedVideoDecoder);
+};
+
+} // namespace media
+
+#endif // ACCELERATED_VIDEO_DECODER_H_
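The DecodeResult values above define the entire control flow between the decoder and its owner: Decode() is called in a loop, and each result tells the caller what it must provide before decoding can continue. A minimal sketch of such a driver loop, assuming hypothetical helpers (FetchMoreStream(), AllocateSurfaces(), WaitForFreeSurface(), UpdateDecodingContext(), HandleError()) that stand in for the embedder's stream and buffer management and are not part of this interface:

// Sketch only: drives an AcceleratedVideoDecoder until end of stream or error.
void DriveDecoder(media::AcceleratedVideoDecoder* decoder) {
  while (true) {
    switch (decoder->Decode()) {
      case media::AcceleratedVideoDecoder::kRanOutOfStreamData:
        // Feed the next chunk via SetStream(); stop at end of stream.
        if (!FetchMoreStream(decoder))
          return;
        break;
      case media::AcceleratedVideoDecoder::kAllocateNewSurfaces:
        // New resolution: query requirements and (re)allocate surfaces.
        AllocateSurfaces(decoder->GetPicSize(),
                         decoder->GetRequiredNumOfPictures());
        break;
      case media::AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // Block until the client returns an output surface, then retry.
        WaitForFreeSurface();
        break;
      case media::AcceleratedVideoDecoder::kNeedContextUpdate:
        // Pull updated context from the accelerator before continuing.
        UpdateDecodingContext();
        break;
      case media::AcceleratedVideoDecoder::kDecodeError:
        HandleError();
        return;
    }
  }
}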
diff --git a/accel/bit_reader.cc b/accel/bit_reader.cc
new file mode 100644
index 0000000..95e7634
--- /dev/null
+++ b/accel/bit_reader.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "bit_reader.h"
+
+namespace media {
+
+BitReader::BitReader(const uint8_t* data, int size)
+ : initial_size_(size),
+ data_(data),
+ bytes_left_(size),
+ bit_reader_core_(this) {
+ DCHECK(data != NULL);
+ DCHECK_GE(size, 0);
+}
+
+BitReader::~BitReader() = default;
+
+bool BitReader::ReadString(int num_bits, std::string* str) {
+ DCHECK_EQ(num_bits % 8, 0);
+ DCHECK_GT(num_bits, 0);
+ DCHECK(str);
+ int num_bytes = num_bits / 8;
+ str->resize(num_bytes);
+ char* ptr = &str->front();
+ while (num_bytes--) {
+ if (!ReadBits(8, ptr++))
+ return false;
+ }
+ return true;
+}
+
+int BitReader::GetBytes(int max_nbytes, const uint8_t** out) {
+ DCHECK_GE(max_nbytes, 0);
+ DCHECK(out);
+
+ int nbytes = max_nbytes;
+ if (nbytes > bytes_left_)
+ nbytes = bytes_left_;
+
+ *out = data_;
+ data_ += nbytes;
+ bytes_left_ -= nbytes;
+ return nbytes;
+}
+
+} // namespace media
diff --git a/accel/bit_reader.h b/accel/bit_reader.h
new file mode 100644
index 0000000..dfc2b0b
--- /dev/null
+++ b/accel/bit_reader.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 43ddd7a
+
+#ifndef BIT_READER_H_
+#define BIT_READER_H_
+
+#include <stdint.h>
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "bit_reader_core.h"
+
+namespace media {
+
+class BitReader : private BitReaderCore::ByteStreamProvider {
+ public:
+ // Initialize the reader to start reading at |data|, |size| being size
+ // of |data| in bytes.
+ BitReader(const uint8_t* data, int size);
+ ~BitReader() override;
+
+ template<typename T> bool ReadBits(int num_bits, T* out) {
+ return bit_reader_core_.ReadBits(num_bits, out);
+ }
+
+ bool ReadFlag(bool* flag) {
+ return bit_reader_core_.ReadFlag(flag);
+ }
+
+ // Read |num_bits| of binary data into |str|. |num_bits| must be a positive
+ // multiple of 8. This is not efficient for extracting large strings.
+ // If false is returned, |str| may not be valid.
+ bool ReadString(int num_bits, std::string* str);
+
+ bool SkipBits(int num_bits) {
+ return bit_reader_core_.SkipBits(num_bits);
+ }
+
+ int bits_available() const {
+ return initial_size_ * 8 - bits_read();
+ }
+
+ int bits_read() const {
+ return bit_reader_core_.bits_read();
+ }
+
+ private:
+ // BitReaderCore::ByteStreamProvider implementation.
+ int GetBytes(int max_n, const uint8_t** out) override;
+
+ // Total number of bytes that was initially passed to BitReader.
+ const int initial_size_;
+
+ // Pointer to the next unread byte in the stream.
+ const uint8_t* data_;
+
+ // Bytes left in the stream.
+ int bytes_left_;
+
+ BitReaderCore bit_reader_core_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitReader);
+};
+
+} // namespace media
+
+#endif // BIT_READER_H_
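To make the semantics above concrete, here is a hedged usage sketch reading from a two-byte buffer; bits are consumed most-significant first, and the values in the comments follow from the input bytes chosen here:

#include <cstdint>

void BitReaderExample() {
  const uint8_t data[] = {0xB4, 0xFF};  // 0xB4 == 0b10110100.
  media::BitReader reader(data, sizeof(data));

  bool flag = false;
  reader.ReadFlag(&flag);      // Consumes the MSB: flag == true.

  int value = 0;
  reader.ReadBits(3, &value);  // Next three bits 0b011: value == 3.

  reader.SkipBits(4);          // Skip the rest of the first byte.

  // 16 bits total, 8 consumed so far:
  // reader.bits_read() == 8 and reader.bits_available() == 8.
}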
diff --git a/accel/bit_reader_core.cc b/accel/bit_reader_core.cc
new file mode 100644
index 0000000..92b3211
--- /dev/null
+++ b/accel/bit_reader_core.cc
@@ -0,0 +1,191 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "bit_reader_core.h"
+
+#include <stdint.h>
+
+#include "base/sys_byteorder.h"
+
+namespace {
+const int kRegWidthInBits = sizeof(uint64_t) * 8;
+}
+
+namespace media {
+
+BitReaderCore::ByteStreamProvider::ByteStreamProvider() = default;
+
+BitReaderCore::ByteStreamProvider::~ByteStreamProvider() = default;
+
+BitReaderCore::BitReaderCore(ByteStreamProvider* byte_stream_provider)
+ : byte_stream_provider_(byte_stream_provider),
+ bits_read_(0),
+ nbits_(0),
+ reg_(0),
+ nbits_next_(0),
+ reg_next_(0) {
+}
+
+BitReaderCore::~BitReaderCore() = default;
+
+bool BitReaderCore::ReadFlag(bool* flag) {
+ if (nbits_ == 0 && !Refill(1))
+ return false;
+
+ *flag = (reg_ & (UINT64_C(1) << (kRegWidthInBits - 1))) != 0;
+ reg_ <<= 1;
+ nbits_--;
+ bits_read_++;
+ return true;
+}
+
+int BitReaderCore::PeekBitsMsbAligned(int num_bits, uint64_t* out) {
+ // Try to have at least |num_bits| in the bit register.
+ if (nbits_ < num_bits)
+ Refill(num_bits);
+
+ *out = reg_;
+ return nbits_;
+}
+
+bool BitReaderCore::SkipBitsSmall(int num_bits) {
+ DCHECK_GE(num_bits, 0);
+ uint64_t dummy;
+ while (num_bits >= kRegWidthInBits) {
+ if (!ReadBitsInternal(kRegWidthInBits, &dummy))
+ return false;
+ num_bits -= kRegWidthInBits;
+ }
+ return ReadBitsInternal(num_bits, &dummy);
+}
+
+bool BitReaderCore::SkipBits(int num_bits) {
+ DCHECK_GE(num_bits, 0);
+
+ const int remaining_bits = nbits_ + nbits_next_;
+ if (remaining_bits >= num_bits)
+ return SkipBitsSmall(num_bits);
+
+ // Skip first the remaining available bits.
+ num_bits -= remaining_bits;
+ bits_read_ += remaining_bits;
+ nbits_ = 0;
+ reg_ = 0;
+ nbits_next_ = 0;
+ reg_next_ = 0;
+
+ // Next, skip an integer number of bytes.
+ const int nbytes = num_bits / 8;
+ if (nbytes > 0) {
+ const uint8_t* byte_stream_window;
+ const int window_size =
+ byte_stream_provider_->GetBytes(nbytes, &byte_stream_window);
+ DCHECK_GE(window_size, 0);
+ DCHECK_LE(window_size, nbytes);
+ if (window_size < nbytes) {
+ // Note that some bytes were consumed.
+ bits_read_ += 8 * window_size;
+ return false;
+ }
+ num_bits -= 8 * nbytes;
+ bits_read_ += 8 * nbytes;
+ }
+
+ // Skip the remaining bits.
+ return SkipBitsSmall(num_bits);
+}
+
+int BitReaderCore::bits_read() const {
+ return bits_read_;
+}
+
+bool BitReaderCore::ReadBitsInternal(int num_bits, uint64_t* out) {
+ DCHECK_GE(num_bits, 0);
+
+ if (num_bits == 0) {
+ *out = 0;
+ return true;
+ }
+
+ if (num_bits > nbits_ && !Refill(num_bits)) {
+ // Any subsequent ReadBits should fail:
+ // empty the current bit register for that purpose.
+ nbits_ = 0;
+ reg_ = 0;
+ return false;
+ }
+
+ bits_read_ += num_bits;
+
+ if (num_bits == kRegWidthInBits) {
+ // Special case needed since for example for a 64 bit integer "a"
+ // "a << 64" is not defined by the C/C++ standard.
+ *out = reg_;
+ reg_ = 0;
+ nbits_ = 0;
+ return true;
+ }
+
+ *out = reg_ >> (kRegWidthInBits - num_bits);
+ reg_ <<= num_bits;
+ nbits_ -= num_bits;
+ return true;
+}
+
+bool BitReaderCore::Refill(int min_nbits) {
+ DCHECK_LE(min_nbits, kRegWidthInBits);
+
+ // Transfer from the next to the current register.
+ RefillCurrentRegister();
+ if (min_nbits <= nbits_)
+ return true;
+ DCHECK_EQ(nbits_next_, 0);
+ DCHECK_EQ(reg_next_, 0u);
+
+ // Max number of bytes to refill.
+ int max_nbytes = sizeof(reg_next_);
+
+ // Refill.
+ const uint8_t* byte_stream_window;
+ int window_size =
+ byte_stream_provider_->GetBytes(max_nbytes, &byte_stream_window);
+ DCHECK_GE(window_size, 0);
+ DCHECK_LE(window_size, max_nbytes);
+ if (window_size == 0)
+ return false;
+
+ reg_next_ = 0;
+ memcpy(&reg_next_, byte_stream_window, window_size);
+ reg_next_ = base::NetToHost64(reg_next_);
+ nbits_next_ = window_size * 8;
+
+ // Transfer from the next to the current register.
+ RefillCurrentRegister();
+
+ return (nbits_ >= min_nbits);
+}
+
+void BitReaderCore::RefillCurrentRegister() {
+ // No refill possible if the destination register is full
+ // or the source register is empty.
+ if (nbits_ == kRegWidthInBits || nbits_next_ == 0)
+ return;
+
+ reg_ |= (reg_next_ >> nbits_);
+
+ int free_nbits = kRegWidthInBits - nbits_;
+ if (free_nbits >= nbits_next_) {
+ nbits_ += nbits_next_;
+ reg_next_ = 0;
+ nbits_next_ = 0;
+ return;
+ }
+
+ nbits_ += free_nbits;
+ reg_next_ <<= free_nbits;
+ nbits_next_ -= free_nbits;
+}
+
+} // namespace media
diff --git a/accel/bit_reader_core.h b/accel/bit_reader_core.h
new file mode 100644
index 0000000..62a21e2
--- /dev/null
+++ b/accel/bit_reader_core.h
@@ -0,0 +1,125 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
+
+#ifndef BIT_READER_CORE_H_
+#define BIT_READER_CORE_H_
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace media {
+
+class BitReaderCore {
+ public:
+ class ByteStreamProvider {
+ public:
+ ByteStreamProvider();
+ virtual ~ByteStreamProvider();
+
+ // Consume at most the following |max_n| bytes of the stream
+ // and return the number n of bytes actually consumed.
+ // Set |*array| to point to a memory buffer containing those n bytes.
+ // Note: |*array| must be valid until the next call to GetBytes
+ // but there is no guarantee it is valid after.
+ virtual int GetBytes(int max_n, const uint8_t** array) = 0;
+ };
+
+ // Lifetime of |byte_stream_provider| must be longer than BitReaderCore.
+ explicit BitReaderCore(ByteStreamProvider* byte_stream_provider);
+ ~BitReaderCore();
+
+ // Read one bit from the stream and return it as a boolean in |*out|.
+ // Remark: we do not use the template version for reading a bool
+ // since it generates some optimization warnings during compilation
+ // on Windows platforms.
+ bool ReadBits(int num_bits, bool* out) {
+ DCHECK_EQ(num_bits, 1);
+ return ReadFlag(out);
+ }
+
+  // Read the next |num_bits| bits from the stream and return them in |*out|.
+  // The first bit read from the stream becomes the most significant of the
+  // |num_bits| bits read; bits of |*out| at positions greater than or equal
+  // to |num_bits| are all set to zero.
+ // Notes:
+ // - |num_bits| cannot be larger than the bits the type can hold.
+ // - From the above description, passing a signed type in |T| does not
+ // mean the first bit read from the stream gives the sign of the value.
+ // Return false if the given number of bits cannot be read (not enough
+ // bits in the stream), true otherwise. When return false, the stream will
+ // enter a state where further ReadBits/SkipBits operations will always
+ // return false unless |num_bits| is 0. The type |T| has to be a primitive
+ // integer type.
+ template<typename T> bool ReadBits(int num_bits, T* out) {
+ DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8));
+ uint64_t temp;
+ bool ret = ReadBitsInternal(num_bits, &temp);
+ *out = static_cast<T>(temp);
+ return ret;
+ }
+
+ // Read one bit from the stream and return it as a boolean in |*flag|.
+ bool ReadFlag(bool* flag);
+
+ // Retrieve some bits without actually consuming them.
+ // Bits returned in |*out| are shifted so the most significant bit contains
+ // the next bit that can be read from the stream.
+ // Return the number of bits actually written in |out|.
+ // Note: |num_bits| is just a suggestion of how many bits the caller
+ // wish to get in |*out| and must be less than 64:
+ // - The number of bits returned can be more than |num_bits|.
+ // - However, it will be strictly less than |num_bits|
+ // if and only if there are not enough bits left in the stream.
+ int PeekBitsMsbAligned(int num_bits, uint64_t* out);
+
+ // Skip |num_bits| next bits from stream. Return false if the given number of
+ // bits cannot be skipped (not enough bits in the stream), true otherwise.
+ // When return false, the stream will enter a state where further
+ // ReadBits/ReadFlag/SkipBits operations
+ // will always return false unless |num_bits| is 0.
+ bool SkipBits(int num_bits);
+
+ // Returns the number of bits read so far.
+ int bits_read() const;
+
+ private:
+ // This function can skip any number of bits but is more efficient
+ // for small numbers. Return false if the given number of bits cannot be
+ // skipped (not enough bits in the stream), true otherwise.
+ bool SkipBitsSmall(int num_bits);
+
+  // Helper function used by ReadBits to avoid inlining the bit reading logic.
+ bool ReadBitsInternal(int num_bits, uint64_t* out);
+
+ // Refill bit registers to have at least |min_nbits| bits available.
+  // Return true if the minimum bit count condition is met after the refill.
+ bool Refill(int min_nbits);
+
+ // Refill the current bit register from the next bit register.
+ void RefillCurrentRegister();
+
+ ByteStreamProvider* const byte_stream_provider_;
+
+ // Number of bits read so far.
+ int bits_read_;
+
+ // Number of bits in |reg_| that have not been consumed yet.
+ // Note: bits are consumed from MSB to LSB.
+ int nbits_;
+ uint64_t reg_;
+
+ // Number of bits in |reg_next_| that have not been consumed yet.
+ // Note: bits are consumed from MSB to LSB.
+ int nbits_next_;
+ uint64_t reg_next_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitReaderCore);
+};
+
+} // namespace media
+
+#endif // BIT_READER_CORE_H_
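BitReaderCore is decoupled from its input through ByteStreamProvider: it pulls bytes on demand, and the provider reports how many it could actually deliver. A hedged sketch of a minimal provider over a flat buffer (essentially what BitReader::GetBytes() in bit_reader.cc does):

#include <algorithm>
#include <cstdint>

// Minimal provider over a contiguous buffer. Per the contract above, the
// returned window must stay valid until the next GetBytes() call.
class BufferByteStreamProvider
    : public media::BitReaderCore::ByteStreamProvider {
 public:
  BufferByteStreamProvider(const uint8_t* data, int size)
      : data_(data), left_(size) {}

  int GetBytes(int max_n, const uint8_t** array) override {
    const int n = std::min(max_n, left_);
    *array = data_;  // Hand out a window starting at the current position.
    data_ += n;
    left_ -= n;
    return n;  // May be less than |max_n| near the end of the buffer.
  }

 private:
  const uint8_t* data_;
  int left_;
};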
diff --git a/accel/bitstream_buffer.cc b/accel/bitstream_buffer.cc
new file mode 100644
index 0000000..36b8d06
--- /dev/null
+++ b/accel/bitstream_buffer.cc
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "bitstream_buffer.h"
+
+namespace media {
+
+BitstreamBuffer::BitstreamBuffer()
+ : BitstreamBuffer(-1, base::SharedMemoryHandle(), 0) {}
+
+BitstreamBuffer::BitstreamBuffer(int32_t id,
+ base::SharedMemoryHandle handle,
+ size_t size,
+ off_t offset,
+ base::TimeDelta presentation_timestamp)
+ : id_(id),
+ handle_(handle),
+ size_(size),
+ offset_(offset),
+ presentation_timestamp_(presentation_timestamp) {}
+
+BitstreamBuffer::BitstreamBuffer(const BitstreamBuffer& other) = default;
+
+BitstreamBuffer::~BitstreamBuffer() = default;
+
+} // namespace media
diff --git a/accel/bitstream_buffer.h b/accel/bitstream_buffer.h
new file mode 100644
index 0000000..3a267a0
--- /dev/null
+++ b/accel/bitstream_buffer.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 39a7f93
+
+#ifndef MEDIA_BASE_BITSTREAM_BUFFER_H_
+#define MEDIA_BASE_BITSTREAM_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/time/time.h"
+
+namespace media {
+
+// Indicates an invalid or missing timestamp.
+constexpr base::TimeDelta kNoTimestamp =
+ base::TimeDelta::FromMicroseconds(std::numeric_limits<int64_t>::min());
+
+// Class for passing bitstream buffers around. Does not take ownership of the
+// data. This is the media-namespace equivalent of PP_VideoBitstreamBuffer_Dev.
+class BitstreamBuffer {
+ public:
+ BitstreamBuffer();
+
+ // Constructs a new BitstreamBuffer. The content of the bitstream is located
+ // at |offset| bytes away from the start of the shared memory and the payload
+ // is |size| bytes. When not provided, the default value for |offset| is 0.
+ // |presentation_timestamp| is when the decoded frame should be displayed.
+ // When not provided, |presentation_timestamp| will be
+ // |media::kNoTimestamp|.
+ BitstreamBuffer(int32_t id,
+ base::SharedMemoryHandle handle,
+ size_t size,
+ off_t offset = 0,
+ base::TimeDelta presentation_timestamp = kNoTimestamp);
+
+ BitstreamBuffer(const BitstreamBuffer& other);
+
+ ~BitstreamBuffer();
+
+ int32_t id() const { return id_; }
+ base::SharedMemoryHandle handle() const { return handle_; }
+
+ // The number of bytes of the actual bitstream data. It is the size of the
+ // content instead of the whole shared memory.
+ size_t size() const { return size_; }
+
+ // The offset to the start of actual bitstream data in the shared memory.
+ off_t offset() const { return offset_; }
+
+ // The timestamp is only valid if it's not equal to |media::kNoTimestamp|.
+ base::TimeDelta presentation_timestamp() const {
+ return presentation_timestamp_;
+ }
+
+ void set_handle(const base::SharedMemoryHandle& handle) { handle_ = handle; }
+
+ private:
+ int32_t id_;
+ base::SharedMemoryHandle handle_;
+ size_t size_;
+ off_t offset_;
+
+ // This is only set when necessary. For example, AndroidVideoDecodeAccelerator
+ // needs the timestamp because the underlying decoder may require it to
+ // determine the output order.
+ base::TimeDelta presentation_timestamp_;
+
+ // Allow compiler-generated copy & assign constructors.
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BITSTREAM_BUFFER_H_
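As a usage sketch of the constructor documented above: wrapping a payload that starts partway into a shared memory region, with a made-up id, size, offset, and timestamp (the buffer does not take ownership of the handle):

// |shm| is assumed to be a valid base::SharedMemoryHandle owned elsewhere.
media::BitstreamBuffer MakeBuffer(base::SharedMemoryHandle shm) {
  return media::BitstreamBuffer(/*id=*/42, shm, /*size=*/4096,
                                /*offset=*/256,
                                base::TimeDelta::FromMilliseconds(33));
}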
diff --git a/accel/color_plane_layout.cc b/accel/color_plane_layout.cc
new file mode 100644
index 0000000..3a19798
--- /dev/null
+++ b/accel/color_plane_layout.cc
@@ -0,0 +1,31 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 57ec858cddff
+
+#include "color_plane_layout.h"
+
+namespace media {
+
+ColorPlaneLayout::ColorPlaneLayout() = default;
+
+ColorPlaneLayout::ColorPlaneLayout(int32_t stride, size_t offset, size_t size)
+ : stride(stride), offset(offset), size(size) {}
+
+ColorPlaneLayout::~ColorPlaneLayout() = default;
+
+bool ColorPlaneLayout::operator==(const ColorPlaneLayout& rhs) const {
+ return stride == rhs.stride && offset == rhs.offset && size == rhs.size;
+}
+
+bool ColorPlaneLayout::operator!=(const ColorPlaneLayout& rhs) const {
+ return !(*this == rhs);
+}
+
+std::ostream& operator<<(std::ostream& ostream, const ColorPlaneLayout& plane) {
+ ostream << "(" << plane.stride << ", " << plane.offset << ", " << plane.size
+ << ")";
+ return ostream;
+}
+
+} // namespace media
diff --git a/accel/color_plane_layout.h b/accel/color_plane_layout.h
new file mode 100644
index 0000000..829c0ec
--- /dev/null
+++ b/accel/color_plane_layout.h
@@ -0,0 +1,41 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 57ec858cddff
+
+#ifndef MEDIA_COLOR_PLANE_LAYOUT_H_
+#define MEDIA_COLOR_PLANE_LAYOUT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ostream>
+
+namespace media {
+
+// Encapsulates a color plane's memory layout: (stride, offset, size)
+// stride: in bytes of a plane. Note that stride can be negative if the image
+// layout is bottom-up.
+// offset: in bytes of a plane, which stands for the offset of a start point of
+// a color plane from a buffer FD.
+// size: in bytes of a plane. This |size| bytes data must contain all the data
+// a decoder will access (e.g. visible area and padding).
+struct ColorPlaneLayout {
+ ColorPlaneLayout();
+ ColorPlaneLayout(int32_t stride, size_t offset, size_t size);
+ ~ColorPlaneLayout();
+
+ bool operator==(const ColorPlaneLayout& rhs) const;
+ bool operator!=(const ColorPlaneLayout& rhs) const;
+
+ int32_t stride = 0;
+ size_t offset = 0;
+ size_t size = 0;
+};
+
+// Outputs ColorPlaneLayout to stream.
+std::ostream& operator<<(std::ostream& ostream, const ColorPlaneLayout& plane);
+
+} // namespace media
+
+#endif // MEDIA_COLOR_PLANE_LAYOUT_H_
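To illustrate the three fields, a tightly packed 320x240 NV12 frame (full-resolution Y plane followed by a half-height interleaved UV plane, assuming no row padding) would be described by two layouts:

// Y plane: 320 bytes per row, 240 rows, starting at offset 0.
const media::ColorPlaneLayout kYPlane(/*stride=*/320, /*offset=*/0,
                                      /*size=*/320 * 240);
// UV plane: interleaved U/V, 320 bytes per row, 120 rows, right after Y.
const media::ColorPlaneLayout kUVPlane(/*stride=*/320, /*offset=*/320 * 240,
                                       /*size=*/320 * 240 / 2);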
diff --git a/accel/fourcc.cc b/accel/fourcc.cc
new file mode 100644
index 0000000..d340ddf
--- /dev/null
+++ b/accel/fourcc.cc
@@ -0,0 +1,283 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 27c98933749f
+
+#include "fourcc.h"
+
+#include <linux/videodev2.h>
+
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+
+#include "macros.h"
+
+namespace media {
+
+Fourcc::Fourcc(Fourcc::Value fourcc) : value_(fourcc) {}
+Fourcc::~Fourcc() = default;
+Fourcc& Fourcc::operator=(const Fourcc& other) = default;
+
+// static
+base::Optional<Fourcc> Fourcc::FromUint32(uint32_t fourcc) {
+ switch (fourcc) {
+ case AR24:
+ case AB24:
+ case XR24:
+ case XB24:
+ case RGB4:
+ case YU12:
+ case YV12:
+ case YM12:
+ case YM21:
+ case YUYV:
+ case NV12:
+ case NV21:
+ case NM12:
+ case NM21:
+ case YM16:
+ case MT21:
+ case MM21:
+ return Fourcc(static_cast<Value>(fourcc));
+ }
+ DVLOGF(3) << "Unmapped fourcc: " << FourccToString(fourcc);
+ return base::nullopt;
+}
+
+// static
+base::Optional<Fourcc> Fourcc::FromVideoPixelFormat(
+ VideoPixelFormat pixel_format,
+ bool single_planar) {
+ if (single_planar) {
+ switch (pixel_format) {
+ case PIXEL_FORMAT_ARGB:
+ return Fourcc(AR24);
+ case PIXEL_FORMAT_ABGR:
+ return Fourcc(AB24);
+ case PIXEL_FORMAT_XRGB:
+ return Fourcc(XR24);
+ case PIXEL_FORMAT_XBGR:
+ return Fourcc(XB24);
+ case PIXEL_FORMAT_BGRA:
+ return Fourcc(RGB4);
+ case PIXEL_FORMAT_I420:
+ return Fourcc(YU12);
+ case PIXEL_FORMAT_YV12:
+ return Fourcc(YV12);
+ case PIXEL_FORMAT_YUY2:
+ return Fourcc(YUYV);
+ case PIXEL_FORMAT_NV12:
+ return Fourcc(NV12);
+ case PIXEL_FORMAT_NV21:
+ return Fourcc(NV21);
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_P016LE:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ case PIXEL_FORMAT_UNKNOWN:
+ break;
+ }
+ } else {
+ switch (pixel_format) {
+ case PIXEL_FORMAT_I420:
+ return Fourcc(YM12);
+ case PIXEL_FORMAT_YV12:
+ return Fourcc(YM21);
+ case PIXEL_FORMAT_NV12:
+ return Fourcc(NM12);
+ case PIXEL_FORMAT_I422:
+ return Fourcc(YM16);
+ case PIXEL_FORMAT_NV21:
+ return Fourcc(NM21);
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_P016LE:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ case PIXEL_FORMAT_BGRA:
+ case PIXEL_FORMAT_UNKNOWN:
+ break;
+ }
+ }
+ DVLOGF(3) << "Unmapped " << VideoPixelFormatToString(pixel_format) << " for "
+ << (single_planar ? "single-planar" : "multi-planar");
+ return base::nullopt;
+}
+
+VideoPixelFormat Fourcc::ToVideoPixelFormat() const {
+ switch (value_) {
+ case AR24:
+ return PIXEL_FORMAT_ARGB;
+ case AB24:
+ return PIXEL_FORMAT_ABGR;
+ case XR24:
+ return PIXEL_FORMAT_XRGB;
+ case XB24:
+ return PIXEL_FORMAT_XBGR;
+ case RGB4:
+ return PIXEL_FORMAT_BGRA;
+ case YU12:
+ case YM12:
+ return PIXEL_FORMAT_I420;
+ case YV12:
+ case YM21:
+ return PIXEL_FORMAT_YV12;
+ case YUYV:
+ return PIXEL_FORMAT_YUY2;
+ case NV12:
+ case NM12:
+ return PIXEL_FORMAT_NV12;
+ case NV21:
+ case NM21:
+ return PIXEL_FORMAT_NV21;
+ case YM16:
+ return PIXEL_FORMAT_I422;
+    // V4L2_PIX_FMT_MT21C is only used for MT8173 hardware video decoder
+    // output and should be converted by the MT8173 image processor for the
+    // compositor to render. Since it is an intermediate format for the video
+    // decoder, VideoPixelFormat should not have a mapping for it. However,
+    // we need to create a VideoFrameLayout for the format to process the
+    // intermediate frame. Hence we map V4L2_PIX_FMT_MT21C to
+    // PIXEL_FORMAT_NV12, as their layouts are the same.
+    case MT21:
+    // V4L2_PIX_FMT_MM21 is used for the MT8183 hardware video decoder. It is
+    // similar to V4L2_PIX_FMT_MT21C but is not compressed; thus it can also
+    // be mapped to PIXEL_FORMAT_NV12.
+    case MM21:
+ return PIXEL_FORMAT_NV12;
+ }
+ NOTREACHED() << "Unmapped Fourcc: " << ToString();
+ return PIXEL_FORMAT_UNKNOWN;
+}
+
+// static
+base::Optional<Fourcc> Fourcc::FromV4L2PixFmt(uint32_t v4l2_pix_fmt) {
+ // We can do that because we adopt the same internal definition of Fourcc as
+ // V4L2.
+ return FromUint32(v4l2_pix_fmt);
+}
+
+uint32_t Fourcc::ToV4L2PixFmt() const {
+ // Note that we can do that because we adopt the same internal definition of
+ // Fourcc as V4L2.
+ return static_cast<uint32_t>(value_);
+}
+
+base::Optional<Fourcc> Fourcc::ToSinglePlanar() const {
+ switch (value_) {
+ case AR24:
+ case AB24:
+ case XR24:
+ case XB24:
+ case RGB4:
+ case YU12:
+ case YV12:
+ case YUYV:
+ case NV12:
+ case NV21:
+ return Fourcc(value_);
+ case YM12:
+ return Fourcc(YU12);
+ case YM21:
+ return Fourcc(YV12);
+ case NM12:
+ return Fourcc(NV12);
+ case NM21:
+ return Fourcc(NV21);
+ case YM16:
+ case MT21:
+ case MM21:
+ return base::nullopt;
+ }
+}
+
+bool operator!=(const Fourcc& lhs, const Fourcc& rhs) {
+ return !(lhs == rhs);
+}
+
+bool Fourcc::IsMultiPlanar() const {
+ switch (value_) {
+ case AR24:
+ case AB24:
+ case XR24:
+ case XB24:
+ case RGB4:
+ case YU12:
+ case YV12:
+ case YUYV:
+ case NV12:
+ case NV21:
+ return false;
+ case YM12:
+ case YM21:
+ case NM12:
+ case NM21:
+ case YM16:
+ case MT21:
+ case MM21:
+ return true;
+ }
+}
+
+std::string Fourcc::ToString() const {
+ return FourccToString(static_cast<uint32_t>(value_));
+}
+
+static_assert(Fourcc::AR24 == V4L2_PIX_FMT_ABGR32, "Mismatch Fourcc");
+#ifdef V4L2_PIX_FMT_RGBA32
+// V4L2_PIX_FMT_RGBA32 is defined since v5.2
+static_assert(Fourcc::AB24 == V4L2_PIX_FMT_RGBA32, "Mismatch Fourcc");
+#endif // V4L2_PIX_FMT_RGBA32
+static_assert(Fourcc::XR24 == V4L2_PIX_FMT_XBGR32, "Mismatch Fourcc");
+#ifdef V4L2_PIX_FMT_RGBX32
+// V4L2_PIX_FMT_RGBX32 is defined since v5.2
+static_assert(Fourcc::XB24 == V4L2_PIX_FMT_RGBX32, "Mismatch Fourcc");
+#endif // V4L2_PIX_FMT_RGBX32
+static_assert(Fourcc::RGB4 == V4L2_PIX_FMT_RGB32, "Mismatch Fourcc");
+static_assert(Fourcc::YU12 == V4L2_PIX_FMT_YUV420, "Mismatch Fourcc");
+static_assert(Fourcc::YV12 == V4L2_PIX_FMT_YVU420, "Mismatch Fourcc");
+static_assert(Fourcc::YM12 == V4L2_PIX_FMT_YUV420M, "Mismatch Fourcc");
+static_assert(Fourcc::YM21 == V4L2_PIX_FMT_YVU420M, "Mismatch Fourcc");
+static_assert(Fourcc::YUYV == V4L2_PIX_FMT_YUYV, "Mismatch Fourcc");
+static_assert(Fourcc::NV12 == V4L2_PIX_FMT_NV12, "Mismatch Fourcc");
+static_assert(Fourcc::NV21 == V4L2_PIX_FMT_NV21, "Mismatch Fourcc");
+static_assert(Fourcc::NM12 == V4L2_PIX_FMT_NV12M, "Mismatch Fourcc");
+static_assert(Fourcc::NM21 == V4L2_PIX_FMT_NV21M, "Mismatch Fourcc");
+static_assert(Fourcc::YM16 == V4L2_PIX_FMT_YUV422M, "Mismatch Fourcc");
+static_assert(Fourcc::MT21 == V4L2_PIX_FMT_MT21C, "Mismatch Fourcc");
+#ifdef V4L2_PIX_FMT_MM21
+// V4L2_PIX_FMT_MM21 is not yet upstreamed.
+static_assert(Fourcc::MM21 == V4L2_PIX_FMT_MM21, "Mismatch Fourcc");
+#endif // V4L2_PIX_FMT_MM21
+} // namespace media
diff --git a/accel/fourcc.h b/accel/fourcc.h
new file mode 100644
index 0000000..408b845
--- /dev/null
+++ b/accel/fourcc.h
@@ -0,0 +1,161 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 27c98933749f
+
+#ifndef FOURCC_H_
+#define FOURCC_H_
+
+#include <stdint.h>
+#include <string>
+
+#include "base/optional.h"
+#include "video_pixel_format.h"
+
+namespace media {
+
+// Composes a Fourcc value.
+constexpr uint32_t ComposeFourcc(char a, char b, char c, char d) {
+ return static_cast<uint32_t>(a) | (static_cast<uint32_t>(b) << 8) |
+ (static_cast<uint32_t>(c) << 16) | (static_cast<uint32_t>(d) << 24);
+}
+
+// Fourcc enum holder and converters.
+// Usage:
+// Fourcc f1(Fourcc::AR24);
+// EXPECT_EQ("AR24", f1.ToString());
+// Fourcc f2 = Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_ARGB);
+// EXPECT_EQ(f2, f1);
+class Fourcc {
+ public:
+ enum Value : uint32_t {
+ // RGB formats.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-rgb.html
+ // Maps to PIXEL_FORMAT_ARGB, V4L2_PIX_FMT_ABGR32, VA_FOURCC_BGRA.
+ // 32bpp BGRA (byte-order), 1 plane.
+ AR24 = ComposeFourcc('A', 'R', '2', '4'),
+
+ // Maps to PIXEL_FORMAT_ABGR, V4L2_PIX_FMT_RGBA32, VA_FOURCC_RGBA.
+ // 32bpp RGBA (byte-order), 1 plane
+ AB24 = ComposeFourcc('A', 'B', '2', '4'),
+
+ // Maps to PIXEL_FORMAT_XRGB, V4L2_PIX_FMT_XBGR32, VA_FOURCC_BGRX.
+ // 32bpp BGRX (byte-order), 1 plane.
+ XR24 = ComposeFourcc('X', 'R', '2', '4'),
+
+ // Maps to PIXEL_FORMAT_XBGR, V4L2_PIX_FMT_RGBX32, VA_FOURCC_RGBX.
+ // 32bpp RGBX (byte-order), 1 plane.
+ XB24 = ComposeFourcc('X', 'B', '2', '4'),
+
+ // Maps to PIXEL_FORMAT_BGRA, V4L2_PIX_FMT_RGB32, VA_FOURCC_ARGB.
+ // 32bpp ARGB (byte-order), 1 plane.
+ // Note that V4L2_PIX_FMT_RGB32("RGB4") is deprecated and replaced by
+    // V4L2_PIX_FMT_ARGB32("BA24"); however, some boards rely on the fourcc
+    // mapping, so we keep it as-is.
+ RGB4 = ComposeFourcc('R', 'G', 'B', '4'),
+
+ // YUV420 single-planar formats.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuv420.html
+ // Maps to PIXEL_FORMAT_I420, V4L2_PIX_FMT_YUV420, VA_FOURCC_I420.
+ // 12bpp YUV planar 1x1 Y, 2x2 UV samples.
+ YU12 = ComposeFourcc('Y', 'U', '1', '2'),
+ // Maps to PIXEL_FORMAT_YV12, V4L2_PIX_FMT_YVU420, VA_FOURCC_YV12.
+ // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
+ YV12 = ComposeFourcc('Y', 'V', '1', '2'),
+
+ // YUV420 multi-planar format.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuv420m.htm
+ // Maps to PIXEL_FORMAT_I420, V4L2_PIX_FMT_YUV420M.
+ YM12 = ComposeFourcc('Y', 'M', '1', '2'),
+ // Maps to PIXEL_FORMAT_YV12, V4L2_PIX_FMT_YVU420M.
+ YM21 = ComposeFourcc('Y', 'M', '2', '1'),
+
+ // YUYV format.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuyv.html
+ // Maps to PIXEL_FORMAT_YUY2, V4L2_PIX_FMT_YUYV, VA_FOURCC_YUY2.
+ // 16bpp YUV planar (YUV 4:2:2), YUYV (byte-order), 1 plane.
+ YUYV = ComposeFourcc('Y', 'U', 'Y', 'V'),
+
+ // NV12 single-planar format.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-nv12.html
+ // Maps to PIXEL_FORMAT_NV12, V4L2_PIX_FMT_NV12, VA_FOURCC_NV12.
+ // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
+ NV12 = ComposeFourcc('N', 'V', '1', '2'),
+ // Maps to PIXEL_FORMAT_NV21, V4L2_PIX_FMT_NV21, VA_FOURCC_NV21.
+ // 12bpp with Y plane followed by a 2x2 interleaved VU plane.
+ NV21 = ComposeFourcc('N', 'V', '2', '1'),
+
+ // NV12 multi-planar format.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-nv12m.html
+ // Maps to PIXEL_FORMAT_NV12, V4L2_PIX_FMT_NV12M,
+ NM12 = ComposeFourcc('N', 'M', '1', '2'),
+ // Maps to PIXEL_FORMAT_NV21, V4L2_PIX_FMT_NV21M.
+ NM21 = ComposeFourcc('N', 'M', '2', '1'),
+
+ // YUV422 multi-planar format.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuv422m.html
+ // Maps to PIXEL_FORMAT_I422, V4L2_PIX_FMT_YUV422M
+ // 16bpp YUV planar 1x1 Y, 2x1 UV samples.
+ YM16 = ComposeFourcc('Y', 'M', '1', '6'),
+
+ // V4L2 proprietary format.
+ // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-reserved.html
+ // Maps to V4L2_PIX_FMT_MT21C.
+ // It is used for MT8173 hardware video decoder output and should be
+ // converted by MT8173 image processor for compositor to render.
+ MT21 = ComposeFourcc('M', 'T', '2', '1'),
+ // Maps to V4L2_PIX_FMT_MM21.
+ // It is used for MT8183 hardware video decoder.
+ MM21 = ComposeFourcc('M', 'M', '2', '1'),
+ };
+
+ explicit Fourcc(Fourcc::Value fourcc);
+ Fourcc& operator=(const Fourcc& fourcc);
+ ~Fourcc();
+
+ bool operator==(const Fourcc& rhs) const { return value_ == rhs.value_; }
+
+ // Factory methods:
+
+ // Builds a Fourcc from a given fourcc code. This will return a valid
+ // Fourcc if the argument is part of the |Value| enum, or nullopt otherwise.
+ static base::Optional<Fourcc> FromUint32(uint32_t fourcc);
+
+ // Converts a VideoPixelFormat to Fourcc.
+ // Returns nullopt for invalid input.
+ // Note that a VideoPixelFormat may have two Fourcc counterparts. Caller has
+ // to specify if it is for single-planar or multi-planar format.
+ static base::Optional<Fourcc> FromVideoPixelFormat(
+ VideoPixelFormat pixel_format,
+ bool single_planar = true);
+ // Converts a V4L2PixFmt to Fourcc.
+ // Returns nullopt for invalid input.
+ static base::Optional<Fourcc> FromV4L2PixFmt(uint32_t v4l2_pix_fmt);
+
+ // Value getters:
+ // Returns the VideoPixelFormat counterpart of the value.
+ // Returns PIXEL_FORMAT_UNKNOWN if no mapping is found.
+ VideoPixelFormat ToVideoPixelFormat() const;
+ // Returns the V4L2PixFmt counterpart of the value.
+ // Returns 0 if no mapping is found.
+ uint32_t ToV4L2PixFmt() const;
+
+  // Returns the single-planar Fourcc of the value. If the value is already
+  // single-planar, returns the same Fourcc. Returns nullopt if no mapping
+  // is found.
+ base::Optional<Fourcc> ToSinglePlanar() const;
+
+  // Returns whether |value_| is a multi-planar format.
+ bool IsMultiPlanar() const;
+
+  // Outputs a human-readable fourcc string, e.g. "NV12".
+ std::string ToString() const;
+
+ private:
+ Value value_;
+};
+
+bool operator!=(const Fourcc& lhs, const Fourcc& rhs);
+
+} // namespace media
+
+#endif // FOURCC_H_
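ComposeFourcc() packs the four characters little-endian, the same scheme as the kernel's v4l2_fourcc() macro; that is why FromV4L2PixFmt()/ToV4L2PixFmt() can be plain casts and why the static_asserts in fourcc.cc hold. A worked example:

// 'N'=0x4E, 'V'=0x56, '1'=0x31, '2'=0x32, packed low byte first:
// 0x4E | (0x56 << 8) | (0x31 << 16) | (0x32 << 24) == 0x3231564E,
// which is the value of V4L2_PIX_FMT_NV12.
static_assert(media::ComposeFourcc('N', 'V', '1', '2') == 0x3231564E,
              "fourcc packs little-endian");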
diff --git a/accel/generic_v4l2_device.cc b/accel/generic_v4l2_device.cc
new file mode 100644
index 0000000..8dea028
--- /dev/null
+++ b/accel/generic_v4l2_device.cc
@@ -0,0 +1,352 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 8c9190713ed9
+
+#include "generic_v4l2_device.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/videodev2.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "base/files/scoped_file.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+
+#include "macros.h"
+
+namespace media {
+
+GenericV4L2Device::GenericV4L2Device() {}
+
+GenericV4L2Device::~GenericV4L2Device() {
+ CloseDevice();
+}
+
+int GenericV4L2Device::Ioctl(int request, void* arg) {
+ DCHECK(device_fd_.is_valid());
+ return HANDLE_EINTR(ioctl(device_fd_.get(), request, arg));
+}
+
+bool GenericV4L2Device::Poll(bool poll_device, bool* event_pending) {
+ struct pollfd pollfds[2];
+ nfds_t nfds;
+ int pollfd = -1;
+
+ pollfds[0].fd = device_poll_interrupt_fd_.get();
+ pollfds[0].events = POLLIN | POLLERR;
+ nfds = 1;
+
+ if (poll_device) {
+ DVLOGF(5) << "adding device fd to poll() set";
+ pollfds[nfds].fd = device_fd_.get();
+ pollfds[nfds].events = POLLIN | POLLOUT | POLLERR | POLLPRI;
+ pollfd = nfds;
+ nfds++;
+ }
+
+ if (HANDLE_EINTR(poll(pollfds, nfds, -1)) == -1) {
+ VPLOGF(1) << "poll() failed";
+ return false;
+ }
+ *event_pending = (pollfd != -1 && pollfds[pollfd].revents & POLLPRI);
+ return true;
+}
+
+void* GenericV4L2Device::Mmap(void* addr,
+ unsigned int len,
+ int prot,
+ int flags,
+ unsigned int offset) {
+ DCHECK(device_fd_.is_valid());
+ return mmap(addr, len, prot, flags, device_fd_.get(), offset);
+}
+
+void GenericV4L2Device::Munmap(void* addr, unsigned int len) {
+ munmap(addr, len);
+}
+
+bool GenericV4L2Device::SetDevicePollInterrupt() {
+ DVLOGF(4);
+
+ const uint64_t buf = 1;
+ if (HANDLE_EINTR(write(device_poll_interrupt_fd_.get(), &buf, sizeof(buf))) ==
+ -1) {
+ VPLOGF(1) << "write() failed";
+ return false;
+ }
+ return true;
+}
+
+bool GenericV4L2Device::ClearDevicePollInterrupt() {
+ DVLOGF(5);
+
+ uint64_t buf;
+ if (HANDLE_EINTR(read(device_poll_interrupt_fd_.get(), &buf, sizeof(buf))) ==
+ -1) {
+ if (errno == EAGAIN) {
+ // No interrupt flag set, and we're reading nonblocking. Not an error.
+ return true;
+ } else {
+ VPLOGF(1) << "read() failed";
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GenericV4L2Device::Initialize() {
+ DVLOGF(3);
+ static bool v4l2_functions_initialized = PostSandboxInitialization();
+ if (!v4l2_functions_initialized) {
+ VLOGF(1) << "Failed to initialize LIBV4L2 libs";
+ return false;
+ }
+
+ return true;
+}
+
+bool GenericV4L2Device::Open(Type type, uint32_t v4l2_pixfmt) {
+ DVLOGF(3);
+ std::string path = GetDevicePathFor(type, v4l2_pixfmt);
+
+ if (path.empty()) {
+ VLOGF(1) << "No devices supporting " << FourccToString(v4l2_pixfmt)
+ << " for type: " << static_cast<int>(type);
+ return false;
+ }
+
+ if (!OpenDevicePath(path, type)) {
+ VLOGF(1) << "Failed opening " << path;
+ return false;
+ }
+
+ device_poll_interrupt_fd_.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
+ if (!device_poll_interrupt_fd_.is_valid()) {
+ VLOGF(1) << "Failed creating a poll interrupt fd";
+ return false;
+ }
+
+ return true;
+}
+
+std::vector<base::ScopedFD> GenericV4L2Device::GetDmabufsForV4L2Buffer(
+ int index,
+ size_t num_planes,
+ enum v4l2_buf_type buf_type) {
+ DVLOGF(3);
+ DCHECK(V4L2_TYPE_IS_MULTIPLANAR(buf_type));
+
+ std::vector<base::ScopedFD> dmabuf_fds;
+ for (size_t i = 0; i < num_planes; ++i) {
+ struct v4l2_exportbuffer expbuf;
+ memset(&expbuf, 0, sizeof(expbuf));
+ expbuf.type = buf_type;
+ expbuf.index = index;
+ expbuf.plane = i;
+ expbuf.flags = O_CLOEXEC;
+ if (Ioctl(VIDIOC_EXPBUF, &expbuf) != 0) {
+ dmabuf_fds.clear();
+ break;
+ }
+
+ dmabuf_fds.push_back(base::ScopedFD(expbuf.fd));
+ }
+
+ return dmabuf_fds;
+}
+
+std::vector<uint32_t> GenericV4L2Device::PreferredInputFormat(Type type) {
+ if (type == Type::kEncoder)
+ return {V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_NV12};
+
+ return {};
+}
+
+std::vector<uint32_t> GenericV4L2Device::GetSupportedImageProcessorPixelformats(
+ v4l2_buf_type buf_type) {
+ std::vector<uint32_t> supported_pixelformats;
+
+ Type type = Type::kImageProcessor;
+ const auto& devices = GetDevicesForType(type);
+ for (const auto& device : devices) {
+ if (!OpenDevicePath(device.first, type)) {
+ VLOGF(1) << "Failed opening " << device.first;
+ continue;
+ }
+
+ std::vector<uint32_t> pixelformats =
+ EnumerateSupportedPixelformats(buf_type);
+
+ supported_pixelformats.insert(supported_pixelformats.end(),
+ pixelformats.begin(), pixelformats.end());
+ CloseDevice();
+ }
+
+ return supported_pixelformats;
+}
+
+VideoDecodeAccelerator::SupportedProfiles
+GenericV4L2Device::GetSupportedDecodeProfiles(const size_t num_formats,
+ const uint32_t pixelformats[]) {
+ VideoDecodeAccelerator::SupportedProfiles supported_profiles;
+
+ Type type = Type::kDecoder;
+ const auto& devices = GetDevicesForType(type);
+ for (const auto& device : devices) {
+ if (!OpenDevicePath(device.first, type)) {
+ VLOGF(1) << "Failed opening " << device.first;
+ continue;
+ }
+
+ const auto& profiles =
+ EnumerateSupportedDecodeProfiles(num_formats, pixelformats);
+ supported_profiles.insert(supported_profiles.end(), profiles.begin(),
+ profiles.end());
+ CloseDevice();
+ }
+
+ return supported_profiles;
+}
+
+VideoEncodeAccelerator::SupportedProfiles
+GenericV4L2Device::GetSupportedEncodeProfiles() {
+ VideoEncodeAccelerator::SupportedProfiles supported_profiles;
+
+ Type type = Type::kEncoder;
+ const auto& devices = GetDevicesForType(type);
+ for (const auto& device : devices) {
+ if (!OpenDevicePath(device.first, type)) {
+ VLOGF(1) << "Failed opening " << device.first;
+ continue;
+ }
+
+ const auto& profiles = EnumerateSupportedEncodeProfiles();
+ supported_profiles.insert(supported_profiles.end(), profiles.begin(),
+ profiles.end());
+ CloseDevice();
+ }
+
+ return supported_profiles;
+}
+
+bool GenericV4L2Device::IsImageProcessingSupported() {
+ const auto& devices = GetDevicesForType(Type::kImageProcessor);
+ return !devices.empty();
+}
+
+bool GenericV4L2Device::IsJpegDecodingSupported() {
+ const auto& devices = GetDevicesForType(Type::kJpegDecoder);
+ return !devices.empty();
+}
+
+bool GenericV4L2Device::IsJpegEncodingSupported() {
+ const auto& devices = GetDevicesForType(Type::kJpegEncoder);
+ return !devices.empty();
+}
+
+bool GenericV4L2Device::OpenDevicePath(const std::string& path, Type type) {
+ DCHECK(!device_fd_.is_valid());
+
+ device_fd_.reset(
+ HANDLE_EINTR(open(path.c_str(), O_RDWR | O_NONBLOCK | O_CLOEXEC)));
+ if (!device_fd_.is_valid())
+ return false;
+
+ return true;
+}
+
+void GenericV4L2Device::CloseDevice() {
+ DVLOGF(3);
+ device_fd_.reset();
+}
+
+// static
+bool GenericV4L2Device::PostSandboxInitialization() {
+ return true;
+}
+
+void GenericV4L2Device::EnumerateDevicesForType(Type type) {
+  // Video input/output devices are registered as /dev/videoX in V4L2.
+ static const std::string kVideoDevicePattern = "/dev/video";
+
+ std::string device_pattern;
+ v4l2_buf_type buf_type;
+ switch (type) {
+ case Type::kDecoder:
+ device_pattern = kVideoDevicePattern;
+ buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ break;
+ case Type::kEncoder:
+ device_pattern = kVideoDevicePattern;
+ buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ break;
+ default:
+ LOG(ERROR) << "Only decoder and encoder types are supported!!";
+ return;
+ }
+
+ std::vector<std::string> candidate_paths;
+
+ // TODO(posciak): Remove this legacy unnumbered device once
+ // all platforms are updated to use numbered devices.
+ candidate_paths.push_back(device_pattern);
+
+ // We are sandboxed, so we can't query directory contents to check which
+ // devices are actually available. Try to open the first 10; if not present,
+ // we will just fail to open immediately.
+ for (int i = 0; i < 10; ++i) {
+ candidate_paths.push_back(
+ base::StringPrintf("%s%d", device_pattern.c_str(), i));
+ }
+
+ Devices devices;
+ for (const auto& path : candidate_paths) {
+ if (!OpenDevicePath(path, type))
+ continue;
+
+ const auto& supported_pixelformats =
+ EnumerateSupportedPixelformats(buf_type);
+ if (!supported_pixelformats.empty()) {
+ DVLOGF(3) << "Found device: " << path;
+ devices.push_back(std::make_pair(path, supported_pixelformats));
+ }
+
+ CloseDevice();
+ }
+
+ DCHECK_EQ(devices_by_type_.count(type), 0u);
+ devices_by_type_[type] = devices;
+}
+
+const GenericV4L2Device::Devices& GenericV4L2Device::GetDevicesForType(
+ Type type) {
+ if (devices_by_type_.count(type) == 0)
+ EnumerateDevicesForType(type);
+
+ DCHECK_NE(devices_by_type_.count(type), 0u);
+ return devices_by_type_[type];
+}
+
+std::string GenericV4L2Device::GetDevicePathFor(Type type, uint32_t pixfmt) {
+ const Devices& devices = GetDevicesForType(type);
+
+ for (const auto& device : devices) {
+ if (std::find(device.second.begin(), device.second.end(), pixfmt) !=
+ device.second.end())
+ return device.first;
+ }
+
+ return std::string();
+}
+
+} // namespace media
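As a usage sketch (hypothetical caller code, not part of this patch), the
natural entry point to this lookup is the public Open() declared in the
header below, pairing a device Type with a V4L2 fourcc:

    // Hypothetical caller: open a decoder node that supports H.264.
    scoped_refptr<media::GenericV4L2Device> device =
        new media::GenericV4L2Device();
    if (device->Open(media::V4L2Device::Type::kDecoder, V4L2_PIX_FMT_H264)) {
      // A matching /dev/video* node is now open; Ioctl(), Mmap() and the
      // other V4L2Device operations can be issued against it.
    }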
diff --git a/accel/generic_v4l2_device.h b/accel/generic_v4l2_device.h
new file mode 100644
index 0000000..9567b43
--- /dev/null
+++ b/accel/generic_v4l2_device.h
@@ -0,0 +1,113 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains the implementation of GenericV4L2Device, used on
+// platforms that provide generic V4L2 video codec devices.
+// Note: ported from Chromium commit head: 9a075af92855
+// Note: removed all references to 'USE_LIBV4L2'.
+// Note: removed GL-related functionality.
+
+#ifndef V4L2_GENERIC_V4L2_DEVICE_H_
+#define V4L2_GENERIC_V4L2_DEVICE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <vector>
+
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "v4l2_device.h"
+
+namespace media {
+
+class GenericV4L2Device : public V4L2Device {
+ public:
+ GenericV4L2Device();
+
+ // V4L2Device implementation.
+ bool Open(Type type, uint32_t v4l2_pixfmt) override;
+ int Ioctl(int request, void* arg) override;
+ bool Poll(bool poll_device, bool* event_pending) override;
+ bool SetDevicePollInterrupt() override;
+ bool ClearDevicePollInterrupt() override;
+ void* Mmap(void* addr,
+ unsigned int len,
+ int prot,
+ int flags,
+ unsigned int offset) override;
+ void Munmap(void* addr, unsigned int len) override;
+
+ std::vector<base::ScopedFD> GetDmabufsForV4L2Buffer(
+ int index,
+ size_t num_planes,
+ enum v4l2_buf_type buf_type) override;
+
+ std::vector<uint32_t> PreferredInputFormat(Type type) override;
+
+ std::vector<uint32_t> GetSupportedImageProcessorPixelformats(
+ v4l2_buf_type buf_type) override;
+
+ VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles(
+ const size_t num_formats,
+ const uint32_t pixelformats[]) override;
+
+ VideoEncodeAccelerator::SupportedProfiles GetSupportedEncodeProfiles()
+ override;
+
+ bool IsImageProcessingSupported() override;
+
+ bool IsJpegDecodingSupported() override;
+ bool IsJpegEncodingSupported() override;
+
+ protected:
+ ~GenericV4L2Device() override;
+
+ bool Initialize() override;
+
+ private:
+ // Vector of video device node paths and corresponding pixelformats supported
+ // by each device node.
+ using Devices = std::vector<std::pair<std::string, std::vector<uint32_t>>>;
+
+ // Open device node for |path| as a device of |type|.
+ bool OpenDevicePath(const std::string& path, Type type);
+
+ // Close the currently open device.
+ void CloseDevice();
+
+ // Enumerate all V4L2 devices on the system for |type| and store the results
+ // under devices_by_type_[type].
+ void EnumerateDevicesForType(V4L2Device::Type type);
+
+ // Return device information for all devices of |type| available in the
+ // system. Enumerates and queries devices on first run and caches the results
+ // for subsequent calls.
+ const Devices& GetDevicesForType(V4L2Device::Type type);
+
+ // Return device node path for device of |type| supporting |pixfmt|, or
+ // an empty string if the given combination is not supported by the system.
+ std::string GetDevicePathFor(V4L2Device::Type type, uint32_t pixfmt);
+
+ // Stores information for all devices available on the system
+ // for each device Type.
+ std::map<V4L2Device::Type, Devices> devices_by_type_;
+
+ // The actual device fd.
+ base::ScopedFD device_fd_;
+
+ // eventfd fd to signal device poll thread when its poll() should be
+ // interrupted.
+ base::ScopedFD device_poll_interrupt_fd_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericV4L2Device);
+
+ // Lazily initialize static data after sandbox is enabled. Return false on
+ // init failure.
+ static bool PostSandboxInitialization();
+};
+} // namespace media
+
+#endif // V4L2_GENERIC_V4L2_DEVICE_H_
diff --git a/accel/h264_bit_reader.cc b/accel/h264_bit_reader.cc
new file mode 100644
index 0000000..6713655
--- /dev/null
+++ b/accel/h264_bit_reader.cc
@@ -0,0 +1,123 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "base/logging.h"
+#include "h264_bit_reader.h"
+
+namespace media {
+
+H264BitReader::H264BitReader()
+ : data_(NULL),
+ bytes_left_(0),
+ curr_byte_(0),
+ num_remaining_bits_in_curr_byte_(0),
+ prev_two_bytes_(0),
+ emulation_prevention_bytes_(0) {}
+
+H264BitReader::~H264BitReader() = default;
+
+bool H264BitReader::Initialize(const uint8_t* data, off_t size) {
+ DCHECK(data);
+
+ if (size < 1)
+ return false;
+
+ data_ = data;
+ bytes_left_ = size;
+ num_remaining_bits_in_curr_byte_ = 0;
+ // Initially set to 0xffff to accept all initial two-byte sequences.
+ prev_two_bytes_ = 0xffff;
+ emulation_prevention_bytes_ = 0;
+
+ return true;
+}
+
+bool H264BitReader::UpdateCurrByte() {
+ if (bytes_left_ < 1)
+ return false;
+
+ // Emulation prevention three-byte detection.
+ // If a sequence of 0x000003 is found, skip (ignore) the last byte (0x03).
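+  // For example, the escaped sequence 00 00 03 01 is returned to the caller
+  // as 00 00 01; the 0x03 byte is consumed here and never exposed.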
+ if (*data_ == 0x03 && (prev_two_bytes_ & 0xffff) == 0) {
+ // Detected 0x000003, skip last byte.
+ ++data_;
+ --bytes_left_;
+ ++emulation_prevention_bytes_;
+ // Need another full three bytes before we can detect the sequence again.
+ prev_two_bytes_ = 0xffff;
+
+ if (bytes_left_ < 1)
+ return false;
+ }
+
+ // Load a new byte and advance pointers.
+ curr_byte_ = *data_++ & 0xff;
+ --bytes_left_;
+ num_remaining_bits_in_curr_byte_ = 8;
+
+ prev_two_bytes_ = ((prev_two_bytes_ & 0xff) << 8) | curr_byte_;
+
+ return true;
+}
+
+// Read |num_bits| (1 to 31 inclusive) from the stream and return them
+// in |out|, with first bit in the stream as MSB in |out| at position
+// (|num_bits| - 1).
+bool H264BitReader::ReadBits(int num_bits, int* out) {
+ int bits_left = num_bits;
+ *out = 0;
+ DCHECK(num_bits <= 31);
+
+ while (num_remaining_bits_in_curr_byte_ < bits_left) {
+ // Take all that's left in current byte, shift to make space for the rest.
+ *out |= (curr_byte_ << (bits_left - num_remaining_bits_in_curr_byte_));
+ bits_left -= num_remaining_bits_in_curr_byte_;
+
+ if (!UpdateCurrByte())
+ return false;
+ }
+
+ *out |= (curr_byte_ >> (num_remaining_bits_in_curr_byte_ - bits_left));
+ *out &= ((1u << num_bits) - 1u);
+ num_remaining_bits_in_curr_byte_ -= bits_left;
+
+ return true;
+}
+
+off_t H264BitReader::NumBitsLeft() {
+ return (num_remaining_bits_in_curr_byte_ + bytes_left_ * 8);
+}
+
+bool H264BitReader::HasMoreRBSPData() {
+  // Make sure we have more bits: if we are at 0 bits in the current byte
+  // and updating the current byte fails, we don't have more data anyway.
+ if (num_remaining_bits_in_curr_byte_ == 0 && !UpdateCurrByte())
+ return false;
+
+ // If there is no more RBSP data, then |curr_byte_| contains the stop bit and
+ // zero padding. Check to see if there is other data instead.
+ // (We don't actually check for the stop bit itself, instead treating the
+ // invalid case of all trailing zeros identically).
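+  // For example, with curr_byte_ == 0x80 and 8 bits remaining, the low seven
+  // bits are all zero, so only the trailing bytes can still carry RBSP data.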
+ if ((curr_byte_ & ((1 << (num_remaining_bits_in_curr_byte_ - 1)) - 1)) != 0)
+ return true;
+
+ // While the spec disallows it (7.4.1: "The last byte of the NAL unit shall
+ // not be equal to 0x00"), some streams have trailing null bytes anyway. We
+ // don't handle emulation prevention sequences because HasMoreRBSPData() is
+ // not used when parsing slices (where cabac_zero_word elements are legal).
+ for (off_t i = 0; i < bytes_left_; i++) {
+ if (data_[i] != 0)
+ return true;
+ }
+
+ bytes_left_ = 0;
+ return false;
+}
+
+size_t H264BitReader::NumEmulationPreventionBytesRead() {
+ return emulation_prevention_bytes_;
+}
+
+} // namespace media
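A short sketch of the reader in use (hypothetical input bytes, not part of
this patch), illustrating the MSB-first semantics of ReadBits():

    // 0xb4 == 0b10110100: a 1-bit flag followed by the 4-bit value 0b0110.
    const uint8_t kRbsp[] = {0xb4, 0x00, 0x80};
    media::H264BitReader reader;
    int flag, value;
    if (reader.Initialize(kRbsp, sizeof(kRbsp)) &&
        reader.ReadBits(1, &flag) &&   // flag == 1
        reader.ReadBits(4, &value)) {  // value == 6
      // Three bits remain in the first byte, so NumBitsLeft() == 19.
    }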
diff --git a/accel/h264_bit_reader.h b/accel/h264_bit_reader.h
new file mode 100644
index 0000000..aa162ce
--- /dev/null
+++ b/accel/h264_bit_reader.h
@@ -0,0 +1,81 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of an H264 Annex-B video stream parser.
+// Note: ported from Chromium commit head: 77be7ae
+
+#ifndef H264_BIT_READER_H_
+#define H264_BIT_READER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "base/macros.h"
+
+namespace media {
+
+// A class to provide bit-granularity reading of H.264 streams.
+// This is not a generic bit reader class, as it takes into account
+// H.264 stream-specific constraints, such as skipping emulation-prevention
+// bytes and stop bits. See spec for more details.
+class H264BitReader {
+ public:
+ H264BitReader();
+ ~H264BitReader();
+
+ // Initialize the reader to start reading at |data|, |size| being size
+ // of |data| in bytes.
+  // Return false on insufficient size of stream.
+ // TODO(posciak,fischman): consider replacing Initialize() with
+ // heap-allocating and creating bit readers on demand instead.
+ bool Initialize(const uint8_t* data, off_t size);
+
+  // Read the next |num_bits| bits from the stream and return them in |*out|,
+  // with the first bit from the stream as the MSB of |*out|, at position
+  // (|num_bits| - 1). |num_bits| may be 1-31, inclusive.
+ // Return false if the given number of bits cannot be read (not enough
+ // bits in the stream), true otherwise.
+ bool ReadBits(int num_bits, int* out);
+
+ // Return the number of bits left in the stream.
+ off_t NumBitsLeft();
+
+ // See the definition of more_rbsp_data() in spec.
+ bool HasMoreRBSPData();
+
+ // Return the number of emulation prevention bytes already read.
+ size_t NumEmulationPreventionBytesRead();
+
+ private:
+ // Advance to the next byte, loading it into curr_byte_.
+ // Return false on end of stream.
+ bool UpdateCurrByte();
+
+ // Pointer to the next unread (not in curr_byte_) byte in the stream.
+ const uint8_t* data_;
+
+ // Bytes left in the stream (without the curr_byte_).
+ off_t bytes_left_;
+
+ // Contents of the current byte; first unread bit starting at position
+ // 8 - num_remaining_bits_in_curr_byte_ from MSB.
+ int curr_byte_;
+
+  // Number of bits remaining in curr_byte_.
+ int num_remaining_bits_in_curr_byte_;
+
+ // Used in emulation prevention three byte detection (see spec).
+ // Initially set to 0xffff to accept all initial two-byte sequences.
+ int prev_two_bytes_;
+
+  // Number of emulation prevention bytes (0x000003) encountered so far.
+ size_t emulation_prevention_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264BitReader);
+};
+
+} // namespace media
+
+#endif // H264_BIT_READER_H_
diff --git a/accel/h264_decoder.cc b/accel/h264_decoder.cc
new file mode 100644
index 0000000..abaaac5
--- /dev/null
+++ b/accel/h264_decoder.cc
@@ -0,0 +1,1459 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: c3bd64c
+
+#include <algorithm>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/optional.h"
+#include "base/stl_util.h"
+#include "h264_decoder.h"
+
+namespace media {
+
+H264Decoder::H264Accelerator::H264Accelerator() = default;
+
+H264Decoder::H264Accelerator::~H264Accelerator() = default;
+
+H264Decoder::H264Decoder(H264Accelerator* accelerator)
+ : state_(kNeedStreamMetadata),
+ max_frame_num_(0),
+ max_pic_num_(0),
+ max_long_term_frame_idx_(0),
+ max_num_reorder_frames_(0),
+ accelerator_(accelerator) {
+ DCHECK(accelerator_);
+ Reset();
+}
+
+H264Decoder::~H264Decoder() = default;
+
+void H264Decoder::Reset() {
+ curr_pic_ = nullptr;
+ curr_nalu_ = nullptr;
+ curr_slice_hdr_ = nullptr;
+ curr_sps_id_ = -1;
+ curr_pps_id_ = -1;
+
+ prev_frame_num_ = -1;
+ prev_ref_frame_num_ = -1;
+ prev_frame_num_offset_ = -1;
+ prev_has_memmgmnt5_ = false;
+
+ prev_ref_has_memmgmnt5_ = false;
+ prev_ref_top_field_order_cnt_ = -1;
+ prev_ref_pic_order_cnt_msb_ = -1;
+ prev_ref_pic_order_cnt_lsb_ = -1;
+ prev_ref_field_ = H264Picture::FIELD_NONE;
+
+ ref_pic_list_p0_.clear();
+ ref_pic_list_b0_.clear();
+ ref_pic_list_b1_.clear();
+ dpb_.Clear();
+ parser_.Reset();
+ accelerator_->Reset();
+ last_output_poc_ = std::numeric_limits<int>::min();
+
+ // If we are in kDecoding, we can resume without processing an SPS.
+ if (state_ == kDecoding)
+ state_ = kAfterReset;
+}
+
+void H264Decoder::PrepareRefPicLists(const H264SliceHeader* slice_hdr) {
+ ConstructReferencePicListsP(slice_hdr);
+ ConstructReferencePicListsB(slice_hdr);
+}
+
+bool H264Decoder::ModifyReferencePicLists(const H264SliceHeader* slice_hdr,
+ H264Picture::Vector* ref_pic_list0,
+ H264Picture::Vector* ref_pic_list1) {
+ ref_pic_list0->clear();
+ ref_pic_list1->clear();
+
+  // Fill the reference picture lists for P/SP and B slices.
+ if (slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) {
+ *ref_pic_list0 = ref_pic_list_p0_;
+ return ModifyReferencePicList(slice_hdr, 0, ref_pic_list0);
+ } else if (slice_hdr->IsBSlice()) {
+ *ref_pic_list0 = ref_pic_list_b0_;
+ *ref_pic_list1 = ref_pic_list_b1_;
+ return ModifyReferencePicList(slice_hdr, 0, ref_pic_list0) &&
+ ModifyReferencePicList(slice_hdr, 1, ref_pic_list1);
+ }
+
+ return true;
+}
+
+bool H264Decoder::DecodePicture() {
+ DCHECK(curr_pic_.get());
+
+ DVLOG(4) << "Decoding POC " << curr_pic_->pic_order_cnt;
+ return accelerator_->SubmitDecode(curr_pic_);
+}
+
+bool H264Decoder::InitNonexistingPicture(scoped_refptr<H264Picture> pic,
+ int frame_num) {
+ pic->nonexisting = true;
+ pic->nal_ref_idc = 1;
+ pic->frame_num = pic->pic_num = frame_num;
+ pic->adaptive_ref_pic_marking_mode_flag = false;
+ pic->ref = true;
+ pic->long_term_reference_flag = false;
+ pic->field = H264Picture::FIELD_NONE;
+
+ return CalculatePicOrderCounts(pic);
+}
+
+bool H264Decoder::InitCurrPicture(const H264SliceHeader* slice_hdr) {
+ DCHECK(curr_pic_.get());
+
+ curr_pic_->idr = slice_hdr->idr_pic_flag;
+ if (curr_pic_->idr)
+ curr_pic_->idr_pic_id = slice_hdr->idr_pic_id;
+
+ if (slice_hdr->field_pic_flag) {
+ curr_pic_->field = slice_hdr->bottom_field_flag ? H264Picture::FIELD_BOTTOM
+ : H264Picture::FIELD_TOP;
+ } else {
+ curr_pic_->field = H264Picture::FIELD_NONE;
+ }
+
+ if (curr_pic_->field != H264Picture::FIELD_NONE) {
+ DVLOG(1) << "Interlaced video not supported.";
+ return false;
+ }
+
+ curr_pic_->nal_ref_idc = slice_hdr->nal_ref_idc;
+ curr_pic_->ref = slice_hdr->nal_ref_idc != 0;
+ // This assumes non-interlaced stream.
+ curr_pic_->frame_num = curr_pic_->pic_num = slice_hdr->frame_num;
+
+ DCHECK_NE(curr_sps_id_, -1);
+ const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+ if (!sps)
+ return false;
+
+ curr_pic_->pic_order_cnt_type = sps->pic_order_cnt_type;
+ switch (curr_pic_->pic_order_cnt_type) {
+ case 0:
+ curr_pic_->pic_order_cnt_lsb = slice_hdr->pic_order_cnt_lsb;
+ curr_pic_->delta_pic_order_cnt_bottom =
+ slice_hdr->delta_pic_order_cnt_bottom;
+ break;
+
+ case 1:
+ curr_pic_->delta_pic_order_cnt0 = slice_hdr->delta_pic_order_cnt0;
+ curr_pic_->delta_pic_order_cnt1 = slice_hdr->delta_pic_order_cnt1;
+ break;
+
+ case 2:
+ break;
+
+ default:
+ NOTREACHED();
+ return false;
+ }
+
+ if (!CalculatePicOrderCounts(curr_pic_))
+ return false;
+
+ curr_pic_->long_term_reference_flag = slice_hdr->long_term_reference_flag;
+ curr_pic_->adaptive_ref_pic_marking_mode_flag =
+ slice_hdr->adaptive_ref_pic_marking_mode_flag;
+
+ // If the slice header indicates we will have to perform reference marking
+ // process after this picture is decoded, store required data for that
+ // purpose.
+ if (slice_hdr->adaptive_ref_pic_marking_mode_flag) {
+ static_assert(sizeof(curr_pic_->ref_pic_marking) ==
+ sizeof(slice_hdr->ref_pic_marking),
+ "Array sizes of ref pic marking do not match.");
+ memcpy(curr_pic_->ref_pic_marking, slice_hdr->ref_pic_marking,
+ sizeof(curr_pic_->ref_pic_marking));
+ }
+
+ curr_pic_->visible_rect = visible_rect_;
+
+ return true;
+}
+
+bool H264Decoder::CalculatePicOrderCounts(scoped_refptr<H264Picture> pic) {
+ const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+ if (!sps)
+ return false;
+
+ switch (pic->pic_order_cnt_type) {
+ case 0: {
+ // See spec 8.2.1.1.
+ int prev_pic_order_cnt_msb, prev_pic_order_cnt_lsb;
+
+ if (pic->idr) {
+ prev_pic_order_cnt_msb = prev_pic_order_cnt_lsb = 0;
+ } else {
+ if (prev_ref_has_memmgmnt5_) {
+ if (prev_ref_field_ != H264Picture::FIELD_BOTTOM) {
+ prev_pic_order_cnt_msb = 0;
+ prev_pic_order_cnt_lsb = prev_ref_top_field_order_cnt_;
+ } else {
+ prev_pic_order_cnt_msb = 0;
+ prev_pic_order_cnt_lsb = 0;
+ }
+ } else {
+ prev_pic_order_cnt_msb = prev_ref_pic_order_cnt_msb_;
+ prev_pic_order_cnt_lsb = prev_ref_pic_order_cnt_lsb_;
+ }
+ }
+
+ int max_pic_order_cnt_lsb =
+ 1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+ DCHECK_NE(max_pic_order_cnt_lsb, 0);
+ if ((pic->pic_order_cnt_lsb < prev_pic_order_cnt_lsb) &&
+ (prev_pic_order_cnt_lsb - pic->pic_order_cnt_lsb >=
+ max_pic_order_cnt_lsb / 2)) {
+ pic->pic_order_cnt_msb = prev_pic_order_cnt_msb + max_pic_order_cnt_lsb;
+ } else if ((pic->pic_order_cnt_lsb > prev_pic_order_cnt_lsb) &&
+ (pic->pic_order_cnt_lsb - prev_pic_order_cnt_lsb >
+ max_pic_order_cnt_lsb / 2)) {
+ pic->pic_order_cnt_msb = prev_pic_order_cnt_msb - max_pic_order_cnt_lsb;
+ } else {
+ pic->pic_order_cnt_msb = prev_pic_order_cnt_msb;
+ }
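+      // For example, with log2_max_pic_order_cnt_lsb_minus4 = 0 the lsb
+      // wraps at 16: moving from prev lsb 14 to lsb 2 (a backward jump of
+      // 12 >= 8) adds 16 to the msb, giving POC 18 rather than 2.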
+
+ if (pic->field != H264Picture::FIELD_BOTTOM) {
+ pic->top_field_order_cnt =
+ pic->pic_order_cnt_msb + pic->pic_order_cnt_lsb;
+ }
+
+ if (pic->field != H264Picture::FIELD_TOP) {
+ if (pic->field == H264Picture::FIELD_NONE) {
+ pic->bottom_field_order_cnt =
+ pic->top_field_order_cnt + pic->delta_pic_order_cnt_bottom;
+ } else {
+ pic->bottom_field_order_cnt =
+ pic->pic_order_cnt_msb + pic->pic_order_cnt_lsb;
+ }
+ }
+ break;
+ }
+
+ case 1: {
+ // See spec 8.2.1.2.
+ if (prev_has_memmgmnt5_)
+ prev_frame_num_offset_ = 0;
+
+ if (pic->idr)
+ pic->frame_num_offset = 0;
+ else if (prev_frame_num_ > pic->frame_num)
+ pic->frame_num_offset = prev_frame_num_offset_ + max_frame_num_;
+ else
+ pic->frame_num_offset = prev_frame_num_offset_;
+
+ int abs_frame_num = 0;
+ if (sps->num_ref_frames_in_pic_order_cnt_cycle != 0)
+ abs_frame_num = pic->frame_num_offset + pic->frame_num;
+ else
+ abs_frame_num = 0;
+
+ if (pic->nal_ref_idc == 0 && abs_frame_num > 0)
+ --abs_frame_num;
+
+ int expected_pic_order_cnt = 0;
+ if (abs_frame_num > 0) {
+ if (sps->num_ref_frames_in_pic_order_cnt_cycle == 0) {
+ DVLOG(1) << "Invalid num_ref_frames_in_pic_order_cnt_cycle "
+ << "in stream";
+ return false;
+ }
+
+ int pic_order_cnt_cycle_cnt =
+ (abs_frame_num - 1) / sps->num_ref_frames_in_pic_order_cnt_cycle;
+ int frame_num_in_pic_order_cnt_cycle =
+ (abs_frame_num - 1) % sps->num_ref_frames_in_pic_order_cnt_cycle;
+
+ expected_pic_order_cnt = pic_order_cnt_cycle_cnt *
+ sps->expected_delta_per_pic_order_cnt_cycle;
+ // frame_num_in_pic_order_cnt_cycle is verified < 255 in parser
+ for (int i = 0; i <= frame_num_in_pic_order_cnt_cycle; ++i)
+ expected_pic_order_cnt += sps->offset_for_ref_frame[i];
+ }
+
+ if (!pic->nal_ref_idc)
+ expected_pic_order_cnt += sps->offset_for_non_ref_pic;
+
+ if (pic->field == H264Picture::FIELD_NONE) {
+ pic->top_field_order_cnt =
+ expected_pic_order_cnt + pic->delta_pic_order_cnt0;
+ pic->bottom_field_order_cnt = pic->top_field_order_cnt +
+ sps->offset_for_top_to_bottom_field +
+ pic->delta_pic_order_cnt1;
+ } else if (pic->field != H264Picture::FIELD_BOTTOM) {
+ pic->top_field_order_cnt =
+ expected_pic_order_cnt + pic->delta_pic_order_cnt0;
+ } else {
+ pic->bottom_field_order_cnt = expected_pic_order_cnt +
+ sps->offset_for_top_to_bottom_field +
+ pic->delta_pic_order_cnt0;
+ }
+ break;
+ }
+
+ case 2: {
+ // See spec 8.2.1.3.
+ if (prev_has_memmgmnt5_)
+ prev_frame_num_offset_ = 0;
+
+ if (pic->idr)
+ pic->frame_num_offset = 0;
+ else if (prev_frame_num_ > pic->frame_num)
+ pic->frame_num_offset = prev_frame_num_offset_ + max_frame_num_;
+ else
+ pic->frame_num_offset = prev_frame_num_offset_;
+
+ int temp_pic_order_cnt;
+ if (pic->idr) {
+ temp_pic_order_cnt = 0;
+ } else if (!pic->nal_ref_idc) {
+ temp_pic_order_cnt = 2 * (pic->frame_num_offset + pic->frame_num) - 1;
+ } else {
+ temp_pic_order_cnt = 2 * (pic->frame_num_offset + pic->frame_num);
+ }
+
+ if (pic->field == H264Picture::FIELD_NONE) {
+ pic->top_field_order_cnt = temp_pic_order_cnt;
+ pic->bottom_field_order_cnt = temp_pic_order_cnt;
+ } else if (pic->field == H264Picture::FIELD_BOTTOM) {
+ pic->bottom_field_order_cnt = temp_pic_order_cnt;
+ } else {
+ pic->top_field_order_cnt = temp_pic_order_cnt;
+ }
+ break;
+ }
+
+ default:
+ DVLOG(1) << "Invalid pic_order_cnt_type: " << sps->pic_order_cnt_type;
+ return false;
+ }
+
+ switch (pic->field) {
+ case H264Picture::FIELD_NONE:
+ pic->pic_order_cnt =
+ std::min(pic->top_field_order_cnt, pic->bottom_field_order_cnt);
+ break;
+ case H264Picture::FIELD_TOP:
+ pic->pic_order_cnt = pic->top_field_order_cnt;
+ break;
+ case H264Picture::FIELD_BOTTOM:
+ pic->pic_order_cnt = pic->bottom_field_order_cnt;
+ break;
+ }
+
+ return true;
+}
+
+void H264Decoder::UpdatePicNums(int frame_num) {
+ for (auto& pic : dpb_) {
+ if (!pic->ref)
+ continue;
+
+ // 8.2.4.1. Assumes non-interlaced stream.
+ DCHECK_EQ(pic->field, H264Picture::FIELD_NONE);
+ if (pic->long_term) {
+ pic->long_term_pic_num = pic->long_term_frame_idx;
+ } else {
+ if (pic->frame_num > frame_num)
+ pic->frame_num_wrap = pic->frame_num - max_frame_num_;
+ else
+ pic->frame_num_wrap = pic->frame_num;
+
+ pic->pic_num = pic->frame_num_wrap;
+ }
+ }
+}
+
+struct PicNumDescCompare {
+ bool operator()(const scoped_refptr<H264Picture>& a,
+ const scoped_refptr<H264Picture>& b) const {
+ return a->pic_num > b->pic_num;
+ }
+};
+
+struct LongTermPicNumAscCompare {
+ bool operator()(const scoped_refptr<H264Picture>& a,
+ const scoped_refptr<H264Picture>& b) const {
+ return a->long_term_pic_num < b->long_term_pic_num;
+ }
+};
+
+void H264Decoder::ConstructReferencePicListsP(
+ const H264SliceHeader* slice_hdr) {
+ // RefPicList0 (8.2.4.2.1) [[1] [2]], where:
+ // [1] shortterm ref pics sorted by descending pic_num,
+ // [2] longterm ref pics by ascending long_term_pic_num.
+ ref_pic_list_p0_.clear();
+
+ // First get the short ref pics...
+ dpb_.GetShortTermRefPicsAppending(&ref_pic_list_p0_);
+ size_t num_short_refs = ref_pic_list_p0_.size();
+
+ // and sort them to get [1].
+ std::sort(ref_pic_list_p0_.begin(), ref_pic_list_p0_.end(),
+ PicNumDescCompare());
+
+ // Now get long term pics and sort them by long_term_pic_num to get [2].
+ dpb_.GetLongTermRefPicsAppending(&ref_pic_list_p0_);
+ std::sort(ref_pic_list_p0_.begin() + num_short_refs, ref_pic_list_p0_.end(),
+ LongTermPicNumAscCompare());
+}
+
+struct POCAscCompare {
+ bool operator()(const scoped_refptr<H264Picture>& a,
+ const scoped_refptr<H264Picture>& b) const {
+ return a->pic_order_cnt < b->pic_order_cnt;
+ }
+};
+
+struct POCDescCompare {
+ bool operator()(const scoped_refptr<H264Picture>& a,
+ const scoped_refptr<H264Picture>& b) const {
+ return a->pic_order_cnt > b->pic_order_cnt;
+ }
+};
+
+void H264Decoder::ConstructReferencePicListsB(
+ const H264SliceHeader* slice_hdr) {
+ // RefPicList0 (8.2.4.2.3) [[1] [2] [3]], where:
+ // [1] shortterm ref pics with POC < curr_pic's POC sorted by descending POC,
+ // [2] shortterm ref pics with POC > curr_pic's POC by ascending POC,
+ // [3] longterm ref pics by ascending long_term_pic_num.
+ ref_pic_list_b0_.clear();
+ ref_pic_list_b1_.clear();
+ dpb_.GetShortTermRefPicsAppending(&ref_pic_list_b0_);
+ size_t num_short_refs = ref_pic_list_b0_.size();
+
+  // First sort ascending; this will put [1] in the right place and finish [2].
+ std::sort(ref_pic_list_b0_.begin(), ref_pic_list_b0_.end(), POCAscCompare());
+
+ // Find first with POC > curr_pic's POC to get first element in [2]...
+ H264Picture::Vector::iterator iter;
+ iter = std::upper_bound(ref_pic_list_b0_.begin(), ref_pic_list_b0_.end(),
+ curr_pic_.get(), POCAscCompare());
+
+ // and sort [1] descending, thus finishing sequence [1] [2].
+ std::sort(ref_pic_list_b0_.begin(), iter, POCDescCompare());
+
+ // Now add [3] and sort by ascending long_term_pic_num.
+ dpb_.GetLongTermRefPicsAppending(&ref_pic_list_b0_);
+ std::sort(ref_pic_list_b0_.begin() + num_short_refs, ref_pic_list_b0_.end(),
+ LongTermPicNumAscCompare());
+
+ // RefPicList1 (8.2.4.2.4) [[1] [2] [3]], where:
+ // [1] shortterm ref pics with POC > curr_pic's POC sorted by ascending POC,
+ // [2] shortterm ref pics with POC < curr_pic's POC by descending POC,
+ // [3] longterm ref pics by ascending long_term_pic_num.
+
+ dpb_.GetShortTermRefPicsAppending(&ref_pic_list_b1_);
+ num_short_refs = ref_pic_list_b1_.size();
+
+ // First sort by descending POC.
+ std::sort(ref_pic_list_b1_.begin(), ref_pic_list_b1_.end(), POCDescCompare());
+
+ // Find first with POC < curr_pic's POC to get first element in [2]...
+ iter = std::upper_bound(ref_pic_list_b1_.begin(), ref_pic_list_b1_.end(),
+ curr_pic_.get(), POCDescCompare());
+
+ // and sort [1] ascending.
+ std::sort(ref_pic_list_b1_.begin(), iter, POCAscCompare());
+
+  // Now add [3] and sort by ascending long_term_pic_num.
+  dpb_.GetLongTermRefPicsAppending(&ref_pic_list_b1_);
+ std::sort(ref_pic_list_b1_.begin() + num_short_refs, ref_pic_list_b1_.end(),
+ LongTermPicNumAscCompare());
+
+ // If lists identical, swap first two entries in RefPicList1 (spec 8.2.4.2.3)
+ if (ref_pic_list_b1_.size() > 1 &&
+ std::equal(ref_pic_list_b0_.begin(), ref_pic_list_b0_.end(),
+ ref_pic_list_b1_.begin()))
+ std::swap(ref_pic_list_b1_[0], ref_pic_list_b1_[1]);
+}
+
+// See 8.2.4
+int H264Decoder::PicNumF(const scoped_refptr<H264Picture>& pic) {
+ if (!pic)
+ return -1;
+
+ if (!pic->long_term)
+ return pic->pic_num;
+ else
+ return max_pic_num_;
+}
+
+// See 8.2.4
+int H264Decoder::LongTermPicNumF(const scoped_refptr<H264Picture>& pic) {
+ if (pic->ref && pic->long_term)
+ return pic->long_term_pic_num;
+ else
+ return 2 * (max_long_term_frame_idx_ + 1);
+}
+
+// Shift elements of |v| in the range |from| to |to|, inclusive, one
+// position to the right, and insert |pic| at |from|.
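+// For example, with |v| = {A, B, C}, |from| = 0 and |to| = 2, the result is
+// {pic, A, B, C}.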
+static void ShiftRightAndInsert(H264Picture::Vector* v,
+ int from,
+ int to,
+ const scoped_refptr<H264Picture>& pic) {
+ // Security checks, do not disable in Debug mode.
+ CHECK(from <= to);
+ CHECK(to <= std::numeric_limits<int>::max() - 2);
+ // Additional checks. Debug mode ok.
+ DCHECK(v);
+ DCHECK(pic);
+ DCHECK((to + 1 == static_cast<int>(v->size())) ||
+ (to + 2 == static_cast<int>(v->size())));
+
+ v->resize(to + 2);
+
+ for (int i = to + 1; i > from; --i)
+ (*v)[i] = (*v)[i - 1];
+
+ (*v)[from] = pic;
+}
+
+bool H264Decoder::ModifyReferencePicList(const H264SliceHeader* slice_hdr,
+ int list,
+ H264Picture::Vector* ref_pic_listx) {
+ bool ref_pic_list_modification_flag_lX;
+ int num_ref_idx_lX_active_minus1;
+ const H264ModificationOfPicNum* list_mod;
+
+ // This can process either ref_pic_list0 or ref_pic_list1, depending on
+ // the list argument. Set up pointers to proper list to be processed here.
+ if (list == 0) {
+ ref_pic_list_modification_flag_lX =
+ slice_hdr->ref_pic_list_modification_flag_l0;
+ num_ref_idx_lX_active_minus1 = slice_hdr->num_ref_idx_l0_active_minus1;
+ list_mod = slice_hdr->ref_list_l0_modifications;
+ } else {
+ ref_pic_list_modification_flag_lX =
+ slice_hdr->ref_pic_list_modification_flag_l1;
+ num_ref_idx_lX_active_minus1 = slice_hdr->num_ref_idx_l1_active_minus1;
+ list_mod = slice_hdr->ref_list_l1_modifications;
+ }
+
+ // Resize the list to the size requested in the slice header.
+ // Note that per 8.2.4.2 it's possible for num_ref_idx_lX_active_minus1 to
+ // indicate there should be more ref pics on list than we constructed.
+ // Those superfluous ones should be treated as non-reference and will be
+ // initialized to nullptr, which must be handled by clients.
+ DCHECK_GE(num_ref_idx_lX_active_minus1, 0);
+ ref_pic_listx->resize(num_ref_idx_lX_active_minus1 + 1);
+
+ if (!ref_pic_list_modification_flag_lX)
+ return true;
+
+ // Spec 8.2.4.3:
+ // Reorder pictures on the list in a way specified in the stream.
+ int pic_num_lx_pred = curr_pic_->pic_num;
+ int ref_idx_lx = 0;
+ int pic_num_lx_no_wrap;
+ int pic_num_lx;
+ bool done = false;
+ scoped_refptr<H264Picture> pic;
+ for (int i = 0; i < H264SliceHeader::kRefListModSize && !done; ++i) {
+ switch (list_mod->modification_of_pic_nums_idc) {
+ case 0:
+ case 1:
+ // Modify short reference picture position.
+ if (list_mod->modification_of_pic_nums_idc == 0) {
+ // Subtract given value from predicted PicNum.
+ pic_num_lx_no_wrap =
+ pic_num_lx_pred -
+ (static_cast<int>(list_mod->abs_diff_pic_num_minus1) + 1);
+ // Wrap around max_pic_num_ if it becomes < 0 as result
+ // of subtraction.
+ if (pic_num_lx_no_wrap < 0)
+ pic_num_lx_no_wrap += max_pic_num_;
+ } else {
+ // Add given value to predicted PicNum.
+ pic_num_lx_no_wrap =
+ pic_num_lx_pred +
+ (static_cast<int>(list_mod->abs_diff_pic_num_minus1) + 1);
+ // Wrap around max_pic_num_ if it becomes >= max_pic_num_ as result
+ // of the addition.
+ if (pic_num_lx_no_wrap >= max_pic_num_)
+ pic_num_lx_no_wrap -= max_pic_num_;
+ }
+
+ // For use in next iteration.
+ pic_num_lx_pred = pic_num_lx_no_wrap;
+
+ if (pic_num_lx_no_wrap > curr_pic_->pic_num)
+ pic_num_lx = pic_num_lx_no_wrap - max_pic_num_;
+ else
+ pic_num_lx = pic_num_lx_no_wrap;
+
+ DCHECK_LT(num_ref_idx_lX_active_minus1 + 1,
+ H264SliceHeader::kRefListModSize);
+ pic = dpb_.GetShortRefPicByPicNum(pic_num_lx);
+ if (!pic) {
+ DVLOG(1) << "Malformed stream, no pic num " << pic_num_lx;
+ return false;
+ }
+ ShiftRightAndInsert(ref_pic_listx, ref_idx_lx,
+ num_ref_idx_lX_active_minus1, pic);
+ ref_idx_lx++;
+
+ for (int src = ref_idx_lx, dst = ref_idx_lx;
+ src <= num_ref_idx_lX_active_minus1 + 1; ++src) {
+ if (PicNumF((*ref_pic_listx)[src]) != pic_num_lx)
+ (*ref_pic_listx)[dst++] = (*ref_pic_listx)[src];
+ }
+ break;
+
+ case 2:
+ // Modify long term reference picture position.
+ DCHECK_LT(num_ref_idx_lX_active_minus1 + 1,
+ H264SliceHeader::kRefListModSize);
+ pic = dpb_.GetLongRefPicByLongTermPicNum(list_mod->long_term_pic_num);
+ if (!pic) {
+ DVLOG(1) << "Malformed stream, no pic num "
+ << list_mod->long_term_pic_num;
+ return false;
+ }
+ ShiftRightAndInsert(ref_pic_listx, ref_idx_lx,
+ num_ref_idx_lX_active_minus1, pic);
+ ref_idx_lx++;
+
+ for (int src = ref_idx_lx, dst = ref_idx_lx;
+ src <= num_ref_idx_lX_active_minus1 + 1; ++src) {
+ if (LongTermPicNumF((*ref_pic_listx)[src]) !=
+ static_cast<int>(list_mod->long_term_pic_num))
+ (*ref_pic_listx)[dst++] = (*ref_pic_listx)[src];
+ }
+ break;
+
+ case 3:
+ // End of modification list.
+ done = true;
+ break;
+
+ default:
+ // May be recoverable.
+ DVLOG(1) << "Invalid modification_of_pic_nums_idc="
+ << list_mod->modification_of_pic_nums_idc
+ << " in position " << i;
+ break;
+ }
+
+ ++list_mod;
+ }
+
+ // Per NOTE 2 in 8.2.4.3.2, the ref_pic_listx size in the above loop is
+ // temporarily made one element longer than the required final list.
+ // Resize the list back to its required size.
+ ref_pic_listx->resize(num_ref_idx_lX_active_minus1 + 1);
+
+ return true;
+}
+
+void H264Decoder::OutputPic(scoped_refptr<H264Picture> pic) {
+ DCHECK(!pic->outputted);
+ pic->outputted = true;
+
+ if (pic->nonexisting) {
+ DVLOG(4) << "Skipping output, non-existing frame_num: " << pic->frame_num;
+ return;
+ }
+
+ DVLOG_IF(1, pic->pic_order_cnt < last_output_poc_)
+ << "Outputting out of order, likely a broken stream: "
+ << last_output_poc_ << " -> " << pic->pic_order_cnt;
+ last_output_poc_ = pic->pic_order_cnt;
+
+ DVLOG(4) << "Posting output task for POC: " << pic->pic_order_cnt;
+ accelerator_->OutputPicture(pic);
+}
+
+void H264Decoder::ClearDPB() {
+ // Clear DPB contents, marking the pictures as unused first.
+ dpb_.Clear();
+ last_output_poc_ = std::numeric_limits<int>::min();
+}
+
+bool H264Decoder::OutputAllRemainingPics() {
+ // Output all pictures that are waiting to be outputted.
+ FinishPrevFrameIfPresent();
+ H264Picture::Vector to_output;
+ dpb_.GetNotOutputtedPicsAppending(&to_output);
+ // Sort them by ascending POC to output in order.
+ std::sort(to_output.begin(), to_output.end(), POCAscCompare());
+
+ for (auto& pic : to_output)
+ OutputPic(pic);
+
+ return true;
+}
+
+bool H264Decoder::Flush() {
+ DVLOG(2) << "Decoder flush";
+
+ if (!OutputAllRemainingPics())
+ return false;
+
+ ClearDPB();
+ DVLOG(2) << "Decoder flush finished";
+ return true;
+}
+
+bool H264Decoder::StartNewFrame(const H264SliceHeader* slice_hdr) {
+  // TODO(posciak): add handling of max_num_ref_frames per spec.
+ CHECK(curr_pic_.get());
+ DCHECK(slice_hdr);
+
+ curr_pps_id_ = slice_hdr->pic_parameter_set_id;
+ const H264PPS* pps = parser_.GetPPS(curr_pps_id_);
+ if (!pps)
+ return false;
+
+ curr_sps_id_ = pps->seq_parameter_set_id;
+ const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+ if (!sps)
+ return false;
+
+ max_frame_num_ = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ int frame_num = slice_hdr->frame_num;
+ if (slice_hdr->idr_pic_flag)
+ prev_ref_frame_num_ = 0;
+
+ // 7.4.3
+ if (frame_num != prev_ref_frame_num_ &&
+ frame_num != (prev_ref_frame_num_ + 1) % max_frame_num_) {
+ if (!HandleFrameNumGap(frame_num))
+ return false;
+ }
+
+ if (!InitCurrPicture(slice_hdr))
+ return false;
+
+ UpdatePicNums(frame_num);
+ PrepareRefPicLists(slice_hdr);
+
+ if (!accelerator_->SubmitFrameMetadata(sps, pps, dpb_, ref_pic_list_p0_,
+ ref_pic_list_b0_, ref_pic_list_b1_,
+ curr_pic_.get()))
+ return false;
+
+ return true;
+}
+
+bool H264Decoder::HandleMemoryManagementOps(scoped_refptr<H264Picture> pic) {
+ // 8.2.5.4
+ for (size_t i = 0; i < arraysize(pic->ref_pic_marking); ++i) {
+ // Code below does not support interlaced stream (per-field pictures).
+ H264DecRefPicMarking* ref_pic_marking = &pic->ref_pic_marking[i];
+ scoped_refptr<H264Picture> to_mark;
+ int pic_num_x;
+
+ switch (ref_pic_marking->memory_mgmnt_control_operation) {
+ case 0:
+ // Normal end of operations' specification.
+ return true;
+
+ case 1:
+ // Mark a short term reference picture as unused so it can be removed
+ // if outputted.
+ pic_num_x =
+ pic->pic_num - (ref_pic_marking->difference_of_pic_nums_minus1 + 1);
+ to_mark = dpb_.GetShortRefPicByPicNum(pic_num_x);
+ if (to_mark) {
+ to_mark->ref = false;
+ } else {
+ DVLOG(1) << "Invalid short ref pic num to unmark";
+ return false;
+ }
+ break;
+
+ case 2:
+ // Mark a long term reference picture as unused so it can be removed
+ // if outputted.
+ to_mark = dpb_.GetLongRefPicByLongTermPicNum(
+ ref_pic_marking->long_term_pic_num);
+ if (to_mark) {
+ to_mark->ref = false;
+ } else {
+ DVLOG(1) << "Invalid long term ref pic num to unmark";
+ return false;
+ }
+ break;
+
+ case 3:
+ // Mark a short term reference picture as long term reference.
+ pic_num_x =
+ pic->pic_num - (ref_pic_marking->difference_of_pic_nums_minus1 + 1);
+ to_mark = dpb_.GetShortRefPicByPicNum(pic_num_x);
+ if (to_mark) {
+ DCHECK(to_mark->ref && !to_mark->long_term);
+ to_mark->long_term = true;
+ to_mark->long_term_frame_idx = ref_pic_marking->long_term_frame_idx;
+ } else {
+ DVLOG(1) << "Invalid short term ref pic num to mark as long ref";
+ return false;
+ }
+ break;
+
+ case 4: {
+ // Unmark all reference pictures with long_term_frame_idx over new max.
+ max_long_term_frame_idx_ =
+ ref_pic_marking->max_long_term_frame_idx_plus1 - 1;
+ H264Picture::Vector long_terms;
+ dpb_.GetLongTermRefPicsAppending(&long_terms);
+ for (size_t i = 0; i < long_terms.size(); ++i) {
+ scoped_refptr<H264Picture>& long_term_pic = long_terms[i];
+ DCHECK(long_term_pic->ref && long_term_pic->long_term);
+ // Ok to cast, max_long_term_frame_idx is much smaller than 16bit.
+ if (long_term_pic->long_term_frame_idx >
+ static_cast<int>(max_long_term_frame_idx_))
+ long_term_pic->ref = false;
+ }
+ break;
+ }
+
+ case 5:
+ // Unmark all reference pictures.
+ dpb_.MarkAllUnusedForRef();
+ max_long_term_frame_idx_ = -1;
+ pic->mem_mgmt_5 = true;
+ break;
+
+ case 6: {
+ // Replace long term reference pictures with current picture.
+ // First unmark if any existing with this long_term_frame_idx...
+ H264Picture::Vector long_terms;
+ dpb_.GetLongTermRefPicsAppending(&long_terms);
+ for (size_t i = 0; i < long_terms.size(); ++i) {
+ scoped_refptr<H264Picture>& long_term_pic = long_terms[i];
+ DCHECK(long_term_pic->ref && long_term_pic->long_term);
+ // Ok to cast, long_term_frame_idx is much smaller than 16bit.
+ if (long_term_pic->long_term_frame_idx ==
+ static_cast<int>(ref_pic_marking->long_term_frame_idx))
+ long_term_pic->ref = false;
+ }
+
+ // and mark the current one instead.
+ pic->ref = true;
+ pic->long_term = true;
+ pic->long_term_frame_idx = ref_pic_marking->long_term_frame_idx;
+ break;
+ }
+
+ default:
+ // Would indicate a bug in parser.
+ NOTREACHED();
+ }
+ }
+
+ return true;
+}
+
+// This method ensures that DPB does not overflow, either by removing
+// reference pictures as specified in the stream, or using a sliding window
+// procedure to remove the oldest one.
+// It also performs marking and unmarking pictures as reference.
+// See spec 8.2.5.1.
+bool H264Decoder::ReferencePictureMarking(scoped_refptr<H264Picture> pic) {
+ // If the current picture is an IDR, all reference pictures are unmarked.
+ if (pic->idr) {
+ dpb_.MarkAllUnusedForRef();
+
+ if (pic->long_term_reference_flag) {
+ pic->long_term = true;
+ pic->long_term_frame_idx = 0;
+ max_long_term_frame_idx_ = 0;
+ } else {
+ pic->long_term = false;
+ max_long_term_frame_idx_ = -1;
+ }
+
+ return true;
+ }
+
+ // Not an IDR. If the stream contains instructions on how to discard pictures
+ // from DPB and how to mark/unmark existing reference pictures, do so.
+ // Otherwise, fall back to default sliding window process.
+ if (pic->adaptive_ref_pic_marking_mode_flag) {
+ DCHECK(!pic->nonexisting);
+ return HandleMemoryManagementOps(pic);
+ } else {
+ return SlidingWindowPictureMarking();
+ }
+}
+
+bool H264Decoder::SlidingWindowPictureMarking() {
+ const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+ if (!sps)
+ return false;
+
+ // 8.2.5.3. Ensure the DPB doesn't overflow by discarding the oldest picture.
+ int num_ref_pics = dpb_.CountRefPics();
+ DCHECK_LE(num_ref_pics, std::max<int>(sps->max_num_ref_frames, 1));
+ if (num_ref_pics == std::max<int>(sps->max_num_ref_frames, 1)) {
+ // Max number of reference pics reached, need to remove one of the short
+ // term ones. Find smallest frame_num_wrap short reference picture and mark
+ // it as unused.
+ scoped_refptr<H264Picture> to_unmark =
+ dpb_.GetLowestFrameNumWrapShortRefPic();
+ if (!to_unmark) {
+ DVLOG(1) << "Couldn't find a short ref picture to unmark";
+ return false;
+ }
+
+ to_unmark->ref = false;
+ }
+
+ return true;
+}
+
+bool H264Decoder::FinishPicture(scoped_refptr<H264Picture> pic) {
+ // Finish processing the picture.
+ // Start by storing previous picture data for later use.
+ if (pic->ref) {
+ ReferencePictureMarking(pic);
+ prev_ref_has_memmgmnt5_ = pic->mem_mgmt_5;
+ prev_ref_top_field_order_cnt_ = pic->top_field_order_cnt;
+ prev_ref_pic_order_cnt_msb_ = pic->pic_order_cnt_msb;
+ prev_ref_pic_order_cnt_lsb_ = pic->pic_order_cnt_lsb;
+ prev_ref_field_ = pic->field;
+ prev_ref_frame_num_ = pic->frame_num;
+ }
+ prev_frame_num_ = pic->frame_num;
+ prev_has_memmgmnt5_ = pic->mem_mgmt_5;
+ prev_frame_num_offset_ = pic->frame_num_offset;
+
+ // Remove unused (for reference or later output) pictures from DPB, marking
+ // them as such.
+ dpb_.DeleteUnused();
+
+ DVLOG(4) << "Finishing picture frame_num: " << pic->frame_num
+ << ", entries in DPB: " << dpb_.size();
+
+ // The ownership of pic will either be transferred to DPB - if the picture is
+ // still needed (for output and/or reference) - or we will release it
+ // immediately if we manage to output it here and won't have to store it for
+ // future reference.
+
+ // Get all pictures that haven't been outputted yet.
+ H264Picture::Vector not_outputted;
+ dpb_.GetNotOutputtedPicsAppending(&not_outputted);
+ // Include the one we've just decoded.
+ not_outputted.push_back(pic);
+
+ // Sort in output order.
+ std::sort(not_outputted.begin(), not_outputted.end(), POCAscCompare());
+
+ // Try to output as many pictures as we can. A picture can be output,
+ // if the number of decoded and not yet outputted pictures that would remain
+ // in DPB afterwards would at least be equal to max_num_reorder_frames.
+ // If the outputted picture is not a reference picture, it doesn't have
+ // to remain in the DPB and can be removed.
+ H264Picture::Vector::iterator output_candidate = not_outputted.begin();
+ size_t num_remaining = not_outputted.size();
+ while (num_remaining > max_num_reorder_frames_ ||
+ // If the condition below is used, this is an invalid stream. We should
+ // not be forced to output beyond max_num_reorder_frames in order to
+ // make room in DPB to store the current picture (if we need to do so).
+ // However, if this happens, ignore max_num_reorder_frames and try
+ // to output more. This may cause out-of-order output, but is not
+ // fatal, and better than failing instead.
+ ((dpb_.IsFull() && (!pic->outputted || pic->ref)) && num_remaining)) {
+ DVLOG_IF(1, num_remaining <= max_num_reorder_frames_)
+ << "Invalid stream: max_num_reorder_frames not preserved";
+
+ OutputPic(*output_candidate);
+
+ if (!(*output_candidate)->ref) {
+ // Current picture hasn't been inserted into DPB yet, so don't remove it
+ // if we managed to output it immediately.
+ int outputted_poc = (*output_candidate)->pic_order_cnt;
+ if (outputted_poc != pic->pic_order_cnt)
+ dpb_.DeleteByPOC(outputted_poc);
+ }
+
+ ++output_candidate;
+ --num_remaining;
+ }
+
+ // If we haven't managed to output the picture that we just decoded, or if
+ // it's a reference picture, we have to store it in DPB.
+ if (!pic->outputted || pic->ref) {
+ if (dpb_.IsFull()) {
+ // If we haven't managed to output anything to free up space in DPB
+ // to store this picture, it's an error in the stream.
+ DVLOG(1) << "Could not free up space in DPB!";
+ return false;
+ }
+
+ dpb_.StorePic(pic);
+ }
+
+ return true;
+}
+
+static int LevelToMaxDpbMbs(int level) {
+ // See table A-1 in spec.
+ switch (level) {
+ case 10:
+ return 396;
+ case 11:
+ return 900;
+ case 12: // fallthrough
+ case 13: // fallthrough
+ case 20:
+ return 2376;
+ case 21:
+ return 4752;
+ case 22: // fallthrough
+ case 30:
+ return 8100;
+ case 31:
+ return 18000;
+ case 32:
+ return 20480;
+ case 40: // fallthrough
+ case 41:
+ return 32768;
+ case 42:
+ return 34816;
+ case 50:
+ return 110400;
+ case 51: // fallthrough
+ case 52:
+ return 184320;
+ default:
+ DVLOG(1) << "Invalid codec level (" << level << ")";
+ return 0;
+ }
+}
+
+bool H264Decoder::UpdateMaxNumReorderFrames(const H264SPS* sps) {
+ if (sps->vui_parameters_present_flag && sps->bitstream_restriction_flag) {
+ max_num_reorder_frames_ =
+ base::checked_cast<size_t>(sps->max_num_reorder_frames);
+ if (max_num_reorder_frames_ > dpb_.max_num_pics()) {
+ DVLOG(1)
+ << "max_num_reorder_frames present, but larger than MaxDpbFrames ("
+ << max_num_reorder_frames_ << " > " << dpb_.max_num_pics() << ")";
+ max_num_reorder_frames_ = 0;
+ return false;
+ }
+ return true;
+ }
+
+ // max_num_reorder_frames not present, infer from profile/constraints
+ // (see VUI semantics in spec).
+ if (sps->constraint_set3_flag) {
+ switch (sps->profile_idc) {
+ case 44:
+ case 86:
+ case 100:
+ case 110:
+ case 122:
+ case 244:
+ max_num_reorder_frames_ = 0;
+ break;
+ default:
+ max_num_reorder_frames_ = dpb_.max_num_pics();
+ break;
+ }
+ } else {
+ max_num_reorder_frames_ = dpb_.max_num_pics();
+ }
+
+ return true;
+}
+
+bool H264Decoder::ProcessSPS(int sps_id, bool* need_new_buffers) {
+ DVLOG(4) << "Processing SPS id:" << sps_id;
+
+ const H264SPS* sps = parser_.GetSPS(sps_id);
+ if (!sps)
+ return false;
+
+ *need_new_buffers = false;
+
+ if (sps->frame_mbs_only_flag == 0) {
+ DVLOG(1) << "frame_mbs_only_flag != 1 not supported";
+ return false;
+ }
+
+ Size new_pic_size = sps->GetCodedSize().value_or(Size());
+ if (new_pic_size.IsEmpty()) {
+ DVLOG(1) << "Invalid picture size";
+ return false;
+ }
+
+ int width_mb = new_pic_size.width() / 16;
+ int height_mb = new_pic_size.height() / 16;
+
+ // Verify that the values are not too large before multiplying.
+ if (std::numeric_limits<int>::max() / width_mb < height_mb) {
+ DVLOG(1) << "Picture size is too big: " << new_pic_size.ToString();
+ return false;
+ }
+
+ int level = sps->level_idc;
+ int max_dpb_mbs = LevelToMaxDpbMbs(level);
+ if (max_dpb_mbs == 0)
+ return false;
+
+ // MaxDpbFrames from level limits per spec.
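+  // For example, a 1920x1088 level 4.1 stream has width_mb = 120,
+  // height_mb = 68 and max_dpb_mbs = 32768, giving
+  // MaxDpbFrames = 32768 / (120 * 68) = 4.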
+ size_t max_dpb_frames = std::min(max_dpb_mbs / (width_mb * height_mb),
+ static_cast<int>(H264DPB::kDPBMaxSize));
+ DVLOG(1) << "MaxDpbFrames: " << max_dpb_frames
+ << ", max_num_ref_frames: " << sps->max_num_ref_frames
+ << ", max_dec_frame_buffering: " << sps->max_dec_frame_buffering;
+
+ // Set DPB size to at least the level limit, or what the stream requires.
+ size_t max_dpb_size =
+ std::max(static_cast<int>(max_dpb_frames),
+ std::max(sps->max_num_ref_frames, sps->max_dec_frame_buffering));
+ // Some non-conforming streams specify more frames are needed than the current
+ // level limit. Allow this, but only up to the maximum number of reference
+ // frames allowed per spec.
+ DVLOG_IF(1, max_dpb_size > max_dpb_frames)
+ << "Invalid stream, DPB size > MaxDpbFrames";
+ if (max_dpb_size == 0 || max_dpb_size > H264DPB::kDPBMaxSize) {
+ DVLOG(1) << "Invalid DPB size: " << max_dpb_size;
+ return false;
+ }
+
+ if ((pic_size_ != new_pic_size) || (dpb_.max_num_pics() != max_dpb_size)) {
+ if (!Flush())
+ return false;
+ DVLOG(1) << "Codec level: " << level << ", DPB size: " << max_dpb_size
+ << ", Picture size: " << new_pic_size.ToString();
+ *need_new_buffers = true;
+ pic_size_ = new_pic_size;
+ dpb_.set_max_num_pics(max_dpb_size);
+ }
+
+ Rect new_visible_rect = sps->GetVisibleRect().value_or(Rect());
+ if (visible_rect_ != new_visible_rect) {
+ DVLOG(2) << "New visible rect: " << new_visible_rect.ToString();
+ visible_rect_ = new_visible_rect;
+ }
+
+ if (!UpdateMaxNumReorderFrames(sps))
+ return false;
+ DVLOG(1) << "max_num_reorder_frames: " << max_num_reorder_frames_;
+
+ return true;
+}
+
+bool H264Decoder::FinishPrevFrameIfPresent() {
+ // If we already have a frame waiting to be decoded, decode it and finish.
+ if (curr_pic_) {
+ if (!DecodePicture())
+ return false;
+
+ scoped_refptr<H264Picture> pic = curr_pic_;
+ curr_pic_ = nullptr;
+ return FinishPicture(pic);
+ }
+
+ return true;
+}
+
+bool H264Decoder::HandleFrameNumGap(int frame_num) {
+ const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+ if (!sps)
+ return false;
+
+ if (!sps->gaps_in_frame_num_value_allowed_flag) {
+ DVLOG(1) << "Invalid frame_num: " << frame_num;
+ return false;
+ }
+
+ DVLOG(2) << "Handling frame_num gap: " << prev_ref_frame_num_ << "->"
+ << frame_num;
+
+ // 7.4.3/7-23
+ int unused_short_term_frame_num = (prev_ref_frame_num_ + 1) % max_frame_num_;
+ while (unused_short_term_frame_num != frame_num) {
+ scoped_refptr<H264Picture> pic = new H264Picture();
+ if (!InitNonexistingPicture(pic, unused_short_term_frame_num))
+ return false;
+
+ UpdatePicNums(unused_short_term_frame_num);
+
+ if (!FinishPicture(pic))
+ return false;
+
+ unused_short_term_frame_num++;
+ unused_short_term_frame_num %= max_frame_num_;
+ }
+
+ return true;
+}
+
+bool H264Decoder::IsNewPrimaryCodedPicture(
+ const H264SliceHeader* slice_hdr) const {
+ if (!curr_pic_)
+ return true;
+
+ // 7.4.1.2.4, assumes non-interlaced.
+ if (slice_hdr->frame_num != curr_pic_->frame_num ||
+ slice_hdr->pic_parameter_set_id != curr_pps_id_ ||
+ slice_hdr->nal_ref_idc != curr_pic_->nal_ref_idc ||
+ slice_hdr->idr_pic_flag != curr_pic_->idr ||
+ (slice_hdr->idr_pic_flag &&
+ (slice_hdr->idr_pic_id != curr_pic_->idr_pic_id ||
+ // If we have two consecutive IDR slices, and the second one has
+ // first_mb_in_slice == 0, treat it as a new picture.
+ // Per spec, idr_pic_id should not be equal in this case (and we should
+ // have hit the condition above instead, see spec 7.4.3 on idr_pic_id),
+ // but some encoders neglect changing idr_pic_id for two consecutive
+ // IDRs. Work around this by checking if the next slice contains the
+ // zeroth macroblock, i.e. data that belongs to the next picture.
+ slice_hdr->first_mb_in_slice == 0)))
+ return true;
+
+ const H264SPS* sps = parser_.GetSPS(curr_sps_id_);
+ if (!sps)
+ return false;
+
+ if (sps->pic_order_cnt_type == curr_pic_->pic_order_cnt_type) {
+ if (curr_pic_->pic_order_cnt_type == 0) {
+ if (slice_hdr->pic_order_cnt_lsb != curr_pic_->pic_order_cnt_lsb ||
+ slice_hdr->delta_pic_order_cnt_bottom !=
+ curr_pic_->delta_pic_order_cnt_bottom)
+ return true;
+ } else if (curr_pic_->pic_order_cnt_type == 1) {
+ if (slice_hdr->delta_pic_order_cnt0 != curr_pic_->delta_pic_order_cnt0 ||
+ slice_hdr->delta_pic_order_cnt1 != curr_pic_->delta_pic_order_cnt1)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool H264Decoder::PreprocessCurrentSlice() {
+ const H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
+ DCHECK(slice_hdr);
+
+ if (IsNewPrimaryCodedPicture(slice_hdr)) {
+ // New picture, so first finish the previous one before processing it.
+ if (!FinishPrevFrameIfPresent())
+ return false;
+
+ DCHECK(!curr_pic_);
+
+ if (slice_hdr->first_mb_in_slice != 0) {
+ DVLOG(1) << "ASO/invalid stream, first_mb_in_slice: "
+ << slice_hdr->first_mb_in_slice;
+ return false;
+ }
+
+ // If the new picture is an IDR, flush DPB.
+ if (slice_hdr->idr_pic_flag) {
+ // Output all remaining pictures, unless we are explicitly instructed
+ // not to do so.
+ if (!slice_hdr->no_output_of_prior_pics_flag) {
+ if (!Flush())
+ return false;
+ }
+ dpb_.Clear();
+ last_output_poc_ = std::numeric_limits<int>::min();
+ }
+ }
+
+ return true;
+}
+
+bool H264Decoder::ProcessCurrentSlice() {
+ DCHECK(curr_pic_);
+
+ const H264SliceHeader* slice_hdr = curr_slice_hdr_.get();
+ DCHECK(slice_hdr);
+
+ if (slice_hdr->field_pic_flag == 0)
+ max_pic_num_ = max_frame_num_;
+ else
+ max_pic_num_ = 2 * max_frame_num_;
+
+ H264Picture::Vector ref_pic_list0, ref_pic_list1;
+ if (!ModifyReferencePicLists(slice_hdr, &ref_pic_list0, &ref_pic_list1))
+ return false;
+
+ const H264PPS* pps = parser_.GetPPS(curr_pps_id_);
+ if (!pps)
+ return false;
+
+ if (!accelerator_->SubmitSlice(pps, slice_hdr, ref_pic_list0, ref_pic_list1,
+ curr_pic_.get(), slice_hdr->nalu_data,
+ slice_hdr->nalu_size))
+ return false;
+
+ return true;
+}
+
+#define SET_ERROR_AND_RETURN() \
+ do { \
+ DVLOG(1) << "Error during decode"; \
+ state_ = kError; \
+ return H264Decoder::kDecodeError; \
+ } while (0)
+
+void H264Decoder::SetStream(const uint8_t* ptr, size_t size) {
+ DCHECK(ptr);
+ DCHECK(size);
+
+ DVLOG(4) << "New input stream at: " << (void*)ptr << " size: " << size;
+ parser_.SetStream(ptr, size);
+}
+
+H264Decoder::DecodeResult H264Decoder::Decode() {
+ if (state_ == kError) {
+ DVLOG(1) << "Decoder in error state";
+ return kDecodeError;
+ }
+
+ while (1) {
+ H264Parser::Result par_res;
+
+ if (!curr_nalu_) {
+ curr_nalu_.reset(new H264NALU());
+ par_res = parser_.AdvanceToNextNALU(curr_nalu_.get());
+ if (par_res == H264Parser::kEOStream)
+ return kRanOutOfStreamData;
+ else if (par_res != H264Parser::kOk)
+ SET_ERROR_AND_RETURN();
+
+ DVLOG(4) << "New NALU: " << static_cast<int>(curr_nalu_->nal_unit_type);
+ }
+
+ switch (curr_nalu_->nal_unit_type) {
+ case H264NALU::kNonIDRSlice:
+ // We can't resume from a non-IDR slice.
+ if (state_ != kDecoding)
+ break;
+
+ // else fallthrough
+ case H264NALU::kIDRSlice: {
+ // TODO(posciak): the IDR may require an SPS that we don't have
+ // available. For now we'd fail if that happens, but ideally we'd like
+ // to keep going until the next SPS in the stream.
+ if (state_ == kNeedStreamMetadata) {
+ // We need an SPS, skip this IDR and keep looking.
+ break;
+ }
+
+ // If after reset, we should be able to recover from an IDR.
+ state_ = kDecoding;
+
+ if (!curr_slice_hdr_) {
+ curr_slice_hdr_.reset(new H264SliceHeader());
+ par_res =
+ parser_.ParseSliceHeader(*curr_nalu_, curr_slice_hdr_.get());
+ if (par_res != H264Parser::kOk)
+ SET_ERROR_AND_RETURN();
+
+ if (!PreprocessCurrentSlice())
+ SET_ERROR_AND_RETURN();
+ }
+
+ if (!curr_pic_) {
+ // New picture/finished previous one, try to start a new one
+ // or tell the client we need more surfaces.
+ curr_pic_ = accelerator_->CreateH264Picture();
+ if (!curr_pic_)
+ return kRanOutOfSurfaces;
+
+ if (!StartNewFrame(curr_slice_hdr_.get()))
+ SET_ERROR_AND_RETURN();
+ }
+
+ if (!ProcessCurrentSlice())
+ SET_ERROR_AND_RETURN();
+
+ curr_slice_hdr_.reset();
+ break;
+ }
+
+ case H264NALU::kSPS: {
+ int sps_id;
+
+ if (!FinishPrevFrameIfPresent())
+ SET_ERROR_AND_RETURN();
+
+ par_res = parser_.ParseSPS(&sps_id);
+ if (par_res != H264Parser::kOk)
+ SET_ERROR_AND_RETURN();
+
+ bool need_new_buffers = false;
+ if (!ProcessSPS(sps_id, &need_new_buffers))
+ SET_ERROR_AND_RETURN();
+
+ if (state_ == kNeedStreamMetadata)
+ state_ = kAfterReset;
+
+ if (need_new_buffers) {
+ curr_pic_ = nullptr;
+ curr_nalu_ = nullptr;
+ ref_pic_list_p0_.clear();
+ ref_pic_list_b0_.clear();
+ ref_pic_list_b1_.clear();
+
+ return kAllocateNewSurfaces;
+ }
+ break;
+ }
+
+ case H264NALU::kPPS: {
+ int pps_id;
+
+ if (!FinishPrevFrameIfPresent())
+ SET_ERROR_AND_RETURN();
+
+ par_res = parser_.ParsePPS(&pps_id);
+ if (par_res != H264Parser::kOk)
+ SET_ERROR_AND_RETURN();
+
+ break;
+ }
+
+ case H264NALU::kAUD:
+ case H264NALU::kEOSeq:
+ case H264NALU::kEOStream:
+ if (state_ != kDecoding)
+ break;
+
+ if (!FinishPrevFrameIfPresent())
+ SET_ERROR_AND_RETURN();
+
+ break;
+
+ default:
+ DVLOG(4) << "Skipping NALU type: " << curr_nalu_->nal_unit_type;
+ break;
+ }
+
+ DVLOG(4) << "NALU done";
+ curr_nalu_.reset();
+ }
+}
+
+Size H264Decoder::GetPicSize() const {
+ return pic_size_;
+}
+
+size_t H264Decoder::GetRequiredNumOfPictures() const {
+ return dpb_.max_num_pics() + kPicsInPipeline;
+}
+
+} // namespace media
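A sketch of the driving loop a client might run around Decode() (hypothetical
glue code; surface allocation and the accelerator are assumed to exist
elsewhere, and the result names are those of AcceleratedVideoDecoder as used
in this file):

    // Hypothetical driver: pump the decoder until the input runs dry.
    decoder->SetStream(stream, stream_size);
    for (;;) {
      switch (decoder->Decode()) {
        case media::H264Decoder::kAllocateNewSurfaces:
          // Allocate decoder->GetRequiredNumOfPictures() pictures of
          // decoder->GetPicSize() before resuming.
          break;
        case media::H264Decoder::kRanOutOfSurfaces:
          // Wait for outputted pictures to be returned, then retry.
          break;
        case media::H264Decoder::kRanOutOfStreamData:
          return;  // Feed more input via SetStream() when available.
        case media::H264Decoder::kDecodeError:
          return;  // Fatal: Reset() and resync from the next IDR, or fail.
      }
    }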
diff --git a/accel/h264_decoder.h b/accel/h264_decoder.h
new file mode 100644
index 0000000..82ab98f
--- /dev/null
+++ b/accel/h264_decoder.h
@@ -0,0 +1,284 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 77be7ae
+
+#ifndef H264_DECODER_H_
+#define H264_DECODER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "accelerated_video_decoder.h"
+#include "h264_dpb.h"
+#include "h264_parser.h"
+#include "rect.h"
+#include "size.h"
+
+namespace media {
+
+// Clients of this class are expected to pass an H264 Annex-B byte stream
+// and to provide an implementation of H264Accelerator for offloading the
+// final steps of the decoding process.
+//
+// This class must be created, called and destroyed on a single thread, and
+// does nothing internally on any other thread.
+class H264Decoder : public AcceleratedVideoDecoder {
+ public:
+ class H264Accelerator {
+ public:
+ H264Accelerator();
+ virtual ~H264Accelerator();
+
+ // Create a new H264Picture that the decoder client can use for decoding
+ // and pass back to this accelerator for decoding or reference.
+ // When the picture is no longer needed by decoder, it will just drop
+ // its reference to it, and it may do so at any time.
+    // Note that this may return nullptr if the accelerator is not able to
+    // provide any new pictures at the given time. The decoder is expected to
+    // handle this as normal and return from Decode() with kRanOutOfSurfaces.
+ virtual scoped_refptr<H264Picture> CreateH264Picture() = 0;
+
+ // Submit metadata for the current frame, providing the current |sps| and
+ // |pps| for it. |dpb| has to contain all the pictures in the DPB for the
+ // current frame, and |ref_pic_listp0/b0/b1| the reference picture lists as
+ // specified in the H264 spec. Note that, depending on the frame type,
+ // either p0, or b0 and b1 are used. |pic| contains information about the
+ // picture for the current frame.
+ // Note that this does not run decode in the accelerator; the decoder is
+ // expected to follow this call with one or more SubmitSlice() calls before
+ // calling SubmitDecode().
+ // Return true if successful.
+ virtual bool SubmitFrameMetadata(const H264SPS* sps,
+ const H264PPS* pps,
+ const H264DPB& dpb,
+ const H264Picture::Vector& ref_pic_listp0,
+ const H264Picture::Vector& ref_pic_listb0,
+ const H264Picture::Vector& ref_pic_listb1,
+ const scoped_refptr<H264Picture>& pic) = 0;
+
+ // Submit one slice for the current frame, passing the current |pps| and
+ // |pic| (same as in SubmitFrameMetadata()), the parsed header for the
+ // current slice in |slice_hdr|, and the reordered |ref_pic_listX|,
+ // as per the H264 spec. |data| points to the full slice (including the
+ // unparsed header) and is |size| bytes long.
+ // This must be called one or more times per frame, before SubmitDecode().
+ // Note that |data| does not have to remain valid after this call returns.
+ // Return true if successful.
+ virtual bool SubmitSlice(const H264PPS* pps,
+ const H264SliceHeader* slice_hdr,
+ const H264Picture::Vector& ref_pic_list0,
+ const H264Picture::Vector& ref_pic_list1,
+ const scoped_refptr<H264Picture>& pic,
+ const uint8_t* data,
+ size_t size) = 0;
+
+ // Execute the decode in hardware for |pic|, using all the slices and
+ // metadata submitted via SubmitFrameMetadata() and SubmitSlice() since
+ // the previous call to SubmitDecode().
+ // Return true if successful.
+ virtual bool SubmitDecode(const scoped_refptr<H264Picture>& pic) = 0;
+
+ // Schedule output (display) of |pic|. Note that returning from this
+ // method does not mean that |pic| has already been outputted (displayed),
+ // but it guarantees that all pictures will be outputted in the same order
+ // as this method was called for them. The decoder may drop its reference
+ // to |pic| after calling this method.
+ // Return true if successful.
+ virtual bool OutputPicture(const scoped_refptr<H264Picture>& pic) = 0;
+
+ // Reset any current state that may be cached in the accelerator, dropping
+ // any cached parameters/slices that have not been committed yet.
+ virtual void Reset() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(H264Accelerator);
+ };
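+
+ // A bare-bones sketch of how an implementation typically satisfies the
+ // CreateH264Picture() contract; |free_surfaces_| and Wrap() are purely
+ // illustrative names, not part of this interface:
+ //
+ //   scoped_refptr<H264Picture> MyAccelerator::CreateH264Picture() {
+ //     if (free_surfaces_.empty())
+ //       return nullptr;  // Decode() will then return kRanOutOfSurfaces.
+ //     scoped_refptr<H264Picture> pic = Wrap(free_surfaces_.back());
+ //     free_surfaces_.pop_back();
+ //     return pic;
+ //   }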
+
+ H264Decoder(H264Accelerator* accelerator);
+ ~H264Decoder() override;
+
+ // AcceleratedVideoDecoder implementation.
+ bool Flush() override WARN_UNUSED_RESULT;
+ void Reset() override;
+ void SetStream(const uint8_t* ptr, size_t size) override;
+ DecodeResult Decode() override WARN_UNUSED_RESULT;
+ Size GetPicSize() const override;
+ size_t GetRequiredNumOfPictures() const override;
+
+ private:
+ // We need to keep at most kDPBMaxSize pictures in DPB for
+ // reference/to display later and an additional one for the one currently
+ // being decoded. We also ask for some additional ones since VDA needs
+ // to accumulate a few ready-to-output pictures before it actually starts
+ // displaying and giving them back. +2 instead of +1 because of subjective
+ // smoothness improvement during testing.
+ enum {
+ // TODO(johnylin): see if we could get rid of kMaxVideoFrames.
+ kMaxVideoFrames = 4,
+ kPicsInPipeline = kMaxVideoFrames + 2,
+ kMaxNumReqPictures = H264DPB::kDPBMaxSize + kPicsInPipeline,
+ };
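+ // With the spec cap of H264DPB::kDPBMaxSize = 16, kMaxNumReqPictures works
+ // out to at most 16 + 4 + 2 = 22 pictures requested from the client.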
+
+ // Internal state of the decoder.
+ enum State {
+ kNeedStreamMetadata, // After initialization, need an SPS.
+ kDecoding, // Ready to decode from any point.
+ kAfterReset, // After Reset(), need a resume point.
+ kError, // Error in decode, can't continue.
+ };
+
+ // Process H264 stream structures.
+ bool ProcessSPS(int sps_id, bool* need_new_buffers);
+ // Process the current slice header to discover whether we need to start a
+ // new picture, finishing up the current one first if so.
+ bool PreprocessCurrentSlice();
+ // Process current slice as a slice of the current picture.
+ bool ProcessCurrentSlice();
+
+ // Return true if we need to start a new picture.
+ bool IsNewPrimaryCodedPicture(const H264SliceHeader* slice_hdr) const;
+
+ // Initialize the current picture according to data in |slice_hdr|.
+ bool InitCurrPicture(const H264SliceHeader* slice_hdr);
+
+ // Initialize |pic| as a "non-existing" picture (see spec) with |frame_num|,
+ // to be used for frame gap concealment.
+ bool InitNonexistingPicture(scoped_refptr<H264Picture> pic, int frame_num);
+
+ // Calculate picture order counts for |pic| on initialization
+ // of a new frame (see spec).
+ bool CalculatePicOrderCounts(scoped_refptr<H264Picture> pic);
+
+ // Update PicNum values in pictures stored in DPB on creation of
+ // a picture with |frame_num|.
+ void UpdatePicNums(int frame_num);
+
+ bool UpdateMaxNumReorderFrames(const H264SPS* sps);
+
+ // Prepare reference picture lists for the current frame.
+ void PrepareRefPicLists(const H264SliceHeader* slice_hdr);
+ // Prepare reference picture lists for the given slice.
+ bool ModifyReferencePicLists(const H264SliceHeader* slice_hdr,
+ H264Picture::Vector* ref_pic_list0,
+ H264Picture::Vector* ref_pic_list1);
+
+ // Construct initial reference picture lists for use in decoding of
+ // P and B pictures (see 8.2.4 in spec).
+ void ConstructReferencePicListsP(const H264SliceHeader* slice_hdr);
+ void ConstructReferencePicListsB(const H264SliceHeader* slice_hdr);
+
+ // Helper functions for reference list construction, per spec.
+ int PicNumF(const scoped_refptr<H264Picture>& pic);
+ int LongTermPicNumF(const scoped_refptr<H264Picture>& pic);
+
+ // Perform the reference picture lists' modification (reordering), as
+ // specified in spec (8.2.4).
+ //
+ // |list| indicates list number and should be either 0 or 1.
+ bool ModifyReferencePicList(const H264SliceHeader* slice_hdr,
+ int list,
+ H264Picture::Vector* ref_pic_listx);
+
+ // Perform reference picture memory management operations (marking/unmarking
+ // of reference pictures, long term picture management, discarding, etc.).
+ // See 8.2.5 in spec.
+ bool HandleMemoryManagementOps(scoped_refptr<H264Picture> pic);
+ bool ReferencePictureMarking(scoped_refptr<H264Picture> pic);
+ bool SlidingWindowPictureMarking();
+
+ // Handle a gap in frame_num in the stream up to |frame_num|, by creating
+ // "non-existing" pictures (see spec).
+ bool HandleFrameNumGap(int frame_num);
+
+ // Start processing a new frame.
+ bool StartNewFrame(const H264SliceHeader* slice_hdr);
+
+ // All data for a frame received, process it and decode.
+ bool FinishPrevFrameIfPresent();
+
+ // Called after we are done processing |pic|. Performs all operations to be
+ // done after decoding, including DPB management, reference picture marking
+ // and memory management operations.
+ // This will also output pictures if any have become ready to be outputted
+ // after processing |pic|.
+ bool FinishPicture(scoped_refptr<H264Picture> pic);
+
+ // Clear the DPB contents and remove all of its surfaces from the in_use_
+ // list. Cleared pictures become available for decode again, unless they
+ // are still at the client, waiting to be displayed.
+ void ClearDPB();
+
+ // Commits all pending data to the HW decoder and starts it.
+ bool DecodePicture();
+
+ // Notifies client that a picture is ready for output.
+ void OutputPic(scoped_refptr<H264Picture> pic);
+
+ // Output all pictures in DPB that have not been outputted yet.
+ bool OutputAllRemainingPics();
+
+ // Decoder state.
+ State state_;
+
+ // Parser in use.
+ H264Parser parser_;
+
+ // DPB in use.
+ H264DPB dpb_;
+
+ // Picture currently being processed/decoded.
+ scoped_refptr<H264Picture> curr_pic_;
+
+ // Reference picture lists, constructed for each frame.
+ H264Picture::Vector ref_pic_list_p0_;
+ H264Picture::Vector ref_pic_list_b0_;
+ H264Picture::Vector ref_pic_list_b1_;
+
+ // Global state values, needed in decoding. See spec.
+ int max_frame_num_;
+ int max_pic_num_;
+ int max_long_term_frame_idx_;
+ size_t max_num_reorder_frames_;
+
+ int prev_frame_num_;
+ int prev_ref_frame_num_;
+ int prev_frame_num_offset_;
+ bool prev_has_memmgmnt5_;
+
+ // Values related to previously decoded reference picture.
+ bool prev_ref_has_memmgmnt5_;
+ int prev_ref_top_field_order_cnt_;
+ int prev_ref_pic_order_cnt_msb_;
+ int prev_ref_pic_order_cnt_lsb_;
+ H264Picture::Field prev_ref_field_;
+
+ // Currently active SPS and PPS.
+ int curr_sps_id_;
+ int curr_pps_id_;
+
+ // Current NALU and slice header being processed.
+ std::unique_ptr<H264NALU> curr_nalu_;
+ std::unique_ptr<H264SliceHeader> curr_slice_hdr_;
+
+ // Output picture size.
+ Size pic_size_;
+ // Output visible cropping rect.
+ Rect visible_rect_;
+
+ // PicOrderCount of the previously outputted frame.
+ int last_output_poc_;
+
+ H264Accelerator* accelerator_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264Decoder);
+};
+
+} // namespace media
+
+#endif // H264_DECODER_H_
diff --git a/accel/h264_dpb.cc b/accel/h264_dpb.cc
new file mode 100644
index 0000000..af0b5e0
--- /dev/null
+++ b/accel/h264_dpb.cc
@@ -0,0 +1,171 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include <string.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "h264_dpb.h"
+
+namespace media {
+
+H264Picture::H264Picture()
+ : pic_order_cnt_type(0),
+ top_field_order_cnt(0),
+ bottom_field_order_cnt(0),
+ pic_order_cnt(0),
+ pic_order_cnt_msb(0),
+ pic_order_cnt_lsb(0),
+ delta_pic_order_cnt_bottom(0),
+ delta_pic_order_cnt0(0),
+ delta_pic_order_cnt1(0),
+ pic_num(0),
+ long_term_pic_num(0),
+ frame_num(0),
+ frame_num_offset(0),
+ frame_num_wrap(0),
+ long_term_frame_idx(0),
+ type(H264SliceHeader::kPSlice),
+ nal_ref_idc(0),
+ idr(false),
+ idr_pic_id(0),
+ ref(false),
+ long_term(false),
+ outputted(false),
+ mem_mgmt_5(false),
+ nonexisting(false),
+ field(FIELD_NONE),
+ long_term_reference_flag(false),
+ adaptive_ref_pic_marking_mode_flag(false),
+ dpb_position(0) {
+ memset(&ref_pic_marking, 0, sizeof(ref_pic_marking));
+}
+
+H264Picture::~H264Picture() = default;
+
+V4L2H264Picture* H264Picture::AsV4L2H264Picture() {
+ return nullptr;
+}
+
+H264DPB::H264DPB() : max_num_pics_(0) {}
+H264DPB::~H264DPB() = default;
+
+void H264DPB::Clear() {
+ pics_.clear();
+}
+
+void H264DPB::set_max_num_pics(size_t max_num_pics) {
+ DCHECK_LE(max_num_pics, static_cast<size_t>(kDPBMaxSize));
+ max_num_pics_ = max_num_pics;
+ if (pics_.size() > max_num_pics_)
+ pics_.resize(max_num_pics_);
+}
+
+void H264DPB::UpdatePicPositions() {
+ size_t i = 0;
+ for (auto& pic : pics_) {
+ pic->dpb_position = i;
+ ++i;
+ }
+}
+
+void H264DPB::DeleteByPOC(int poc) {
+ for (H264Picture::Vector::iterator it = pics_.begin(); it != pics_.end();
+ ++it) {
+ if ((*it)->pic_order_cnt == poc) {
+ pics_.erase(it);
+ UpdatePicPositions();
+ return;
+ }
+ }
+ NOTREACHED() << "Missing POC: " << poc;
+}
+
+void H264DPB::DeleteUnused() {
+ for (H264Picture::Vector::iterator it = pics_.begin(); it != pics_.end();) {
+ if ((*it)->outputted && !(*it)->ref)
+ it = pics_.erase(it);
+ else
+ ++it;
+ }
+ UpdatePicPositions();
+}
+
+void H264DPB::StorePic(const scoped_refptr<H264Picture>& pic) {
+ DCHECK_LT(pics_.size(), max_num_pics_);
+ DVLOG(3) << "Adding PicNum: " << pic->pic_num << " ref: " << (int)pic->ref
+ << " longterm: " << (int)pic->long_term << " to DPB";
+ pic->dpb_position = pics_.size();
+ pics_.push_back(pic);
+}
+
+int H264DPB::CountRefPics() {
+ int ret = 0;
+ for (size_t i = 0; i < pics_.size(); ++i) {
+ if (pics_[i]->ref)
+ ++ret;
+ }
+ return ret;
+}
+
+void H264DPB::MarkAllUnusedForRef() {
+ for (size_t i = 0; i < pics_.size(); ++i)
+ pics_[i]->ref = false;
+}
+
+scoped_refptr<H264Picture> H264DPB::GetShortRefPicByPicNum(int pic_num) {
+ for (const auto& pic : pics_) {
+ if (pic->ref && !pic->long_term && pic->pic_num == pic_num)
+ return pic;
+ }
+
+ DVLOG(1) << "Missing short ref pic num: " << pic_num;
+ return nullptr;
+}
+
+scoped_refptr<H264Picture> H264DPB::GetLongRefPicByLongTermPicNum(int pic_num) {
+ for (const auto& pic : pics_) {
+ if (pic->ref && pic->long_term && pic->long_term_pic_num == pic_num)
+ return pic;
+ }
+
+ DVLOG(1) << "Missing long term pic num: " << pic_num;
+ return nullptr;
+}
+
+scoped_refptr<H264Picture> H264DPB::GetLowestFrameNumWrapShortRefPic() {
+ scoped_refptr<H264Picture> ret;
+ for (const auto& pic : pics_) {
+ if (pic->ref && !pic->long_term &&
+ (!ret || pic->frame_num_wrap < ret->frame_num_wrap))
+ ret = pic;
+ }
+ return ret;
+}
+
+void H264DPB::GetNotOutputtedPicsAppending(H264Picture::Vector* out) {
+ for (const auto& pic : pics_) {
+ if (!pic->outputted)
+ out->push_back(pic);
+ }
+}
+
+void H264DPB::GetShortTermRefPicsAppending(H264Picture::Vector* out) {
+ for (const auto& pic : pics_) {
+ if (pic->ref && !pic->long_term)
+ out->push_back(pic);
+ }
+}
+
+void H264DPB::GetLongTermRefPicsAppending(H264Picture::Vector* out) {
+ for (const auto& pic : pics_) {
+ if (pic->ref && pic->long_term)
+ out->push_back(pic);
+ }
+}
+
+} // namespace media
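+
+// A small sketch of the bookkeeping the decoder performs with this DPB when
+// a new picture is ready to be stored; the eviction step in the middle is
+// only summarized, since it follows the memory management rules of the spec
+// (8.2.5) rather than anything in this class:
+//
+//   dpb_.DeleteUnused();  // Drop pictures already outputted and unreferenced.
+//   if (dpb_.IsFull()) {
+//     // Output and/or unmark pictures per the spec, then DeleteUnused()
+//     // again until room is available.
+//   }
+//   dpb_.StorePic(pic);   // DCHECKs that there is room left.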
diff --git a/accel/h264_dpb.h b/accel/h264_dpb.h
new file mode 100644
index 0000000..3da284e
--- /dev/null
+++ b/accel/h264_dpb.h
@@ -0,0 +1,181 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of an H.264 Decoded Picture Buffer
+// used in H264 decoders.
+// Note: ported from Chromium commit head: 70340ce
+
+#ifndef H264_DPB_H_
+#define H264_DPB_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "h264_parser.h"
+#include "rect.h"
+
+namespace media {
+
+class V4L2H264Picture;
+
+// A picture (a frame or a field) in the H.264 spec sense.
+// See spec at http://www.itu.int/rec/T-REC-H.264
+class H264Picture : public base::RefCountedThreadSafe<H264Picture> {
+ public:
+ using Vector = std::vector<scoped_refptr<H264Picture>>;
+
+ enum Field {
+ FIELD_NONE,
+ FIELD_TOP,
+ FIELD_BOTTOM,
+ };
+
+ H264Picture();
+
+ virtual V4L2H264Picture* AsV4L2H264Picture();
+
+ // Values calculated per H.264 specification or taken from slice header.
+ // See spec for more details on each (some names have been converted from
+ // CamelCase in spec to Chromium-style names).
+ int pic_order_cnt_type;
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ int pic_order_cnt;
+ int pic_order_cnt_msb;
+ int pic_order_cnt_lsb;
+ int delta_pic_order_cnt_bottom;
+ int delta_pic_order_cnt0;
+ int delta_pic_order_cnt1;
+
+ int pic_num;
+ int long_term_pic_num;
+ int frame_num; // from slice header
+ int frame_num_offset;
+ int frame_num_wrap;
+ int long_term_frame_idx;
+
+ H264SliceHeader::Type type;
+ int nal_ref_idc;
+ bool idr; // IDR picture?
+ int idr_pic_id; // Valid only if idr == true.
+ bool ref; // reference picture?
+ bool long_term; // long term reference picture?
+ bool outputted;
+ // Does memory management op 5 need to be executed after this
+ // picture has finished decoding?
+ bool mem_mgmt_5;
+
+ // Created by the decoding process for gaps in frame_num.
+ // Not for decode or output.
+ bool nonexisting;
+
+ Field field;
+
+ // Values from slice_hdr to be used during reference marking and
+ // memory management after finishing this picture.
+ bool long_term_reference_flag;
+ bool adaptive_ref_pic_marking_mode_flag;
+ H264DecRefPicMarking ref_pic_marking[H264SliceHeader::kRefListSize];
+
+ // Position in DPB (i.e. index in DPB).
+ int dpb_position;
+
+ // The visible size of the picture. This is either parsed from the SPS, or
+ // set to Rect(0, 0) to indicate that it is invalid or unavailable.
+ Rect visible_rect;
+
+ protected:
+ friend class base::RefCountedThreadSafe<H264Picture>;
+ virtual ~H264Picture();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(H264Picture);
+};
+
+// DPB - Decoded Picture Buffer.
+// Stores decoded pictures that will be used for future display
+// and/or reference.
+class H264DPB {
+ public:
+ H264DPB();
+ ~H264DPB();
+
+ void set_max_num_pics(size_t max_num_pics);
+ size_t max_num_pics() const { return max_num_pics_; }
+
+ // Remove unused pictures (already outputted and no longer used for
+ // reference) from the DPB and free them.
+ void DeleteUnused();
+
+ // Remove a picture by its pic_order_cnt and free it.
+ void DeleteByPOC(int poc);
+
+ // Clear DPB.
+ void Clear();
+
+ // Store a picture in the DPB. The DPB takes ownership of its resources.
+ void StorePic(const scoped_refptr<H264Picture>& pic);
+
+ // Return the number of reference pictures in DPB.
+ int CountRefPics();
+
+ // Mark all pictures in DPB as unused for reference.
+ void MarkAllUnusedForRef();
+
+ // Return a short-term reference picture by its pic_num.
+ scoped_refptr<H264Picture> GetShortRefPicByPicNum(int pic_num);
+
+ // Return a long-term reference picture by its long_term_pic_num.
+ scoped_refptr<H264Picture> GetLongRefPicByLongTermPicNum(int pic_num);
+
+ // Return the short-term reference picture with the lowest frame_num_wrap.
+ // Used for sliding window memory management.
+ scoped_refptr<H264Picture> GetLowestFrameNumWrapShortRefPic();
+
+ // Append all pictures that have not been outputted yet to the passed |out|
+ // vector, in DPB order; callers sort them by pic_order_cnt (output order)
+ // as needed.
+ void GetNotOutputtedPicsAppending(H264Picture::Vector* out);
+
+ // Append all short term reference pictures to the passed |out| vector.
+ void GetShortTermRefPicsAppending(H264Picture::Vector* out);
+
+ // Append all long term reference pictures to the passed |out| vector.
+ void GetLongTermRefPicsAppending(H264Picture::Vector* out);
+
+ // Iterators for direct access to DPB contents.
+ // Will be invalidated by any of the Delete* calls.
+ H264Picture::Vector::iterator begin() { return pics_.begin(); }
+ H264Picture::Vector::iterator end() { return pics_.end(); }
+ H264Picture::Vector::const_iterator begin() const { return pics_.begin(); }
+ H264Picture::Vector::const_iterator end() const { return pics_.end(); }
+ H264Picture::Vector::const_reverse_iterator rbegin() const {
+ return pics_.rbegin();
+ }
+ H264Picture::Vector::const_reverse_iterator rend() const {
+ return pics_.rend();
+ }
+
+ size_t size() const { return pics_.size(); }
+ bool IsFull() const { return pics_.size() == max_num_pics_; }
+
+ // Per H264 spec, increase to 32 if interlaced video is supported.
+ enum {
+ kDPBMaxSize = 16,
+ };
+
+ private:
+ void UpdatePicPositions();
+
+ H264Picture::Vector pics_;
+ size_t max_num_pics_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264DPB);
+};
+
+} // namespace media
+
+#endif // H264_DPB_H_
diff --git a/accel/h264_parser.cc b/accel/h264_parser.cc
new file mode 100644
index 0000000..0e24473
--- /dev/null
+++ b/accel/h264_parser.cc
@@ -0,0 +1,1612 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 600904374759
+// Note: GetColorSpace() is not ported.
+
+#include "h264_parser.h"
+#include "subsample_entry.h"
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/stl_util.h"
+
+namespace media {
+
+namespace {
+// Converts the [|start|, |end|) range with |encrypted_ranges| into a vector
+// of SubsampleEntry. |encrypted_ranges| must lie within the range defined by
+// |start| and |end|.
+// It is OK to pass in an empty |encrypted_ranges|; this will return a vector
+// with a single SubsampleEntry with clear_bytes set to the size of the
+// buffer.
+std::vector<SubsampleEntry> EncryptedRangesToSubsampleEntry(
+ const uint8_t* start,
+ const uint8_t* end,
+ const Ranges<const uint8_t*>& encrypted_ranges) {
+ std::vector<SubsampleEntry> subsamples;
+ const uint8_t* cur = start;
+ for (size_t i = 0; i < encrypted_ranges.size(); ++i) {
+ SubsampleEntry subsample = {};
+
+ const uint8_t* encrypted_start = encrypted_ranges.start(i);
+ DCHECK_GE(encrypted_start, cur)
+ << "Encrypted range started before the current buffer pointer.";
+ subsample.clear_bytes = encrypted_start - cur;
+
+ const uint8_t* encrypted_end = encrypted_ranges.end(i);
+ subsample.cypher_bytes = encrypted_end - encrypted_start;
+
+ subsamples.push_back(subsample);
+ cur = encrypted_end;
+ DCHECK_LE(cur, end) << "Encrypted range is outside the buffer range.";
+ }
+
+ // If there is more data in the buffer but not covered by encrypted_ranges,
+ // then it must be in the clear.
+ if (cur < end) {
+ SubsampleEntry subsample = {};
+ subsample.clear_bytes = end - cur;
+ subsamples.push_back(subsample);
+ }
+ return subsamples;
+}
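+
+// Worked example: for a 100-byte buffer with a single encrypted range
+// covering bytes [40, 60), this returns two entries:
+// {clear_bytes = 40, cypher_bytes = 20} for the leading clear data plus the
+// encrypted run, and {clear_bytes = 40, cypher_bytes = 0} for the clear tail.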
+} // namespace
+
+bool H264SliceHeader::IsPSlice() const {
+ return (slice_type % 5 == kPSlice);
+}
+
+bool H264SliceHeader::IsBSlice() const {
+ return (slice_type % 5 == kBSlice);
+}
+
+bool H264SliceHeader::IsISlice() const {
+ return (slice_type % 5 == kISlice);
+}
+
+bool H264SliceHeader::IsSPSlice() const {
+ return (slice_type % 5 == kSPSlice);
+}
+
+bool H264SliceHeader::IsSISlice() const {
+ return (slice_type % 5 == kSISlice);
+}
+
+H264NALU::H264NALU() {
+ memset(this, 0, sizeof(*this));
+}
+
+// static
+void H264SPS::GetLevelConfigFromProfileLevel(VideoCodecProfile profile,
+ uint8_t level,
+ int* level_idc,
+ bool* constraint_set3_flag) {
+ // Spec A.3.1.
+ // Note: we always use h264_output_level = 9 to indicate Level 1b in
+ // VideoEncodeAccelerator::Config, in order to tell it apart from Level 1.1,
+ // whose level IDC is also 11.
+ // For the Baseline and Main profiles, if the requested level is Level 1b,
+ // set level_idc to 11 and constraint_set3_flag to true. Otherwise, set
+ // level_idc to 9 for Level 1b, and to ten times the level number for all
+ // other levels.
+ if ((profile == H264PROFILE_BASELINE || profile == H264PROFILE_MAIN) &&
+ level == kLevelIDC1B) {
+ *level_idc = 11;
+ *constraint_set3_flag = true;
+ } else {
+ *level_idc = level;
+ }
+}
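+
+// For example, (H264PROFILE_BASELINE, level = kLevelIDC1B) yields
+// *level_idc = 11 with *constraint_set3_flag set to true, while
+// (H264PROFILE_HIGH, level = kLevelIDC1B) yields *level_idc = 9 and leaves
+// *constraint_set3_flag untouched.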
+
+H264SPS::H264SPS() {
+ memset(this, 0, sizeof(*this));
+}
+
+// Based on T-REC-H.264 7.4.2.1.1, "Sequence parameter set data semantics",
+// available from http://www.itu.int/rec/T-REC-H.264.
+base::Optional<Size> H264SPS::GetCodedSize() const {
+ // Interlaced frames are twice the height of each field.
+ const int mb_unit = 16;
+ int map_unit = frame_mbs_only_flag ? 16 : 32;
+
+ // Verify that the values are not too large before multiplying them.
+ // TODO(sandersd): These limits could be much smaller. The currently-largest
+ // specified limit (excluding SVC, multiview, etc., which I didn't bother to
+ // read) is 543 macroblocks (section A.3.1).
+ int max_mb_minus1 = std::numeric_limits<int>::max() / mb_unit - 1;
+ int max_map_units_minus1 = std::numeric_limits<int>::max() / map_unit - 1;
+ if (pic_width_in_mbs_minus1 > max_mb_minus1 ||
+ pic_height_in_map_units_minus1 > max_map_units_minus1) {
+ DVLOG(1) << "Coded size is too large.";
+ return base::nullopt;
+ }
+
+ return Size(mb_unit * (pic_width_in_mbs_minus1 + 1),
+ map_unit * (pic_height_in_map_units_minus1 + 1));
+}
+
+// Also based on section 7.4.2.1.1.
+base::Optional<Rect> H264SPS::GetVisibleRect() const {
+ base::Optional<Size> coded_size = GetCodedSize();
+ if (!coded_size)
+ return base::nullopt;
+
+ if (!frame_cropping_flag)
+ return Rect(coded_size.value());
+
+ int crop_unit_x;
+ int crop_unit_y;
+ if (chroma_array_type == 0) {
+ crop_unit_x = 1;
+ crop_unit_y = frame_mbs_only_flag ? 1 : 2;
+ } else {
+ // Section 6.2.
+ // |chroma_format_idc| may be:
+ // 1 => 4:2:0
+ // 2 => 4:2:2
+ // 3 => 4:4:4
+ // Everything else has |chroma_array_type| == 0.
+ int sub_width_c = chroma_format_idc > 2 ? 1 : 2;
+ int sub_height_c = chroma_format_idc > 1 ? 1 : 2;
+ crop_unit_x = sub_width_c;
+ crop_unit_y = sub_height_c * (frame_mbs_only_flag ? 1 : 2);
+ }
+
+ // Verify that the values are not too large before multiplying.
+ if (coded_size->width() / crop_unit_x < frame_crop_left_offset ||
+ coded_size->width() / crop_unit_x < frame_crop_right_offset ||
+ coded_size->height() / crop_unit_y < frame_crop_top_offset ||
+ coded_size->height() / crop_unit_y < frame_crop_bottom_offset) {
+ DVLOG(1) << "Frame cropping exceeds coded size.";
+ return base::nullopt;
+ }
+ int crop_left = crop_unit_x * frame_crop_left_offset;
+ int crop_right = crop_unit_x * frame_crop_right_offset;
+ int crop_top = crop_unit_y * frame_crop_top_offset;
+ int crop_bottom = crop_unit_y * frame_crop_bottom_offset;
+
+ // Verify that the values are sane. Note that some decoders also require that
+ // crops are smaller than a macroblock and/or that crops must be adjacent to
+ // at least one corner of the coded frame.
+ if (coded_size->width() - crop_left <= crop_right ||
+ coded_size->height() - crop_top <= crop_bottom) {
+ DVLOG(1) << "Frame cropping excludes entire frame.";
+ return base::nullopt;
+ }
+
+ return Rect(crop_left, crop_top,
+ coded_size->width() - crop_left - crop_right,
+ coded_size->height() - crop_top - crop_bottom);
+}
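+
+// Worked example: a 4:2:0 1080p stream codes 1920x1088 (120x68 macroblocks)
+// with frame_mbs_only_flag = 1 and frame_crop_bottom_offset = 4. Then
+// crop_unit_y = SubHeightC * 1 = 2, so crop_bottom = 8 and the visible rect
+// comes out as (0, 0, 1920, 1080).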
+
+uint8_t H264SPS::GetIndicatedLevel() const {
+ // Spec A.3.1 and A.3.2.
+ // For the Baseline, Constrained Baseline and Main profiles, the indicated
+ // level is Level 1b if level_idc is equal to 11 and constraint_set3_flag
+ // is true.
+ if ((profile_idc == H264SPS::kProfileIDCBaseline ||
+ profile_idc == H264SPS::kProfileIDCConstrainedBaseline ||
+ profile_idc == H264SPS::kProfileIDCMain) &&
+ level_idc == 11 && constraint_set3_flag) {
+ return kLevelIDC1B; // Level 1b
+ }
+
+ // Otherwise, level_idc is equal to 9 for Level 1b, and to ten times the
+ // level number for all other levels.
+ return base::checked_cast<uint8_t>(level_idc);
+}
+
+bool H264SPS::CheckIndicatedLevelWithinTarget(uint8_t target_level) const {
+ // See table A-1 in spec.
+ // Level 1.0 < 1b < 1.1 < 1.2 .... (in numeric order).
+ uint8_t level = GetIndicatedLevel();
+ if (target_level == kLevelIDC1p0)
+ return level == kLevelIDC1p0;
+ if (target_level == kLevelIDC1B)
+ return level == kLevelIDC1p0 || level == kLevelIDC1B;
+ return level <= target_level;
+}
+
+H264PPS::H264PPS() {
+ memset(this, 0, sizeof(*this));
+}
+
+H264SliceHeader::H264SliceHeader() {
+ memset(this, 0, sizeof(*this));
+}
+
+H264SEIMessage::H264SEIMessage() {
+ memset(this, 0, sizeof(*this));
+}
+
+#define READ_BITS_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!br_.ReadBits(num_bits, &_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ *out = _out; \
+ } while (0)
+
+#define READ_BOOL_OR_RETURN(out) \
+ do { \
+ int _out; \
+ if (!br_.ReadBits(1, &_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ *out = _out != 0; \
+ } while (0)
+
+#define READ_UE_OR_RETURN(out) \
+ do { \
+ if (ReadUE(out) != kOk) { \
+ DVLOG(1) << "Error in stream: invalid value while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+#define READ_SE_OR_RETURN(out) \
+ do { \
+ if (ReadSE(out) != kOk) { \
+ DVLOG(1) << "Error in stream: invalid value while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+#define IN_RANGE_OR_RETURN(val, min, max) \
+ do { \
+ if ((val) < (min) || (val) > (max)) { \
+ DVLOG(1) << "Error in stream: invalid value, expected " #val " to be" \
+ << " in range [" << (min) << ":" << (max) << "]" \
+ << " found " << (val) << " instead"; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+#define TRUE_OR_RETURN(a) \
+ do { \
+ if (!(a)) { \
+ DVLOG(1) << "Error in stream: invalid value, expected " << #a; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+// ISO 14496 part 10
+// VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
+static const int kTableSarWidth[] = {0, 1, 12, 10, 16, 40, 24, 20, 32,
+ 80, 18, 15, 64, 160, 4, 3, 2};
+static const int kTableSarHeight[] = {0, 1, 11, 11, 11, 33, 11, 11, 11,
+ 33, 11, 11, 33, 99, 3, 2, 1};
+static_assert(base::size(kTableSarWidth) == base::size(kTableSarHeight),
+ "sar tables must have the same size");
+
+H264Parser::H264Parser() {
+ Reset();
+}
+
+H264Parser::~H264Parser() = default;
+
+void H264Parser::Reset() {
+ stream_ = NULL;
+ bytes_left_ = 0;
+ encrypted_ranges_.clear();
+ previous_nalu_range_.clear();
+}
+
+void H264Parser::SetStream(const uint8_t* stream, off_t stream_size) {
+ std::vector<SubsampleEntry> subsamples;
+ SetEncryptedStream(stream, stream_size, subsamples);
+}
+
+void H264Parser::SetEncryptedStream(
+ const uint8_t* stream,
+ off_t stream_size,
+ const std::vector<SubsampleEntry>& subsamples) {
+ DCHECK(stream);
+ DCHECK_GT(stream_size, 0);
+
+ stream_ = stream;
+ bytes_left_ = stream_size;
+ previous_nalu_range_.clear();
+
+ encrypted_ranges_.clear();
+ const uint8_t* start = stream;
+ const uint8_t* stream_end = stream_ + bytes_left_;
+ for (size_t i = 0; i < subsamples.size() && start < stream_end; ++i) {
+ start += subsamples[i].clear_bytes;
+
+ const uint8_t* end =
+ std::min(start + subsamples[i].cypher_bytes, stream_end);
+ encrypted_ranges_.Add(start, end);
+ start = end;
+ }
+}
+
+const H264PPS* H264Parser::GetPPS(int pps_id) const {
+ auto it = active_PPSes_.find(pps_id);
+ if (it == active_PPSes_.end()) {
+ DVLOG(1) << "Requested a nonexistent PPS id " << pps_id;
+ return nullptr;
+ }
+
+ return it->second.get();
+}
+
+const H264SPS* H264Parser::GetSPS(int sps_id) const {
+ auto it = active_SPSes_.find(sps_id);
+ if (it == active_SPSes_.end()) {
+ DVLOG(1) << "Requested a nonexistent SPS id " << sps_id;
+ return nullptr;
+ }
+
+ return it->second.get();
+}
+
+static inline bool IsStartCode(const uint8_t* data) {
+ return data[0] == 0x00 && data[1] == 0x00 && data[2] == 0x01;
+}
+
+// static
+bool H264Parser::FindStartCode(const uint8_t* data,
+ off_t data_size,
+ off_t* offset,
+ off_t* start_code_size) {
+ DCHECK_GE(data_size, 0);
+ off_t bytes_left = data_size;
+
+ while (bytes_left >= 3) {
+ // The start code is "\0\0\1"; a 1 byte is much rarer than a 0 byte, so
+ // search for the 1 first.
+ const uint8_t* tmp =
+ reinterpret_cast<const uint8_t*>(memchr(data + 2, 1, bytes_left - 2));
+ if (!tmp) {
+ data += bytes_left - 2;
+ bytes_left = 2;
+ break;
+ }
+ tmp -= 2;
+ bytes_left -= tmp - data;
+ data = tmp;
+
+ if (IsStartCode(data)) {
+ // Found three-byte start code, set pointer at its beginning.
+ *offset = data_size - bytes_left;
+ *start_code_size = 3;
+
+ // If there is a zero byte before this start code,
+ // then it's actually a four-byte start code, so backtrack one byte.
+ if (*offset > 0 && *(data - 1) == 0x00) {
+ --(*offset);
+ ++(*start_code_size);
+ }
+
+ return true;
+ }
+
+ ++data;
+ --bytes_left;
+ }
+
+ // End of data: offset is pointing to the first byte that was not considered
+ // as a possible start of a start code.
+ // Note: there is no security issue when receiving a negative |data_size|
+ // since in this case, |bytes_left| is equal to |data_size| and thus
+ // |*offset| is equal to 0 (valid offset).
+ *offset = data_size - bytes_left;
+ *start_code_size = 0;
+ return false;
+}
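+
+// For example, scanning { 0x00, 0x00, 0x00, 0x01, 0x67 } first finds the
+// three-byte code at offset 1, then notices the zero byte before it and
+// widens the match, returning *offset = 0 and *start_code_size = 4.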
+
+bool H264Parser::LocateNALU(off_t* nalu_size, off_t* start_code_size) {
+ // Find the start code of next NALU.
+ off_t nalu_start_off = 0;
+ off_t annexb_start_code_size = 0;
+
+ if (!FindStartCodeInClearRanges(stream_, bytes_left_, encrypted_ranges_,
+ &nalu_start_off, &annexb_start_code_size)) {
+ DVLOG(4) << "Could not find start code, end of stream?";
+ return false;
+ }
+
+ // Move the stream to the beginning of the NALU (pointing at the start code).
+ stream_ += nalu_start_off;
+ bytes_left_ -= nalu_start_off;
+
+ const uint8_t* nalu_data = stream_ + annexb_start_code_size;
+ off_t max_nalu_data_size = bytes_left_ - annexb_start_code_size;
+ if (max_nalu_data_size <= 0) {
+ DVLOG(3) << "End of stream";
+ return false;
+ }
+
+ // Find the start code of the next NALU;
+ // if successful, |nalu_size_without_start_code| is the number of bytes from
+ // after the previous start code to before this one;
+ // if the next start code is not found, this is still a valid NALU, since
+ // there are some bytes left after the first start code: all the remaining
+ // bytes belong to the current NALU.
+ off_t next_start_code_size = 0;
+ off_t nalu_size_without_start_code = 0;
+ if (!FindStartCodeInClearRanges(
+ nalu_data, max_nalu_data_size, encrypted_ranges_,
+ &nalu_size_without_start_code, &next_start_code_size)) {
+ nalu_size_without_start_code = max_nalu_data_size;
+ }
+ *nalu_size = nalu_size_without_start_code + annexb_start_code_size;
+ *start_code_size = annexb_start_code_size;
+ return true;
+}
+
+// static
+bool H264Parser::FindStartCodeInClearRanges(
+ const uint8_t* data,
+ off_t data_size,
+ const Ranges<const uint8_t*>& encrypted_ranges,
+ off_t* offset,
+ off_t* start_code_size) {
+ if (encrypted_ranges.size() == 0)
+ return FindStartCode(data, data_size, offset, start_code_size);
+
+ DCHECK_GE(data_size, 0);
+ const uint8_t* start = data;
+ do {
+ off_t bytes_left = data_size - (start - data);
+
+ if (!FindStartCode(start, bytes_left, offset, start_code_size))
+ return false;
+
+ // Construct a Ranges object that represents the region occupied
+ // by the start code and the 1 byte needed to read the NAL unit type.
+ const uint8_t* start_code = start + *offset;
+ const uint8_t* start_code_end = start_code + *start_code_size;
+ Ranges<const uint8_t*> start_code_range;
+ start_code_range.Add(start_code, start_code_end + 1);
+
+ if (encrypted_ranges.IntersectionWith(start_code_range).size() > 0) {
+ // The start code is inside an encrypted section so we need to scan
+ // for another start code.
+ *start_code_size = 0;
+ start += std::min(*offset + 1, bytes_left);
+ }
+ } while (*start_code_size == 0);
+
+ // Update |*offset| to include the data we skipped over.
+ *offset += start - data;
+ return true;
+}
+
+// static
+VideoCodecProfile H264Parser::ProfileIDCToVideoCodecProfile(int profile_idc) {
+ switch (profile_idc) {
+ case H264SPS::kProfileIDCBaseline:
+ return H264PROFILE_BASELINE;
+ case H264SPS::kProfileIDCMain:
+ return H264PROFILE_MAIN;
+ case H264SPS::kProfileIDCHigh:
+ return H264PROFILE_HIGH;
+ case H264SPS::kProfileIDHigh10:
+ return H264PROFILE_HIGH10PROFILE;
+ case H264SPS::kProfileIDHigh422:
+ return H264PROFILE_HIGH422PROFILE;
+ case H264SPS::kProfileIDHigh444Predictive:
+ return H264PROFILE_HIGH444PREDICTIVEPROFILE;
+ case H264SPS::kProfileIDScalableBaseline:
+ return H264PROFILE_SCALABLEBASELINE;
+ case H264SPS::kProfileIDScalableHigh:
+ return H264PROFILE_SCALABLEHIGH;
+ case H264SPS::kProfileIDStereoHigh:
+ return H264PROFILE_STEREOHIGH;
+ case H264SPS::kProfileIDSMultiviewHigh:
+ return H264PROFILE_MULTIVIEWHIGH;
+ }
+ DVLOG(1) << "unknown video profile: " << profile_idc;
+ return VIDEO_CODEC_PROFILE_UNKNOWN;
+}
+
+// static
+bool H264Parser::ParseNALUs(const uint8_t* stream,
+ size_t stream_size,
+ std::vector<H264NALU>* nalus) {
+ DCHECK(nalus);
+ H264Parser parser;
+ parser.SetStream(stream, stream_size);
+
+ while (true) {
+ H264NALU nalu;
+ const H264Parser::Result result = parser.AdvanceToNextNALU(&nalu);
+ if (result == H264Parser::kOk) {
+ nalus->push_back(nalu);
+ } else if (result == H264Parser::kEOStream) {
+ return true;
+ } else {
+ DLOG(ERROR) << "Unexpected H264 parser result";
+ return false;
+ }
+ }
+ NOTREACHED();
+ return false;
+}
+
+H264Parser::Result H264Parser::ReadUE(int* val) {
+ int num_bits = -1;
+ int bit;
+ int rest;
+
+ // Count the number of contiguous zero bits.
+ do {
+ READ_BITS_OR_RETURN(1, &bit);
+ num_bits++;
+ } while (bit == 0);
+
+ if (num_bits > 31)
+ return kInvalidStream;
+
+ // Calculate exp-Golomb code value of size num_bits.
+ // Special case for |num_bits| == 31 to avoid integer overflow. The only
+ // valid representation as an int is 2^31 - 1, so the remaining bits must
+ // be 0 or else the number is too large.
+ *val = (1u << num_bits) - 1u;
+
+ if (num_bits == 31) {
+ READ_BITS_OR_RETURN(num_bits, &rest);
+ return (rest == 0) ? kOk : kInvalidStream;
+ }
+
+ if (num_bits > 0) {
+ READ_BITS_OR_RETURN(num_bits, &rest);
+ *val += rest;
+ }
+
+ return kOk;
+}
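+
+// Exp-Golomb worked example: the codeword "00101" has two leading zero bits,
+// so num_bits = 2 and the base value is (1 << 2) - 1 = 3; the trailing bits
+// "01" add 1, so the decoded value is 4.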
+
+H264Parser::Result H264Parser::ReadSE(int* val) {
+ int ue;
+ Result res;
+
+ // See Chapter 9 in the spec.
+ res = ReadUE(&ue);
+ if (res != kOk)
+ return res;
+
+ if (ue % 2 == 0)
+ *val = -(ue / 2);
+ else
+ *val = ue / 2 + 1;
+
+ return kOk;
+}
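+
+// The fold above maps ue = 0, 1, 2, 3, 4, ... to se = 0, 1, -1, 2, -2, ...
+// (spec 9.1.1): odd codes become positive values, even codes negative ones.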
+
+H264Parser::Result H264Parser::AdvanceToNextNALU(H264NALU* nalu) {
+ off_t start_code_size;
+ off_t nalu_size_with_start_code;
+ if (!LocateNALU(&nalu_size_with_start_code, &start_code_size)) {
+ DVLOG(4) << "Could not find next NALU, bytes left in stream: "
+ << bytes_left_;
+ stream_ = nullptr;
+ bytes_left_ = 0;
+ return kEOStream;
+ }
+
+ nalu->data = stream_ + start_code_size;
+ nalu->size = nalu_size_with_start_code - start_code_size;
+ DVLOG(4) << "NALU found: size=" << nalu_size_with_start_code;
+
+ // Initialize bit reader at the start of found NALU.
+ if (!br_.Initialize(nalu->data, nalu->size)) {
+ stream_ = nullptr;
+ bytes_left_ = 0;
+ return kEOStream;
+ }
+
+ // Move parser state to after this NALU, so next time AdvanceToNextNALU
+ // is called, we will effectively be skipping it;
+ // other parsing functions will use the position saved
+ // in bit reader for parsing, so we don't have to remember it here.
+ stream_ += nalu_size_with_start_code;
+ bytes_left_ -= nalu_size_with_start_code;
+
+ // Read the NALU header. The forbidden_zero_bit is not stored, but we check
+ // that it is zero.
+ int data;
+ READ_BITS_OR_RETURN(1, &data);
+ TRUE_OR_RETURN(data == 0);
+
+ READ_BITS_OR_RETURN(2, &nalu->nal_ref_idc);
+ READ_BITS_OR_RETURN(5, &nalu->nal_unit_type);
+
+ DVLOG(4) << "NALU type: " << static_cast<int>(nalu->nal_unit_type)
+ << " at: " << reinterpret_cast<const void*>(nalu->data)
+ << " size: " << nalu->size
+ << " ref: " << static_cast<int>(nalu->nal_ref_idc);
+
+ previous_nalu_range_.clear();
+ previous_nalu_range_.Add(nalu->data, nalu->data + nalu->size);
+ return kOk;
+}
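+
+// A minimal parsing-loop sketch using the API above (see also ParseNALUs());
+// error handling is elided for brevity:
+//
+//   H264Parser parser;
+//   parser.SetStream(data, size);
+//   H264NALU nalu;
+//   while (parser.AdvanceToNextNALU(&nalu) == H264Parser::kOk) {
+//     if (nalu.nal_unit_type == H264NALU::kSPS) {
+//       int sps_id;
+//       if (parser.ParseSPS(&sps_id) != H264Parser::kOk)
+//         break;
+//     }
+//   }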
+
+// Default scaling lists (per spec).
+static const int kDefault4x4Intra[kH264ScalingList4x4Length] = {
+ 6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42,
+};
+
+static const int kDefault4x4Inter[kH264ScalingList4x4Length] = {
+ 10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34,
+};
+
+static const int kDefault8x8Intra[kH264ScalingList8x8Length] = {
+ 6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, 18, 18, 18, 23,
+ 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27,
+ 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42,
+};
+
+static const int kDefault8x8Inter[kH264ScalingList8x8Length] = {
+ 9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, 19, 19, 19, 21,
+ 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27,
+ 27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35,
+};
+
+static inline void DefaultScalingList4x4(
+ int i,
+ int scaling_list4x4[][kH264ScalingList4x4Length]) {
+ DCHECK_LT(i, 6);
+
+ if (i < 3)
+ memcpy(scaling_list4x4[i], kDefault4x4Intra, sizeof(kDefault4x4Intra));
+ else if (i < 6)
+ memcpy(scaling_list4x4[i], kDefault4x4Inter, sizeof(kDefault4x4Inter));
+}
+
+static inline void DefaultScalingList8x8(
+ int i,
+ int scaling_list8x8[][kH264ScalingList8x8Length]) {
+ DCHECK_LT(i, 6);
+
+ if (i % 2 == 0)
+ memcpy(scaling_list8x8[i], kDefault8x8Intra, sizeof(kDefault8x8Intra));
+ else
+ memcpy(scaling_list8x8[i], kDefault8x8Inter, sizeof(kDefault8x8Inter));
+}
+
+static void FallbackScalingList4x4(
+ int i,
+ const int default_scaling_list_intra[],
+ const int default_scaling_list_inter[],
+ int scaling_list4x4[][kH264ScalingList4x4Length]) {
+ static const int kScalingList4x4ByteSize =
+ sizeof(scaling_list4x4[0][0]) * kH264ScalingList4x4Length;
+
+ switch (i) {
+ case 0:
+ memcpy(scaling_list4x4[i], default_scaling_list_intra,
+ kScalingList4x4ByteSize);
+ break;
+
+ case 1:
+ memcpy(scaling_list4x4[i], scaling_list4x4[0], kScalingList4x4ByteSize);
+ break;
+
+ case 2:
+ memcpy(scaling_list4x4[i], scaling_list4x4[1], kScalingList4x4ByteSize);
+ break;
+
+ case 3:
+ memcpy(scaling_list4x4[i], default_scaling_list_inter,
+ kScalingList4x4ByteSize);
+ break;
+
+ case 4:
+ memcpy(scaling_list4x4[i], scaling_list4x4[3], kScalingList4x4ByteSize);
+ break;
+
+ case 5:
+ memcpy(scaling_list4x4[i], scaling_list4x4[4], kScalingList4x4ByteSize);
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+static void FallbackScalingList8x8(
+ int i,
+ const int default_scaling_list_intra[],
+ const int default_scaling_list_inter[],
+ int scaling_list8x8[][kH264ScalingList8x8Length]) {
+ static const int kScalingList8x8ByteSize =
+ sizeof(scaling_list8x8[0][0]) * kH264ScalingList8x8Length;
+
+ switch (i) {
+ case 0:
+ memcpy(scaling_list8x8[i], default_scaling_list_intra,
+ kScalingList8x8ByteSize);
+ break;
+
+ case 1:
+ memcpy(scaling_list8x8[i], default_scaling_list_inter,
+ kScalingList8x8ByteSize);
+ break;
+
+ case 2:
+ memcpy(scaling_list8x8[i], scaling_list8x8[0], kScalingList8x8ByteSize);
+ break;
+
+ case 3:
+ memcpy(scaling_list8x8[i], scaling_list8x8[1], kScalingList8x8ByteSize);
+ break;
+
+ case 4:
+ memcpy(scaling_list8x8[i], scaling_list8x8[2], kScalingList8x8ByteSize);
+ break;
+
+ case 5:
+ memcpy(scaling_list8x8[i], scaling_list8x8[3], kScalingList8x8ByteSize);
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+H264Parser::Result H264Parser::ParseScalingList(int size,
+ int* scaling_list,
+ bool* use_default) {
+ // See chapter 7.3.2.1.1.1.
+ int last_scale = 8;
+ int next_scale = 8;
+ int delta_scale;
+
+ *use_default = false;
+
+ for (int j = 0; j < size; ++j) {
+ if (next_scale != 0) {
+ READ_SE_OR_RETURN(&delta_scale);
+ IN_RANGE_OR_RETURN(delta_scale, -128, 127);
+ next_scale = (last_scale + delta_scale + 256) & 0xff;
+
+ if (j == 0 && next_scale == 0) {
+ *use_default = true;
+ return kOk;
+ }
+ }
+
+ scaling_list[j] = (next_scale == 0) ? last_scale : next_scale;
+ last_scale = scaling_list[j];
+ }
+
+ return kOk;
+}
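+
+// Worked example: with size = 16 and delta_scale values 2, -10, next_scale
+// becomes (8 + 2) & 0xff = 10 and then (10 - 10 + 256) & 0xff = 0. Entry 0
+// is 10 and, once next_scale reaches zero, no further syntax is read: every
+// remaining entry repeats last_scale (10).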
+
+H264Parser::Result H264Parser::ParseSPSScalingLists(H264SPS* sps) {
+ // See 7.4.2.1.1.
+ bool seq_scaling_list_present_flag;
+ bool use_default;
+ Result res;
+
+ // Parse scaling_list4x4.
+ for (int i = 0; i < 6; ++i) {
+ READ_BOOL_OR_RETURN(&seq_scaling_list_present_flag);
+
+ if (seq_scaling_list_present_flag) {
+ res = ParseScalingList(base::size(sps->scaling_list4x4[i]),
+ sps->scaling_list4x4[i], &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList4x4(i, sps->scaling_list4x4);
+
+ } else {
+ FallbackScalingList4x4(i, kDefault4x4Intra, kDefault4x4Inter,
+ sps->scaling_list4x4);
+ }
+ }
+
+ // Parse scaling_list8x8.
+ for (int i = 0; i < ((sps->chroma_format_idc != 3) ? 2 : 6); ++i) {
+ READ_BOOL_OR_RETURN(&seq_scaling_list_present_flag);
+
+ if (seq_scaling_list_present_flag) {
+ res = ParseScalingList(base::size(sps->scaling_list8x8[i]),
+ sps->scaling_list8x8[i], &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList8x8(i, sps->scaling_list8x8);
+
+ } else {
+ FallbackScalingList8x8(i, kDefault8x8Intra, kDefault8x8Inter,
+ sps->scaling_list8x8);
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParsePPSScalingLists(const H264SPS& sps,
+ H264PPS* pps) {
+ // See 7.4.2.2.
+ bool pic_scaling_list_present_flag;
+ bool use_default;
+ Result res;
+
+ for (int i = 0; i < 6; ++i) {
+ READ_BOOL_OR_RETURN(&pic_scaling_list_present_flag);
+
+ if (pic_scaling_list_present_flag) {
+ res = ParseScalingList(base::size(pps->scaling_list4x4[i]),
+ pps->scaling_list4x4[i], &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList4x4(i, pps->scaling_list4x4);
+
+ } else {
+ if (!sps.seq_scaling_matrix_present_flag) {
+ // Table 7-2 fallback rule A in spec.
+ FallbackScalingList4x4(i, kDefault4x4Intra, kDefault4x4Inter,
+ pps->scaling_list4x4);
+ } else {
+ // Table 7-2 fallback rule B in spec.
+ FallbackScalingList4x4(i, sps.scaling_list4x4[0],
+ sps.scaling_list4x4[3], pps->scaling_list4x4);
+ }
+ }
+ }
+
+ if (pps->transform_8x8_mode_flag) {
+ for (int i = 0; i < ((sps.chroma_format_idc != 3) ? 2 : 6); ++i) {
+ READ_BOOL_OR_RETURN(&pic_scaling_list_present_flag);
+
+ if (pic_scaling_list_present_flag) {
+ res = ParseScalingList(base::size(pps->scaling_list8x8[i]),
+ pps->scaling_list8x8[i], &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList8x8(i, pps->scaling_list8x8);
+
+ } else {
+ if (!sps.seq_scaling_matrix_present_flag) {
+ // Table 7-2 fallback rule A in spec.
+ FallbackScalingList8x8(i, kDefault8x8Intra, kDefault8x8Inter,
+ pps->scaling_list8x8);
+ } else {
+ // Table 7-2 fallback rule B in spec.
+ FallbackScalingList8x8(i, sps.scaling_list8x8[0],
+ sps.scaling_list8x8[1], pps->scaling_list8x8);
+ }
+ }
+ }
+ }
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseAndIgnoreHRDParameters(
+ bool* hrd_parameters_present) {
+ int data;
+ READ_BOOL_OR_RETURN(&data); // {nal,vcl}_hrd_parameters_present_flag
+ if (!data)
+ return kOk;
+
+ *hrd_parameters_present = true;
+
+ int cpb_cnt_minus1;
+ READ_UE_OR_RETURN(&cpb_cnt_minus1);
+ IN_RANGE_OR_RETURN(cpb_cnt_minus1, 0, 31);
+ READ_BITS_OR_RETURN(8, &data); // bit_rate_scale, cpb_size_scale
+ for (int i = 0; i <= cpb_cnt_minus1; ++i) {
+ READ_UE_OR_RETURN(&data); // bit_rate_value_minus1[i]
+ READ_UE_OR_RETURN(&data); // cpb_size_value_minus1[i]
+ READ_BOOL_OR_RETURN(&data); // cbr_flag
+ }
+ READ_BITS_OR_RETURN(20, &data); // cpb/dpb delays, etc.
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseVUIParameters(H264SPS* sps) {
+ bool aspect_ratio_info_present_flag;
+ READ_BOOL_OR_RETURN(&aspect_ratio_info_present_flag);
+ if (aspect_ratio_info_present_flag) {
+ int aspect_ratio_idc;
+ READ_BITS_OR_RETURN(8, &aspect_ratio_idc);
+ if (aspect_ratio_idc == H264SPS::kExtendedSar) {
+ READ_BITS_OR_RETURN(16, &sps->sar_width);
+ READ_BITS_OR_RETURN(16, &sps->sar_height);
+ } else {
+ const int max_aspect_ratio_idc = base::size(kTableSarWidth) - 1;
+ IN_RANGE_OR_RETURN(aspect_ratio_idc, 0, max_aspect_ratio_idc);
+ sps->sar_width = kTableSarWidth[aspect_ratio_idc];
+ sps->sar_height = kTableSarHeight[aspect_ratio_idc];
+ }
+ }
+
+ int data;
+ // Read and ignore overscan and video signal type info.
+ READ_BOOL_OR_RETURN(&data); // overscan_info_present_flag
+ if (data)
+ READ_BOOL_OR_RETURN(&data); // overscan_appropriate_flag
+
+ READ_BOOL_OR_RETURN(&sps->video_signal_type_present_flag);
+ if (sps->video_signal_type_present_flag) {
+ READ_BITS_OR_RETURN(3, &sps->video_format);
+ READ_BOOL_OR_RETURN(&sps->video_full_range_flag);
+ READ_BOOL_OR_RETURN(&sps->colour_description_present_flag);
+ if (sps->colour_description_present_flag) {
+ // color description syntax elements
+ READ_BITS_OR_RETURN(8, &sps->colour_primaries);
+ READ_BITS_OR_RETURN(8, &sps->transfer_characteristics);
+ READ_BITS_OR_RETURN(8, &sps->matrix_coefficients);
+ }
+ }
+
+ READ_BOOL_OR_RETURN(&data); // chroma_loc_info_present_flag
+ if (data) {
+ READ_UE_OR_RETURN(&data); // chroma_sample_loc_type_top_field
+ READ_UE_OR_RETURN(&data); // chroma_sample_loc_type_bottom_field
+ }
+
+ // Read and ignore timing info.
+ READ_BOOL_OR_RETURN(&data); // timing_info_present_flag
+ if (data) {
+ READ_BITS_OR_RETURN(16, &data); // num_units_in_tick
+ READ_BITS_OR_RETURN(16, &data); // num_units_in_tick
+ READ_BITS_OR_RETURN(16, &data); // time_scale
+ READ_BITS_OR_RETURN(16, &data); // time_scale
+ READ_BOOL_OR_RETURN(&data); // fixed_frame_rate_flag
+ }
+
+ // Read and ignore NAL HRD parameters, if present.
+ bool hrd_parameters_present = false;
+ Result res = ParseAndIgnoreHRDParameters(&hrd_parameters_present);
+ if (res != kOk)
+ return res;
+
+ // Read and ignore VCL HRD parameters, if present.
+ res = ParseAndIgnoreHRDParameters(&hrd_parameters_present);
+ if (res != kOk)
+ return res;
+
+ if (hrd_parameters_present) // One of NAL or VCL params present is enough.
+ READ_BOOL_OR_RETURN(&data); // low_delay_hrd_flag
+
+ READ_BOOL_OR_RETURN(&data); // pic_struct_present_flag
+ READ_BOOL_OR_RETURN(&sps->bitstream_restriction_flag);
+ if (sps->bitstream_restriction_flag) {
+ READ_BOOL_OR_RETURN(&data); // motion_vectors_over_pic_boundaries_flag
+ READ_UE_OR_RETURN(&data); // max_bytes_per_pic_denom
+ READ_UE_OR_RETURN(&data); // max_bits_per_mb_denom
+ READ_UE_OR_RETURN(&data); // log2_max_mv_length_horizontal
+ READ_UE_OR_RETURN(&data); // log2_max_mv_length_vertical
+ READ_UE_OR_RETURN(&sps->max_num_reorder_frames);
+ READ_UE_OR_RETURN(&sps->max_dec_frame_buffering);
+ TRUE_OR_RETURN(sps->max_dec_frame_buffering >= sps->max_num_ref_frames);
+ IN_RANGE_OR_RETURN(sps->max_num_reorder_frames, 0,
+ sps->max_dec_frame_buffering);
+ }
+
+ return kOk;
+}
+
+static void FillDefaultSeqScalingLists(H264SPS* sps) {
+ for (int i = 0; i < 6; ++i)
+ for (int j = 0; j < kH264ScalingList4x4Length; ++j)
+ sps->scaling_list4x4[i][j] = 16;
+
+ for (int i = 0; i < 6; ++i)
+ for (int j = 0; j < kH264ScalingList8x8Length; ++j)
+ sps->scaling_list8x8[i][j] = 16;
+}
+
+H264Parser::Result H264Parser::ParseSPS(int* sps_id) {
+ // See 7.4.2.1.
+ int data;
+ Result res;
+
+ *sps_id = -1;
+
+ std::unique_ptr<H264SPS> sps(new H264SPS());
+
+ READ_BITS_OR_RETURN(8, &sps->profile_idc);
+ READ_BOOL_OR_RETURN(&sps->constraint_set0_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set1_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set2_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set3_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set4_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set5_flag);
+ READ_BITS_OR_RETURN(2, &data); // reserved_zero_2bits
+ READ_BITS_OR_RETURN(8, &sps->level_idc);
+ READ_UE_OR_RETURN(&sps->seq_parameter_set_id);
+ TRUE_OR_RETURN(sps->seq_parameter_set_id < 32);
+
+ if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
+ sps->profile_idc == 122 || sps->profile_idc == 244 ||
+ sps->profile_idc == 44 || sps->profile_idc == 83 ||
+ sps->profile_idc == 86 || sps->profile_idc == 118 ||
+ sps->profile_idc == 128) {
+ READ_UE_OR_RETURN(&sps->chroma_format_idc);
+ TRUE_OR_RETURN(sps->chroma_format_idc < 4);
+
+ if (sps->chroma_format_idc == 3)
+ READ_BOOL_OR_RETURN(&sps->separate_colour_plane_flag);
+
+ READ_UE_OR_RETURN(&sps->bit_depth_luma_minus8);
+ TRUE_OR_RETURN(sps->bit_depth_luma_minus8 < 7);
+
+ READ_UE_OR_RETURN(&sps->bit_depth_chroma_minus8);
+ TRUE_OR_RETURN(sps->bit_depth_chroma_minus8 < 7);
+
+ READ_BOOL_OR_RETURN(&sps->qpprime_y_zero_transform_bypass_flag);
+ READ_BOOL_OR_RETURN(&sps->seq_scaling_matrix_present_flag);
+
+ if (sps->seq_scaling_matrix_present_flag) {
+ DVLOG(4) << "Scaling matrix present";
+ res = ParseSPSScalingLists(sps.get());
+ if (res != kOk)
+ return res;
+ } else {
+ FillDefaultSeqScalingLists(sps.get());
+ }
+ } else {
+ sps->chroma_format_idc = 1;
+ FillDefaultSeqScalingLists(sps.get());
+ }
+
+ if (sps->separate_colour_plane_flag)
+ sps->chroma_array_type = 0;
+ else
+ sps->chroma_array_type = sps->chroma_format_idc;
+
+ READ_UE_OR_RETURN(&sps->log2_max_frame_num_minus4);
+ TRUE_OR_RETURN(sps->log2_max_frame_num_minus4 < 13);
+
+ READ_UE_OR_RETURN(&sps->pic_order_cnt_type);
+ TRUE_OR_RETURN(sps->pic_order_cnt_type < 3);
+
+ if (sps->pic_order_cnt_type == 0) {
+ READ_UE_OR_RETURN(&sps->log2_max_pic_order_cnt_lsb_minus4);
+ TRUE_OR_RETURN(sps->log2_max_pic_order_cnt_lsb_minus4 < 13);
+ sps->expected_delta_per_pic_order_cnt_cycle = 0;
+ } else if (sps->pic_order_cnt_type == 1) {
+ READ_BOOL_OR_RETURN(&sps->delta_pic_order_always_zero_flag);
+ READ_SE_OR_RETURN(&sps->offset_for_non_ref_pic);
+ READ_SE_OR_RETURN(&sps->offset_for_top_to_bottom_field);
+ READ_UE_OR_RETURN(&sps->num_ref_frames_in_pic_order_cnt_cycle);
+ TRUE_OR_RETURN(sps->num_ref_frames_in_pic_order_cnt_cycle < 255);
+
+ base::CheckedNumeric<int> offset_acc = 0;
+ for (int i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; ++i) {
+ READ_SE_OR_RETURN(&sps->offset_for_ref_frame[i]);
+ offset_acc += sps->offset_for_ref_frame[i];
+ }
+ if (!offset_acc.IsValid())
+ return kInvalidStream;
+ sps->expected_delta_per_pic_order_cnt_cycle = offset_acc.ValueOrDefault(0);
+ }
+
+ READ_UE_OR_RETURN(&sps->max_num_ref_frames);
+ READ_BOOL_OR_RETURN(&sps->gaps_in_frame_num_value_allowed_flag);
+
+ READ_UE_OR_RETURN(&sps->pic_width_in_mbs_minus1);
+ READ_UE_OR_RETURN(&sps->pic_height_in_map_units_minus1);
+
+ READ_BOOL_OR_RETURN(&sps->frame_mbs_only_flag);
+ if (!sps->frame_mbs_only_flag)
+ READ_BOOL_OR_RETURN(&sps->mb_adaptive_frame_field_flag);
+
+ READ_BOOL_OR_RETURN(&sps->direct_8x8_inference_flag);
+
+ READ_BOOL_OR_RETURN(&sps->frame_cropping_flag);
+ if (sps->frame_cropping_flag) {
+ READ_UE_OR_RETURN(&sps->frame_crop_left_offset);
+ READ_UE_OR_RETURN(&sps->frame_crop_right_offset);
+ READ_UE_OR_RETURN(&sps->frame_crop_top_offset);
+ READ_UE_OR_RETURN(&sps->frame_crop_bottom_offset);
+ }
+
+ READ_BOOL_OR_RETURN(&sps->vui_parameters_present_flag);
+ if (sps->vui_parameters_present_flag) {
+ DVLOG(4) << "VUI parameters present";
+ res = ParseVUIParameters(sps.get());
+ if (res != kOk)
+ return res;
+ }
+
+ // If an SPS with the same id already exists, replace it.
+ *sps_id = sps->seq_parameter_set_id;
+ active_SPSes_[*sps_id] = std::move(sps);
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParsePPS(int* pps_id) {
+ // See 7.4.2.2.
+ const H264SPS* sps;
+ Result res;
+
+ *pps_id = -1;
+
+ std::unique_ptr<H264PPS> pps(new H264PPS());
+
+ READ_UE_OR_RETURN(&pps->pic_parameter_set_id);
+ READ_UE_OR_RETURN(&pps->seq_parameter_set_id);
+ TRUE_OR_RETURN(pps->seq_parameter_set_id < 32);
+
+ if (active_SPSes_.find(pps->seq_parameter_set_id) == active_SPSes_.end()) {
+ DVLOG(1) << "Invalid stream, no SPS id: " << pps->seq_parameter_set_id;
+ return kInvalidStream;
+ }
+
+ sps = GetSPS(pps->seq_parameter_set_id);
+ TRUE_OR_RETURN(sps);
+
+ READ_BOOL_OR_RETURN(&pps->entropy_coding_mode_flag);
+ READ_BOOL_OR_RETURN(&pps->bottom_field_pic_order_in_frame_present_flag);
+
+ READ_UE_OR_RETURN(&pps->num_slice_groups_minus1);
+ if (pps->num_slice_groups_minus1 > 1) {
+ DVLOG(1) << "Slice groups not supported";
+ return kUnsupportedStream;
+ }
+
+ READ_UE_OR_RETURN(&pps->num_ref_idx_l0_default_active_minus1);
+ TRUE_OR_RETURN(pps->num_ref_idx_l0_default_active_minus1 < 32);
+
+ READ_UE_OR_RETURN(&pps->num_ref_idx_l1_default_active_minus1);
+ TRUE_OR_RETURN(pps->num_ref_idx_l1_default_active_minus1 < 32);
+
+ READ_BOOL_OR_RETURN(&pps->weighted_pred_flag);
+ READ_BITS_OR_RETURN(2, &pps->weighted_bipred_idc);
+ TRUE_OR_RETURN(pps->weighted_bipred_idc < 3);
+
+ READ_SE_OR_RETURN(&pps->pic_init_qp_minus26);
+ IN_RANGE_OR_RETURN(pps->pic_init_qp_minus26, -26, 25);
+
+ READ_SE_OR_RETURN(&pps->pic_init_qs_minus26);
+ IN_RANGE_OR_RETURN(pps->pic_init_qs_minus26, -26, 25);
+
+ READ_SE_OR_RETURN(&pps->chroma_qp_index_offset);
+ IN_RANGE_OR_RETURN(pps->chroma_qp_index_offset, -12, 12);
+ pps->second_chroma_qp_index_offset = pps->chroma_qp_index_offset;
+
+ READ_BOOL_OR_RETURN(&pps->deblocking_filter_control_present_flag);
+ READ_BOOL_OR_RETURN(&pps->constrained_intra_pred_flag);
+ READ_BOOL_OR_RETURN(&pps->redundant_pic_cnt_present_flag);
+
+ if (br_.HasMoreRBSPData()) {
+ READ_BOOL_OR_RETURN(&pps->transform_8x8_mode_flag);
+ READ_BOOL_OR_RETURN(&pps->pic_scaling_matrix_present_flag);
+
+ if (pps->pic_scaling_matrix_present_flag) {
+ DVLOG(4) << "Picture scaling matrix present";
+ res = ParsePPSScalingLists(*sps, pps.get());
+ if (res != kOk)
+ return res;
+ }
+
+ READ_SE_OR_RETURN(&pps->second_chroma_qp_index_offset);
+ }
+
+ // If a PPS with the same id already exists, replace it.
+ *pps_id = pps->pic_parameter_set_id;
+ active_PPSes_[*pps_id] = std::move(pps);
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseSPSExt(int* sps_id) {
+ // See 7.4.2.1.
+ int local_sps_id = -1;
+
+ *sps_id = -1;
+
+ READ_UE_OR_RETURN(&local_sps_id);
+ TRUE_OR_RETURN(local_sps_id < 32);
+
+ *sps_id = local_sps_id;
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseRefPicListModification(
+ int num_ref_idx_active_minus1,
+ H264ModificationOfPicNum* ref_list_mods) {
+ H264ModificationOfPicNum* pic_num_mod;
+
+ if (num_ref_idx_active_minus1 >= 32)
+ return kInvalidStream;
+
+ for (int i = 0; i < 32; ++i) {
+ pic_num_mod = &ref_list_mods[i];
+ READ_UE_OR_RETURN(&pic_num_mod->modification_of_pic_nums_idc);
+ TRUE_OR_RETURN(pic_num_mod->modification_of_pic_nums_idc < 4);
+
+ switch (pic_num_mod->modification_of_pic_nums_idc) {
+ case 0:
+ case 1:
+ READ_UE_OR_RETURN(&pic_num_mod->abs_diff_pic_num_minus1);
+ break;
+
+ case 2:
+ READ_UE_OR_RETURN(&pic_num_mod->long_term_pic_num);
+ break;
+
+ case 3:
+ // Per spec, list cannot be empty.
+ if (i == 0)
+ return kInvalidStream;
+ return kOk;
+
+ default:
+ return kInvalidStream;
+ }
+ }
+
+  // If we got here, we consumed all 32 entries without encountering the loop
+  // end marker; make sure it is present next, for our client's sake.
+ int modification_of_pic_nums_idc;
+ READ_UE_OR_RETURN(&modification_of_pic_nums_idc);
+ TRUE_OR_RETURN(modification_of_pic_nums_idc == 3);
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseRefPicListModifications(
+ H264SliceHeader* shdr) {
+ Result res;
+
+ if (!shdr->IsISlice() && !shdr->IsSISlice()) {
+ READ_BOOL_OR_RETURN(&shdr->ref_pic_list_modification_flag_l0);
+ if (shdr->ref_pic_list_modification_flag_l0) {
+ res = ParseRefPicListModification(shdr->num_ref_idx_l0_active_minus1,
+ shdr->ref_list_l0_modifications);
+ if (res != kOk)
+ return res;
+ }
+ }
+
+ if (shdr->IsBSlice()) {
+ READ_BOOL_OR_RETURN(&shdr->ref_pic_list_modification_flag_l1);
+ if (shdr->ref_pic_list_modification_flag_l1) {
+ res = ParseRefPicListModification(shdr->num_ref_idx_l1_active_minus1,
+ shdr->ref_list_l1_modifications);
+ if (res != kOk)
+ return res;
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseWeightingFactors(
+ int num_ref_idx_active_minus1,
+ int chroma_array_type,
+ int luma_log2_weight_denom,
+ int chroma_log2_weight_denom,
+ H264WeightingFactors* w_facts) {
+ int def_luma_weight = 1 << luma_log2_weight_denom;
+ int def_chroma_weight = 1 << chroma_log2_weight_denom;
+
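+  // When the per-entry weight flags are unset, the spec-defined defaults
+  // apply: weight = 2^log2_weight_denom and offset = 0 (see 7.4.3.2).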
+ for (int i = 0; i < num_ref_idx_active_minus1 + 1; ++i) {
+ READ_BOOL_OR_RETURN(&w_facts->luma_weight_flag);
+ if (w_facts->luma_weight_flag) {
+ READ_SE_OR_RETURN(&w_facts->luma_weight[i]);
+ IN_RANGE_OR_RETURN(w_facts->luma_weight[i], -128, 127);
+
+ READ_SE_OR_RETURN(&w_facts->luma_offset[i]);
+ IN_RANGE_OR_RETURN(w_facts->luma_offset[i], -128, 127);
+ } else {
+ w_facts->luma_weight[i] = def_luma_weight;
+ w_facts->luma_offset[i] = 0;
+ }
+
+ if (chroma_array_type != 0) {
+ READ_BOOL_OR_RETURN(&w_facts->chroma_weight_flag);
+ if (w_facts->chroma_weight_flag) {
+ for (int j = 0; j < 2; ++j) {
+ READ_SE_OR_RETURN(&w_facts->chroma_weight[i][j]);
+ IN_RANGE_OR_RETURN(w_facts->chroma_weight[i][j], -128, 127);
+
+ READ_SE_OR_RETURN(&w_facts->chroma_offset[i][j]);
+ IN_RANGE_OR_RETURN(w_facts->chroma_offset[i][j], -128, 127);
+ }
+ } else {
+ for (int j = 0; j < 2; ++j) {
+ w_facts->chroma_weight[i][j] = def_chroma_weight;
+ w_facts->chroma_offset[i][j] = 0;
+ }
+ }
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParsePredWeightTable(const H264SPS& sps,
+ H264SliceHeader* shdr) {
+ READ_UE_OR_RETURN(&shdr->luma_log2_weight_denom);
+ TRUE_OR_RETURN(shdr->luma_log2_weight_denom < 8);
+
+ if (sps.chroma_array_type != 0)
+ READ_UE_OR_RETURN(&shdr->chroma_log2_weight_denom);
+ TRUE_OR_RETURN(shdr->chroma_log2_weight_denom < 8);
+
+ Result res = ParseWeightingFactors(
+ shdr->num_ref_idx_l0_active_minus1, sps.chroma_array_type,
+ shdr->luma_log2_weight_denom, shdr->chroma_log2_weight_denom,
+ &shdr->pred_weight_table_l0);
+ if (res != kOk)
+ return res;
+
+ if (shdr->IsBSlice()) {
+ res = ParseWeightingFactors(
+ shdr->num_ref_idx_l1_active_minus1, sps.chroma_array_type,
+ shdr->luma_log2_weight_denom, shdr->chroma_log2_weight_denom,
+ &shdr->pred_weight_table_l1);
+ if (res != kOk)
+ return res;
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseDecRefPicMarking(H264SliceHeader* shdr) {
+ size_t bits_left_at_start = br_.NumBitsLeft();
+
+ if (shdr->idr_pic_flag) {
+ READ_BOOL_OR_RETURN(&shdr->no_output_of_prior_pics_flag);
+ READ_BOOL_OR_RETURN(&shdr->long_term_reference_flag);
+ } else {
+ READ_BOOL_OR_RETURN(&shdr->adaptive_ref_pic_marking_mode_flag);
+
+ H264DecRefPicMarking* marking;
+ if (shdr->adaptive_ref_pic_marking_mode_flag) {
+ size_t i;
+ for (i = 0; i < base::size(shdr->ref_pic_marking); ++i) {
+ marking = &shdr->ref_pic_marking[i];
+
+ READ_UE_OR_RETURN(&marking->memory_mgmnt_control_operation);
+ if (marking->memory_mgmnt_control_operation == 0)
+ break;
+
+ if (marking->memory_mgmnt_control_operation == 1 ||
+ marking->memory_mgmnt_control_operation == 3)
+ READ_UE_OR_RETURN(&marking->difference_of_pic_nums_minus1);
+
+ if (marking->memory_mgmnt_control_operation == 2)
+ READ_UE_OR_RETURN(&marking->long_term_pic_num);
+
+ if (marking->memory_mgmnt_control_operation == 3 ||
+ marking->memory_mgmnt_control_operation == 6)
+ READ_UE_OR_RETURN(&marking->long_term_frame_idx);
+
+ if (marking->memory_mgmnt_control_operation == 4)
+ READ_UE_OR_RETURN(&marking->max_long_term_frame_idx_plus1);
+
+ if (marking->memory_mgmnt_control_operation > 6)
+ return kInvalidStream;
+ }
+
+ if (i == base::size(shdr->ref_pic_marking)) {
+ DVLOG(1) << "Ran out of dec ref pic marking fields";
+ return kUnsupportedStream;
+ }
+ }
+ }
+
+ shdr->dec_ref_pic_marking_bit_size = bits_left_at_start - br_.NumBitsLeft();
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseSliceHeader(const H264NALU& nalu,
+ H264SliceHeader* shdr) {
+ // See 7.4.3.
+ const H264SPS* sps;
+ const H264PPS* pps;
+ Result res;
+
+ memset(shdr, 0, sizeof(*shdr));
+
+ shdr->idr_pic_flag = (nalu.nal_unit_type == 5);
+ shdr->nal_ref_idc = nalu.nal_ref_idc;
+ shdr->nalu_data = nalu.data;
+ shdr->nalu_size = nalu.size;
+
+ READ_UE_OR_RETURN(&shdr->first_mb_in_slice);
+ READ_UE_OR_RETURN(&shdr->slice_type);
+ TRUE_OR_RETURN(shdr->slice_type < 10);
+
+ READ_UE_OR_RETURN(&shdr->pic_parameter_set_id);
+
+ pps = GetPPS(shdr->pic_parameter_set_id);
+ TRUE_OR_RETURN(pps);
+
+ sps = GetSPS(pps->seq_parameter_set_id);
+ TRUE_OR_RETURN(sps);
+
+ if (sps->separate_colour_plane_flag) {
+ DVLOG(1) << "Interlaced streams not supported";
+ return kUnsupportedStream;
+ }
+
+ READ_BITS_OR_RETURN(sps->log2_max_frame_num_minus4 + 4, &shdr->frame_num);
+ if (!sps->frame_mbs_only_flag) {
+ READ_BOOL_OR_RETURN(&shdr->field_pic_flag);
+ if (shdr->field_pic_flag) {
+ DVLOG(1) << "Interlaced streams not supported";
+ return kUnsupportedStream;
+ }
+ }
+
+ if (shdr->idr_pic_flag)
+ READ_UE_OR_RETURN(&shdr->idr_pic_id);
+
+ size_t bits_left_at_pic_order_cnt_start = br_.NumBitsLeft();
+ if (sps->pic_order_cnt_type == 0) {
+ READ_BITS_OR_RETURN(sps->log2_max_pic_order_cnt_lsb_minus4 + 4,
+ &shdr->pic_order_cnt_lsb);
+ if (pps->bottom_field_pic_order_in_frame_present_flag &&
+ !shdr->field_pic_flag)
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt_bottom);
+ }
+
+ if (sps->pic_order_cnt_type == 1 && !sps->delta_pic_order_always_zero_flag) {
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt0);
+ if (pps->bottom_field_pic_order_in_frame_present_flag &&
+ !shdr->field_pic_flag)
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt1);
+ }
+
+ shdr->pic_order_cnt_bit_size =
+ bits_left_at_pic_order_cnt_start - br_.NumBitsLeft();
+
+ if (pps->redundant_pic_cnt_present_flag) {
+ READ_UE_OR_RETURN(&shdr->redundant_pic_cnt);
+ TRUE_OR_RETURN(shdr->redundant_pic_cnt < 128);
+ }
+
+ if (shdr->IsBSlice())
+ READ_BOOL_OR_RETURN(&shdr->direct_spatial_mv_pred_flag);
+
+ if (shdr->IsPSlice() || shdr->IsSPSlice() || shdr->IsBSlice()) {
+ READ_BOOL_OR_RETURN(&shdr->num_ref_idx_active_override_flag);
+ if (shdr->num_ref_idx_active_override_flag) {
+ READ_UE_OR_RETURN(&shdr->num_ref_idx_l0_active_minus1);
+ if (shdr->IsBSlice())
+ READ_UE_OR_RETURN(&shdr->num_ref_idx_l1_active_minus1);
+ } else {
+ shdr->num_ref_idx_l0_active_minus1 =
+ pps->num_ref_idx_l0_default_active_minus1;
+ if (shdr->IsBSlice()) {
+ shdr->num_ref_idx_l1_active_minus1 =
+ pps->num_ref_idx_l1_default_active_minus1;
+ }
+ }
+ }
+ if (shdr->field_pic_flag) {
+ TRUE_OR_RETURN(shdr->num_ref_idx_l0_active_minus1 < 32);
+ TRUE_OR_RETURN(shdr->num_ref_idx_l1_active_minus1 < 32);
+ } else {
+ TRUE_OR_RETURN(shdr->num_ref_idx_l0_active_minus1 < 16);
+ TRUE_OR_RETURN(shdr->num_ref_idx_l1_active_minus1 < 16);
+ }
+
+ if (nalu.nal_unit_type == H264NALU::kCodedSliceExtension) {
+ return kUnsupportedStream;
+ } else {
+ res = ParseRefPicListModifications(shdr);
+ if (res != kOk)
+ return res;
+ }
+
+ if ((pps->weighted_pred_flag && (shdr->IsPSlice() || shdr->IsSPSlice())) ||
+ (pps->weighted_bipred_idc == 1 && shdr->IsBSlice())) {
+ res = ParsePredWeightTable(*sps, shdr);
+ if (res != kOk)
+ return res;
+ }
+
+ if (nalu.nal_ref_idc != 0) {
+ res = ParseDecRefPicMarking(shdr);
+ if (res != kOk)
+ return res;
+ }
+
+ if (pps->entropy_coding_mode_flag && !shdr->IsISlice() &&
+ !shdr->IsSISlice()) {
+ READ_UE_OR_RETURN(&shdr->cabac_init_idc);
+ TRUE_OR_RETURN(shdr->cabac_init_idc < 3);
+ }
+
+ READ_SE_OR_RETURN(&shdr->slice_qp_delta);
+
+ if (shdr->IsSPSlice() || shdr->IsSISlice()) {
+ if (shdr->IsSPSlice())
+ READ_BOOL_OR_RETURN(&shdr->sp_for_switch_flag);
+ READ_SE_OR_RETURN(&shdr->slice_qs_delta);
+ }
+
+ if (pps->deblocking_filter_control_present_flag) {
+ READ_UE_OR_RETURN(&shdr->disable_deblocking_filter_idc);
+ TRUE_OR_RETURN(shdr->disable_deblocking_filter_idc < 3);
+
+ if (shdr->disable_deblocking_filter_idc != 1) {
+ READ_SE_OR_RETURN(&shdr->slice_alpha_c0_offset_div2);
+ IN_RANGE_OR_RETURN(shdr->slice_alpha_c0_offset_div2, -6, 6);
+
+ READ_SE_OR_RETURN(&shdr->slice_beta_offset_div2);
+ IN_RANGE_OR_RETURN(shdr->slice_beta_offset_div2, -6, 6);
+ }
+ }
+
+ if (pps->num_slice_groups_minus1 > 0) {
+ DVLOG(1) << "Slice groups not supported";
+ return kUnsupportedStream;
+ }
+
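+  // The bit reader strips emulation prevention bytes (0x03), so subtract
+  // them from the NALU size to get the header size in RBSP bits.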
+ size_t epb = br_.NumEmulationPreventionBytesRead();
+ shdr->header_bit_size = (shdr->nalu_size - epb) * 8 - br_.NumBitsLeft();
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseSEI(H264SEIMessage* sei_msg) {
+ int byte;
+
+ memset(sei_msg, 0, sizeof(*sei_msg));
+
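+  // Both the SEI type and the payload size use ff-coding (spec 7.3.2.3.1):
+  // each 0xff byte adds 255 and the first non-0xff byte terminates the
+  // value, e.g. the bytes 0xff 0x04 encode 255 + 4 = 259.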
+ READ_BITS_OR_RETURN(8, &byte);
+ while (byte == 0xff) {
+ sei_msg->type += 255;
+ READ_BITS_OR_RETURN(8, &byte);
+ }
+ sei_msg->type += byte;
+
+ READ_BITS_OR_RETURN(8, &byte);
+ while (byte == 0xff) {
+ sei_msg->payload_size += 255;
+ READ_BITS_OR_RETURN(8, &byte);
+ }
+ sei_msg->payload_size += byte;
+
+ DVLOG(4) << "Found SEI message type: " << sei_msg->type
+ << " payload size: " << sei_msg->payload_size;
+
+ switch (sei_msg->type) {
+ case H264SEIMessage::kSEIRecoveryPoint:
+ READ_UE_OR_RETURN(&sei_msg->recovery_point.recovery_frame_cnt);
+ READ_BOOL_OR_RETURN(&sei_msg->recovery_point.exact_match_flag);
+ READ_BOOL_OR_RETURN(&sei_msg->recovery_point.broken_link_flag);
+ READ_BITS_OR_RETURN(2, &sei_msg->recovery_point.changing_slice_group_idc);
+ break;
+
+ default:
+ DVLOG(4) << "Unsupported SEI message";
+ break;
+ }
+
+ return kOk;
+}
+
+std::vector<SubsampleEntry> H264Parser::GetCurrentSubsamples() {
+  DCHECK_EQ(previous_nalu_range_.size(), 1u)
+      << "This should only be called after a "
+         "successful call to AdvanceToNextNALU()";
+
+ auto intersection = encrypted_ranges_.IntersectionWith(previous_nalu_range_);
+ return EncryptedRangesToSubsampleEntry(
+ previous_nalu_range_.start(0), previous_nalu_range_.end(0), intersection);
+}
+
+} // namespace media
diff --git a/accel/h264_parser.h b/accel/h264_parser.h
new file mode 100644
index 0000000..9db53e7
--- /dev/null
+++ b/accel/h264_parser.h
@@ -0,0 +1,563 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of an H264 Annex-B video stream parser.
+// Note: ported from Chromium commit head: 600904374759
+// Note: GetColorSpace() is not ported.
+
+#ifndef H264_PARSER_H_
+#define H264_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/optional.h"
+#include "h264_bit_reader.h"
+#include "ranges.h"
+#include "rect.h"
+#include "size.h"
+#include "subsample_entry.h"
+#include "video_codecs.h"
+
+namespace media {
+
+struct SubsampleEntry;
+
+// For explanations of each struct and its members, see H.264 specification
+// at http://www.itu.int/rec/T-REC-H.264.
+struct H264NALU {
+ H264NALU();
+
+ enum Type {
+ kUnspecified = 0,
+ kNonIDRSlice = 1,
+ kSliceDataA = 2,
+ kSliceDataB = 3,
+ kSliceDataC = 4,
+ kIDRSlice = 5,
+ kSEIMessage = 6,
+ kSPS = 7,
+ kPPS = 8,
+ kAUD = 9,
+ kEOSeq = 10,
+ kEOStream = 11,
+ kFiller = 12,
+ kSPSExt = 13,
+ kReserved14 = 14,
+ kReserved15 = 15,
+ kReserved16 = 16,
+ kReserved17 = 17,
+ kReserved18 = 18,
+ kCodedSliceAux = 19,
+ kCodedSliceExtension = 20,
+ };
+
+  // Pointer to the NALU data, past the start code (which is not included).
+  // We don't own the underlying memory, so a shallow copy should be made
+  // when copying this struct.
+ const uint8_t* data;
+ off_t size; // From after start code to start code of next NALU (or EOS).
+
+ int nal_ref_idc;
+ int nal_unit_type;
+};
+
+enum {
+ kH264ScalingList4x4Length = 16,
+ kH264ScalingList8x8Length = 64,
+};
+
+struct H264SPS {
+ H264SPS();
+
+ enum H264ProfileIDC {
+ kProfileIDCBaseline = 66,
+ kProfileIDCConstrainedBaseline = kProfileIDCBaseline,
+ kProfileIDCMain = 77,
+ kProfileIDScalableBaseline = 83,
+ kProfileIDScalableHigh = 86,
+ kProfileIDCHigh = 100,
+ kProfileIDHigh10 = 110,
+ kProfileIDSMultiviewHigh = 118,
+ kProfileIDHigh422 = 122,
+ kProfileIDStereoHigh = 128,
+ kProfileIDHigh444Predictive = 244,
+ };
+
+ enum H264LevelIDC : uint8_t {
+ kLevelIDC1p0 = 10,
+ kLevelIDC1B = 9,
+ kLevelIDC1p1 = 11,
+ kLevelIDC1p2 = 12,
+ kLevelIDC1p3 = 13,
+ kLevelIDC2p0 = 20,
+ kLevelIDC2p1 = 21,
+ kLevelIDC2p2 = 22,
+ kLevelIDC3p0 = 30,
+ kLevelIDC3p1 = 31,
+ kLevelIDC3p2 = 32,
+ kLevelIDC4p0 = 40,
+ kLevelIDC4p1 = 41,
+ kLevelIDC4p2 = 42,
+ kLevelIDC5p0 = 50,
+ kLevelIDC5p1 = 51,
+ kLevelIDC5p2 = 52,
+ kLevelIDC6p0 = 60,
+ kLevelIDC6p1 = 61,
+ kLevelIDC6p2 = 62,
+ };
+
+ enum AspectRatioIdc {
+ kExtendedSar = 255,
+ };
+
+ enum {
+ // Constants for HRD parameters (spec ch. E.2.2).
+ kBitRateScaleConstantTerm = 6, // Equation E-37.
+ kCPBSizeScaleConstantTerm = 4, // Equation E-38.
+ kDefaultInitialCPBRemovalDelayLength = 24,
+ kDefaultDPBOutputDelayLength = 24,
+ kDefaultTimeOffsetLength = 24,
+ };
+
+ int profile_idc;
+ bool constraint_set0_flag;
+ bool constraint_set1_flag;
+ bool constraint_set2_flag;
+ bool constraint_set3_flag;
+ bool constraint_set4_flag;
+ bool constraint_set5_flag;
+ int level_idc;
+ int seq_parameter_set_id;
+
+ int chroma_format_idc;
+ bool separate_colour_plane_flag;
+ int bit_depth_luma_minus8;
+ int bit_depth_chroma_minus8;
+ bool qpprime_y_zero_transform_bypass_flag;
+
+ bool seq_scaling_matrix_present_flag;
+ int scaling_list4x4[6][kH264ScalingList4x4Length];
+ int scaling_list8x8[6][kH264ScalingList8x8Length];
+
+ int log2_max_frame_num_minus4;
+ int pic_order_cnt_type;
+ int log2_max_pic_order_cnt_lsb_minus4;
+ bool delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ int num_ref_frames_in_pic_order_cnt_cycle;
+ int expected_delta_per_pic_order_cnt_cycle; // calculated
+ int offset_for_ref_frame[255];
+ int max_num_ref_frames;
+ bool gaps_in_frame_num_value_allowed_flag;
+ int pic_width_in_mbs_minus1;
+ int pic_height_in_map_units_minus1;
+ bool frame_mbs_only_flag;
+ bool mb_adaptive_frame_field_flag;
+ bool direct_8x8_inference_flag;
+ bool frame_cropping_flag;
+ int frame_crop_left_offset;
+ int frame_crop_right_offset;
+ int frame_crop_top_offset;
+ int frame_crop_bottom_offset;
+
+ bool vui_parameters_present_flag;
+ int sar_width; // Set to 0 when not specified.
+ int sar_height; // Set to 0 when not specified.
+ bool bitstream_restriction_flag;
+ int max_num_reorder_frames;
+ int max_dec_frame_buffering;
+ bool timing_info_present_flag;
+ int num_units_in_tick;
+ int time_scale;
+ bool fixed_frame_rate_flag;
+
+ bool video_signal_type_present_flag;
+ int video_format;
+ bool video_full_range_flag;
+ bool colour_description_present_flag;
+ int colour_primaries;
+ int transfer_characteristics;
+ int matrix_coefficients;
+
+ // TODO(posciak): actually parse these instead of ParseAndIgnoreHRDParameters.
+ bool nal_hrd_parameters_present_flag;
+ int cpb_cnt_minus1;
+ int bit_rate_scale;
+ int cpb_size_scale;
+ int bit_rate_value_minus1[32];
+ int cpb_size_value_minus1[32];
+ bool cbr_flag[32];
+ int initial_cpb_removal_delay_length_minus_1;
+ int cpb_removal_delay_length_minus1;
+ int dpb_output_delay_length_minus1;
+ int time_offset_length;
+
+ bool low_delay_hrd_flag;
+
+ int chroma_array_type;
+
+ // Get corresponding SPS |level_idc| and |constraint_set3_flag| value from
+ // requested |profile| and |level| (see Spec A.3.1).
+ static void GetLevelConfigFromProfileLevel(VideoCodecProfile profile,
+ uint8_t level,
+ int* level_idc,
+ bool* constraint_set3_flag);
+
+ // Helpers to compute frequently-used values. These methods return
+ // base::nullopt if they encounter integer overflow. They do not verify that
+ // the results are in-spec for the given profile or level.
+ base::Optional<Size> GetCodedSize() const;
+ base::Optional<Rect> GetVisibleRect() const;
+
+  // Helper to compute the indicated level from parsed SPS data. The returned
+  // value corresponds to an H264LevelIDC enum entry representing that level.
+ uint8_t GetIndicatedLevel() const;
+ // Helper to check if indicated level is lower than or equal to
+ // |target_level|.
+ bool CheckIndicatedLevelWithinTarget(uint8_t target_level) const;
+};
+
+struct H264PPS {
+ H264PPS();
+
+ int pic_parameter_set_id;
+ int seq_parameter_set_id;
+ bool entropy_coding_mode_flag;
+ bool bottom_field_pic_order_in_frame_present_flag;
+ int num_slice_groups_minus1;
+ // TODO(posciak): Slice groups not implemented, could be added at some point.
+ int num_ref_idx_l0_default_active_minus1;
+ int num_ref_idx_l1_default_active_minus1;
+ bool weighted_pred_flag;
+ int weighted_bipred_idc;
+ int pic_init_qp_minus26;
+ int pic_init_qs_minus26;
+ int chroma_qp_index_offset;
+ bool deblocking_filter_control_present_flag;
+ bool constrained_intra_pred_flag;
+ bool redundant_pic_cnt_present_flag;
+ bool transform_8x8_mode_flag;
+
+ bool pic_scaling_matrix_present_flag;
+ int scaling_list4x4[6][kH264ScalingList4x4Length];
+ int scaling_list8x8[6][kH264ScalingList8x8Length];
+
+ int second_chroma_qp_index_offset;
+};
+
+struct H264ModificationOfPicNum {
+ int modification_of_pic_nums_idc;
+ union {
+ int abs_diff_pic_num_minus1;
+ int long_term_pic_num;
+ };
+};
+
+struct H264WeightingFactors {
+ bool luma_weight_flag;
+ bool chroma_weight_flag;
+ int luma_weight[32];
+ int luma_offset[32];
+ int chroma_weight[32][2];
+ int chroma_offset[32][2];
+};
+
+struct H264DecRefPicMarking {
+ int memory_mgmnt_control_operation;
+ int difference_of_pic_nums_minus1;
+ int long_term_pic_num;
+ int long_term_frame_idx;
+ int max_long_term_frame_idx_plus1;
+};
+
+struct H264SliceHeader {
+ H264SliceHeader();
+
+ enum { kRefListSize = 32, kRefListModSize = kRefListSize };
+
+ enum Type {
+ kPSlice = 0,
+ kBSlice = 1,
+ kISlice = 2,
+ kSPSlice = 3,
+ kSISlice = 4,
+ };
+
+ bool IsPSlice() const;
+ bool IsBSlice() const;
+ bool IsISlice() const;
+ bool IsSPSlice() const;
+ bool IsSISlice() const;
+
+ bool idr_pic_flag; // from NAL header
+ int nal_ref_idc; // from NAL header
+ const uint8_t* nalu_data; // from NAL header
+ off_t nalu_size; // from NAL header
+ off_t header_bit_size; // calculated
+
+ int first_mb_in_slice;
+ int slice_type;
+ int pic_parameter_set_id;
+ int colour_plane_id; // TODO(posciak): use this! http://crbug.com/139878
+ int frame_num;
+ bool field_pic_flag;
+ bool bottom_field_flag;
+ int idr_pic_id;
+ int pic_order_cnt_lsb;
+ int delta_pic_order_cnt_bottom;
+ int delta_pic_order_cnt0;
+ int delta_pic_order_cnt1;
+ int redundant_pic_cnt;
+ bool direct_spatial_mv_pred_flag;
+
+ bool num_ref_idx_active_override_flag;
+ int num_ref_idx_l0_active_minus1;
+ int num_ref_idx_l1_active_minus1;
+ bool ref_pic_list_modification_flag_l0;
+ bool ref_pic_list_modification_flag_l1;
+ H264ModificationOfPicNum ref_list_l0_modifications[kRefListModSize];
+ H264ModificationOfPicNum ref_list_l1_modifications[kRefListModSize];
+
+ int luma_log2_weight_denom;
+ int chroma_log2_weight_denom;
+
+ bool luma_weight_l0_flag;
+ bool chroma_weight_l0_flag;
+ H264WeightingFactors pred_weight_table_l0;
+
+ bool luma_weight_l1_flag;
+ bool chroma_weight_l1_flag;
+ H264WeightingFactors pred_weight_table_l1;
+
+ bool no_output_of_prior_pics_flag;
+ bool long_term_reference_flag;
+
+ bool adaptive_ref_pic_marking_mode_flag;
+ H264DecRefPicMarking ref_pic_marking[kRefListSize];
+
+ int cabac_init_idc;
+ int slice_qp_delta;
+ bool sp_for_switch_flag;
+ int slice_qs_delta;
+ int disable_deblocking_filter_idc;
+ int slice_alpha_c0_offset_div2;
+ int slice_beta_offset_div2;
+
+ // Calculated.
+ // Size in bits of dec_ref_pic_marking() syntax element.
+ size_t dec_ref_pic_marking_bit_size;
+ size_t pic_order_cnt_bit_size;
+};
+
+struct H264SEIRecoveryPoint {
+ int recovery_frame_cnt;
+ bool exact_match_flag;
+ bool broken_link_flag;
+ int changing_slice_group_idc;
+};
+
+struct H264SEIMessage {
+ H264SEIMessage();
+
+ enum Type {
+ kSEIRecoveryPoint = 6,
+ };
+
+ int type;
+ int payload_size;
+ union {
+    // Placeholder; as more SEI types become supported, more union members
+    // will be added here.
+ H264SEIRecoveryPoint recovery_point;
+ };
+};
+
+// Class to parse an Annex-B H.264 stream,
+// as specified in chapters 7 and Annex B of the H.264 spec.
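+//
+// A typical decode loop might look like this (sketch):
+//   H264Parser parser;
+//   parser.SetStream(data, size);
+//   H264NALU nalu;
+//   while (parser.AdvanceToNextNALU(&nalu) == H264Parser::kOk) {
+//     switch (nalu.nal_unit_type) {
+//       case H264NALU::kSPS: { int id; parser.ParseSPS(&id); break; }
+//       case H264NALU::kPPS: { int id; parser.ParsePPS(&id); break; }
+//       // kIDRSlice/kNonIDRSlice: ParseSliceHeader(nalu, &shdr), etc.
+//       default: break;
+//     }
+//   }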
+class H264Parser {
+ public:
+ enum Result {
+ kOk,
+ kInvalidStream, // error in stream
+ kUnsupportedStream, // stream not supported by the parser
+ kEOStream, // end of stream
+ };
+
+ // Find offset from start of data to next NALU start code
+ // and size of found start code (3 or 4 bytes).
+  // If no start code is found, |*offset| points to the first unprocessed
+  // byte (i.e. the first byte that was not considered as a possible start of
+  // a start code) and |*start_code_size| is set to 0.
+ // Preconditions:
+ // - |data_size| >= 0
+ // Postconditions:
+ // - |*offset| is between 0 and |data_size| included.
+ // It is strictly less than |data_size| if |data_size| > 0.
+ // - |*start_code_size| is either 0, 3 or 4.
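+  // For example, in the Annex-B bytes 00 00 00 01 67 ..., the start code is
+  // the 4-byte sequence 00 00 00 01, so |*offset| is 0 and
+  // |*start_code_size| is 4.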
+ static bool FindStartCode(const uint8_t* data,
+ off_t data_size,
+ off_t* offset,
+ off_t* start_code_size);
+
+ // Wrapper for FindStartCode() that skips over start codes that
+ // may appear inside of |encrypted_ranges_|.
+ // Returns true if a start code was found. Otherwise returns false.
+ static bool FindStartCodeInClearRanges(const uint8_t* data,
+ off_t data_size,
+ const Ranges<const uint8_t*>& ranges,
+ off_t* offset,
+ off_t* start_code_size);
+
+ static VideoCodecProfile ProfileIDCToVideoCodecProfile(int profile_idc);
+
+ // Parses the input stream and returns all the NALUs through |nalus|. Returns
+ // false if the stream is invalid.
+ static bool ParseNALUs(const uint8_t* stream,
+ size_t stream_size,
+ std::vector<H264NALU>* nalus);
+
+ H264Parser();
+ ~H264Parser();
+
+ void Reset();
+  // Set the current stream pointer to |stream|, which is |stream_size| bytes
+  // long and owned by the caller.
+ // |subsamples| contains information about what parts of |stream| are
+ // encrypted.
+ void SetStream(const uint8_t* stream, off_t stream_size);
+ void SetEncryptedStream(const uint8_t* stream,
+ off_t stream_size,
+ const std::vector<SubsampleEntry>& subsamples);
+
+ // Read the stream to find the next NALU, identify it and return
+ // that information in |*nalu|. This advances the stream to the beginning
+ // of this NALU, but not past it, so subsequent calls to NALU-specific
+ // parsing functions (ParseSPS, etc.) will parse this NALU.
+ // If the caller wishes to skip the current NALU, it can call this function
+ // again, instead of any NALU-type specific parse functions below.
+ Result AdvanceToNextNALU(H264NALU* nalu);
+
+ // NALU-specific parsing functions.
+ // These should be called after AdvanceToNextNALU().
+
+ // SPSes and PPSes are owned by the parser class and the memory for their
+ // structures is managed here, not by the caller, as they are reused
+ // across NALUs.
+ //
+  // Parse an SPS/PPS NALU and save its data in the parser, returning the id
+  // of the parsed structure in |*sps_id|/|*pps_id|.
+ // To get a pointer to a given SPS/PPS structure, use GetSPS()/GetPPS(),
+ // passing the returned |*sps_id|/|*pps_id| as parameter.
+ // TODO(posciak,fischman): consider replacing returning Result from Parse*()
+ // methods with a scoped_ptr and adding an AtEOS() function to check for EOS
+ // if Parse*() return NULL.
+ Result ParseSPS(int* sps_id);
+ Result ParsePPS(int* pps_id);
+
+ // Parses the SPS ID from the SPSExt, but otherwise does nothing.
+ Result ParseSPSExt(int* sps_id);
+
+ // Return a pointer to SPS/PPS with given |sps_id|/|pps_id| or NULL if not
+ // present.
+ const H264SPS* GetSPS(int sps_id) const;
+ const H264PPS* GetPPS(int pps_id) const;
+
+ // Slice headers and SEI messages are not used across NALUs by the parser
+ // and can be discarded after current NALU, so the parser does not store
+ // them, nor does it manage their memory.
+ // The caller has to provide and manage it instead.
+
+ // Parse a slice header, returning it in |*shdr|. |*nalu| must be set to
+ // the NALU returned from AdvanceToNextNALU() and corresponding to |*shdr|.
+ Result ParseSliceHeader(const H264NALU& nalu, H264SliceHeader* shdr);
+
+ // Parse a SEI message, returning it in |*sei_msg|, provided and managed
+ // by the caller.
+ Result ParseSEI(H264SEIMessage* sei_msg);
+
+ // The return value of this method changes for every successful call to
+ // AdvanceToNextNALU().
+ // This returns the subsample information for the last NALU that was output
+ // from AdvanceToNextNALU().
+ std::vector<SubsampleEntry> GetCurrentSubsamples();
+
+ private:
+ // Move the stream pointer to the beginning of the next NALU,
+ // i.e. pointing at the next start code.
+ // Return true if a NALU has been found.
+ // If a NALU is found:
+ // - its size in bytes is returned in |*nalu_size| and includes
+ // the start code as well as the trailing zero bits.
+ // - the size in bytes of the start code is returned in |*start_code_size|.
+ bool LocateNALU(off_t* nalu_size, off_t* start_code_size);
+
+ // Exp-Golomb code parsing as specified in chapter 9.1 of the spec.
+ // Read one unsigned exp-Golomb code from the stream and return in |*val|.
+ Result ReadUE(int* val);
+
+ // Read one signed exp-Golomb code from the stream and return in |*val|.
+ Result ReadSE(int* val);
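+  // For reference (spec 9.1): ue(v) decodes bit strings as 1 -> 0, 010 -> 1,
+  // 011 -> 2, 00100 -> 3, ...; se(v) maps the ue(v) code number k to
+  // (-1)^(k+1) * ceil(k / 2), giving 0, 1, -1, 2, -2, ...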
+
+ // Parse scaling lists (see spec).
+ Result ParseScalingList(int size, int* scaling_list, bool* use_default);
+ Result ParseSPSScalingLists(H264SPS* sps);
+ Result ParsePPSScalingLists(const H264SPS& sps, H264PPS* pps);
+
+ // Parse optional VUI parameters in SPS (see spec).
+ Result ParseVUIParameters(H264SPS* sps);
+ // Set |hrd_parameters_present| to true only if they are present.
+ Result ParseAndIgnoreHRDParameters(bool* hrd_parameters_present);
+
+ // Parse reference picture lists' modifications (see spec).
+ Result ParseRefPicListModifications(H264SliceHeader* shdr);
+ Result ParseRefPicListModification(int num_ref_idx_active_minus1,
+ H264ModificationOfPicNum* ref_list_mods);
+
+ // Parse prediction weight table (see spec).
+ Result ParsePredWeightTable(const H264SPS& sps, H264SliceHeader* shdr);
+
+ // Parse weighting factors (see spec).
+ Result ParseWeightingFactors(int num_ref_idx_active_minus1,
+ int chroma_array_type,
+ int luma_log2_weight_denom,
+ int chroma_log2_weight_denom,
+ H264WeightingFactors* w_facts);
+
+ // Parse decoded reference picture marking information (see spec).
+ Result ParseDecRefPicMarking(H264SliceHeader* shdr);
+
+ // Pointer to the current NALU in the stream.
+ const uint8_t* stream_;
+
+ // Bytes left in the stream after the current NALU.
+ off_t bytes_left_;
+
+ H264BitReader br_;
+
+ // PPSes and SPSes stored for future reference.
+ std::map<int, std::unique_ptr<H264SPS>> active_SPSes_;
+ std::map<int, std::unique_ptr<H264PPS>> active_PPSes_;
+
+ // Ranges of encrypted bytes in the buffer passed to
+ // SetEncryptedStream().
+ Ranges<const uint8_t*> encrypted_ranges_;
+
+  // This contains the range of the previous NALU found in
+  // AdvanceToNextNALU(). Holds exactly one range.
+ Ranges<const uint8_t*> previous_nalu_range_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264Parser);
+};
+
+} // namespace media
+
+#endif // H264_PARSER_H_
diff --git a/accel/macros.h b/accel/macros.h
new file mode 100644
index 0000000..22c3ad6
--- /dev/null
+++ b/accel/macros.h
@@ -0,0 +1,14 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MACROS_H_
+#define MACROS_H_
+
+#include "base/logging.h"
+
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
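+
+// For example, inside a function Foo(), VLOGF(1) << "bar"; logs
+// "Foo(): bar" at verbosity level 1.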
+
+#endif // MACROS_H_
diff --git a/accel/media_limits.h b/accel/media_limits.h
new file mode 100644
index 0000000..97f3ab3
--- /dev/null
+++ b/accel/media_limits.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: aa8dd1475415
+// Note: Changed name from "limits.h" to "media_limits.h" to prevent the
+// ambiguity with C library <limits.h>
+
+// Contains limit definition constants for the media subsystem.
+
+#ifndef MEDIA_LIMITS_H_
+#define MEDIA_LIMITS_H_
+
+namespace media {
+
+namespace limits {
+
+enum {
+ // Maximum possible dimension (width or height) for any video.
+ kMaxDimension = (1 << 15) - 1, // 32767
+
+ // Maximum possible canvas size (width multiplied by height) for any video.
+ kMaxCanvas = (1 << (14 * 2)), // 16384 x 16384
+
+  // Total number of video frames that may be in flight in the pipeline.
+ kMaxVideoFrames = 4,
+
+ // The following limits are used by AudioParameters::IsValid().
+ //
+ // A few notes on sample rates of common formats:
+ // - AAC files are limited to 96 kHz.
+ // - MP3 files are limited to 48 kHz.
+ // - Vorbis used to be limited to 96 kHz, but no longer has that
+ // restriction.
+ // - Most PC audio hardware is limited to 192 kHz, some specialized DAC
+ // devices will use 384 kHz though.
+ kMaxSampleRate = 384000,
+ kMinSampleRate = 3000,
+ kMaxChannels = 32,
+ kMaxBytesPerSample = 4,
+ kMaxBitsPerSample = kMaxBytesPerSample * 8,
+ kMaxSamplesPerPacket = kMaxSampleRate,
+ kMaxPacketSizeInBytes =
+ kMaxBytesPerSample * kMaxChannels * kMaxSamplesPerPacket,
+
+ // This limit is used by ParamTraits<VideoCaptureParams>.
+ kMaxFramesPerSecond = 1000,
+
+ // The minimum elapsed amount of time (in seconds) for a playback to be
+ // considered as having active engagement.
+ kMinimumElapsedWatchTimeSecs = 7,
+
+ // Maximum lengths for various EME API parameters. These are checks to
+ // prevent unnecessarily large parameters from being passed around, and the
+ // lengths are somewhat arbitrary as the EME spec doesn't specify any limits.
+ kMinCertificateLength = 128,
+ kMaxCertificateLength = 16 * 1024,
+ kMaxSessionIdLength = 512,
+ kMinKeyIdLength = 1,
+ kMaxKeyIdLength = 512,
+ kMaxKeyIds = 128,
+ kMaxInitDataLength = 64 * 1024, // 64 KB
+ kMaxSessionResponseLength = 64 * 1024, // 64 KB
+ kMaxKeySystemLength = 256,
+
+// Minimum and maximum buffer sizes for certain audio platforms.
+#if defined(OS_MACOSX)
+ kMinAudioBufferSize = 128,
+ kMaxAudioBufferSize = 4096,
+#elif defined(USE_CRAS)
+ // Though CRAS has different per-board defaults, allow explicitly requesting
+ // this buffer size on any board.
+ kMinAudioBufferSize = 256,
+ kMaxAudioBufferSize = 8192,
+#endif
+
+ // Maximum buffer size supported by Web Audio.
+ kMaxWebAudioBufferSize = 8192,
+
+ // Bounds for the number of threads used for software video decoding.
+ kMinVideoDecodeThreads = 2,
+ kMaxVideoDecodeThreads =
+ 16, // Matches ffmpeg's MAX_AUTO_THREADS. Higher values can result in
+ // immediate out of memory errors for high resolution content. See
+ // https://crbug.com/893984
+};
+
+} // namespace limits
+
+} // namespace media
+
+#endif // MEDIA_LIMITS_H_
diff --git a/accel/native_pixmap_handle.cc b/accel/native_pixmap_handle.cc
new file mode 100644
index 0000000..050a683
--- /dev/null
+++ b/accel/native_pixmap_handle.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a9d98e6
+
+#include "native_pixmap_handle.h"
+
+namespace media {
+
+NativePixmapPlane::NativePixmapPlane()
+ : stride(0), offset(0), size(0), modifier(0) {}
+
+NativePixmapPlane::NativePixmapPlane(int stride,
+ int offset,
+ uint64_t size,
+ uint64_t modifier)
+ : stride(stride), offset(offset), size(size), modifier(modifier) {}
+
+NativePixmapPlane::NativePixmapPlane(const NativePixmapPlane& other) = default;
+
+NativePixmapPlane::~NativePixmapPlane() {}
+
+NativePixmapHandle::NativePixmapHandle() {}
+NativePixmapHandle::NativePixmapHandle(const NativePixmapHandle& other) =
+ default;
+
+NativePixmapHandle::~NativePixmapHandle() {}
+
+} // namespace media
diff --git a/accel/native_pixmap_handle.h b/accel/native_pixmap_handle.h
new file mode 100644
index 0000000..62e2294
--- /dev/null
+++ b/accel/native_pixmap_handle.h
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a9d98e6
+
+#ifndef NATIVE_PIXMAP_HANDLE_H_
+#define NATIVE_PIXMAP_HANDLE_H_
+
+#include <vector>
+
+#include "base/file_descriptor_posix.h"
+
+namespace media {
+
+// NativePixmapPlane is used to carry plane-related information for a GBM
+// buffer. More fields can be added if they are plane specific.
+struct NativePixmapPlane {
+  // This is the same value as DRM_FORMAT_MOD_INVALID, which is not a valid
+  // modifier. We use this to indicate that layout information
+  // (tiling/compression), if any, will be communicated out of band.
+ static constexpr uint64_t kNoModifier = 0x00ffffffffffffffULL;
+
+ NativePixmapPlane();
+ NativePixmapPlane(int stride,
+ int offset,
+ uint64_t size,
+ uint64_t modifier = kNoModifier);
+ NativePixmapPlane(const NativePixmapPlane& other);
+ ~NativePixmapPlane();
+
+ // The strides and offsets in bytes to be used when accessing the buffers via
+ // a memory mapping. One per plane per entry.
+ int stride;
+ int offset;
+ // Size in bytes of the plane.
+ // This is necessary to map the buffers.
+ uint64_t size;
+ // The modifier is retrieved from GBM library and passed to EGL driver.
+ // Generally it's platform specific, and we don't need to modify it in
+ // Chromium code. Also one per plane per entry.
+ uint64_t modifier;
+};
+
+struct NativePixmapHandle {
+ NativePixmapHandle();
+ NativePixmapHandle(const NativePixmapHandle& other);
+
+ ~NativePixmapHandle();
+
+ // File descriptors for the underlying memory objects (usually dmabufs).
+ std::vector<base::FileDescriptor> fds;
+ std::vector<NativePixmapPlane> planes;
+};
+
+} // namespace media
+
+#endif // NATIVE_PIXMAP_HANDLE_H_
diff --git a/accel/picture.cc b/accel/picture.cc
new file mode 100644
index 0000000..8933bc5
--- /dev/null
+++ b/accel/picture.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "picture.h"
+
+namespace media {
+
+PictureBuffer::PictureBuffer(int32_t id, const Size& size)
+ : id_(id), size_(size) {}
+
+PictureBuffer::PictureBuffer(int32_t id,
+ const Size& size,
+ VideoPixelFormat pixel_format)
+ : id_(id),
+ size_(size),
+ pixel_format_(pixel_format) {}
+
+PictureBuffer::PictureBuffer(const PictureBuffer& other) = default;
+
+PictureBuffer::~PictureBuffer() = default;
+
+Picture::Picture(int32_t picture_buffer_id,
+ int32_t bitstream_buffer_id,
+ const Rect& visible_rect,
+ bool allow_overlay)
+ : picture_buffer_id_(picture_buffer_id),
+ bitstream_buffer_id_(bitstream_buffer_id),
+ visible_rect_(visible_rect),
+ allow_overlay_(allow_overlay) {}
+
+Picture::Picture(const Picture& other) = default;
+
+Picture::~Picture() = default;
+
+} // namespace media
diff --git a/accel/picture.h b/accel/picture.h
new file mode 100644
index 0000000..e07b677
--- /dev/null
+++ b/accel/picture.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: d264e47
+
+#ifndef PICTURE_H_
+#define PICTURE_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "rect.h"
+#include "size.h"
+#include "video_pixel_format.h"
+
+namespace media {
+
+// A picture buffer that has size and pixel format information.
+class PictureBuffer {
+ public:
+ PictureBuffer(int32_t id, const Size& size);
+ PictureBuffer(int32_t id,
+ const Size& size,
+ VideoPixelFormat pixel_format);
+ PictureBuffer(const PictureBuffer& other);
+ ~PictureBuffer();
+
+ // Returns the client-specified id of the buffer.
+ int32_t id() const { return id_; }
+
+ // Returns the size of the buffer.
+ Size size() const { return size_; }
+
+ void set_size(const Size& size) { size_ = size; }
+
+ VideoPixelFormat pixel_format() const { return pixel_format_; }
+
+ private:
+ int32_t id_;
+ Size size_;
+ VideoPixelFormat pixel_format_ = PIXEL_FORMAT_UNKNOWN;
+};
+
+// A decoded picture frame.
+class Picture {
+ public:
+ Picture(int32_t picture_buffer_id,
+ int32_t bitstream_buffer_id,
+ const Rect& visible_rect,
+ bool allow_overlay);
+ Picture(const Picture&);
+ ~Picture();
+
+ // Returns the id of the picture buffer where this picture is contained.
+ int32_t picture_buffer_id() const { return picture_buffer_id_; }
+
+ // Returns the id of the bitstream buffer from which this frame was decoded.
+ int32_t bitstream_buffer_id() const { return bitstream_buffer_id_; }
+
+ void set_bitstream_buffer_id(int32_t bitstream_buffer_id) {
+ bitstream_buffer_id_ = bitstream_buffer_id;
+ }
+
+ // Returns the visible rectangle of the picture. Its size may be smaller
+ // than the size of the PictureBuffer, as it is the only visible part of the
+ // Picture contained in the PictureBuffer.
+ Rect visible_rect() const { return visible_rect_; }
+
+ bool allow_overlay() const { return allow_overlay_; }
+
+ private:
+ int32_t picture_buffer_id_;
+ int32_t bitstream_buffer_id_;
+ Rect visible_rect_;
+ bool allow_overlay_;
+};
+
+} // namespace media
+
+#endif // PICTURE_H_
diff --git a/accel/ranges.cc b/accel/ranges.cc
new file mode 100644
index 0000000..4394011
--- /dev/null
+++ b/accel/ranges.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a4f94d3
+
+#include "ranges.h"
+
+namespace media {
+
+template<>
+void Ranges<base::TimeDelta>::DCheckLT(const base::TimeDelta& lhs,
+ const base::TimeDelta& rhs) const {
+ DCHECK(lhs < rhs) << lhs.ToInternalValue() << " < " << rhs.ToInternalValue();
+}
+
+} // namespace media
diff --git a/accel/ranges.h b/accel/ranges.h
new file mode 100644
index 0000000..6a76ae4
--- /dev/null
+++ b/accel/ranges.h
@@ -0,0 +1,163 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
+
+#ifndef RANGES_H_
+#define RANGES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <ostream>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace media {
+
+// Ranges allows holding an ordered list of ranges of [start,end) intervals.
+// The canonical example use-case is holding the list of ranges of buffered
+// bytes or times in a <video> tag.
+template <class T> // Endpoint type; typically a base::TimeDelta or an int64_t.
+class Ranges {
+ public:
+ // Allow copy & assign.
+
+  // Add [start,end) to this object, coalescing overlaps as appropriate.
+  // Returns the number of stored ranges, post coalescing.
+ size_t Add(T start, T end);
+
+ // Return the number of disjoint ranges.
+ size_t size() const;
+
+ // Return the "i"'th range's start & end (0-based).
+ T start(size_t i) const;
+ T end(size_t i) const;
+
+ // Clear all ranges.
+ void clear();
+
+ // Computes the intersection between this range and |other|.
+ Ranges<T> IntersectionWith(const Ranges<T>& other) const;
+
+ private:
+ // Wrapper around DCHECK_LT allowing comparisons of operator<<'able T's.
+ void DCheckLT(const T& lhs, const T& rhs) const;
+
+ // Disjoint, in increasing order of start.
+ std::vector<std::pair<T, T> > ranges_;
+};
+
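+// A minimal usage sketch: Add() coalesces overlapping intervals, e.g.
+//   Ranges<int> r;
+//   r.Add(0, 10); r.Add(20, 30);  // two disjoint ranges, size() == 2
+//   r.Add(5, 25);                 // bridges them: size() == 1, i.e. [0,30)
+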
+//////////////////////////////////////////////////////////////////////
+// EVERYTHING BELOW HERE IS IMPLEMENTATION DETAIL!!
+//////////////////////////////////////////////////////////////////////
+
+template<class T>
+size_t Ranges<T>::Add(T start, T end) {
+ if (start == end) // Nothing to be done with empty ranges.
+ return ranges_.size();
+
+ DCheckLT(start, end);
+ size_t i;
+ // Walk along the array of ranges until |start| is no longer larger than the
+ // current interval's end.
+ for (i = 0; i < ranges_.size() && ranges_[i].second < start; ++i) {
+ // Empty body
+ }
+
+ // Now we know |start| belongs in the i'th slot.
+ // If i is the end of the range, append new range and done.
+ if (i == ranges_.size()) {
+ ranges_.push_back(std::make_pair(start, end));
+ return ranges_.size();
+ }
+
+ // If |end| is less than i->first, then [start,end) is a new (non-overlapping)
+ // i'th entry pushing everyone else back, and done.
+ if (end < ranges_[i].first) {
+ ranges_.insert(ranges_.begin() + i, std::make_pair(start, end));
+ return ranges_.size();
+ }
+
+ // Easy cases done. Getting here means there is overlap between [start,end)
+ // and the existing ranges.
+
+ // Now: start <= i->second && i->first <= end
+ if (start < ranges_[i].first)
+ ranges_[i].first = start;
+ if (ranges_[i].second < end)
+ ranges_[i].second = end;
+
+ // Now: [start,end) is contained in the i'th range, and we'd be done, except
+ // for the fact that the newly-extended i'th range might now overlap
+ // subsequent ranges. Merge until discontinuities appear. Note that there's
+ // no need to test/merge previous ranges, since needing that would mean the
+ // original loop went too far.
+ while ((i + 1) < ranges_.size() &&
+ ranges_[i + 1].first <= ranges_[i].second) {
+ ranges_[i].second = std::max(ranges_[i].second, ranges_[i + 1].second);
+ ranges_.erase(ranges_.begin() + i + 1);
+ }
+
+ return ranges_.size();
+}
+
+template<>
+void Ranges<base::TimeDelta>::DCheckLT(const base::TimeDelta& lhs,
+ const base::TimeDelta& rhs) const;
+
+template<class T>
+void Ranges<T>::DCheckLT(const T& lhs, const T& rhs) const {
+ DCHECK_LT(lhs, rhs);
+}
+
+template<class T>
+size_t Ranges<T>::size() const {
+ return ranges_.size();
+}
+
+template<class T>
+T Ranges<T>::start(size_t i) const {
+ return ranges_[i].first;
+}
+
+template<class T>
+T Ranges<T>::end(size_t i) const {
+ return ranges_[i].second;
+}
+
+template<class T>
+void Ranges<T>::clear() {
+ ranges_.clear();
+}
+
+template<class T>
+Ranges<T> Ranges<T>::IntersectionWith(const Ranges<T>& other) const {
+ Ranges<T> result;
+
+ size_t i = 0;
+ size_t j = 0;
+
+ while (i < size() && j < other.size()) {
+ T max_start = std::max(start(i), other.start(j));
+ T min_end = std::min(end(i), other.end(j));
+
+ // Add an intersection range to the result if the ranges overlap.
+ if (max_start < min_end)
+ result.Add(max_start, min_end);
+
+ if (end(i) < other.end(j))
+ ++i;
+ else
+ ++j;
+ }
+
+ return result;
+}
+
+} // namespace media
+
+#endif // RANGES_H_
diff --git a/accel/rect.h b/accel/rect.h
new file mode 100644
index 0000000..b98522d
--- /dev/null
+++ b/accel/rect.h
@@ -0,0 +1,148 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 8a796386c11a
+// Note: only necessary functions are ported from gfx::Rect
+
+// Defines a simple integer rectangle class. The containment semantics
+// are array-like; that is, the coordinate (x, y) is considered to be
+// contained by the rectangle, but the coordinate (x + width, y) is not.
+// The class will happily let you create malformed rectangles (that is,
+// rectangles with negative width and/or height), but there will be assertions
+// in the operations (such as Contains()) to complain in this case.
+
+#ifndef RECT_H_
+#define RECT_H_
+
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "size.h"
+
+namespace media {
+
+// Helper class for rectangles, replacing gfx::Rect usage from the original
+// code. Only a subset of gfx::Rect's functionality is implemented here.
+class Rect {
+ public:
+ constexpr Rect() = default;
+ constexpr Rect(int width, int height) : size_(width, height) {}
+ constexpr Rect(int x, int y, int width, int height)
+ : x_(x),
+ y_(y),
+ size_(GetClampedValue(x, width), GetClampedValue(y, height)) {}
+ constexpr explicit Rect(const Size& size) : size_(size) {}
+
+ constexpr int x() const { return x_; }
+ // Sets the X position while preserving the width.
+ void set_x(int x) {
+ x_ = x;
+ size_.set_width(GetClampedValue(x, width()));
+ }
+
+ constexpr int y() const { return y_; }
+ // Sets the Y position while preserving the height.
+ void set_y(int y) {
+ y_ = y;
+ size_.set_height(GetClampedValue(y, height()));
+ }
+
+ constexpr int width() const { return size_.width(); }
+ void set_width(int width) { size_.set_width(GetClampedValue(x(), width)); }
+
+ constexpr int height() const { return size_.height(); }
+ void set_height(int height) {
+ size_.set_height(GetClampedValue(y(), height));
+ }
+
+ constexpr const Size& size() const { return size_; }
+ void set_size(const Size& size) {
+ set_width(size.width());
+ set_height(size.height());
+ }
+
+ constexpr int right() const { return x() + width(); }
+ constexpr int bottom() const { return y() + height(); }
+
+ void SetRect(int x, int y, int width, int height) {
+ set_x(x);
+ set_y(y);
+ // Ensure that width and height remain valid.
+ set_width(width);
+ set_height(height);
+ }
+
+ // Returns true if the area of the rectangle is zero.
+ bool IsEmpty() const { return size_.IsEmpty(); }
+
+ // Returns true if this rectangle contains the specified rectangle.
+ bool Contains(const Rect& rect) const {
+ return (rect.x() >= x() && rect.right() <= right() && rect.y() >= y() &&
+ rect.bottom() <= bottom());
+ }
+
+ // Computes the intersection of this rectangle with the given rectangle.
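+  // For example, intersecting (0,0) 100x100 with (50,50) 100x100 leaves
+  // (50,50) 50x50.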
+ void Intersect(const Rect& rect) {
+ if (IsEmpty() || rect.IsEmpty()) {
+ SetRect(0, 0, 0, 0); // Throws away empty position.
+ return;
+ }
+
+ int left = std::max(x(), rect.x());
+ int top = std::max(y(), rect.y());
+ int new_right = std::min(right(), rect.right());
+ int new_bottom = std::min(bottom(), rect.bottom());
+
+ if (left >= new_right || top >= new_bottom) {
+ SetRect(0, 0, 0, 0); // Throws away empty position.
+ return;
+ }
+
+ SetRect(left, top, new_right - left, new_bottom - top);
+ }
+
+ std::string ToString() const {
+ return base::StringPrintf("(%d,%d) %s",
+ x_, y_, size().ToString().c_str());
+ }
+
+ private:
+ int x_ = 0;
+ int y_ = 0;
+ Size size_;
+
+ // Returns true iff a+b would overflow max int.
+ static constexpr bool AddWouldOverflow(int a, int b) {
+ // In this function, GCC tries to make optimizations that would only work if
+ // max - a wouldn't overflow but it isn't smart enough to notice that a > 0.
+ // So cast everything to unsigned to avoid this. As it is guaranteed that
+ // max - a and b are both already positive, the cast is a noop.
+ //
+ // This is intended to be: a > 0 && max - a < b
+ return a > 0 && b > 0 &&
+ static_cast<unsigned>(std::numeric_limits<int>::max() - a) <
+ static_cast<unsigned>(b);
+ }
+
+ // Clamp the size to avoid integer overflow in bottom() and right().
+ // This returns the width given an origin and a width.
+ // TODO(enne): this should probably use base::ClampAdd, but that
+ // function is not a constexpr.
+ static constexpr int GetClampedValue(int origin, int size) {
+ return AddWouldOverflow(origin, size)
+ ? std::numeric_limits<int>::max() - origin
+ : size;
+ }
+};
+
+inline bool operator==(const Rect& lhs, const Rect& rhs) {
+ return lhs.x() == rhs.x() && lhs.y() == rhs.y() && lhs.size() == rhs.size();
+}
+
+inline bool operator!=(const Rect& lhs, const Rect& rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace media
+
+#endif // RECT_H_
diff --git a/accel/shared_memory_region.cc b/accel/shared_memory_region.cc
new file mode 100644
index 0000000..775a5f2
--- /dev/null
+++ b/accel/shared_memory_region.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
+
+#include "base/sys_info.h"
+#include "shared_memory_region.h"
+
+namespace media {
+
+SharedMemoryRegion::SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+ off_t offset,
+ size_t size,
+ bool read_only)
+ : shm_(handle, read_only),
+ offset_(offset),
+ size_(size),
+ alignment_size_(offset % base::SysInfo::VMAllocationGranularity()) {
+ DCHECK_GE(offset_, 0) << "Invalid offset: " << offset_;
+}
+
+SharedMemoryRegion::SharedMemoryRegion(const BitstreamBuffer& bitstream_buffer,
+ bool read_only)
+ : SharedMemoryRegion(bitstream_buffer.handle(),
+ bitstream_buffer.offset(),
+ bitstream_buffer.size(),
+ read_only) {}
+
+bool SharedMemoryRegion::Map() {
+ if (offset_ < 0) {
+ DVLOG(1) << "Invalid offset: " << offset_;
+ return false;
+ }
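+  // shm_.MapAt() requires an offset aligned to VMAllocationGranularity(), so
+  // map from the aligned-down offset and compensate by |alignment_size_| in
+  // memory(). E.g. with a 4096-byte granularity and offset_ == 5000,
+  // alignment_size_ == 904 and the mapping starts at 4096.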
+ return shm_.MapAt(offset_ - alignment_size_, size_ + alignment_size_);
+}
+
+void* SharedMemoryRegion::memory() {
+ int8_t* addr = reinterpret_cast<int8_t*>(shm_.memory());
+ return addr ? addr + alignment_size_ : nullptr;
+}
+
+} // namespace media
diff --git a/accel/shared_memory_region.h b/accel/shared_memory_region.h
new file mode 100644
index 0000000..3c5d4b3
--- /dev/null
+++ b/accel/shared_memory_region.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
+
+#ifndef SHARED_MEMORY_REGION_H_
+#define SHARED_MEMORY_REGION_H_
+
+#include "base/memory/shared_memory.h"
+#include "bitstream_buffer.h"
+
+namespace media {
+
+// Helper class to access a region of a SharedMemory. Unlike SharedMemory,
+// whose MapAt() requires |offset| to be aligned to
+// |SysInfo::VMAllocationGranularity()|, the |offset| of a SharedMemoryRegion
+// need not be aligned; this class hides the details and returns the mapped
+// address at the given offset.
+class SharedMemoryRegion {
+ public:
+ // Creates a SharedMemoryRegion.
+ // The mapped memory region begins at |offset| bytes from the start of the
+  // shared memory and the length is |size|. It takes ownership of |handle|
+  // and releases the resource when destroyed. Unlike SharedMemory, |offset|
+  // need not be aligned to |SysInfo::VMAllocationGranularity()|.
+ SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+ off_t offset,
+ size_t size,
+ bool read_only);
+
+  // Creates a SharedMemoryRegion from the given |bitstream_buffer|.
+ SharedMemoryRegion(const BitstreamBuffer& bitstream_buffer, bool read_only);
+
+ // Maps the shared memory into the caller's address space.
+ // Return true on success, false otherwise.
+ bool Map();
+
+ // Gets a pointer to the mapped region if it has been mapped via Map().
+ // Returns |nullptr| if it is not mapped. The returned pointer points
+ // to the memory at the offset previously passed to the constructor.
+ void* memory();
+
+ size_t size() const { return size_; }
+
+ private:
+ base::SharedMemory shm_;
+ off_t offset_;
+ size_t size_;
+ size_t alignment_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryRegion);
+};
+
+} // namespace media
+
+#endif // SHARED_MEMORY_REGION_H_
diff --git a/accel/size.h b/accel/size.h
new file mode 100644
index 0000000..265dc55
--- /dev/null
+++ b/accel/size.h
@@ -0,0 +1,73 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 4db7af61f923
+// Note: only necessary functions are ported from gfx::Size
+
+#ifndef SIZE_H_
+#define SIZE_H_
+
+#include <string>
+
+#include "base/numerics/safe_math.h"
+#include "base/strings/stringprintf.h"
+
+namespace media {
+
+// Helper struct for sizes, replacing gfx::Size usage from the original code.
+// Only a subset of gfx::Size's functionality is implemented here.
+struct Size {
+ public:
+ constexpr Size() : width_(0), height_(0) {}
+ constexpr Size(int width, int height)
+ : width_(std::max(0, width)), height_(std::max(0, height)) {}
+
+ Size& operator=(const Size& ps) {
+ set_width(ps.width());
+ set_height(ps.height());
+ return *this;
+ }
+
+ constexpr int width() const { return width_; }
+ constexpr int height() const { return height_; }
+
+ void set_width(int width) { width_ = std::max(0, width); }
+ void set_height(int height) { height_ = std::max(0, height); }
+
+ // This call will CHECK if the area of this size would overflow int.
+ int GetArea() const { return GetCheckedArea().ValueOrDie(); }
+
+ // Returns a checked numeric representation of the area.
+ base::CheckedNumeric<int> GetCheckedArea() const {
+ base::CheckedNumeric<int> checked_area = width();
+ checked_area *= height();
+ return checked_area;
+ }
+
+ void SetSize(int width, int height) {
+ set_width(width);
+ set_height(height);
+ }
+
+ bool IsEmpty() const { return !width() || !height(); }
+
+ std::string ToString() const {
+ return base::StringPrintf("%dx%d", width(), height());
+ }
+
+ private:
+ int width_;
+ int height_;
+};
+
+inline bool operator==(const Size& lhs, const Size& rhs) {
+ return lhs.width() == rhs.width() && lhs.height() == rhs.height();
+}
+
+inline bool operator!=(const Size& lhs, const Size& rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace media
+
+#endif // SIZE_H_
diff --git a/accel/subsample_entry.h b/accel/subsample_entry.h
new file mode 100644
index 0000000..1e0bfad
--- /dev/null
+++ b/accel/subsample_entry.h
@@ -0,0 +1,32 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 7014d6d
+
+#ifndef SUBSAMPLE_ENTRY_H_
+#define SUBSAMPLE_ENTRY_H_
+
+#include <stdint.h>
+
+namespace media {
+
+// The Common Encryption spec provides for subsample encryption, where portions
+// of a sample are left in cleartext. A SubsampleEntry specifies the number of
+// clear and encrypted bytes in each subsample. For decryption, all of the
+// encrypted bytes in a sample should be considered a single logical stream,
+// regardless of how they are divided into subsamples, and the clear bytes
+// should not be considered as part of decryption. This is logically equivalent
+// to concatenating all 'cypher_bytes' portions of subsamples, decrypting that
+// result, and then copying each byte from the decrypted block over the
+// position of the corresponding encrypted byte.
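+//
+// For example, with subsamples {clear=2, cypher=3} and {clear=4, cypher=2},
+// bytes [2,5) and [9,11) of the sample form one logical 5-byte cipher
+// stream, while bytes [0,2) and [5,9) remain clear.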
+struct SubsampleEntry {
+ SubsampleEntry() : clear_bytes(0), cypher_bytes(0) {}
+ SubsampleEntry(uint32_t clear_bytes, uint32_t cypher_bytes)
+ : clear_bytes(clear_bytes), cypher_bytes(cypher_bytes) {}
+ uint32_t clear_bytes;
+ uint32_t cypher_bytes;
+};
+
+} // namespace media
+
+#endif // SUBSAMPLE_ENTRY_H_
diff --git a/accel/v4l2_device.cc b/accel/v4l2_device.cc
new file mode 100644
index 0000000..5c258ab
--- /dev/null
+++ b/accel/v4l2_device.cc
@@ -0,0 +1,1858 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2f13d62f0c0d
+// Note: Added some missing defines that are only defined in newer kernel
+// versions (e.g. V4L2_PIX_FMT_VP8_FRAME)
+
+#include "v4l2_device.h"
+
+#include <fcntl.h>
+#include <linux/media.h>
+#include <linux/videodev2.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <algorithm>
+#include <set>
+#include <sstream>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+
+#include "color_plane_layout.h"
+#include "generic_v4l2_device.h"
+#include "macros.h"
+#include "video_pixel_format.h"
+
+// VP8 parsed frames
+#ifndef V4L2_PIX_FMT_VP8_FRAME
+#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F')
+#endif
+
+// VP9 parsed frames
+#ifndef V4L2_PIX_FMT_VP9_FRAME
+#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F')
+#endif
+
+// H264 parsed slices
+#ifndef V4L2_PIX_FMT_H264_SLICE
+#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4')
+#endif
+
+#define REQUEST_DEVICE "/dev/media-dec0"
+
+namespace media {
+
+V4L2ExtCtrl::V4L2ExtCtrl(uint32_t id) {
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = id;
+}
+
+V4L2ExtCtrl::V4L2ExtCtrl(uint32_t id, int32_t val) : V4L2ExtCtrl(id) {
+ ctrl.value = val;
+}
+
+// Class used to store the state of a buffer that should persist between
+// reference creations. This includes:
+// * Result of initial VIDIOC_QUERYBUF ioctl,
+// * Plane mappings.
+//
+// Also provides helper functions.
+class V4L2Buffer {
+ public:
+ static std::unique_ptr<V4L2Buffer> Create(scoped_refptr<V4L2Device> device,
+ enum v4l2_buf_type type,
+ enum v4l2_memory memory,
+ const struct v4l2_format& format,
+ size_t buffer_id);
+ ~V4L2Buffer();
+
+ void* GetPlaneMapping(const size_t plane);
+ size_t GetMemoryUsage() const;
+ const struct v4l2_buffer& v4l2_buffer() const { return v4l2_buffer_; }
+
+ private:
+ V4L2Buffer(scoped_refptr<V4L2Device> device,
+ enum v4l2_buf_type type,
+ enum v4l2_memory memory,
+ const struct v4l2_format& format,
+ size_t buffer_id);
+ bool Query();
+
+ scoped_refptr<V4L2Device> device_;
+ std::vector<void*> plane_mappings_;
+
+ // V4L2 data as queried by QUERYBUF.
+ struct v4l2_buffer v4l2_buffer_;
+ // WARNING: do not change this to a vector or something smaller than
+ // VIDEO_MAX_PLANES, otherwise the Tegra libv4l2 will write data beyond
+ // the number of allocated planes, resulting in memory corruption.
+ struct v4l2_plane v4l2_planes_[VIDEO_MAX_PLANES];
+
+ struct v4l2_format format_ __attribute__((unused));
+ scoped_refptr<VideoFrame> video_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2Buffer);
+};
+
+std::unique_ptr<V4L2Buffer> V4L2Buffer::Create(scoped_refptr<V4L2Device> device,
+ enum v4l2_buf_type type,
+ enum v4l2_memory memory,
+ const struct v4l2_format& format,
+ size_t buffer_id) {
+ // Not using std::make_unique because constructor is private.
+ std::unique_ptr<V4L2Buffer> buffer(
+ new V4L2Buffer(device, type, memory, format, buffer_id));
+
+ if (!buffer->Query())
+ return nullptr;
+
+ return buffer;
+}
+
+V4L2Buffer::V4L2Buffer(scoped_refptr<V4L2Device> device,
+ enum v4l2_buf_type type,
+ enum v4l2_memory memory,
+ const struct v4l2_format& format,
+ size_t buffer_id)
+ : device_(device), format_(format) {
+ DCHECK(V4L2_TYPE_IS_MULTIPLANAR(type));
+ DCHECK_LE(format.fmt.pix_mp.num_planes, base::size(v4l2_planes_));
+
+ memset(v4l2_planes_, 0, sizeof(v4l2_planes_));
+ memset(&v4l2_buffer_, 0, sizeof(v4l2_buffer_));
+ v4l2_buffer_.m.planes = v4l2_planes_;
+ // Just in case we got more planes than we want.
+ v4l2_buffer_.length =
+ std::min(static_cast<size_t>(format.fmt.pix_mp.num_planes),
+ base::size(v4l2_planes_));
+ v4l2_buffer_.index = buffer_id;
+ v4l2_buffer_.type = type;
+ v4l2_buffer_.memory = memory;
+ plane_mappings_.resize(v4l2_buffer_.length);
+}
+
+V4L2Buffer::~V4L2Buffer() {
+ if (v4l2_buffer_.memory == V4L2_MEMORY_MMAP) {
+ for (size_t i = 0; i < plane_mappings_.size(); i++)
+ if (plane_mappings_[i] != nullptr)
+ device_->Munmap(plane_mappings_[i], v4l2_buffer_.m.planes[i].length);
+ }
+}
+
+bool V4L2Buffer::Query() {
+ int ret = device_->Ioctl(VIDIOC_QUERYBUF, &v4l2_buffer_);
+ if (ret) {
+ VPLOGF(1) << "VIDIOC_QUERYBUF failed: ";
+ return false;
+ }
+
+ DCHECK(plane_mappings_.size() == v4l2_buffer_.length);
+
+ return true;
+}
+
+void* V4L2Buffer::GetPlaneMapping(const size_t plane) {
+ if (plane >= plane_mappings_.size()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return nullptr;
+ }
+
+ void* p = plane_mappings_[plane];
+ if (p)
+ return p;
+
+ // Do this check here to avoid repeating it after a buffer has been
+ // successfully mapped (we know we are of MMAP type by then).
+ if (v4l2_buffer_.memory != V4L2_MEMORY_MMAP) {
+ VLOGF(1) << "Cannot create mapping on non-MMAP buffer";
+ return nullptr;
+ }
+
+ p = device_->Mmap(NULL, v4l2_buffer_.m.planes[plane].length,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ v4l2_buffer_.m.planes[plane].m.mem_offset);
+ if (p == MAP_FAILED) {
+ VPLOGF(1) << "mmap() failed: ";
+ return nullptr;
+ }
+
+ plane_mappings_[plane] = p;
+ return p;
+}
+
+size_t V4L2Buffer::GetMemoryUsage() const {
+ size_t usage = 0;
+ for (size_t i = 0; i < v4l2_buffer_.length; i++) {
+ usage += v4l2_buffer_.m.planes[i].length;
+ }
+ return usage;
+}
+
+// A thread-safe pool of buffer indexes, allowing buffers to be obtained and
+// returned from different threads. All the methods of this class are
+// thread-safe. Users should keep a scoped_refptr to instances of this class
+// in order to ensure the list remains alive as long as they need it.
+class V4L2BuffersList : public base::RefCountedThreadSafe<V4L2BuffersList> {
+ public:
+ V4L2BuffersList() = default;
+ // Return a buffer to this list. Also can be called to set the initial pool
+ // of buffers.
+ // Note that it is illegal to return the same buffer twice.
+ void ReturnBuffer(size_t buffer_id);
+ // Get any of the buffers in the list. There is no order guarantee whatsoever.
+ base::Optional<size_t> GetFreeBuffer();
+ // Get the buffer with specified index.
+ base::Optional<size_t> GetFreeBuffer(size_t requested_buffer_id);
+ // Number of buffers currently in this list.
+ size_t size() const;
+
+ private:
+ friend class base::RefCountedThreadSafe<V4L2BuffersList>;
+ ~V4L2BuffersList() = default;
+
+ mutable base::Lock lock_;
+ std::set<size_t> free_buffers_ GUARDED_BY(lock_);
+ DISALLOW_COPY_AND_ASSIGN(V4L2BuffersList);
+};
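+
+// Usage sketch (hypothetical caller, for illustration only):
+//   scoped_refptr<V4L2BuffersList> list(new V4L2BuffersList());
+//   list->ReturnBuffer(0);  // Seed the pool with buffer index 0.
+//   base::Optional<size_t> id = list->GetFreeBuffer();  // Yields 0.
+//   // Returning index 0 again without first acquiring it would DCHECK.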
+
+void V4L2BuffersList::ReturnBuffer(size_t buffer_id) {
+ base::AutoLock auto_lock(lock_);
+
+ auto inserted = free_buffers_.emplace(buffer_id);
+ DCHECK(inserted.second);
+}
+
+base::Optional<size_t> V4L2BuffersList::GetFreeBuffer() {
+ base::AutoLock auto_lock(lock_);
+
+ auto iter = free_buffers_.begin();
+ if (iter == free_buffers_.end()) {
+ DVLOGF(4) << "No free buffer available!";
+ return base::nullopt;
+ }
+
+ size_t buffer_id = *iter;
+ free_buffers_.erase(iter);
+
+ return buffer_id;
+}
+
+base::Optional<size_t> V4L2BuffersList::GetFreeBuffer(
+ size_t requested_buffer_id) {
+ base::AutoLock auto_lock(lock_);
+
+ return (free_buffers_.erase(requested_buffer_id) > 0)
+ ? base::make_optional(requested_buffer_id)
+ : base::nullopt;
+}
+
+size_t V4L2BuffersList::size() const {
+ base::AutoLock auto_lock(lock_);
+
+ return free_buffers_.size();
+}
+
+// Module-private class that lets users query/write V4L2 buffer information.
+// It also makes some private V4L2Queue methods available to this module only.
+class V4L2BufferRefBase {
+ public:
+ V4L2BufferRefBase(const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue);
+ ~V4L2BufferRefBase();
+
+ bool QueueBuffer();
+ void* GetPlaneMapping(const size_t plane);
+
+ // Checks that the number of passed FDs is adequate for the current format
+ // and buffer configuration. Only useful for DMABUF buffers.
+ bool CheckNumFDsForFormat(const size_t num_fds) const;
+
+ // Data from the buffer, that users can query and/or write.
+ struct v4l2_buffer v4l2_buffer_;
+ // WARNING: do not change this to a vector or something smaller than
+ // VIDEO_MAX_PLANES, otherwise the Tegra libv4l2 will write data beyond
+ // the number of allocated planes, resulting in memory corruption.
+ struct v4l2_plane v4l2_planes_[VIDEO_MAX_PLANES];
+
+ private:
+ size_t BufferId() const { return v4l2_buffer_.index; }
+
+ friend class V4L2WritableBufferRef;
+ // A weak pointer to the queue this buffer belongs to. Will remain valid as
+ // long as the underlying V4L2 buffer is valid too.
+ // This can only be accessed from the sequence protected by sequence_checker_.
+ // Thread-safe methods (like ~V4L2BufferRefBase) must *never* access this.
+ base::WeakPtr<V4L2Queue> queue_;
+ // Where to return this buffer if it goes out of scope without being queued.
+ scoped_refptr<V4L2BuffersList> return_to_;
+ bool queued = false;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ DISALLOW_COPY_AND_ASSIGN(V4L2BufferRefBase);
+};
+
+V4L2BufferRefBase::V4L2BufferRefBase(const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue)
+ : queue_(std::move(queue)), return_to_(queue_->free_buffers_) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(V4L2_TYPE_IS_MULTIPLANAR(v4l2_buffer.type));
+ DCHECK_LE(v4l2_buffer.length, base::size(v4l2_planes_));
+ DCHECK(return_to_);
+
+ memcpy(&v4l2_buffer_, &v4l2_buffer, sizeof(v4l2_buffer_));
+ memcpy(v4l2_planes_, v4l2_buffer.m.planes,
+ sizeof(struct v4l2_plane) * v4l2_buffer.length);
+ v4l2_buffer_.m.planes = v4l2_planes_;
+}
+
+V4L2BufferRefBase::~V4L2BufferRefBase() {
+ // We are the last reference and are only accessing the thread-safe
+ // return_to_, so we are safe to call from any sequence.
+ // If we have been queued, then the queue is our owner so we don't need to
+ // return to the free buffers list.
+ if (!queued)
+ return_to_->ReturnBuffer(BufferId());
+}
+
+bool V4L2BufferRefBase::QueueBuffer() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (!queue_)
+ return false;
+
+ queued = queue_->QueueBuffer(&v4l2_buffer_);
+
+ return queued;
+}
+
+void* V4L2BufferRefBase::GetPlaneMapping(const size_t plane) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (!queue_)
+ return nullptr;
+
+ return queue_->buffers_[BufferId()]->GetPlaneMapping(plane);
+}
+
+bool V4L2BufferRefBase::CheckNumFDsForFormat(const size_t num_fds) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (!queue_)
+ return false;
+
+ // We have not used SetFormat(), assume this is ok.
+ // Hopefully we standardize SetFormat() in the future.
+ if (!queue_->current_format_)
+ return true;
+
+ const size_t required_fds = queue_->current_format_->fmt.pix_mp.num_planes;
+ // Sanity check.
+ DCHECK_EQ(v4l2_buffer_.length, required_fds);
+ if (num_fds < required_fds) {
+ VLOGF(1) << "Insufficient number of FDs given for the current format. "
+ << num_fds << " provided, " << required_fds << " required.";
+ return false;
+ }
+
+ const auto* planes = v4l2_buffer_.m.planes;
+ for (size_t i = v4l2_buffer_.length - 1; i >= num_fds; --i) {
+ // Assume that an fd is a duplicate of a previous plane's fd if offset != 0.
+ // Otherwise, if offset == 0, return error as it is likely pointing to
+ // a new plane.
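+    // Example (illustrative): an NV12M buffer backed by a single dmabuf may
+    // be queued with one fd; its second plane then carries a non-zero
+    // data_offset and is accepted here as a duplicate of plane 0's fd.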
+ if (planes[i].data_offset == 0) {
+ VLOGF(1) << "Additional dmabuf fds point to a new buffer.";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+V4L2WritableBufferRef::V4L2WritableBufferRef(
+ const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue)
+ : buffer_data_(
+ std::make_unique<V4L2BufferRefBase>(v4l2_buffer, std::move(queue))) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+V4L2WritableBufferRef::V4L2WritableBufferRef(V4L2WritableBufferRef&& other)
+ : buffer_data_(std::move(other.buffer_data_)) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(other.sequence_checker_);
+}
+
+V4L2WritableBufferRef::~V4L2WritableBufferRef() {
+ // Only valid references should be sequence-checked
+ if (buffer_data_) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ }
+}
+
+V4L2WritableBufferRef& V4L2WritableBufferRef::operator=(
+ V4L2WritableBufferRef&& other) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(other.sequence_checker_);
+
+ if (this == &other)
+ return *this;
+
+ buffer_data_ = std::move(other.buffer_data_);
+
+ return *this;
+}
+
+enum v4l2_memory V4L2WritableBufferRef::Memory() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return static_cast<enum v4l2_memory>(buffer_data_->v4l2_buffer_.memory);
+}
+
+bool V4L2WritableBufferRef::DoQueue(V4L2RequestRef* /*request_ref*/) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ bool queued = buffer_data_->QueueBuffer();
+
+ // Clear our own reference.
+ buffer_data_.reset();
+
+ return queued;
+}
+
+bool V4L2WritableBufferRef::QueueMMap(V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ // Move ourselves so our data gets freed no matter when we return
+ V4L2WritableBufferRef self(std::move(*this));
+
+ if (self.Memory() != V4L2_MEMORY_MMAP) {
+ VLOGF(1) << "Called on invalid buffer type!";
+ return false;
+ }
+
+ return std::move(self).DoQueue(request_ref);
+}
+
+bool V4L2WritableBufferRef::QueueUserPtr(const std::vector<void*>& ptrs,
+ V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ // Move ourselves so our data gets freed no matter when we return
+ V4L2WritableBufferRef self(std::move(*this));
+
+ if (self.Memory() != V4L2_MEMORY_USERPTR) {
+ VLOGF(1) << "Called on invalid buffer type!";
+ return false;
+ }
+
+ if (ptrs.size() != self.PlanesCount()) {
+ VLOGF(1) << "Provided " << ptrs.size() << " pointers while we require "
+ << self.buffer_data_->v4l2_buffer_.length << ".";
+ return false;
+ }
+
+ for (size_t i = 0; i < ptrs.size(); i++)
+ self.buffer_data_->v4l2_buffer_.m.planes[i].m.userptr =
+ reinterpret_cast<unsigned long>(ptrs[i]);
+
+ return std::move(self).DoQueue(request_ref);
+}
+
+bool V4L2WritableBufferRef::QueueDMABuf(const std::vector<base::ScopedFD>& scoped_fds,
+ V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ std::vector<int> fds;
+ fds.reserve(scoped_fds.size());
+ for (const base::ScopedFD& scoped_fd : scoped_fds)
+ fds.push_back(scoped_fd.get());
+
+ return std::move(*this).QueueDMABuf(fds, request_ref);
+}
+
+bool V4L2WritableBufferRef::QueueDMABuf(const std::vector<int>& fds,
+ V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ // Move ourselves so our data gets freed no matter when we return
+ V4L2WritableBufferRef self(std::move(*this));
+
+ if (self.Memory() != V4L2_MEMORY_DMABUF) {
+ VLOGF(1) << "Called on invalid buffer type!";
+ return false;
+ }
+
+ if (!self.buffer_data_->CheckNumFDsForFormat(fds.size()))
+ return false;
+
+ size_t num_planes = self.PlanesCount();
+ for (size_t i = 0; i < num_planes; i++)
+ self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = fds[i];
+
+ return std::move(self).DoQueue(request_ref);
+}
+
+size_t V4L2WritableBufferRef::PlanesCount() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.length;
+}
+
+size_t V4L2WritableBufferRef::GetPlaneSize(const size_t plane) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return 0;
+ }
+
+ return buffer_data_->v4l2_buffer_.m.planes[plane].length;
+}
+
+void V4L2WritableBufferRef::SetPlaneSize(const size_t plane,
+ const size_t size) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ enum v4l2_memory memory = Memory();
+ if (memory == V4L2_MEMORY_MMAP) {
+ DCHECK_EQ(buffer_data_->v4l2_buffer_.m.planes[plane].length, size);
+ return;
+ }
+ DCHECK(memory == V4L2_MEMORY_USERPTR || memory == V4L2_MEMORY_DMABUF);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return;
+ }
+
+ buffer_data_->v4l2_buffer_.m.planes[plane].length = size;
+}
+
+void* V4L2WritableBufferRef::GetPlaneMapping(const size_t plane) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->GetPlaneMapping(plane);
+}
+
+void V4L2WritableBufferRef::SetTimeStamp(const struct timeval& timestamp) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ buffer_data_->v4l2_buffer_.timestamp = timestamp;
+}
+
+const struct timeval& V4L2WritableBufferRef::GetTimeStamp() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.timestamp;
+}
+
+void V4L2WritableBufferRef::SetPlaneBytesUsed(const size_t plane,
+ const size_t bytes_used) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return;
+ }
+
+ if (bytes_used > GetPlaneSize(plane)) {
+ VLOGF(1) << "Set bytes used " << bytes_used << " larger than plane size "
+ << GetPlaneSize(plane) << ".";
+ return;
+ }
+
+ buffer_data_->v4l2_buffer_.m.planes[plane].bytesused = bytes_used;
+}
+
+size_t V4L2WritableBufferRef::GetPlaneBytesUsed(const size_t plane) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return 0;
+ }
+
+ return buffer_data_->v4l2_buffer_.m.planes[plane].bytesused;
+}
+
+void V4L2WritableBufferRef::SetPlaneDataOffset(const size_t plane,
+ const size_t data_offset) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return;
+ }
+
+ buffer_data_->v4l2_buffer_.m.planes[plane].data_offset = data_offset;
+}
+
+size_t V4L2WritableBufferRef::BufferId() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.index;
+}
+
+V4L2ReadableBuffer::V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue)
+ : buffer_data_(
+ std::make_unique<V4L2BufferRefBase>(v4l2_buffer, std::move(queue))) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+V4L2ReadableBuffer::~V4L2ReadableBuffer() {
+ // This method is thread-safe. Since we are the destructor, we are guaranteed
+ // to be called from the only remaining reference to us. Also, we are just
+ // calling the destructor of buffer_data_, which is also thread-safe.
+ DCHECK(buffer_data_);
+}
+
+bool V4L2ReadableBuffer::IsLast() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.flags & V4L2_BUF_FLAG_LAST;
+}
+
+bool V4L2ReadableBuffer::IsKeyframe() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.flags & V4L2_BUF_FLAG_KEYFRAME;
+}
+
+struct timeval V4L2ReadableBuffer::GetTimeStamp() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.timestamp;
+}
+
+size_t V4L2ReadableBuffer::PlanesCount() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.length;
+}
+
+const void* V4L2ReadableBuffer::GetPlaneMapping(const size_t plane) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->GetPlaneMapping(plane);
+}
+
+size_t V4L2ReadableBuffer::GetPlaneBytesUsed(const size_t plane) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return 0;
+ }
+
+ return buffer_data_->v4l2_planes_[plane].bytesused;
+}
+
+size_t V4L2ReadableBuffer::GetPlaneDataOffset(const size_t plane) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (plane >= PlanesCount()) {
+ VLOGF(1) << "Invalid plane " << plane << " requested.";
+ return 0;
+ }
+
+ return buffer_data_->v4l2_planes_[plane].data_offset;
+}
+
+size_t V4L2ReadableBuffer::BufferId() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ return buffer_data_->v4l2_buffer_.index;
+}
+
+// This class is used to expose the buffer reference classes' constructors to
+// this module. This is to ensure that nobody else can create buffer references.
+class V4L2BufferRefFactory {
+ public:
+ static V4L2WritableBufferRef CreateWritableRef(
+ const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue) {
+ return V4L2WritableBufferRef(v4l2_buffer, std::move(queue));
+ }
+
+ static V4L2ReadableBufferRef CreateReadableRef(
+ const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue) {
+ return new V4L2ReadableBuffer(v4l2_buffer, std::move(queue));
+ }
+};
+
+// Helper macros that print the queue type with logs.
+#define VPQLOGF(level) \
+ VPLOGF(level) << "(" << V4L2Device::V4L2BufferTypeToString(type_) << ") "
+#define VQLOGF(level) \
+ VLOGF(level) << "(" << V4L2Device::V4L2BufferTypeToString(type_) << ") "
+#define DVQLOGF(level) \
+ DVLOGF(level) << "(" << V4L2Device::V4L2BufferTypeToString(type_) << ") "
+
+V4L2Queue::V4L2Queue(scoped_refptr<V4L2Device> dev,
+ enum v4l2_buf_type type,
+ base::OnceClosure destroy_cb)
+ : type_(type),
+ device_(dev),
+ destroy_cb_(std::move(destroy_cb)),
+ weak_this_factory_(this) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+V4L2Queue::~V4L2Queue() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (is_streaming_) {
+ VQLOGF(1) << "Queue is still streaming, trying to stop it...";
+ Streamoff();
+ }
+
+ DCHECK(queued_buffers_.empty());
+ DCHECK(!free_buffers_);
+
+ if (!buffers_.empty()) {
+ VQLOGF(1) << "Buffers are still allocated, trying to deallocate them...";
+ DeallocateBuffers();
+ }
+
+ std::move(destroy_cb_).Run();
+}
+
+base::Optional<struct v4l2_format> V4L2Queue::SetFormat(uint32_t fourcc,
+ const Size& size,
+ size_t buffer_size) {
+ struct v4l2_format format;
+ memset(&format, 0, sizeof(format));
+ format.type = type_;
+ format.fmt.pix_mp.pixelformat = fourcc;
+ format.fmt.pix_mp.width = size.width();
+ format.fmt.pix_mp.height = size.height();
+ format.fmt.pix_mp.num_planes = V4L2Device::GetNumPlanesOfV4L2PixFmt(fourcc);
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = buffer_size;
+ if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0 ||
+ format.fmt.pix_mp.pixelformat != fourcc) {
+ VPQLOGF(2) << "Failed to set format on queue " << type_
+ << ". format_fourcc=0x" << std::hex << fourcc;
+ return base::nullopt;
+ }
+
+ current_format_ = format;
+ return current_format_;
+}
+
+size_t V4L2Queue::AllocateBuffers(size_t count, enum v4l2_memory memory) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!free_buffers_);
+ DCHECK_EQ(queued_buffers_.size(), 0u);
+
+ if (IsStreaming()) {
+ VQLOGF(1) << "Cannot allocate buffers while streaming.";
+ return 0;
+ }
+
+ if (buffers_.size() != 0) {
+ VQLOGF(1)
+ << "Cannot allocate new buffers while others are still allocated.";
+ return 0;
+ }
+
+ if (count == 0) {
+ VQLOGF(1) << "Attempting to allocate 0 buffers.";
+ return 0;
+ }
+
+ // First query the number of planes in the buffers we are about to request.
+ // This should not be required, but Tegra's VIDIOC_QUERYBUF will fail on
+ // output buffers if the number of specified planes does not exactly match the
+ // format.
+ struct v4l2_format format = {.type = type_};
+ int ret = device_->Ioctl(VIDIOC_G_FMT, &format);
+ if (ret) {
+ VPQLOGF(1) << "VIDIOC_G_FMT failed";
+ return 0;
+ }
+ planes_count_ = format.fmt.pix_mp.num_planes;
+ DCHECK_LE(planes_count_, static_cast<size_t>(VIDEO_MAX_PLANES));
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = count;
+ reqbufs.type = type_;
+ reqbufs.memory = memory;
+ DVQLOGF(3) << "Requesting " << count << " buffers.";
+
+ ret = device_->Ioctl(VIDIOC_REQBUFS, &reqbufs);
+ if (ret) {
+ VPQLOGF(1) << "VIDIOC_REQBUFS failed";
+ return 0;
+ }
+ DVQLOGF(3) << "queue " << type_ << ": got " << reqbufs.count << " buffers.";
+
+ memory_ = memory;
+
+ free_buffers_ = new V4L2BuffersList();
+
+ // Now query all buffer information.
+ for (size_t i = 0; i < reqbufs.count; i++) {
+ auto buffer = V4L2Buffer::Create(device_, type_, memory_, format, i);
+
+ if (!buffer) {
+ DeallocateBuffers();
+
+ return 0;
+ }
+
+ buffers_.emplace_back(std::move(buffer));
+ free_buffers_->ReturnBuffer(i);
+ }
+
+ DCHECK(free_buffers_);
+ DCHECK_EQ(free_buffers_->size(), buffers_.size());
+ DCHECK_EQ(queued_buffers_.size(), 0u);
+
+ return buffers_.size();
+}
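+
+// Typical setup flow around AllocateBuffers() (hypothetical sketch; the
+// format, resolution and buffer count depend on the actual device):
+//   scoped_refptr<V4L2Queue> queue =
+//       device->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+//   queue->SetFormat(V4L2_PIX_FMT_NV12, Size(1920, 1080), 0);
+//   if (queue->AllocateBuffers(4, V4L2_MEMORY_MMAP) == 0)
+//     /* handle allocation failure */;
+//   queue->Streamon();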
+
+bool V4L2Queue::DeallocateBuffers() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (IsStreaming()) {
+ VQLOGF(1) << "Cannot deallocate buffers while streaming.";
+ return false;
+ }
+
+ if (buffers_.size() == 0)
+ return true;
+
+ weak_this_factory_.InvalidateWeakPtrs();
+ buffers_.clear();
+ free_buffers_ = nullptr;
+
+ // Free all buffers.
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = 0;
+ reqbufs.type = type_;
+ reqbufs.memory = memory_;
+
+ int ret = device_->Ioctl(VIDIOC_REQBUFS, &reqbufs);
+ if (ret) {
+ VPQLOGF(1) << "VIDIOC_REQBUFS failed";
+ return false;
+ }
+
+ DCHECK(!free_buffers_);
+ DCHECK_EQ(queued_buffers_.size(), 0u);
+
+ return true;
+}
+
+size_t V4L2Queue::GetMemoryUsage() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ size_t usage = 0;
+ for (const auto& buf : buffers_) {
+ usage += buf->GetMemoryUsage();
+ }
+ return usage;
+}
+
+v4l2_memory V4L2Queue::GetMemoryType() const {
+ return memory_;
+}
+
+base::Optional<V4L2WritableBufferRef> V4L2Queue::GetFreeBuffer() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // No buffers allocated at the moment?
+ if (!free_buffers_)
+ return base::nullopt;
+
+ auto buffer_id = free_buffers_->GetFreeBuffer();
+ if (!buffer_id.has_value())
+ return base::nullopt;
+
+ return V4L2BufferRefFactory::CreateWritableRef(
+ buffers_[buffer_id.value()]->v4l2_buffer(),
+ weak_this_factory_.GetWeakPtr());
+}
+
+base::Optional<V4L2WritableBufferRef> V4L2Queue::GetFreeBuffer(
+ size_t requested_buffer_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // No buffers allocated at the moment?
+ if (!free_buffers_)
+ return base::nullopt;
+
+ auto buffer_id = free_buffers_->GetFreeBuffer(requested_buffer_id);
+ if (!buffer_id.has_value())
+ return base::nullopt;
+
+ return V4L2BufferRefFactory::CreateWritableRef(
+ buffers_[buffer_id.value()]->v4l2_buffer(),
+ weak_this_factory_.GetWeakPtr());
+}
+
+bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ int ret = device_->Ioctl(VIDIOC_QBUF, v4l2_buffer);
+ if (ret) {
+ VPQLOGF(1) << "VIDIOC_QBUF failed";
+ return false;
+ }
+
+ auto inserted = queued_buffers_.emplace(v4l2_buffer->index);
+ DCHECK_EQ(inserted.second, true);
+
+ device_->SchedulePoll();
+
+ return true;
+}
+
+std::pair<bool, V4L2ReadableBufferRef> V4L2Queue::DequeueBuffer() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // No need to dequeue if no buffers queued.
+ if (QueuedBuffersCount() == 0)
+ return std::make_pair(true, nullptr);
+
+ if (!IsStreaming()) {
+ VQLOGF(1) << "Attempting to dequeue a buffer while not streaming.";
+ return std::make_pair(true, nullptr);
+ }
+
+ struct v4l2_buffer v4l2_buffer;
+ memset(&v4l2_buffer, 0, sizeof(v4l2_buffer));
+ // WARNING: do not change this to a vector or something smaller than
+ // VIDEO_MAX_PLANES, otherwise the Tegra libv4l2 will write data beyond
+ // the number of allocated planes, resulting in memory corruption.
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ memset(planes, 0, sizeof(planes));
+ v4l2_buffer.type = type_;
+ v4l2_buffer.memory = memory_;
+ v4l2_buffer.m.planes = planes;
+ v4l2_buffer.length = planes_count_;
+ int ret = device_->Ioctl(VIDIOC_DQBUF, &v4l2_buffer);
+ if (ret) {
+ // TODO(acourbot): we should not have to check for EPIPE as codec clients
+ // should not call this method after the last buffer is dequeued.
+ switch (errno) {
+ case EAGAIN:
+ case EPIPE:
+ // This is not an error so we'll need to continue polling but won't
+ // provide a buffer.
+ device_->SchedulePoll();
+ return std::make_pair(true, nullptr);
+ default:
+ VPQLOGF(1) << "VIDIOC_DQBUF failed";
+ return std::make_pair(false, nullptr);
+ }
+ }
+
+ auto it = queued_buffers_.find(v4l2_buffer.index);
+ DCHECK(it != queued_buffers_.end());
+ queued_buffers_.erase(*it);
+
+ if (QueuedBuffersCount() > 0)
+ device_->SchedulePoll();
+
+ DCHECK(free_buffers_);
+ return std::make_pair(true,
+ V4L2BufferRefFactory::CreateReadableRef(
+ v4l2_buffer, weak_this_factory_.GetWeakPtr()));
+}
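+
+// Typical dequeue loop for this API (hypothetical sketch, not part of the
+// original source):
+//   while (queue->QueuedBuffersCount() > 0) {
+//     bool success;
+//     V4L2ReadableBufferRef buffer;
+//     std::tie(success, buffer) = queue->DequeueBuffer();
+//     if (!success) break;  // Fatal queue error.
+//     if (!buffer) break;   // Nothing ready yet; the poller reschedules.
+//     // Consume |buffer|; dropping the last reference returns it to the
+//     // queue's free list.
+//   }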
+
+bool V4L2Queue::IsStreaming() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return is_streaming_;
+}
+
+bool V4L2Queue::Streamon() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (is_streaming_)
+ return true;
+
+ int arg = static_cast<int>(type_);
+ int ret = device_->Ioctl(VIDIOC_STREAMON, &arg);
+ if (ret) {
+ VPQLOGF(1) << "VIDIOC_STREAMON failed";
+ return false;
+ }
+
+ is_streaming_ = true;
+
+ return true;
+}
+
+bool V4L2Queue::Streamoff() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // We do not check the value of IsStreaming(), because we may have queued
+  // buffers to the queue and wish to get them back - in such a case, we may
+ // need to do a VIDIOC_STREAMOFF on a stopped queue.
+
+ int arg = static_cast<int>(type_);
+ int ret = device_->Ioctl(VIDIOC_STREAMOFF, &arg);
+ if (ret) {
+ VPQLOGF(1) << "VIDIOC_STREAMOFF failed";
+ return false;
+ }
+
+ for (const auto& buffer_id : queued_buffers_) {
+ DCHECK(free_buffers_);
+ free_buffers_->ReturnBuffer(buffer_id);
+ }
+
+ queued_buffers_.clear();
+
+ is_streaming_ = false;
+
+ return true;
+}
+
+size_t V4L2Queue::AllocatedBuffersCount() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return buffers_.size();
+}
+
+size_t V4L2Queue::FreeBuffersCount() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return free_buffers_ ? free_buffers_->size() : 0;
+}
+
+size_t V4L2Queue::QueuedBuffersCount() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return queued_buffers_.size();
+}
+
+#undef DVQLOGF
+#undef VPQLOGF
+#undef VQLOGF
+
+bool V4L2Queue::SupportsRequests() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return supports_requests_;
+}
+
+// This class is used to expose V4L2Queue's constructor to this module. This is
+// to ensure that nobody else can create instances of it.
+class V4L2QueueFactory {
+ public:
+ static scoped_refptr<V4L2Queue> CreateQueue(scoped_refptr<V4L2Device> dev,
+ enum v4l2_buf_type type,
+ base::OnceClosure destroy_cb) {
+ return new V4L2Queue(std::move(dev), type, std::move(destroy_cb));
+ }
+};
+
+V4L2Device::V4L2Device() {
+ DETACH_FROM_SEQUENCE(client_sequence_checker_);
+}
+
+V4L2Device::~V4L2Device() {}
+
+scoped_refptr<V4L2Queue> V4L2Device::GetQueue(enum v4l2_buf_type type) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ switch (type) {
+ // Supported queue types.
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ break;
+ default:
+ VLOGF(1) << "Unsupported V4L2 queue type: " << type;
+ return nullptr;
+ }
+
+ // TODO(acourbot): we should instead query the device for available queues,
+ // and allocate them accordingly. This will do for now though.
+ auto it = queues_.find(type);
+ if (it != queues_.end())
+ return scoped_refptr<V4L2Queue>(it->second);
+
+ scoped_refptr<V4L2Queue> queue = V4L2QueueFactory::CreateQueue(
+ this, type, base::BindOnce(&V4L2Device::OnQueueDestroyed, this, type));
+
+ queues_[type] = queue.get();
+ return queue;
+}
+
+void V4L2Device::OnQueueDestroyed(v4l2_buf_type buf_type) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ auto it = queues_.find(buf_type);
+ DCHECK(it != queues_.end());
+ queues_.erase(it);
+}
+
+// static
+scoped_refptr<V4L2Device> V4L2Device::Create() {
+ DVLOGF(3);
+
+ scoped_refptr<V4L2Device> device;
+
+ device = new GenericV4L2Device();
+ if (device->Initialize())
+ return device;
+
+ VLOGF(1) << "Failed to create a V4L2Device";
+ return nullptr;
+}
+
+// static
+uint32_t V4L2Device::VideoCodecProfileToV4L2PixFmt(VideoCodecProfile profile,
+ bool slice_based) {
+ if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
+ if (slice_based)
+ return V4L2_PIX_FMT_H264_SLICE;
+ else
+ return V4L2_PIX_FMT_H264;
+ } else if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX) {
+ if (slice_based)
+ return V4L2_PIX_FMT_VP8_FRAME;
+ else
+ return V4L2_PIX_FMT_VP8;
+ } else if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX) {
+ if (slice_based)
+ return V4L2_PIX_FMT_VP9_FRAME;
+ else
+ return V4L2_PIX_FMT_VP9;
+ } else {
+ LOG(ERROR) << "Unknown profile: " << GetProfileName(profile);
+ return 0;
+ }
+}
+
+// static
+VideoCodecProfile V4L2Device::V4L2ProfileToVideoCodecProfile(VideoCodec codec,
+ uint32_t profile) {
+ switch (codec) {
+ case kCodecH264:
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ return H264PROFILE_BASELINE;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return H264PROFILE_MAIN;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return H264PROFILE_EXTENDED;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return H264PROFILE_HIGH;
+ }
+ break;
+ case kCodecVP8:
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_VP8_PROFILE_0:
+ case V4L2_MPEG_VIDEO_VP8_PROFILE_1:
+ case V4L2_MPEG_VIDEO_VP8_PROFILE_2:
+ case V4L2_MPEG_VIDEO_VP8_PROFILE_3:
+ return VP8PROFILE_ANY;
+ }
+ break;
+ case kCodecVP9:
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_VP9_PROFILE_0:
+ return VP9PROFILE_PROFILE0;
+ case V4L2_MPEG_VIDEO_VP9_PROFILE_1:
+ return VP9PROFILE_PROFILE1;
+ case V4L2_MPEG_VIDEO_VP9_PROFILE_2:
+ return VP9PROFILE_PROFILE2;
+ case V4L2_MPEG_VIDEO_VP9_PROFILE_3:
+ return VP9PROFILE_PROFILE3;
+ }
+ break;
+ default:
+ VLOGF(2) << "Unknown codec: " << codec;
+ }
+ VLOGF(2) << "Unknown profile: " << profile;
+ return VIDEO_CODEC_PROFILE_UNKNOWN;
+}
+
+std::vector<VideoCodecProfile> V4L2Device::V4L2PixFmtToVideoCodecProfiles(
+ uint32_t pix_fmt,
+ bool is_encoder) {
+ auto get_supported_profiles = [this](
+ VideoCodec codec,
+ std::vector<VideoCodecProfile>* profiles) {
+ uint32_t query_id = 0;
+ switch (codec) {
+ case kCodecH264:
+ query_id = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
+ break;
+ case kCodecVP8:
+ query_id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE;
+ break;
+ case kCodecVP9:
+ query_id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE;
+ break;
+ default:
+ return false;
+ }
+
+ v4l2_queryctrl query_ctrl = {};
+ query_ctrl.id = query_id;
+ if (Ioctl(VIDIOC_QUERYCTRL, &query_ctrl) != 0) {
+ return false;
+ }
+ v4l2_querymenu query_menu = {};
+ query_menu.id = query_ctrl.id;
+ for (query_menu.index = query_ctrl.minimum;
+ static_cast<int>(query_menu.index) <= query_ctrl.maximum;
+ query_menu.index++) {
+ if (Ioctl(VIDIOC_QUERYMENU, &query_menu) == 0) {
+ const VideoCodecProfile profile =
+ V4L2Device::V4L2ProfileToVideoCodecProfile(codec, query_menu.index);
+ if (profile != VIDEO_CODEC_PROFILE_UNKNOWN)
+ profiles->push_back(profile);
+ }
+ }
+ return true;
+ };
+
+ std::vector<VideoCodecProfile> profiles;
+ switch (pix_fmt) {
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_SLICE:
+ if (!get_supported_profiles(kCodecH264, &profiles)) {
+        DLOG(WARNING) << "Driver doesn't support querying H264 profiles, "
+                      << "using default values: Baseline, Main, High";
+ profiles = {
+ H264PROFILE_BASELINE,
+ H264PROFILE_MAIN,
+ H264PROFILE_HIGH,
+ };
+ }
+ break;
+ case V4L2_PIX_FMT_VP8:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ profiles = {VP8PROFILE_ANY};
+ break;
+ case V4L2_PIX_FMT_VP9:
+ case V4L2_PIX_FMT_VP9_FRAME:
+ if (!get_supported_profiles(kCodecVP9, &profiles)) {
+        DLOG(WARNING) << "Driver doesn't support querying VP9 profiles, "
+                      << "using default value: Profile0";
+ profiles = {VP9PROFILE_PROFILE0};
+ }
+ break;
+ default:
+ VLOGF(1) << "Unhandled pixelformat " << FourccToString(pix_fmt);
+ return {};
+ }
+
+ // Erase duplicated profiles.
+ std::sort(profiles.begin(), profiles.end());
+ profiles.erase(std::unique(profiles.begin(), profiles.end()), profiles.end());
+ return profiles;
+}
+
+// static
+int32_t V4L2Device::VideoCodecProfileToV4L2H264Profile(
+ VideoCodecProfile profile) {
+ switch (profile) {
+ case H264PROFILE_BASELINE:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+ case H264PROFILE_MAIN:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
+ case H264PROFILE_EXTENDED:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
+ case H264PROFILE_HIGH:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+ case H264PROFILE_HIGH10PROFILE:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10;
+ case H264PROFILE_HIGH422PROFILE:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422;
+ case H264PROFILE_HIGH444PREDICTIVEPROFILE:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE;
+ case H264PROFILE_SCALABLEBASELINE:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE;
+ case H264PROFILE_SCALABLEHIGH:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH;
+ case H264PROFILE_STEREOHIGH:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH;
+ case H264PROFILE_MULTIVIEWHIGH:
+ return V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH;
+ default:
+ DVLOGF(1) << "Add more cases as needed";
+ return -1;
+ }
+}
+
+// static
+int32_t V4L2Device::H264LevelIdcToV4L2H264Level(uint8_t level_idc) {
+ switch (level_idc) {
+ case 10:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ case 9:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ case 11:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ case 12:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
+ case 13:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
+ case 20:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
+ case 21:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ case 22:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ case 30:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
+ case 31:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ case 32:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ case 40:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ case 41:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ case 42:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ case 50:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ case 51:
+ return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+ default:
+ DVLOGF(1) << "Unrecognized level_idc: " << static_cast<int>(level_idc);
+ return -1;
+ }
+}
+
+// static
+Size V4L2Device::AllocatedSizeFromV4L2Format(const struct v4l2_format& format) {
+ Size coded_size;
+ Size visible_size;
+ VideoPixelFormat frame_format = PIXEL_FORMAT_UNKNOWN;
+ size_t bytesperline = 0;
+ // Total bytes in the frame.
+ size_t sizeimage = 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
+ DCHECK_GT(format.fmt.pix_mp.num_planes, 0);
+ bytesperline =
+ base::checked_cast<int>(format.fmt.pix_mp.plane_fmt[0].bytesperline);
+ for (size_t i = 0; i < format.fmt.pix_mp.num_planes; ++i) {
+ sizeimage +=
+ base::checked_cast<int>(format.fmt.pix_mp.plane_fmt[i].sizeimage);
+ }
+ visible_size.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
+ base::checked_cast<int>(format.fmt.pix_mp.height));
+ const uint32_t pix_fmt = format.fmt.pix_mp.pixelformat;
+ const auto frame_fourcc = Fourcc::FromV4L2PixFmt(pix_fmt);
+ if (!frame_fourcc) {
+ VLOGF(1) << "Unsupported format " << FourccToString(pix_fmt);
+ return coded_size;
+ }
+ frame_format = frame_fourcc->ToVideoPixelFormat();
+ } else {
+ bytesperline = base::checked_cast<int>(format.fmt.pix.bytesperline);
+ sizeimage = base::checked_cast<int>(format.fmt.pix.sizeimage);
+ visible_size.SetSize(base::checked_cast<int>(format.fmt.pix.width),
+ base::checked_cast<int>(format.fmt.pix.height));
+ const uint32_t fourcc = format.fmt.pix.pixelformat;
+ const auto frame_fourcc = Fourcc::FromV4L2PixFmt(fourcc);
+ if (!frame_fourcc) {
+ VLOGF(1) << "Unsupported format " << FourccToString(fourcc);
+ return coded_size;
+ }
+ frame_format = frame_fourcc ? frame_fourcc->ToVideoPixelFormat()
+ : PIXEL_FORMAT_UNKNOWN;
+ }
+
+ // V4L2 does not provide per-plane bytesperline (bpl) when different
+ // components are sharing one physical plane buffer. In this case, it only
+ // provides bpl for the first component in the plane. So we can't depend on it
+ // for calculating height, because bpl may vary within one physical plane
+ // buffer. For example, YUV420 contains 3 components in one physical plane,
+ // with Y at 8 bits per pixel, and Cb/Cr at 4 bits per pixel per component,
+  // but we only get 8 bits per pixel from bytesperline in physical plane 0.
+ // So we need to get total frame bpp from elsewhere to calculate coded height.
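+  //
+  // Worked example (illustrative): for NV12 with bytesperline = 1920, the Y
+  // component carries 8 horizontal bits per pixel, so
+  // coded_width = 1920 * 8 / 8 = 1920. NV12 totals 12 bpp over all planes,
+  // so a sizeimage of 1920 * 1088 * 12 / 8 bytes yields
+  // coded_height = sizeimage * 8 / 1920 / 12 = 1088.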
+
+ // We need bits per pixel for one component only to calculate
+ // coded_width from bytesperline.
+ int plane_horiz_bits_per_pixel =
+ VideoFrame::PlaneHorizontalBitsPerPixel(frame_format, 0);
+
+ // Adding up bpp for each component will give us total bpp for all components.
+ int total_bpp = 0;
+ for (size_t i = 0; i < VideoFrame::NumPlanes(frame_format); ++i)
+ total_bpp += VideoFrame::PlaneBitsPerPixel(frame_format, i);
+
+ if (sizeimage == 0 || bytesperline == 0 || plane_horiz_bits_per_pixel == 0 ||
+ total_bpp == 0 || (bytesperline * 8) % plane_horiz_bits_per_pixel != 0) {
+ VLOGF(1) << "Invalid format provided";
+ return coded_size;
+ }
+
+ // Coded width can be calculated by taking the first component's bytesperline,
+ // which in V4L2 always applies to the first component in physical plane
+ // buffer.
+ int coded_width = bytesperline * 8 / plane_horiz_bits_per_pixel;
+ // Sizeimage is coded_width * coded_height * total_bpp.
+ int coded_height = sizeimage * 8 / coded_width / total_bpp;
+
+ coded_size.SetSize(coded_width, coded_height);
+ DVLOGF(3) << "coded_size=" << coded_size.ToString();
+
+ // Sanity checks. Calculated coded size has to contain given visible size
+ // and fulfill buffer byte size requirements.
+ DCHECK(Rect(coded_size).Contains(Rect(visible_size)));
+ DCHECK_LE(sizeimage, VideoFrame::AllocationSize(frame_format, coded_size));
+
+ return coded_size;
+}
+
+// static
+const char* V4L2Device::V4L2MemoryToString(const v4l2_memory memory) {
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ return "V4L2_MEMORY_MMAP";
+ case V4L2_MEMORY_USERPTR:
+ return "V4L2_MEMORY_USERPTR";
+ case V4L2_MEMORY_DMABUF:
+ return "V4L2_MEMORY_DMABUF";
+ case V4L2_MEMORY_OVERLAY:
+ return "V4L2_MEMORY_OVERLAY";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+// static
+const char* V4L2Device::V4L2BufferTypeToString(
+ const enum v4l2_buf_type buf_type) {
+ switch (buf_type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return "OUTPUT";
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return "CAPTURE";
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return "OUTPUT_MPLANE";
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return "CAPTURE_MPLANE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+// static
+std::string V4L2Device::V4L2FormatToString(const struct v4l2_format& format) {
+ std::ostringstream s;
+ s << "v4l2_format type: " << format.type;
+ if (format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ // single-planar
+ const struct v4l2_pix_format& pix = format.fmt.pix;
+ s << ", width_height: " << Size(pix.width, pix.height).ToString()
+ << ", pixelformat: " << FourccToString(pix.pixelformat)
+ << ", field: " << pix.field << ", bytesperline: " << pix.bytesperline
+ << ", sizeimage: " << pix.sizeimage;
+ } else if (V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
+ const struct v4l2_pix_format_mplane& pix_mp = format.fmt.pix_mp;
+    // Because num_planes is a uint8_t, ostringstream would treat it as a
+    // char instead of an integer, which is not what we want. Casting
+    // pix_mp.num_planes to unsigned int solves the issue.
+ s << ", width_height: " << Size(pix_mp.width, pix_mp.height).ToString()
+ << ", pixelformat: " << FourccToString(pix_mp.pixelformat)
+ << ", field: " << pix_mp.field
+ << ", num_planes: " << static_cast<unsigned int>(pix_mp.num_planes);
+ for (size_t i = 0; i < pix_mp.num_planes; ++i) {
+ const struct v4l2_plane_pix_format& plane_fmt = pix_mp.plane_fmt[i];
+ s << ", plane_fmt[" << i << "].sizeimage: " << plane_fmt.sizeimage
+ << ", plane_fmt[" << i << "].bytesperline: " << plane_fmt.bytesperline;
+ }
+ } else {
+ s << " unsupported yet.";
+ }
+ return s.str();
+}
+
+// static
+std::string V4L2Device::V4L2BufferToString(const struct v4l2_buffer& buffer) {
+ std::ostringstream s;
+ s << "v4l2_buffer type: " << buffer.type << ", memory: " << buffer.memory
+ << ", index: " << buffer.index << " bytesused: " << buffer.bytesused
+ << ", length: " << buffer.length;
+ if (buffer.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ buffer.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ // single-planar
+ if (buffer.memory == V4L2_MEMORY_MMAP) {
+ s << ", m.offset: " << buffer.m.offset;
+ } else if (buffer.memory == V4L2_MEMORY_USERPTR) {
+ s << ", m.userptr: " << buffer.m.userptr;
+ } else if (buffer.memory == V4L2_MEMORY_DMABUF) {
+ s << ", m.fd: " << buffer.m.fd;
+ }
+ } else if (V4L2_TYPE_IS_MULTIPLANAR(buffer.type)) {
+ for (size_t i = 0; i < buffer.length; ++i) {
+ const struct v4l2_plane& plane = buffer.m.planes[i];
+ s << ", m.planes[" << i << "](bytesused: " << plane.bytesused
+ << ", length: " << plane.length
+ << ", data_offset: " << plane.data_offset;
+ if (buffer.memory == V4L2_MEMORY_MMAP) {
+ s << ", m.mem_offset: " << plane.m.mem_offset;
+ } else if (buffer.memory == V4L2_MEMORY_USERPTR) {
+ s << ", m.userptr: " << plane.m.userptr;
+ } else if (buffer.memory == V4L2_MEMORY_DMABUF) {
+ s << ", m.fd: " << plane.m.fd;
+ }
+ s << ")";
+ }
+ } else {
+ s << " unsupported yet.";
+ }
+ return s.str();
+}
+
+// static
+base::Optional<VideoFrameLayout> V4L2Device::V4L2FormatToVideoFrameLayout(
+ const struct v4l2_format& format) {
+ if (!V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
+ VLOGF(1) << "v4l2_buf_type is not multiplanar: " << std::hex << "0x"
+ << format.type;
+ return base::nullopt;
+ }
+ const v4l2_pix_format_mplane& pix_mp = format.fmt.pix_mp;
+ const uint32_t& pix_fmt = pix_mp.pixelformat;
+ const auto video_fourcc = Fourcc::FromV4L2PixFmt(pix_fmt);
+ if (!video_fourcc) {
+ VLOGF(1) << "Failed to convert pixel format to VideoPixelFormat: "
+ << FourccToString(pix_fmt);
+ return base::nullopt;
+ }
+ const VideoPixelFormat video_format = video_fourcc->ToVideoPixelFormat();
+ const size_t num_buffers = pix_mp.num_planes;
+ const size_t num_color_planes = VideoFrame::NumPlanes(video_format);
+ if (num_color_planes == 0) {
+ VLOGF(1) << "Unsupported video format for NumPlanes(): "
+ << VideoPixelFormatToString(video_format);
+ return base::nullopt;
+ }
+ if (num_buffers > num_color_planes) {
+ VLOGF(1) << "pix_mp.num_planes: " << num_buffers
+ << " should not be larger than NumPlanes("
+ << VideoPixelFormatToString(video_format)
+ << "): " << num_color_planes;
+ return base::nullopt;
+ }
+ // Reserve capacity in advance to prevent unnecessary vector reallocation.
+ std::vector<ColorPlaneLayout> planes;
+ planes.reserve(num_color_planes);
+ for (size_t i = 0; i < num_buffers; ++i) {
+ const v4l2_plane_pix_format& plane_format = pix_mp.plane_fmt[i];
+ planes.emplace_back(static_cast<int32_t>(plane_format.bytesperline), 0u,
+ plane_format.sizeimage);
+ }
+  // When there are more color planes than buffers, fill in the layout of the
+  // color planes that do not map to a separate buffer.
+ // Right now only some pixel formats are supported: NV12, YUV420, YVU420.
+ if (num_color_planes > num_buffers) {
+ const int32_t y_stride = planes[0].stride;
+    // Note that y_stride comes from the v4l2 bytesperline field, which is a
+    // uint32_t, so the value is non-negative and safe to cast to size_t.
+ const size_t y_stride_abs = static_cast<size_t>(y_stride);
+ switch (pix_fmt) {
+ case V4L2_PIX_FMT_NV12:
+ // The stride of UV is the same as Y in NV12.
+ // The height is half of Y plane.
+ planes.emplace_back(y_stride, y_stride_abs * pix_mp.height,
+ y_stride_abs * pix_mp.height / 2);
+ DCHECK_EQ(2u, planes.size());
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420: {
+        // The spec claims that two Cx rows (including padding) are exactly as
+        // long as one Y row (including padding), so the stride of Y must be
+        // an even number.
+ if (y_stride % 2 != 0 || pix_mp.height % 2 != 0) {
+ VLOGF(1) << "Plane-Y stride and height should be even; stride: "
+ << y_stride << ", height: " << pix_mp.height;
+ return base::nullopt;
+ }
+ const int32_t half_stride = y_stride / 2;
+ const size_t plane_0_area = y_stride_abs * pix_mp.height;
+ const size_t plane_1_area = plane_0_area / 4;
+ planes.emplace_back(half_stride, plane_0_area, plane_1_area);
+ planes.emplace_back(half_stride, plane_0_area + plane_1_area,
+ plane_1_area);
+ DCHECK_EQ(3u, planes.size());
+ break;
+ }
+ default:
+ VLOGF(1) << "Cannot derive stride for each plane for pixel format "
+ << FourccToString(pix_fmt);
+ return base::nullopt;
+ }
+ }
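+
+  // Example (illustrative): a single-buffer 640x480 YUV420 frame with
+  // y_stride = 640 gains two derived planes with {stride, offset, size} =
+  // {320, 307200, 76800} and {320, 384000, 76800}; plane 0 keeps the
+  // driver-reported bytesperline and sizeimage.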
+
+ // Some V4L2 devices expect buffers to be page-aligned. We cannot detect
+ // such devices individually, so set this as a video frame layout property.
+ constexpr size_t buffer_alignment = 0x1000;
+ if (num_buffers == 1) {
+ return VideoFrameLayout::CreateWithPlanes(
+ video_format, Size(pix_mp.width, pix_mp.height), std::move(planes),
+ buffer_alignment);
+ } else {
+ return VideoFrameLayout::CreateMultiPlanar(
+ video_format, Size(pix_mp.width, pix_mp.height), std::move(planes),
+ buffer_alignment);
+ }
+}
+
+// static
+size_t V4L2Device::GetNumPlanesOfV4L2PixFmt(uint32_t pix_fmt) {
+ base::Optional<Fourcc> fourcc = Fourcc::FromV4L2PixFmt(pix_fmt);
+ if (fourcc && fourcc->IsMultiPlanar()) {
+ return VideoFrame::NumPlanes(fourcc->ToVideoPixelFormat());
+ }
+ return 1u;
+}
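+
+// For instance (illustrative), a multi-planar fourcc such as
+// V4L2_PIX_FMT_NV12M resolves to VideoFrame::NumPlanes(PIXEL_FORMAT_NV12),
+// i.e. 2, while its single-buffer counterpart V4L2_PIX_FMT_NV12 returns 1.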
+
+void V4L2Device::GetSupportedResolution(uint32_t pixelformat,
+ Size* min_resolution,
+ Size* max_resolution) {
+ max_resolution->SetSize(0, 0);
+ min_resolution->SetSize(0, 0);
+ v4l2_frmsizeenum frame_size;
+ memset(&frame_size, 0, sizeof(frame_size));
+ frame_size.pixel_format = pixelformat;
+ for (; Ioctl(VIDIOC_ENUM_FRAMESIZES, &frame_size) == 0; ++frame_size.index) {
+ if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ if (frame_size.discrete.width >=
+ base::checked_cast<uint32_t>(max_resolution->width()) &&
+ frame_size.discrete.height >=
+ base::checked_cast<uint32_t>(max_resolution->height())) {
+ max_resolution->SetSize(frame_size.discrete.width,
+ frame_size.discrete.height);
+ }
+ if (min_resolution->IsEmpty() ||
+ (frame_size.discrete.width <=
+ base::checked_cast<uint32_t>(min_resolution->width()) &&
+ frame_size.discrete.height <=
+ base::checked_cast<uint32_t>(min_resolution->height()))) {
+ min_resolution->SetSize(frame_size.discrete.width,
+ frame_size.discrete.height);
+ }
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE ||
+ frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ max_resolution->SetSize(frame_size.stepwise.max_width,
+ frame_size.stepwise.max_height);
+ min_resolution->SetSize(frame_size.stepwise.min_width,
+ frame_size.stepwise.min_height);
+ break;
+ }
+ }
+ if (max_resolution->IsEmpty()) {
+ max_resolution->SetSize(1920, 1088);
+ VLOGF(1) << "GetSupportedResolution failed to get maximum resolution for "
+ << "fourcc " << FourccToString(pixelformat) << ", fall back to "
+ << max_resolution->ToString();
+ }
+ if (min_resolution->IsEmpty()) {
+ min_resolution->SetSize(16, 16);
+ VLOGF(1) << "GetSupportedResolution failed to get minimum resolution for "
+ << "fourcc " << FourccToString(pixelformat) << ", fall back to "
+ << min_resolution->ToString();
+ }
+}
+
+std::vector<uint32_t> V4L2Device::EnumerateSupportedPixelformats(
+ v4l2_buf_type buf_type) {
+ std::vector<uint32_t> pixelformats;
+
+ v4l2_fmtdesc fmtdesc;
+ memset(&fmtdesc, 0, sizeof(fmtdesc));
+ fmtdesc.type = buf_type;
+
+ for (; Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0; ++fmtdesc.index) {
+ DVLOGF(3) << "Found " << fmtdesc.description << std::hex << " (0x"
+ << fmtdesc.pixelformat << ")";
+ pixelformats.push_back(fmtdesc.pixelformat);
+ }
+
+ return pixelformats;
+}
+
+VideoDecodeAccelerator::SupportedProfiles
+V4L2Device::EnumerateSupportedDecodeProfiles(const size_t num_formats,
+ const uint32_t pixelformats[]) {
+ VideoDecodeAccelerator::SupportedProfiles profiles;
+
+ const auto& supported_pixelformats =
+ EnumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ for (uint32_t pixelformat : supported_pixelformats) {
+ if (std::find(pixelformats, pixelformats + num_formats, pixelformat) ==
+ pixelformats + num_formats)
+ continue;
+
+ VideoDecodeAccelerator::SupportedProfile profile;
+ GetSupportedResolution(pixelformat, &profile.min_resolution,
+ &profile.max_resolution);
+
+ const auto video_codec_profiles =
+ V4L2PixFmtToVideoCodecProfiles(pixelformat, false);
+
+ for (const auto& video_codec_profile : video_codec_profiles) {
+ profile.profile = video_codec_profile;
+ profiles.push_back(profile);
+
+ DVLOGF(3) << "Found decoder profile " << GetProfileName(profile.profile)
+ << ", resolutions: " << profile.min_resolution.ToString() << " "
+ << profile.max_resolution.ToString();
+ }
+ }
+
+ return profiles;
+}
+
+VideoEncodeAccelerator::SupportedProfiles
+V4L2Device::EnumerateSupportedEncodeProfiles() {
+ VideoEncodeAccelerator::SupportedProfiles profiles;
+
+ const auto& supported_pixelformats =
+ EnumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ for (const auto& pixelformat : supported_pixelformats) {
+ VideoEncodeAccelerator::SupportedProfile profile;
+ profile.max_framerate_numerator = 30;
+ profile.max_framerate_denominator = 1;
+ Size min_resolution;
+ GetSupportedResolution(pixelformat, &min_resolution,
+ &profile.max_resolution);
+
+ const auto video_codec_profiles =
+ V4L2PixFmtToVideoCodecProfiles(pixelformat, true);
+
+ for (const auto& video_codec_profile : video_codec_profiles) {
+ profile.profile = video_codec_profile;
+ profiles.push_back(profile);
+
+ DVLOGF(3) << "Found encoder profile " << GetProfileName(profile.profile)
+ << ", max resolution: " << profile.max_resolution.ToString();
+ }
+ }
+
+ return profiles;
+}
+
+bool V4L2Device::StartPolling(V4L2DevicePoller::EventCallback event_callback,
+ base::RepeatingClosure error_callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ if (!device_poller_) {
+ device_poller_ =
+ std::make_unique<V4L2DevicePoller>(this, "V4L2DeviceThreadPoller");
+ }
+
+ bool ret = device_poller_->StartPolling(std::move(event_callback),
+ std::move(error_callback));
+
+ if (!ret)
+ device_poller_ = nullptr;
+
+ return ret;
+}
+
+bool V4L2Device::StopPolling() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ return !device_poller_ || device_poller_->StopPolling();
+}
+
+void V4L2Device::SchedulePoll() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ if (!device_poller_ || !device_poller_->IsPolling())
+ return;
+
+ device_poller_->SchedulePoll();
+}
+
+bool V4L2Device::IsCtrlExposed(uint32_t ctrl_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ struct v4l2_queryctrl query_ctrl;
+ memset(&query_ctrl, 0, sizeof(query_ctrl));
+ query_ctrl.id = ctrl_id;
+
+ return Ioctl(VIDIOC_QUERYCTRL, &query_ctrl) == 0;
+}
+
+bool V4L2Device::SetExtCtrls(uint32_t ctrl_class,
+ std::vector<V4L2ExtCtrl> ctrls) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ if (ctrls.empty())
+ return true;
+
+ struct v4l2_ext_controls ext_ctrls;
+ memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+ ext_ctrls.ctrl_class = ctrl_class;
+ ext_ctrls.count = ctrls.size();
+ ext_ctrls.controls = &ctrls[0].ctrl;
+ return Ioctl(VIDIOC_S_EXT_CTRLS, &ext_ctrls) == 0;
+}
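+
+// Example call (illustrative; which control IDs are accepted depends on the
+// driver):
+//   device->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+//                       {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, 1000000)});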
+
+bool V4L2Device::IsCommandSupported(uint32_t command_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ struct v4l2_encoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = command_id;
+
+ return Ioctl(VIDIOC_TRY_ENCODER_CMD, &cmd) == 0;
+}
+
+bool V4L2Device::HasCapabilities(uint32_t capabilities) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ struct v4l2_capability caps;
+ memset(&caps, 0, sizeof(caps));
+ if (Ioctl(VIDIOC_QUERYCAP, &caps) != 0) {
+ LOG(ERROR) << "Failed to query capabilities";
+ return false;
+ }
+
+ return (caps.capabilities & capabilities) == capabilities;
+}
+
+} // namespace media
diff --git a/accel/v4l2_device.h b/accel/v4l2_device.h
new file mode 100644
index 0000000..f00a604
--- /dev/null
+++ b/accel/v4l2_device.h
@@ -0,0 +1,587 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines the V4L2Device interface which is used by the
+// V4L2DecodeAccelerator class to delegate/pass the device specific
+// handling of any of the functionalities.
+// Note: ported from Chromium commit head: 2f13d62f0c0d
+// Note: the complete v4l2 device code is ported from Chromium, but some parts
+// have been removed:
+// - All V4L2 request functionality has been removed, as it required a newer
+// kernel version.
+// - void SetConfigStore() has been removed as it depends on a newer kernel
+// version.
+// - QueueDMABuf() from native pixmap planes has been removed, as
+// NativePixmapPlane has not been ported.
+// - GetVideoFrame() has been removed as it depends on some helper functions
+// that have not been ported.
+// - GL-related functionality has been removed: canCreateEGLImageFrom(),
+// CreateEGLImage(), CreateGLImage() and GetTextureTarget()
+// - V4L2PixFmtToDrmFormat() has been removed, as DRM is not supported yet.
+
+#ifndef V4L2_DEVICE_H_
+#define V4L2_DEVICE_H_
+
+#include <linux/videodev2.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <queue>
+#include <vector>
+
+#include "base/containers/flat_map.h"
+#include "base/files/scoped_file.h"
+#include "base/memory/ref_counted.h"
+
+#include "fourcc.h"
+#include "size.h"
+#include "v4l2_device_poller.h"
+#include "video_codecs.h"
+#include "video_decode_accelerator.h"
+#include "video_encode_accelerator.h"
+#include "video_frame.h"
+#include "video_frame_layout.h"
+#include "video_pixel_format.h"
+
+// TODO(mojahsu): remove this once V4L2 headers are updated.
+#ifndef V4L2_PIX_FMT_JPEG_RAW
+#define V4L2_PIX_FMT_JPEG_RAW v4l2_fourcc('J', 'P', 'G', 'R')
+#endif
+#ifndef V4L2_CID_JPEG_LUMA_QUANTIZATION
+#define V4L2_CID_JPEG_LUMA_QUANTIZATION (V4L2_CID_JPEG_CLASS_BASE + 5)
+#endif
+#ifndef V4L2_CID_JPEG_CHROMA_QUANTIZATION
+#define V4L2_CID_JPEG_CHROMA_QUANTIZATION (V4L2_CID_JPEG_CLASS_BASE + 6)
+#endif
+
+// TODO(b/132589320): remove this once V4L2 header is updated.
+#ifndef V4L2_PIX_FMT_MM21
+// MTK 8-bit block mode, two non-contiguous planes.
+#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1')
+#endif
+
+namespace media {
+
+class V4L2Queue;
+class V4L2BufferRefBase;
+class V4L2BuffersList;
+class V4L2DecodeSurface;
+
+// Dummy V4L2RequestRef. The full request queue functionality could not be
+// ported as it requires a newer kernel header.
+class V4L2RequestRef {};
+
+// Wrapper for the 'v4l2_ext_control' structure.
+struct V4L2ExtCtrl {
+ V4L2ExtCtrl(uint32_t id);
+ V4L2ExtCtrl(uint32_t id, int32_t val);
+ struct v4l2_ext_control ctrl;
+};
+
+// A unique reference to a buffer for clients to prepare and submit.
+//
+// Clients can prepare a buffer for queuing using the methods of this class, and
+// then either queue it using the Queue() method corresponding to the memory
+// type of the buffer, or drop the reference to make the buffer available again.
+class V4L2WritableBufferRef {
+ public:
+ V4L2WritableBufferRef(V4L2WritableBufferRef&& other);
+ V4L2WritableBufferRef() = delete;
+ V4L2WritableBufferRef& operator=(V4L2WritableBufferRef&& other);
+
+ // Return the memory type of the buffer. Useful to e.g. decide which Queue()
+ // method to use.
+ enum v4l2_memory Memory() const;
+
+ // Queue a MMAP buffer.
+ // When requests are supported, a |request_ref| can be passed along with
+ // the buffer to be submitted.
+ // If successful, true is returned and the reference to the buffer is dropped
+ // so this reference becomes invalid.
+ // In case of error, false is returned and the buffer is returned to the free
+ // list.
+ bool QueueMMap(V4L2RequestRef* request_ref = nullptr) &&;
+ // Queue a USERPTR buffer, assigning |ptrs| as pointer for each plane.
+ // The size of |ptrs| must be equal to the number of planes of this buffer.
+ // When requests are supported, a |request_ref| can be passed along with
+ // the buffer to be submitted.
+ // If successful, true is returned and the reference to the buffer is dropped
+ // so this reference becomes invalid.
+ // In case of error, false is returned and the buffer is returned to the free
+ // list.
+ bool QueueUserPtr(const std::vector<void*>& ptrs,
+ V4L2RequestRef* request_ref = nullptr) &&;
+ // Queue a DMABUF buffer, assigning |scoped_fds| as file descriptors for
+ // each plane. The number of |scoped_fds| may be greater than the number of
+ // planes of this buffer; this happens when the V4L2 pixel format is
+ // single-planar, in which case only the fd of the first plane is used.
+ // When requests are supported, a |request_ref| can be passed along with
+ // the buffer to be submitted.
+ // If successful, true is returned and the reference to the buffer is dropped
+ // so this reference becomes invalid.
+ // In case of error, false is returned and the buffer is returned to the free
+ // list.
+ bool QueueDMABuf(const std::vector<base::ScopedFD>& scoped_fds,
+ V4L2RequestRef* request_ref = nullptr) &&;
+ // Queue a DMABUF buffer, assigning |fds| as file descriptors for each plane.
+ // The number of |fds| may be greater than the number of planes of this
+ // buffer; this happens when the V4L2 pixel format is single-planar, in which
+ // case only the fd of the first plane is used.
+ // When requests are supported, a |request_ref| can be passed along with
+ // the buffer to be submitted.
+ // If successful, true is returned and the reference to the buffer is dropped
+ // so this reference becomes invalid.
+ // In case of error, false is returned and the buffer is returned to the free
+ // list.
+ bool QueueDMABuf(const std::vector<int>& fds,
+ V4L2RequestRef* request_ref = nullptr) &&;
+
+ // Returns the number of planes in this buffer.
+ size_t PlanesCount() const;
+ // Returns the size of the requested |plane|, in bytes.
+ size_t GetPlaneSize(const size_t plane) const;
+ // Set the size of the requested |plane|, in bytes. This is only valid for
+ // USERPTR and DMABUF buffers. When using MMAP buffers, this method triggers
+ // a DCHECK and is a no-op in release builds.
+ void SetPlaneSize(const size_t plane, const size_t size);
+ // This method can only be used with MMAP buffers.
+ // It will return a pointer to the data of the |plane|th plane.
+ // In case of error (invalid plane index or mapping failed), a nullptr is
+ // returned.
+ void* GetPlaneMapping(const size_t plane);
+ // Set the timestamp field for this buffer.
+ void SetTimeStamp(const struct timeval& timestamp);
+ // Return the previously-set timestamp field for this buffer.
+ const struct timeval& GetTimeStamp() const;
+ // Set the number of bytes used for |plane|.
+ void SetPlaneBytesUsed(const size_t plane, const size_t bytes_used);
+ // Returns the previously-set number of bytes used for |plane|.
+ size_t GetPlaneBytesUsed(const size_t plane) const;
+ // Set the data offset for |plane|, in bytes.
+ void SetPlaneDataOffset(const size_t plane, const size_t data_offset);
+
+ // Return the V4L2 buffer ID of the underlying buffer.
+ // TODO(acourbot) This is used for legacy clients but should be ultimately
+ // removed. See crbug/879971
+ size_t BufferId() const;
+
+ ~V4L2WritableBufferRef();
+
+ private:
+ // Do the actual queue operation once the v4l2_buffer structure is properly
+ // filled.
+ // When requests are supported, a |request_ref| can be passed along with
+ // the buffer to be submitted.
+ bool DoQueue(V4L2RequestRef* request_ref) &&;
+
+ V4L2WritableBufferRef(const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue);
+ friend class V4L2BufferRefFactory;
+
+ std::unique_ptr<V4L2BufferRefBase> buffer_data_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ DISALLOW_COPY_AND_ASSIGN(V4L2WritableBufferRef);
+};
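+
+// A minimal usage sketch (illustrative only; assumes |queue| is a streaming
+// V4L2Queue with MMAP buffers allocated, and that |timestamp| and
+// |bytes_used| are provided by the caller):
+//
+//   base::Optional<V4L2WritableBufferRef> buffer = queue->GetFreeBuffer();
+//   if (buffer) {
+//     buffer->SetTimeStamp(timestamp);
+//     buffer->SetPlaneBytesUsed(0, bytes_used);
+//     // The Queue*() methods are &&-qualified: queuing consumes the
+//     // reference, whether it succeeds or not.
+//     if (!std::move(*buffer).QueueMMap())
+//       VLOGF(1) << "Failed to queue buffer";
+//   }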
+
+// A reference to a read-only, dequeued buffer.
+//
+// Clients use this class to query the buffer state and content, and are
+// guaranteed that the buffer will not be reused until all references are
+// destroyed.
+// All methods of this class must be called from the same sequence, but
+// instances of V4L2ReadableBuffer objects can be destroyed from any sequence.
+// They can even outlive the V4L2 buffers they originate from. This flexibility
+// is required because V4L2ReadableBufferRefs can be embedded into VideoFrames,
+// which are then passed to other threads and not necessarily destroyed before
+// the V4L2Queue buffers are freed.
+class V4L2ReadableBuffer
+ : public base::RefCountedThreadSafe<V4L2ReadableBuffer> {
+ public:
+ // Returns whether the V4L2_BUF_FLAG_LAST flag is set for this buffer.
+ bool IsLast() const;
+ // Returns whether the V4L2_BUF_FLAG_KEYFRAME flag is set for this buffer.
+ bool IsKeyframe() const;
+ // Return the timestamp set by the driver on this buffer.
+ struct timeval GetTimeStamp() const;
+ // Returns the number of planes in this buffer.
+ size_t PlanesCount() const;
+ // Returns the number of bytes used for |plane|.
+ size_t GetPlaneBytesUsed(size_t plane) const;
+ // Returns the data offset for |plane|.
+ size_t GetPlaneDataOffset(size_t plane) const;
+ // This method can only be used with MMAP buffers.
+ // It will return a pointer to the data of the |plane|th plane.
+ // In case of error (invalid plane index or mapping failed), a nullptr is
+ // returned.
+ const void* GetPlaneMapping(const size_t plane) const;
+
+ // Return the V4L2 buffer ID of the underlying buffer.
+ // TODO(acourbot) This is used for legacy clients but should be ultimately
+ // removed. See crbug/879971
+ size_t BufferId() const;
+
+ private:
+ friend class V4L2BufferRefFactory;
+ friend class base::RefCountedThreadSafe<V4L2ReadableBuffer>;
+
+ ~V4L2ReadableBuffer();
+
+ V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
+ base::WeakPtr<V4L2Queue> queue);
+
+ std::unique_ptr<V4L2BufferRefBase> buffer_data_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ DISALLOW_COPY_AND_ASSIGN(V4L2ReadableBuffer);
+};
+
+// Shortcut for naming consistency.
+using V4L2ReadableBufferRef = scoped_refptr<V4L2ReadableBuffer>;
+
+class V4L2Device;
+class V4L2Buffer;
+
+// Interface representing a specific queue of a |V4L2Device|. It provides free
+// and queued buffer management that is commonly required by clients.
+//
+// Buffers managed by this class undergo the following cycle:
+// 1) Allocated buffers are put into a free buffers pool, indicating that they
+// are used neither by the client nor the hardware.
+// 2) The client obtains a unique, writable reference to one of the free
+// buffers in order to set its content and other parameters.
+// 3) The client then queues the buffer obtained in 2), which invalidates its
+// reference. The buffer is now prepared to be processed by the hardware.
+// 4) Once the hardware is done with the buffer, it is ready to be dequeued by
+// the client. The client obtains a read-only, counted reference to the
+// buffer and can read its content and metadata, as well as making other
+// references to it. The buffer will not be reused until all the references
+// are dropped. Once this happens, the buffer goes back to the free list
+// described in 1).
+class V4L2Queue : public base::RefCountedThreadSafe<V4L2Queue> {
+ public:
+ // Set |fourcc| as the current format on this queue. |size| corresponds to
+ // the desired buffer's dimensions (i.e. the width and height members of
+ // v4l2_pix_format_mplane; if not applicable, pass Size()).
+ // |buffer_size| is the desired size in bytes of the buffer for single-planar
+ // formats (i.e. sizeimage of the first plane). It can be set to 0 if not
+ // relevant for the desired format.
+ // If the format could be set, the |v4l2_format| reflecting the actual format
+ // is returned. It is guaranteed to feature the specified |fourcc|, but any
+ // other parameter (including |size| and |buffer_size|) may have been
+ // adjusted by the driver, so the caller must check their values.
+ base::Optional<struct v4l2_format> SetFormat(uint32_t fourcc,
+ const Size& size,
+ size_t buffer_size)
+ WARN_UNUSED_RESULT;
+
+ // Allocate |count| buffers for the current format of this queue, with a
+ // specific |memory| allocation, and return the number of buffers allocated,
+ // or zero if an error occurred or if references to any previously allocated
+ // buffers are still held by any clients.
+ //
+ // The number of allocated buffers may be larger than the number requested, so
+ // callers must always check the return value.
+ //
+ // Calling this method while buffers are still allocated results in an error.
+ size_t AllocateBuffers(size_t count,
+ enum v4l2_memory memory) WARN_UNUSED_RESULT;
+
+ // Deallocate all buffers previously allocated by |AllocateBuffers|. Any
+ // references to buffers previously allocated held by the client must be
+ // released, or this call will fail.
+ bool DeallocateBuffers();
+
+ // Returns the memory usage of v4l2 buffers owned by this V4L2Queue which are
+ // mapped in user space memory.
+ size_t GetMemoryUsage() const;
+
+ // Returns |memory_|, the memory type of the buffers last allocated by this
+ // V4L2Queue.
+ v4l2_memory GetMemoryType() const;
+
+ // Return a reference to a free buffer for the caller to prepare and submit,
+ // or nullopt if no buffer is currently free.
+ //
+ // If the caller discards the returned reference, the underlying buffer is
+ // made available to clients again.
+ base::Optional<V4L2WritableBufferRef> GetFreeBuffer();
+ base::Optional<V4L2WritableBufferRef> GetFreeBuffer(
+ size_t requested_buffer_id);
+
+ // Attempt to dequeue a buffer, and return a reference to it if one was
+ // available.
+ //
+ // The first element of the returned pair will be false if an error occurred,
+ // in which case the second element will be nullptr. If no error occurred,
+ // then the first element will be true and the second element will contain a
+ // reference to the dequeued buffer if one was available, or nullptr
+ // otherwise.
+ // Dequeued buffers will not be reused by the driver until all references to
+ // them are dropped.
+ std::pair<bool, V4L2ReadableBufferRef> DequeueBuffer();
+
+ // Returns true if this queue is currently streaming.
+ bool IsStreaming() const;
+ // If not currently streaming, starts streaming. Returns true if we started
+ // streaming, or were already streaming, or false if we were not streaming
+ // and an error occurred when attempting to start the stream. On failure, any
+ // previously-queued buffers will be dequeued without processing and made
+ // available to the client, while any buffers held by the client will remain
+ // unchanged and their ownership will remain with the client.
+ bool Streamon();
+ // If currently streaming, stops streaming. Also makes all queued buffers
+ // available to the client again, regardless of the streaming state.
+ // If an error occurred while attempting to stop streaming, then false is
+ // returned and queued buffers are left untouched since the V4L2 queue may
+ // still be using them.
+ bool Streamoff();
+
+ // Returns the number of buffers currently allocated for this queue.
+ size_t AllocatedBuffersCount() const;
+ // Returns the number of currently free buffers on this queue.
+ size_t FreeBuffersCount() const;
+ // Returns the number of buffers currently queued on this queue.
+ size_t QueuedBuffersCount() const;
+
+ // Returns true if requests are supported by this queue.
+ bool SupportsRequests();
+
+ private:
+ ~V4L2Queue();
+
+ // Called when clients request a buffer to be queued.
+ bool QueueBuffer(struct v4l2_buffer* v4l2_buffer);
+
+ const enum v4l2_buf_type type_;
+ enum v4l2_memory memory_ = V4L2_MEMORY_MMAP;
+ bool is_streaming_ = false;
+ // Set to true if the queue supports requests.
+ bool supports_requests_ = false;
+ size_t planes_count_ = 0;
+ // Current format as set by SetFormat.
+ base::Optional<struct v4l2_format> current_format_;
+
+ std::vector<std::unique_ptr<V4L2Buffer>> buffers_;
+
+ // Buffers that are available for the client to get and submit.
+ // Buffers in this list are not referenced by anyone other than ourselves.
+ scoped_refptr<V4L2BuffersList> free_buffers_;
+ // Buffers that have been queued by the client, and not dequeued yet.
+ std::set<size_t> queued_buffers_;
+
+ scoped_refptr<V4L2Device> device_;
+ // Callback to call in this queue's destructor.
+ base::OnceClosure destroy_cb_;
+
+ V4L2Queue(scoped_refptr<V4L2Device> dev,
+ enum v4l2_buf_type type,
+ base::OnceClosure destroy_cb);
+ friend class V4L2QueueFactory;
+ friend class V4L2BufferRefBase;
+ friend class base::RefCountedThreadSafe<V4L2Queue>;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
+ base::WeakPtrFactory<V4L2Queue> weak_this_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2Queue);
+};
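+
+// A sketch of the buffer cycle described above (illustrative only; error
+// handling is omitted, |device| is assumed to be an open V4L2Device, and
+// kNumInputBuffers/kInputBufferSize are hypothetical constants):
+//
+//   scoped_refptr<V4L2Queue> queue =
+//       device->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+//   base::Optional<struct v4l2_format> format =
+//       queue->SetFormat(V4L2_PIX_FMT_H264, Size(), kInputBufferSize);
+//   size_t allocated = queue->AllocateBuffers(kNumInputBuffers,
+//                                             V4L2_MEMORY_MMAP);
+//   queue->Streamon();
+//   // ... obtain free buffers, fill and queue them (see
+//   // V4L2WritableBufferRef above), then dequeue processed buffers:
+//   std::pair<bool, V4L2ReadableBufferRef> ret = queue->DequeueBuffer();
+//   if (ret.first && ret.second) {
+//     // |ret.second| keeps the buffer from being reused until dropped.
+//   }
+//   queue->Streamoff();
+//   queue->DeallocateBuffers();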
+
+class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> {
+ public:
+ // Utility format conversion functions
+ // If there is no corresponding single- or multi-planar format, returns 0.
+ static uint32_t VideoCodecProfileToV4L2PixFmt(VideoCodecProfile profile,
+ bool slice_based);
+ static VideoCodecProfile V4L2ProfileToVideoCodecProfile(VideoCodec codec,
+ uint32_t profile);
+ std::vector<VideoCodecProfile> V4L2PixFmtToVideoCodecProfiles(
+ uint32_t pix_fmt,
+ bool is_encoder);
+ // Calculates the largest plane's allocation size requested by a V4L2 device.
+ static Size AllocatedSizeFromV4L2Format(const struct v4l2_format& format);
+
+ // Convert required H264 profile and level to V4L2 enums.
+ static int32_t VideoCodecProfileToV4L2H264Profile(VideoCodecProfile profile);
+ static int32_t H264LevelIdcToV4L2H264Level(uint8_t level_idc);
+
+ // Converts v4l2_memory to a string.
+ static const char* V4L2MemoryToString(const v4l2_memory memory);
+
+ // Returns the printable name of a v4l2_buf_type.
+ static const char* V4L2BufferTypeToString(const enum v4l2_buf_type buf_type);
+
+ // Composes human readable string of v4l2_format.
+ static std::string V4L2FormatToString(const struct v4l2_format& format);
+
+ // Composes human readable string of v4l2_buffer.
+ static std::string V4L2BufferToString(const struct v4l2_buffer& buffer);
+
+ // Composes VideoFrameLayout based on v4l2_format.
+ // If error occurs, it returns base::nullopt.
+ static base::Optional<VideoFrameLayout> V4L2FormatToVideoFrameLayout(
+ const struct v4l2_format& format);
+
+ // Returns number of planes of |pix_fmt|.
+ static size_t GetNumPlanesOfV4L2PixFmt(uint32_t pix_fmt);
+
+ enum class Type {
+ kDecoder,
+ kEncoder,
+ kImageProcessor,
+ kJpegDecoder,
+ kJpegEncoder,
+ };
+
+ // Create and initialize an appropriate V4L2Device instance for the current
+ // platform, or return nullptr if not available.
+ static scoped_refptr<V4L2Device> Create();
+
+ // Open a V4L2 device of |type| for use with |v4l2_pixfmt|.
+ // Return true on success.
+ // The device will be closed in the destructor.
+ virtual bool Open(Type type, uint32_t v4l2_pixfmt) = 0;
+
+ // Returns the V4L2Queue corresponding to the requested |type|, or nullptr
+ // if the requested queue type is not supported.
+ scoped_refptr<V4L2Queue> GetQueue(enum v4l2_buf_type type);
+
+ // Parameters and return value are the same as for the standard ioctl() system
+ // call.
+ virtual int Ioctl(int request, void* arg) = 0;
+
+ // This method sleeps until either:
+ // - SetDevicePollInterrupt() is called (on another thread),
+ // - |poll_device| is true, and there is new data to be read from the device,
+ // or an event from the device has arrived; in the latter case
+ // |*event_pending| will be set to true.
+ // Returns false on error, true otherwise.
+ // This method should be called from a separate thread.
+ virtual bool Poll(bool poll_device, bool* event_pending) = 0;
+
+ // These methods are used to interrupt the thread sleeping on Poll() and force
+ // it to return regardless of device state, which is usually when the client
+ // is no longer interested in what happens with the device (on cleanup,
+ // client state change, etc.). When SetDevicePollInterrupt() is called, Poll()
+ // will return immediately, and any subsequent calls to it will also do so
+ // until ClearDevicePollInterrupt() is called.
+ virtual bool SetDevicePollInterrupt() = 0;
+ virtual bool ClearDevicePollInterrupt() = 0;
+
+ // Wrappers for standard mmap/munmap system calls.
+ virtual void* Mmap(void* addr,
+ unsigned int len,
+ int prot,
+ int flags,
+ unsigned int offset) = 0;
+ virtual void Munmap(void* addr, unsigned int len) = 0;
+
+ // Return a vector of dmabuf file descriptors, exported for V4L2 buffer with
+ // |index|, assuming the buffer contains |num_planes| V4L2 planes and is of
+ // |type|. Return an empty vector on failure.
+ // The caller is responsible for closing the file descriptors after use.
+ virtual std::vector<base::ScopedFD> GetDmabufsForV4L2Buffer(
+ int index,
+ size_t num_planes,
+ enum v4l2_buf_type type) = 0;
+
+ // Returns the preferred V4L2 input formats for |type| or empty if none.
+ virtual std::vector<uint32_t> PreferredInputFormat(Type type) = 0;
+
+ // NOTE: The below methods to query capabilities have a side effect of
+ // closing the previously-open device, if any, and should not be called after
+ // Open().
+ // TODO(posciak): fix this.
+
+ // Get the minimum and maximum resolution for fourcc |pixelformat| and store
+ // them in |min_resolution| and |max_resolution|.
+ void GetSupportedResolution(uint32_t pixelformat,
+ Size* min_resolution,
+ Size* max_resolution);
+
+ std::vector<uint32_t> EnumerateSupportedPixelformats(v4l2_buf_type buf_type);
+
+ // Return V4L2 pixelformats supported by the available image processor
+ // devices for |buf_type|.
+ virtual std::vector<uint32_t> GetSupportedImageProcessorPixelformats(
+ v4l2_buf_type buf_type) = 0;
+
+ // Return supported profiles for decoder, including only profiles for given
+ // fourcc |pixelformats|.
+ virtual VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles(
+ const size_t num_formats,
+ const uint32_t pixelformats[]) = 0;
+
+ // Return supported profiles for encoder.
+ virtual VideoEncodeAccelerator::SupportedProfiles
+ GetSupportedEncodeProfiles() = 0;
+
+ // Return true if image processing is supported, false otherwise.
+ virtual bool IsImageProcessingSupported() = 0;
+
+ // Return true if JPEG codec is supported, false otherwise.
+ virtual bool IsJpegDecodingSupported() = 0;
+ virtual bool IsJpegEncodingSupported() = 0;
+
+ // Start polling on this V4L2Device. |event_callback| will be posted to
+ // the caller's sequence if a buffer is ready to be dequeued and/or a V4L2
+ // event has been posted. |error_callback| will be posted to the client's
+ // sequence if a polling error has occurred.
+ bool StartPolling(V4L2DevicePoller::EventCallback event_callback,
+ base::RepeatingClosure error_callback);
+ // Stop polling this V4L2Device if polling was active. No new events will
+ // be posted after this method has returned.
+ bool StopPolling();
+ // Schedule a polling event if polling is enabled. This method is intended
+ // to be called from V4L2Queue, clients should not need to call it directly.
+ void SchedulePoll();
+
+ // Check whether the V4L2 control with specified |ctrl_id| is supported.
+ bool IsCtrlExposed(uint32_t ctrl_id);
+ // Set the specified list of |ctrls| for the specified |ctrl_class|, returns
+ // whether the operation succeeded.
+ bool SetExtCtrls(uint32_t ctrl_class, std::vector<V4L2ExtCtrl> ctrls);
+
+ // Check whether the V4L2 command with specified |command_id| is supported.
+ bool IsCommandSupported(uint32_t command_id);
+ // Check whether the V4L2 device has the specified |capabilities|.
+ bool HasCapabilities(uint32_t capabilities);
+
+ protected:
+ friend class base::RefCountedThreadSafe<V4L2Device>;
+ V4L2Device();
+ virtual ~V4L2Device();
+
+ VideoDecodeAccelerator::SupportedProfiles EnumerateSupportedDecodeProfiles(
+ const size_t num_formats,
+ const uint32_t pixelformats[]);
+
+ VideoEncodeAccelerator::SupportedProfiles EnumerateSupportedEncodeProfiles();
+
+ private:
+ // Perform platform-specific initialization of the device instance.
+ // Return true on success, false on error or if the particular implementation
+ // is not available.
+ virtual bool Initialize() = 0;
+
+ // Associates a v4l2_buf_type to its queue.
+ base::flat_map<enum v4l2_buf_type, V4L2Queue*> queues_;
+
+ // Callback that is called upon a queue's destruction, to clean up its
+ // pointer in queues_.
+ void OnQueueDestroyed(v4l2_buf_type buf_type);
+
+ // Used if StartPolling() is called, to signal the user that an event
+ // happened or a buffer is ready to be dequeued.
+ std::unique_ptr<V4L2DevicePoller> device_poller_;
+
+ // Indicates whether the request queue creation has been tried once.
+ bool requests_queue_creation_called_ = false;
+
+ SEQUENCE_CHECKER(client_sequence_checker_);
+};
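+
+// A high-level usage sketch (illustrative only; OnEvent() and OnError() are
+// hypothetical client callbacks, and a real client such as
+// V4L2VideoDecodeAccelerator drives this from dedicated threads):
+//
+//   scoped_refptr<V4L2Device> device = V4L2Device::Create();
+//   if (!device || !device->Open(V4L2Device::Type::kDecoder,
+//                                V4L2_PIX_FMT_H264))
+//     return false;
+//   device->StartPolling(base::Bind(&OnEvent), base::Bind(&OnError));
+//   // ... use GetQueue() to configure and stream queues ...
+//   device->StopPolling();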
+
+} // namespace media
+
+#endif // V4L2_DEVICE_H_
diff --git a/accel/v4l2_device_poller.cc b/accel/v4l2_device_poller.cc
new file mode 100644
index 0000000..c5eb820
--- /dev/null
+++ b/accel/v4l2_device_poller.cc
@@ -0,0 +1,140 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 22d34680c8ac
+
+#include "v4l2_device_poller.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_checker.h"
+
+#include "macros.h"
+#include "v4l2_device.h"
+
+namespace media {
+
+V4L2DevicePoller::V4L2DevicePoller(V4L2Device* const device,
+ const std::string& thread_name)
+ : device_(device),
+ poll_thread_(std::move(thread_name)),
+ trigger_poll_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ stop_polling_(false) {
+ DETACH_FROM_SEQUENCE(client_sequence_checker_);
+}
+
+V4L2DevicePoller::~V4L2DevicePoller() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ StopPolling();
+}
+
+bool V4L2DevicePoller::StartPolling(EventCallback event_callback,
+ base::RepeatingClosure error_callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ if (IsPolling())
+ return true;
+
+ DVLOGF(4) << "Starting polling";
+
+ client_task_runner_ = base::SequencedTaskRunnerHandle::Get();
+ error_callback_ = error_callback;
+
+ if (!poll_thread_.Start()) {
+ VLOGF(1) << "Failed to start device poll thread";
+ return false;
+ }
+
+ event_callback_ = std::move(event_callback);
+
+ stop_polling_.store(false);
+ poll_thread_.task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&V4L2DevicePoller::DevicePollTask,
+ base::Unretained(this)));
+
+ DVLOGF(3) << "Polling thread started";
+
+ SchedulePoll();
+
+ return true;
+}
+
+bool V4L2DevicePoller::StopPolling() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ if (!IsPolling())
+ return true;
+
+ DVLOGF(4) << "Stopping polling";
+
+ stop_polling_.store(true);
+
+ trigger_poll_.Signal();
+
+ if (!device_->SetDevicePollInterrupt()) {
+ VLOGF(1) << "Failed to interrupt device poll.";
+ return false;
+ }
+
+ DVLOGF(3) << "Stop device poll thread";
+ poll_thread_.Stop();
+
+ if (!device_->ClearDevicePollInterrupt()) {
+ VLOGF(1) << "Failed to clear interrupting device poll.";
+ return false;
+ }
+
+ DVLOGF(4) << "Polling thread stopped";
+
+ return true;
+}
+
+bool V4L2DevicePoller::IsPolling() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ return poll_thread_.IsRunning();
+}
+
+void V4L2DevicePoller::SchedulePoll() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+
+ // A call to DevicePollTask() will be posted when we actually start polling.
+ if (!IsPolling())
+ return;
+
+ DVLOGF(4) << "Scheduling poll";
+
+ trigger_poll_.Signal();
+}
+
+void V4L2DevicePoller::DevicePollTask() {
+ DCHECK(poll_thread_.task_runner()->RunsTasksInCurrentSequence());
+
+ while (true) {
+ DVLOGF(4) << "Waiting for poll to be scheduled.";
+ trigger_poll_.Wait();
+
+ if (stop_polling_) {
+ DVLOGF(4) << "Poll stopped, exiting.";
+ break;
+ }
+
+ bool event_pending = false;
+ DVLOGF(4) << "Polling device.";
+ if (!device_->Poll(true, &event_pending)) {
+ VLOGF(1) << "An error occurred while polling, calling error callback";
+ client_task_runner_->PostTask(FROM_HERE, error_callback_);
+ return;
+ }
+
+ DVLOGF(4) << "Poll returned, calling event callback.";
+ client_task_runner_->PostTask(FROM_HERE,
+ base::Bind(event_callback_, event_pending));
+ }
+}
+
+} // namespace media
diff --git a/accel/v4l2_device_poller.h b/accel/v4l2_device_poller.h
new file mode 100644
index 0000000..aac3d8c
--- /dev/null
+++ b/accel/v4l2_device_poller.h
@@ -0,0 +1,97 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: f65c38dcdac2
+
+#ifndef V4L2_V4L2_DEVICE_POLLER_H_
+#define V4L2_V4L2_DEVICE_POLLER_H_
+
+#include <atomic>
+
+#include "base/callback_forward.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+
+namespace media {
+
+class V4L2Device;
+
+// Allows a client to poll() on a given V4L2Device and be signaled when
+// a buffer is ready to be dequeued or a V4L2 event has been received. Polling
+// is done on a dedicated thread, and notifications are delivered in the form of
+// a callback to the listener's sequence.
+//
+// All the methods of this class (with the exception of the constructor) must be
+// called from the same sequence.
+//
+// Note that the service callback may also be called when no particular event
+// occurred due to the way poll() works. It is the responsibility of the caller
+// to call SchedulePoll() again if there may still be pending events.
+class V4L2DevicePoller {
+ public:
+ // Callback to be called when buffer ready/V4L2 event has potentially been
+ // polled. |event| is set if a V4L2 event has been detected.
+ using EventCallback = base::RepeatingCallback<void(bool event)>;
+
+ // Create a poller for |device|, using a thread named |thread_name|.
+ // Notifications won't start until |StartPolling()| is called.
+ V4L2DevicePoller(V4L2Device* const device, const std::string& thread_name);
+ ~V4L2DevicePoller();
+
+ // Starts polling. |event_callback| will be posted on the caller's sequence
+ // every time an event occurs. The client is then responsible for consuming
+ // all pending events in that callback. If new events may still happen after
+ // the callback has run, the client must call |SchedulePoll()| again in order
+ // to be notified for them.
+ //
+ // If an error occurs during polling, |error_callback| will be posted on the
+ // caller's sequence.
+ bool StartPolling(EventCallback event_callback,
+ base::RepeatingClosure error_callback);
+ // Stop polling and stop the thread. The poller won't post any new event to
+ // the caller's sequence after this method has returned.
+ bool StopPolling();
+ // Returns true if currently polling, false otherwise.
+ bool IsPolling() const;
+ // Attempts to poll the V4L2 device. This method should be called whenever
+ // doing something that may trigger an event of interest (buffer dequeue or
+ // V4L2 event), for instance queueing a buffer. In the absence of a pending
+ // event, poll() will return immediately and the service callback will be
+ // posted to the caller's sequence. The client is then responsible for calling
+ // this method again when it is interested in receiving events.
+ void SchedulePoll();
+
+ private:
+ // Perform a poll() on |device_| and post either |event_callback_| or
+ // |error_callback_| on the client's sequence when poll() returns.
+ void DevicePollTask();
+
+ // V4L2 device we are polling.
+ V4L2Device* const device_;
+ // Thread on which polling is done.
+ base::Thread poll_thread_;
+ // Callback to post to the client's sequence when an event occurs.
+ EventCallback event_callback_;
+ // Closure to post to the client's sequence when an error occurs.
+ base::RepeatingClosure error_callback_;
+ // Client sequence's task runner, where closures are posted.
+ scoped_refptr<base::SequencedTaskRunner> client_task_runner_;
+
+ // Since poll() returns immediately if no buffers have been queued, we cannot
+ // rely on it to pause the polling thread until an event occurs. Instead,
+ // the polling thread will wait on this WaitableEvent (signaled by
+ // |SchedulePoll()|) before calling poll(), so we only call it when we are
+ // actually waiting for an event.
+ base::WaitableEvent trigger_poll_;
+ // Set to true when we wish to stop polling, instructing the poller thread
+ // to break its loop.
+ std::atomic_bool stop_polling_;
+
+ SEQUENCE_CHECKER(client_sequence_checker_);
+};
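+
+// A minimal usage sketch (illustrative only; OnPollEvent() and OnPollError()
+// are hypothetical client callbacks, and |device| must outlive the poller):
+//
+//   V4L2DevicePoller poller(device.get(), "MyDevicePollerThread");
+//   poller.StartPolling(base::Bind(&OnPollEvent), base::Bind(&OnPollError));
+//   poller.SchedulePoll();  // E.g. after queuing a buffer.
+//   ...
+//   poller.StopPolling();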
+
+} // namespace media
+
+#endif // V4L2_V4L2_DEVICE_POLLER_H_
diff --git a/accel/v4l2_video_decode_accelerator.cc b/accel/v4l2_video_decode_accelerator.cc
new file mode 100644
index 0000000..487a01c
--- /dev/null
+++ b/accel/v4l2_video_decode_accelerator.cc
@@ -0,0 +1,1922 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 91175b1
+// Note: image processor is not ported.
+
+#include "v4l2_video_decode_accelerator.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/videodev2.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <numeric>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+
+#include "generic_v4l2_device.h"
+#include "macros.h"
+#include "rect.h"
+#include "shared_memory_region.h"
+
+#define NOTIFY_ERROR(x) \
+ do { \
+ VLOGF(1) << "Setting error state: " << x; \
+ SetErrorState(x); \
+ } while (0)
+
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
+ do { \
+ if (device_->Ioctl(type, arg) != 0) { \
+ VPLOGF(1) << "ioctl() failed: " << type_str; \
+ NOTIFY_ERROR(PLATFORM_FAILURE); \
+ return value; \
+ } \
+ } while (0)
+
+#define IOCTL_OR_ERROR_RETURN(type, arg) \
+ IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)
+
+#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
+ IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
+
+#define IOCTL_OR_LOG_ERROR(type, arg) \
+ do { \
+ if (device_->Ioctl(type, arg) != 0) \
+ VPLOGF(1) << "ioctl() failed: " << #type; \
+ } while (0)
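+
+// For example (illustrative), in a function returning bool,
+//   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
+// logs the failed ioctl, calls NOTIFY_ERROR(PLATFORM_FAILURE) and returns
+// false from the enclosing function if the ioctl fails.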
+
+namespace media {
+
+namespace {
+// Copied from an older version of the V4L2 device code.
+VideoPixelFormat V4L2PixFmtToVideoPixelFormat(uint32_t pix_fmt) {
+ switch (pix_fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
+ return PIXEL_FORMAT_NV12;
+
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV420M:
+ return PIXEL_FORMAT_I420;
+
+ case V4L2_PIX_FMT_YVU420:
+ return PIXEL_FORMAT_YV12;
+
+ case V4L2_PIX_FMT_YUV422M:
+ return PIXEL_FORMAT_I422;
+
+ case V4L2_PIX_FMT_RGB32:
+ return PIXEL_FORMAT_ARGB;
+
+ default:
+ DVLOGF(1) << "Add more cases as needed";
+ return PIXEL_FORMAT_UNKNOWN;
+ }
+}
+} // namespace
+
+// static
+const uint32_t V4L2VideoDecodeAccelerator::supported_input_fourccs_[] = {
+ V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9,
+};
+
+struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
+ BitstreamBufferRef(
+ base::WeakPtr<Client>& client,
+ scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+ BitstreamBuffer buffer,
+ int32_t input_id);
+ ~BitstreamBufferRef();
+ const base::WeakPtr<Client> client;
+ const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
+ base::ScopedFD dmabuf_fd;
+ const size_t offset;
+ const size_t size;
+ const int32_t input_id;
+};
+
+V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
+ base::WeakPtr<Client>& client,
+ scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+ BitstreamBuffer buffer,
+ int32_t input_id)
+ : client(client),
+ client_task_runner(client_task_runner),
+ offset(buffer.offset()),
+ size(buffer.size()),
+ input_id(input_id) {
+ base::SharedMemoryHandle handle = buffer.handle();
+ // NOTE: BitstreamBuffer and SharedMemoryHandle don't own the file
+ // descriptor, so there is no need to duplicate it here.
+ // |handle| is invalid only if this is a dummy flush buffer.
+ DCHECK(handle.IsValid() || input_id == kFlushBufferId);
+ if (handle.IsValid())
+ dmabuf_fd = base::ScopedFD(handle.GetHandle());
+}
+
+V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
+ if (input_id >= 0) {
+ client_task_runner->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::NotifyEndOfBitstreamBuffer, client, input_id));
+ }
+}
+
+V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
+ : state(kFree),
+ picture_id(-1),
+ cleared(false) {}
+
+V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
+
+V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(bool cleared,
+ const Picture& picture)
+ : cleared(cleared), picture(picture) {}
+
+V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
+
+V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
+ const scoped_refptr<V4L2Device>& device)
+ : child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ decoder_thread_("V4L2DecoderThread"),
+ decoder_state_(kUninitialized),
+ output_mode_(Config::OutputMode::ALLOCATE),
+ device_(device),
+ decoder_delay_bitstream_buffer_id_(-1),
+ decoder_decode_buffer_tasks_scheduled_(0),
+ decoder_frames_at_client_(0),
+ decoder_flushing_(false),
+ decoder_cmd_supported_(false),
+ flush_awaiting_last_output_buffer_(false),
+ reset_pending_(false),
+ input_streamon_(false),
+ input_buffer_queued_count_(0),
+ input_buffer_size_(0),
+ output_streamon_(false),
+ output_buffer_queued_count_(0),
+ output_dpb_size_(0),
+ output_planes_count_(0),
+ picture_clearing_count_(0),
+ device_poll_thread_("V4L2DevicePollThread"),
+ video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
+ input_format_fourcc_(0),
+ output_format_fourcc_(0),
+ weak_this_factory_(this) {
+ weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() {
+ DCHECK(!decoder_thread_.IsRunning());
+ DCHECK(!device_poll_thread_.IsRunning());
+ DVLOGF(2);
+
+ // These maps have members that should be manually destroyed, e.g. file
+ // descriptors, mmap() segments, etc.
+ DCHECK(input_buffer_map_.empty());
+ DCHECK(output_buffer_map_.empty());
+}
+
+bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
+ Client* client) {
+ VLOGF(2) << "profile: " << config.profile
+ << ", output_mode=" << static_cast<int>(config.output_mode);
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kUninitialized);
+
+ if (config.output_mode != Config::OutputMode::IMPORT) {
+ NOTREACHED() << "Only IMPORT OutputModes are supported";
+ return false;
+ }
+
+ client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
+ client_ = client_ptr_factory_->GetWeakPtr();
+ // If we haven't been set up to decode on a separate thread via
+ // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+ // decode tasks.
+ if (!decode_task_runner_) {
+ decode_task_runner_ = child_task_runner_;
+ DCHECK(!decode_client_);
+ decode_client_ = client_;
+ }
+
+ video_profile_ = config.profile;
+
+ input_format_fourcc_ =
+ V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, false);
+
+ if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) {
+ VLOGF(1) << "Failed to open device for profile: " << config.profile
+ << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
+ return false;
+ }
+
+ // Capabilities check.
+ struct v4l2_capability caps;
+ const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
+ if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
+ VLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP"
+ << ", caps check failed: 0x" << std::hex << caps.capabilities;
+ return false;
+ }
+
+ if (!SetupFormats())
+ return false;
+
+ if (!decoder_thread_.Start()) {
+ VLOGF(1) << "decoder thread failed to start";
+ return false;
+ }
+
+ decoder_state_ = kInitialized;
+ output_mode_ = config.output_mode;
+
+ // InitializeTask will NOTIFY_ERROR on failure.
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::InitializeTask,
+ base::Unretained(this)));
+
+ return true;
+}
+
+void V4L2VideoDecodeAccelerator::InitializeTask() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kInitialized);
+
+ // Subscribe to the resolution change event.
+ struct v4l2_event_subscription sub;
+ memset(&sub, 0, sizeof(sub));
+ sub.type = V4L2_EVENT_SOURCE_CHANGE;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_SUBSCRIBE_EVENT, &sub);
+
+ if (!CreateInputBuffers()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ decoder_cmd_supported_ = IsDecoderCmdSupported();
+
+ if (!StartDevicePoll())
+ return;
+}
+
+void V4L2VideoDecodeAccelerator::Decode(
+ const BitstreamBuffer& bitstream_buffer) {
+ DVLOGF(4) << "input_id=" << bitstream_buffer.id()
+ << ", size=" << bitstream_buffer.size();
+ DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+ if (bitstream_buffer.id() < 0) {
+ VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ // DecodeTask() will take care of running a DecodeBufferTask().
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeTask,
+ base::Unretained(this), bitstream_buffer));
+}
+
+void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
+ const std::vector<PictureBuffer>& buffers) {
+ VLOGF(2) << "buffer_count=" << buffers.size();
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2VideoDecodeAccelerator::AssignPictureBuffersTask,
+ base::Unretained(this), buffers));
+}
+
+void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask(
+ const std::vector<PictureBuffer>& buffers) {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kAwaitingPictureBuffers);
+ DCHECK(!output_streamon_);
+
+ uint32_t req_buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
+
+ if (buffers.size() < req_buffer_count) {
+ VLOGF(1) << "Failed to provide requested picture buffers. (Got "
+ << buffers.size() << ", requested " << req_buffer_count << ")";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ // Call S_FMT on the output queue if the frame size allocated by gralloc
+ // differs from the frame size given by the driver. NOTE: This S_FMT is not
+ // needed if the memory type of the output queue is MMAP, because then the
+ // driver allocates the memory itself.
+ const Size& allocated_coded_size = buffers[0].size();
+ if (allocated_coded_size != coded_size_) {
+ struct v4l2_format format = {};
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.width = allocated_coded_size.width();
+ format.fmt.pix_mp.height = allocated_coded_size.height();
+ format.fmt.pix_mp.pixelformat = output_format_fourcc_;
+ format.fmt.pix_mp.num_planes = output_planes_count_;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_S_FMT, &format);
+ coded_size_.SetSize(format.fmt.pix_mp.width, format.fmt.pix_mp.height);
+ const Size& new_visible_size = GetVisibleSize(coded_size_);
+ if (new_visible_size != visible_size_) {
+ VLOGF(1) << "Visible size is changed by resetting coded_size,"
+ << "the previous visible size=" << visible_size_.ToString()
+ << "the current visible size=" << new_visible_size.ToString();
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ }
+
+ // Allocate the output buffers.
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = buffers.size();
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
+
+ if (reqbufs.count < buffers.size()) {
+ VLOGF(1) << "Could not allocate enough output buffers";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ DCHECK(free_output_buffers_.empty());
+ DCHECK(output_buffer_map_.empty());
+ output_buffer_map_.resize(buffers.size());
+
+ // Always use IMPORT output mode for Android solution.
+ DCHECK_EQ(output_mode_, Config::OutputMode::IMPORT);
+
+ for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+ OutputRecord& output_record = output_buffer_map_[i];
+ DCHECK_EQ(output_record.state, kFree);
+ DCHECK_EQ(output_record.picture_id, -1);
+ DCHECK_EQ(output_record.cleared, false);
+
+ output_record.picture_id = buffers[i].id();
+
+ // This will remain kAtClient until ImportBufferForPicture is called, either
+ // by the client, or by ourselves, if we are allocating.
+ output_record.state = kAtClient;
+
+ DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
+ }
+}
+
+void V4L2VideoDecodeAccelerator::ImportBufferForPicture(
+ int32_t picture_buffer_id,
+ VideoPixelFormat pixel_format,
+ const NativePixmapHandle& native_pixmap_handle) {
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+ if (output_mode_ != Config::OutputMode::IMPORT) {
+ VLOGF(1) << "Cannot import in non-import mode";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ if (pixel_format != V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)) {
+ VLOGF(1) << "Unsupported import format: " << pixel_format;
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ std::vector<base::ScopedFD> dmabuf_fds;
+ std::vector<size_t> offsets;
+ for (const auto& plane : native_pixmap_handle.planes)
+ offsets.push_back(plane.offset);
+
+ for (const auto& fd : native_pixmap_handle.fds) {
+ DCHECK_NE(fd.fd, -1);
+ dmabuf_fds.push_back(base::ScopedFD(fd.fd));
+ }
+
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask,
+ base::Unretained(this), picture_buffer_id,
+ std::move(offsets), base::Passed(&dmabuf_fds)));
+}
+
+void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask(
+ int32_t picture_buffer_id,
+ std::vector<size_t> offsets,
+ std::vector<base::ScopedFD> dmabuf_fds) {
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id
+ << ", dmabuf_fds.size()=" << dmabuf_fds.size();
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ const auto iter =
+ std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
+ [picture_buffer_id](const OutputRecord& output_record) {
+ return output_record.picture_id == picture_buffer_id;
+ });
+ if (iter == output_buffer_map_.end()) {
+ // It's possible that we've already posted a DismissPictureBuffer for this
+ // picture, but it has not yet executed when this ImportBufferForPicture was
+ // posted to us by the client. In that case just ignore this (we've already
+ // dismissed it and accounted for that).
+ DVLOGF(3) << "got picture id=" << picture_buffer_id
+ << " not in use (anymore?).";
+ return;
+ }
+
+ if (iter->state != kAtClient) {
+ VLOGF(1) << "Cannot import buffer not owned by client";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ size_t index = iter - output_buffer_map_.begin();
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+ index),
+ 0);
+
+ iter->state = kFree;
+
+ DCHECK_LE(output_planes_count_, dmabuf_fds.size());
+
+ iter->output_fds = std::move(dmabuf_fds);
+ iter->offsets = std::move(offsets);
+
+ if (decoder_state_ == kAwaitingPictureBuffers)
+ decoder_state_ = kDecoding;
+
+ free_output_buffers_.push_back(index);
+ if (decoder_state_ != kChangingResolution) {
+ Enqueue();
+ ScheduleDecodeBufferTaskIfNeeded();
+ }
+}
+
+void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
+ DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+ // Must be run on the child thread.
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
+ base::Unretained(this), picture_buffer_id));
+}
+
+void V4L2VideoDecodeAccelerator::Flush() {
+ VLOGF(2);
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::FlushTask,
+ base::Unretained(this)));
+}
+
+void V4L2VideoDecodeAccelerator::Reset() {
+ VLOGF(2);
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetTask,
+ base::Unretained(this)));
+}
+
+void V4L2VideoDecodeAccelerator::Destroy() {
+ VLOGF(2);
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+ // We're destroying; cancel all callbacks.
+ client_ptr_factory_.reset();
+ weak_this_factory_.InvalidateWeakPtrs();
+
+ // If the decoder thread is running, destroy using posted task.
+ if (decoder_thread_.IsRunning()) {
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DestroyTask,
+ base::Unretained(this)));
+ // DestroyTask() will cause the decoder_thread_ to flush all tasks.
+ decoder_thread_.Stop();
+ } else {
+ // Otherwise, call the destroy task directly.
+ DestroyTask();
+ }
+
+ delete this;
+ VLOGF(2) << "Destroyed.";
+}
+
+bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ VLOGF(2);
+ decode_client_ = decode_client;
+ decode_task_runner_ = decode_task_runner;
+ return true;
+}
+
+// static
+VideoDecodeAccelerator::SupportedProfiles
+V4L2VideoDecodeAccelerator::GetSupportedProfiles() {
+ scoped_refptr<V4L2Device> device(new GenericV4L2Device());
+ if (!device)
+ return SupportedProfiles();
+
+ return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
+ supported_input_fourccs_);
+}
+
+void V4L2VideoDecodeAccelerator::DecodeTask(
+ const BitstreamBuffer& bitstream_buffer) {
+ DVLOGF(4) << "input_id=" << bitstream_buffer.id();
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+
+ // Invalid handle.
+ if (!bitstream_buffer.handle().IsValid()) {
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ int bitstream_id = bitstream_buffer.id();
+ std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
+ decode_client_, decode_task_runner_,
+ std::move(bitstream_buffer), bitstream_id));
+
+ // Skip empty buffer.
+ if (bitstream_record->size == 0)
+ return;
+
+ if (decoder_state_ == kResetting || decoder_flushing_) {
+ // In the case that we're resetting or flushing, we need to delay decoding
+ // the BitstreamBuffers that come after the Reset() or Flush() call. When
+ // we're here, we know that this DecodeTask() was scheduled by a Decode()
+ // call that came after (in the client thread) the Reset() or Flush() call;
+ // thus set up the delay if necessary.
+ if (decoder_delay_bitstream_buffer_id_ == -1)
+ decoder_delay_bitstream_buffer_id_ = bitstream_record->input_id;
+ } else if (decoder_state_ == kError) {
+ VLOGF(2) << "early out: kError state";
+ return;
+ }
+
+ decoder_input_queue_.push(std::move(bitstream_record));
+ decoder_decode_buffer_tasks_scheduled_++;
+ DecodeBufferTask();
+}
+
+void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
+ DVLOGF(4);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+
+ decoder_decode_buffer_tasks_scheduled_--;
+
+ if (decoder_state_ != kInitialized && decoder_state_ != kDecoding) {
+ DVLOGF(3) << "early out: state=" << decoder_state_;
+ return;
+ }
+
+ if (decoder_current_bitstream_buffer_ == NULL) {
+ if (decoder_input_queue_.empty()) {
+ // We're waiting for a new buffer -- exit without scheduling a new task.
+ return;
+ }
+ const std::unique_ptr<BitstreamBufferRef>& buffer_ref = decoder_input_queue_.front();
+ if (decoder_delay_bitstream_buffer_id_ == buffer_ref->input_id) {
+ // We're asked to delay decoding on this and subsequent buffers.
+ return;
+ }
+
+ // Set up to use the next buffer.
+ decoder_current_bitstream_buffer_ = std::move(decoder_input_queue_.front());
+ decoder_input_queue_.pop();
+ const auto& dmabuf_fd = decoder_current_bitstream_buffer_->dmabuf_fd;
+ if (dmabuf_fd.is_valid()) {
+ DVLOGF(4) << "reading input_id="
+ << decoder_current_bitstream_buffer_->input_id
+ << ", fd=" << dmabuf_fd.get()
+ << ", size=" << decoder_current_bitstream_buffer_->size;
+ } else {
+ DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+ DVLOGF(4) << "reading input_id=kFlushBufferId";
+ }
+ }
+ bool schedule_task = false;
+ const auto& dmabuf_fd = decoder_current_bitstream_buffer_->dmabuf_fd;
+ if (!dmabuf_fd.is_valid()) {
+ // This is a dummy buffer, queued to flush the pipe. Flush.
+ DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+ if (TrySubmitInputFrame()) {
+ VLOGF(2) << "enqueued flush buffer";
+ schedule_task = true;
+ } else {
+ // If we failed to enqueue the empty buffer (due to pipeline
+ // backpressure), don't advance the bitstream buffer queue, and don't
+ // schedule the next task. This bitstream buffer queue entry will get
+ // reprocessed when the pipeline frees up.
+ schedule_task = false;
+ }
+ } else {
+ DCHECK_GT(decoder_current_bitstream_buffer_->size, 0u);
+ switch (decoder_state_) {
+ case kInitialized:
+ schedule_task = DecodeBufferInitial();
+ break;
+ case kDecoding:
+ schedule_task = DecodeBufferContinue();
+ break;
+ default:
+ NOTIFY_ERROR(ILLEGAL_STATE);
+ return;
+ }
+ }
+ if (decoder_state_ == kError) {
+ // Failed during decode.
+ return;
+ }
+
+ if (schedule_task) {
+ ScheduleDecodeBufferTaskIfNeeded();
+ }
+}
+
+void V4L2VideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ // If we're behind on tasks, schedule another one.
+ int buffers_to_decode = decoder_input_queue_.size();
+ if (decoder_current_bitstream_buffer_ != NULL)
+ buffers_to_decode++;
+ if (decoder_decode_buffer_tasks_scheduled_ < buffers_to_decode) {
+ decoder_decode_buffer_tasks_scheduled_++;
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeBufferTask,
+ base::Unretained(this)));
+ }
+}
+
+bool V4L2VideoDecodeAccelerator::DecodeBufferInitial() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kInitialized);
+ // Initial decode. We haven't been able to get output stream format info yet.
+ // Get it, and start decoding.
+
+ if (!TrySubmitInputFrame())
+ return false;
+
+ // Recycle buffers.
+ Dequeue();
+
+ // If the initial resolution change event has not arrived yet, the driver
+ // probably needs more stream data to determine the format.
+ // Return true and schedule next buffer without changing status to kDecoding.
+ // If the initial resolution change is done and coded size is known, we may
+ // still have to wait for AssignPictureBuffers() and output buffers to be
+ // allocated.
+ if (coded_size_.IsEmpty() || output_buffer_map_.empty()) {
+ // Need more stream to decode format, return true and schedule next buffer.
+ return true;
+ }
+
+ decoder_state_ = kDecoding;
+ ScheduleDecodeBufferTaskIfNeeded();
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::DecodeBufferContinue() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kDecoding);
+
+ return TrySubmitInputFrame();
+}
+
+bool V4L2VideoDecodeAccelerator::TrySubmitInputFrame() {
+ DVLOGF(4);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+ DCHECK_NE(decoder_state_, kResetting);
+ DCHECK_NE(decoder_state_, kError);
+ CHECK(decoder_current_bitstream_buffer_);
+
+ // No free input buffer.
+ if (free_input_buffers_.empty())
+ return false;
+
+ const int input_buffer_index = free_input_buffers_.back();
+ free_input_buffers_.pop_back();
+ InputRecord& input_record = input_buffer_map_[input_buffer_index];
+ DCHECK(!input_record.bitstream_buffer);
+
+ // Pass the required info to InputRecord.
+ input_record.bitstream_buffer = std::move(decoder_current_bitstream_buffer_);
+ // Queue it.
+ input_ready_queue_.push(input_buffer_index);
+ DVLOGF(4) << "submitting input_id=" << input_record.bitstream_buffer->input_id;
+ // Enqueue once, since there is new input available for the device.
+ Enqueue();
+
+ return (decoder_state_ != kError);
+}
+
+void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) {
+ DVLOGF(4);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+
+ if (decoder_state_ == kResetting) {
+ DVLOGF(3) << "early out: kResetting state";
+ return;
+ } else if (decoder_state_ == kError) {
+ DVLOGF(3) << "early out: kError state";
+ return;
+ } else if (decoder_state_ == kChangingResolution) {
+ DVLOGF(3) << "early out: kChangingResolution state";
+ return;
+ }
+
+ bool resolution_change_pending = false;
+ if (event_pending)
+ resolution_change_pending = DequeueResolutionChangeEvent();
+
+ if (!resolution_change_pending && coded_size_.IsEmpty()) {
+    // Some platforms do not send an initial resolution change event.
+    // To work around this, keep checking whether the initial resolution is
+    // known by explicitly querying the format after each decode, regardless
+    // of whether we received an event.
+    // This only needs to be done for the initial resolution change, i.e.
+    // while coded_size_.IsEmpty().
+
+ // Try GetFormatInfo to check if an initial resolution change can be done.
+ struct v4l2_format format;
+ Size visible_size;
+ bool again;
+ if (GetFormatInfo(&format, &visible_size, &again) && !again) {
+ resolution_change_pending = true;
+ DequeueResolutionChangeEvent();
+ }
+ }
+
+ Dequeue();
+ Enqueue();
+
+ // Clear the interrupt fd.
+ if (!device_->ClearDevicePollInterrupt()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ bool poll_device = false;
+ // Add fd, if we should poll on it.
+ // Can be polled as soon as either input or output buffers are queued.
+ if (input_buffer_queued_count_ + output_buffer_queued_count_ > 0)
+ poll_device = true;
+
+ // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
+ // so either:
+ // * device_poll_thread_ is running normally
+ // * device_poll_thread_ scheduled us, but then a ResetTask() or DestroyTask()
+ // shut it down, in which case we're either in kResetting or kError states
+ // respectively, and we should have early-outed already.
+ DCHECK(device_poll_thread_.message_loop());
+ // Queue the DevicePollTask() now.
+ device_poll_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+ base::Unretained(this), poll_device));
+
+ DVLOGF(3) << "ServiceDeviceTask(): buffer counts: DEC["
+ << decoder_input_queue_.size() << "->"
+ << input_ready_queue_.size() << "] => DEVICE["
+ << free_input_buffers_.size() << "+"
+ << input_buffer_queued_count_ << "/"
+ << input_buffer_map_.size() << "->"
+ << free_output_buffers_.size() << "+"
+ << output_buffer_queued_count_ << "/"
+ << output_buffer_map_.size() << "] => CLIENT["
+ << decoder_frames_at_client_ << "]";
+
+ ScheduleDecodeBufferTaskIfNeeded();
+ if (resolution_change_pending)
+ StartResolutionChange();
+}
+
+void V4L2VideoDecodeAccelerator::Enqueue() {
+ DVLOGF(4);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+
+ // Drain the pipe of completed decode buffers.
+ const int old_inputs_queued = input_buffer_queued_count_;
+ while (!input_ready_queue_.empty()) {
+ const int buffer = input_ready_queue_.front();
+ InputRecord& input_record = input_buffer_map_[buffer];
+ if (input_record.bitstream_buffer->input_id == kFlushBufferId && decoder_cmd_supported_) {
+ // Send the flush command after all input buffers are dequeued. This makes
+ // sure all previous resolution changes have been handled because the
+ // driver must hold the input buffer that triggers resolution change. The
+ // driver cannot decode data in it without new output buffers. If we send
+ // the flush now and a queued input buffer triggers resolution change
+ // later, the driver will send an output buffer that has
+      // V4L2_BUF_FLAG_LAST even though some queued input buffers have not
+      // been decoded yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
+ // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
+ // to the decoder.
+ if (input_buffer_queued_count_ == 0) {
+ if (!SendDecoderCmdStop())
+ return;
+ input_ready_queue_.pop();
+ free_input_buffers_.push_back(buffer);
+ input_record.bitstream_buffer.reset();
+ } else {
+ break;
+ }
+ } else if (!EnqueueInputRecord())
+ return;
+ }
+ if (old_inputs_queued == 0 && input_buffer_queued_count_ != 0) {
+ // We just started up a previously empty queue.
+ // Queue state changed; signal interrupt.
+ if (!device_->SetDevicePollInterrupt()) {
+ VPLOGF(1) << "SetDevicePollInterrupt failed";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ // Start VIDIOC_STREAMON if we haven't yet.
+ if (!input_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+ input_streamon_ = true;
+ }
+ }
+
+ // Enqueue all the outputs we can.
+ const int old_outputs_queued = output_buffer_queued_count_;
+ while (!free_output_buffers_.empty()) {
+ if (!EnqueueOutputRecord())
+ return;
+ }
+ if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
+ // We just started up a previously empty queue.
+ // Queue state changed; signal interrupt.
+ if (!device_->SetDevicePollInterrupt()) {
+ VPLOGF(1) << "SetDevicePollInterrupt(): failed";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ // Start VIDIOC_STREAMON if we haven't yet.
+ if (!output_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+ output_streamon_ = true;
+ }
+ }
+}
+
+bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+ DVLOGF(3);
+
+ struct v4l2_event ev;
+ memset(&ev, 0, sizeof(ev));
+
+ while (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
+ if (ev.type == V4L2_EVENT_SOURCE_CHANGE) {
+ if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
+ VLOGF(2) << "got resolution change event.";
+ return true;
+ }
+ } else {
+ VLOGF(1) << "got an event (" << ev.type << ") we haven't subscribed to.";
+ }
+ }
+ return false;
+}
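+
+// An editor's sketch (assumed, not part of the original change): dequeuing
+// V4L2_EVENT_SOURCE_CHANGE above only works if the event was subscribed to
+// during initialization, roughly like this (|device_fd| is a hypothetical raw
+// fd; the real code goes through V4L2Device::Ioctl()):
+//
+//   struct v4l2_event_subscription sub;
+//   memset(&sub, 0, sizeof(sub));
+//   sub.type = V4L2_EVENT_SOURCE_CHANGE;
+//   if (ioctl(device_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) != 0)
+//     VPLOGF(1) << "ioctl() failed: VIDIOC_SUBSCRIBE_EVENT";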
+
+void V4L2VideoDecodeAccelerator::Dequeue() {
+ DVLOGF(4);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+
+ while (input_buffer_queued_count_ > 0) {
+ if (!DequeueInputBuffer())
+ break;
+ }
+ while (output_buffer_queued_count_ > 0) {
+ if (!DequeueOutputBuffer())
+ break;
+ }
+ NotifyFlushDoneIfNeeded();
+}
+
+bool V4L2VideoDecodeAccelerator::DequeueInputBuffer() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_GT(input_buffer_queued_count_, 0);
+ DCHECK(input_streamon_);
+
+ // Dequeue a completed input (VIDEO_OUTPUT) buffer, and recycle to the free
+ // list.
+ struct v4l2_buffer dqbuf;
+ struct v4l2_plane planes[1];
+ memset(&dqbuf, 0, sizeof(dqbuf));
+ memset(planes, 0, sizeof(planes));
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ dqbuf.memory = V4L2_MEMORY_DMABUF;
+ dqbuf.m.planes = planes;
+ dqbuf.length = 1;
+ if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+ if (errno == EAGAIN) {
+ // EAGAIN if we're just out of buffers to dequeue.
+ return false;
+ }
+ VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ InputRecord& input_record = input_buffer_map_[dqbuf.index];
+ DCHECK(input_record.at_device);
+ free_input_buffers_.push_back(dqbuf.index);
+ input_record.at_device = false;
+ // This will trigger NotifyEndOfBitstreamBuffer().
+ input_record.bitstream_buffer.reset();
+ input_buffer_queued_count_--;
+
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_GT(output_buffer_queued_count_, 0);
+ DCHECK(output_streamon_);
+
+ // Dequeue a completed output (VIDEO_CAPTURE) buffer, and queue to the
+ // completed queue.
+ struct v4l2_buffer dqbuf {};
+ struct v4l2_plane dqbuf_planes[VIDEO_MAX_PLANES] = {};
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dqbuf.memory = V4L2_MEMORY_DMABUF;
+ dqbuf.m.planes = dqbuf_planes;
+ dqbuf.length = output_planes_count_;
+ if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+ if (errno == EAGAIN) {
+ // EAGAIN if we're just out of buffers to dequeue.
+ return false;
+ } else if (errno == EPIPE) {
+ DVLOGF(3) << "Got EPIPE. Last output buffer was already dequeued.";
+ return false;
+ }
+ VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ OutputRecord& output_record = output_buffer_map_[dqbuf.index];
+ DCHECK_EQ(output_record.state, kAtDevice);
+ DCHECK_NE(output_record.picture_id, -1);
+ output_buffer_queued_count_--;
+
+  // Zero-byte buffers are returned as part of a flush and can be dismissed.
+ if (dqbuf.m.planes[0].bytesused > 0) {
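+    // The driver copies the timestamp of the input (OUTPUT) buffer onto the
+    // CAPTURE buffer it decodes into, so tv_sec carries the bitstream buffer
+    // id that EnqueueInputRecord() stored there.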
+ int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec;
+ DCHECK_GE(bitstream_buffer_id, 0);
+ DVLOGF(4) << "Dequeue output buffer: dqbuf index=" << dqbuf.index
+ << " bitstream input_id=" << bitstream_buffer_id;
+ output_record.state = kAtClient;
+ decoder_frames_at_client_++;
+
+ const Picture picture(output_record.picture_id, bitstream_buffer_id,
+ Rect(visible_size_), false);
+ pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
+ SendPictureReady();
+ output_record.cleared = true;
+ }
+
+ if (dqbuf.flags & V4L2_BUF_FLAG_LAST) {
+ DVLOGF(3) << "Got last output buffer. Waiting last buffer="
+ << flush_awaiting_last_output_buffer_;
+ if (flush_awaiting_last_output_buffer_) {
+ flush_awaiting_last_output_buffer_ = false;
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_START;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
+ }
+ }
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() {
+ DVLOGF(4);
+ DCHECK(!input_ready_queue_.empty());
+
+ // Enqueue an input (VIDEO_OUTPUT) buffer.
+ const int v4l2_buffer_index = input_ready_queue_.front();
+ InputRecord& input_record = input_buffer_map_[v4l2_buffer_index];
+ DCHECK(!input_record.at_device);
+ struct v4l2_buffer qbuf {};
+ struct v4l2_plane qbuf_plane = {};
+ qbuf.index = v4l2_buffer_index;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ qbuf.timestamp.tv_sec = input_record.bitstream_buffer->input_id;
+ qbuf.memory = V4L2_MEMORY_DMABUF;
+ qbuf.m.planes = &qbuf_plane;
+ const std::unique_ptr<BitstreamBufferRef>& buffer = input_record.bitstream_buffer;
+ if (!buffer->dmabuf_fd.is_valid()) {
+ // This is a flush case. A driver must handle Flush with V4L2_DEC_CMD_STOP.
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ if (buffer->offset + buffer->size > input_buffer_size_) {
+ VLOGF(1) << "offset + size of input buffer is larger than buffer size"
+ << ", offset=" << buffer->offset
+ << ", size=" << buffer->size
+ << ", buffer size=" << input_buffer_size_;
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+
+  // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is
+  // not defined in the V4L2 specification, so we abuse data_offset for now.
+ // Fix it when we have the right interface, including any necessary
+ // validation and potential alignment.
+ qbuf.m.planes[0].m.fd = buffer->dmabuf_fd.get();
+ qbuf.m.planes[0].data_offset = buffer->offset;
+ qbuf.m.planes[0].bytesused = buffer->offset + buffer->size;
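+  // Note: in the multi-planar API, bytesused is the full payload including
+  // data_offset, which is why offset + size is used above rather than size.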
+  // Workaround: filling in |length| should not be needed; this works around
+  // a bug in the videobuf2 library.
+ qbuf.m.planes[0].length = input_buffer_size_;
+ qbuf.length = 1;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+ DVLOGF(4) << "enqueued input_id=" << buffer->input_id;
+ input_ready_queue_.pop();
+
+ input_record.at_device = true;
+ input_buffer_queued_count_++;
+
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
+ DCHECK(!free_output_buffers_.empty());
+
+ // Enqueue an output (VIDEO_CAPTURE) buffer.
+ const int buffer = free_output_buffers_.front();
+ DVLOGF(4) << "buffer " << buffer;
+ OutputRecord& output_record = output_buffer_map_[buffer];
+ DCHECK_EQ(output_record.state, kFree);
+ DCHECK_NE(output_record.picture_id, -1);
+ struct v4l2_buffer qbuf {};
+ struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES] = {};
+ qbuf.index = buffer;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ qbuf.memory = V4L2_MEMORY_DMABUF;
+ qbuf.m.planes = qbuf_planes;
+ qbuf.length = output_planes_count_;
+ DVLOGF(4) << "qbuf.index=" << qbuf.index;
+ DCHECK_LE(output_planes_count_, output_record.output_fds.size());
+ DCHECK_LE(output_planes_count_, output_record.offsets.size());
+ // Pass fd and offset info.
+ for (size_t i = 0; i < output_planes_count_; i++) {
+ // output_record.output_fds is repeatedly used. We will not close the fd of
+ // output buffer unless new fds are assigned in ImportBufferForPicture().
+ qbuf.m.planes[i].m.fd = output_record.output_fds[i].get();
+ qbuf.m.planes[i].data_offset = output_record.offsets[i];
+ }
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+ free_output_buffers_.pop_front();
+ output_record.state = kAtDevice;
+ output_buffer_queued_count_++;
+ return true;
+}
+
+void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(int32_t picture_buffer_id) {
+ DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ // We run ReusePictureBufferTask even if we're in kResetting.
+ if (decoder_state_ == kError) {
+ DVLOGF(4) << "early out: kError state";
+ return;
+ }
+
+ if (decoder_state_ == kChangingResolution) {
+ DVLOGF(4) << "early out: kChangingResolution";
+ return;
+ }
+
+ size_t index;
+ for (index = 0; index < output_buffer_map_.size(); ++index)
+ if (output_buffer_map_[index].picture_id == picture_buffer_id)
+ break;
+
+ if (index >= output_buffer_map_.size()) {
+ // It's possible that we've already posted a DismissPictureBuffer for this
+ // picture, but it has not yet executed when this ReusePictureBuffer was
+ // posted to us by the client. In that case just ignore this (we've already
+ // dismissed it and accounted for that) and let the sync object get
+ // destroyed.
+ DVLOGF(3) << "got picture id= " << picture_buffer_id
+ << " not in use (anymore?).";
+ return;
+ }
+
+ OutputRecord& output_record = output_buffer_map_[index];
+ if (output_record.state != kAtClient) {
+ VLOGF(1) << "picture_buffer_id not reusable";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ output_record.state = kFree;
+ free_output_buffers_.push_back(index);
+ decoder_frames_at_client_--;
+ // We got a buffer back, so enqueue it back.
+ Enqueue();
+}
+
+void V4L2VideoDecodeAccelerator::FlushTask() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ if (decoder_state_ == kError) {
+ VLOGF(2) << "early out: kError state";
+ return;
+ }
+
+ // We don't support stacked flushing.
+ DCHECK(!decoder_flushing_);
+
+ // Queue up an empty buffer -- this triggers the flush.
+ // BitstreamBufferRef::dmabuf_fd becomes invalid.
+ decoder_input_queue_.push(std::make_unique<BitstreamBufferRef>(
+ decode_client_, decode_task_runner_, BitstreamBuffer(), kFlushBufferId));
+ decoder_flushing_ = true;
+ SendPictureReady(); // Send all pending PictureReady.
+
+ ScheduleDecodeBufferTaskIfNeeded();
+}
+
+void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ if (!decoder_flushing_)
+ return;
+
+ // Pipeline is empty when:
+ // * Decoder input queue is empty of non-delayed buffers.
+ // * There is no currently filling input buffer.
+ // * Input holding queue is empty.
+ // * All input (VIDEO_OUTPUT) buffers are returned.
+ // * All image processor buffers are returned.
+ if (!decoder_input_queue_.empty()) {
+ if (decoder_input_queue_.front()->input_id !=
+ decoder_delay_bitstream_buffer_id_) {
+ DVLOGF(3) << "Some input bitstream buffers are not queued.";
+ return;
+ }
+ }
+
+ if ((input_ready_queue_.size() + input_buffer_queued_count_) != 0) {
+ DVLOGF(3) << "Some input buffers are not dequeued.";
+ return;
+ }
+ if (flush_awaiting_last_output_buffer_) {
+ DVLOGF(3) << "Waiting for last output buffer.";
+ return;
+ }
+
+ // TODO(posciak): https://crbug.com/270039. Exynos requires a
+ // streamoff-streamon sequence after flush to continue, even if we are not
+ // resetting. This would make sense, because we don't really want to resume
+ // from a non-resume point (e.g. not from an IDR) if we are flushed.
+ // MSE player however triggers a Flush() on chunk end, but never Reset(). One
+ // could argue either way, or even say that Flush() is not needed/harmful when
+ // transitioning to next chunk.
+ // For now, do the streamoff-streamon cycle to satisfy Exynos and not freeze
+ // when doing MSE. This should be harmless otherwise.
+ if (!(StopDevicePoll() && StopOutputStream() && StopInputStream()))
+ return;
+
+ if (!StartDevicePoll())
+ return;
+
+ decoder_delay_bitstream_buffer_id_ = -1;
+ decoder_flushing_ = false;
+ VLOGF(2) << "returning flush";
+ child_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&Client::NotifyFlushDone, client_));
+
+ // While we were flushing, we early-outed DecodeBufferTask()s.
+ ScheduleDecodeBufferTaskIfNeeded();
+}
+
+bool V4L2VideoDecodeAccelerator::IsDecoderCmdSupported() {
+  // V4L2_DEC_CMD_STOP should always succeed: if the decoder is started, the
+  // command flushes it; if the decoder is stopped, it does nothing. We issue
+  // VIDIOC_TRY_DECODER_CMD, which checks support without executing the
+  // command, to find out whether the driver supports V4L2_DEC_CMD_STOP for
+  // flushing.
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_STOP;
+ if (device_->Ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
+ VLOGF(2) << "V4L2_DEC_CMD_STOP is not supported.";
+ return false;
+ }
+
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::SendDecoderCmdStop() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK(!flush_awaiting_last_output_buffer_);
+
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_STOP;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
+ flush_awaiting_last_output_buffer_ = true;
+
+ return true;
+}
+
+void V4L2VideoDecodeAccelerator::ResetTask() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ if (decoder_state_ == kError) {
+ VLOGF(2) << "early out: kError state";
+ return;
+ }
+ decoder_current_bitstream_buffer_.reset();
+ while (!decoder_input_queue_.empty())
+ decoder_input_queue_.pop();
+
+  // If we are in the middle of switching resolutions or awaiting picture
+  // buffers, postpone the reset until that is done. We don't have to worry
+  // about the timing of this relative to decoding, because the output pipe
+  // is already stopped if we are changing resolution. We will come back here
+  // after we are done.
+ DCHECK(!reset_pending_);
+ if (decoder_state_ == kChangingResolution ||
+ decoder_state_ == kAwaitingPictureBuffers) {
+ reset_pending_ = true;
+ return;
+ }
+ FinishReset();
+}
+
+void V4L2VideoDecodeAccelerator::FinishReset() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ reset_pending_ = false;
+  // After the output stream is stopped, the codec should not post any
+  // resolution change events, so we dequeue the resolution change event
+  // afterwards. The event could have been posted before or while stopping
+  // the output stream. The codec will expect buffers of the new size after
+  // the seek, so we need to handle the resolution change event first.
+ if (!(StopDevicePoll() && StopOutputStream()))
+ return;
+
+ if (DequeueResolutionChangeEvent()) {
+ reset_pending_ = true;
+ StartResolutionChange();
+ return;
+ }
+
+ if (!StopInputStream())
+ return;
+
+ // If we were flushing, we'll never return any more BitstreamBuffers or
+ // PictureBuffers; they have all been dropped and returned by now.
+ NotifyFlushDoneIfNeeded();
+
+ // Mark that we're resetting, then enqueue a ResetDoneTask(). All intervening
+ // jobs will early-out in the kResetting state.
+ decoder_state_ = kResetting;
+ SendPictureReady(); // Send all pending PictureReady.
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetDoneTask,
+ base::Unretained(this)));
+}
+
+void V4L2VideoDecodeAccelerator::ResetDoneTask() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ if (decoder_state_ == kError) {
+ VLOGF(2) << "early out: kError state";
+ return;
+ }
+
+ // Start poll thread if NotifyFlushDoneIfNeeded has not already.
+ if (!device_poll_thread_.IsRunning()) {
+ if (!StartDevicePoll())
+ return;
+ }
+
+ // Jobs drained, we're finished resetting.
+ DCHECK_EQ(decoder_state_, kResetting);
+ decoder_state_ = kInitialized;
+
+ decoder_delay_bitstream_buffer_id_ = -1;
+ child_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&Client::NotifyResetDone, client_));
+
+ // While we were resetting, we early-outed DecodeBufferTask()s.
+ ScheduleDecodeBufferTaskIfNeeded();
+}
+
+void V4L2VideoDecodeAccelerator::DestroyTask() {
+ VLOGF(2);
+
+ // DestroyTask() should run regardless of decoder_state_.
+
+ StopDevicePoll();
+ StopOutputStream();
+ StopInputStream();
+
+ decoder_current_bitstream_buffer_.reset();
+ decoder_decode_buffer_tasks_scheduled_ = 0;
+ decoder_frames_at_client_ = 0;
+ while (!decoder_input_queue_.empty())
+ decoder_input_queue_.pop();
+ decoder_flushing_ = false;
+
+ // Set our state to kError. Just in case.
+ decoder_state_ = kError;
+
+ DestroyInputBuffers();
+ DestroyOutputBuffers();
+}
+
+bool V4L2VideoDecodeAccelerator::StartDevicePoll() {
+ DVLOGF(3);
+ DCHECK(!device_poll_thread_.IsRunning());
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ // Start up the device poll thread and schedule its first DevicePollTask().
+ if (!device_poll_thread_.Start()) {
+ VLOGF(1) << "Device thread failed to start";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ device_poll_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+ base::Unretained(this), 0));
+
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
+ DVLOGF(3);
+
+ if (!device_poll_thread_.IsRunning())
+ return true;
+
+ if (decoder_thread_.IsRunning())
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ // Signal the DevicePollTask() to stop, and stop the device poll thread.
+ if (!device_->SetDevicePollInterrupt()) {
+ VPLOGF(1) << "SetDevicePollInterrupt(): failed";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ device_poll_thread_.Stop();
+ // Clear the interrupt now, to be sure.
+ if (!device_->ClearDevicePollInterrupt()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ DVLOGF(3) << "device poll stopped";
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::StopOutputStream() {
+ VLOGF(2);
+ if (!output_streamon_)
+ return true;
+
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+ output_streamon_ = false;
+
+ // Output stream is stopped. No need to wait for the buffer anymore.
+ flush_awaiting_last_output_buffer_ = false;
+
+ for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+ // After streamoff, the device drops ownership of all buffers, even if we
+ // don't dequeue them explicitly. Some of them may still be owned by the
+ // client however. Reuse only those that aren't.
+ OutputRecord& output_record = output_buffer_map_[i];
+ if (output_record.state == kAtDevice) {
+ output_record.state = kFree;
+ free_output_buffers_.push_back(i);
+ }
+ }
+ output_buffer_queued_count_ = 0;
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::StopInputStream() {
+ VLOGF(2);
+ if (!input_streamon_)
+ return true;
+
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+ input_streamon_ = false;
+
+ // Reset accounting info for input.
+ while (!input_ready_queue_.empty())
+ input_ready_queue_.pop();
+ free_input_buffers_.clear();
+ for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+ free_input_buffers_.push_back(i);
+ input_buffer_map_[i].at_device = false;
+ input_buffer_map_[i].bitstream_buffer.reset();
+ }
+ input_buffer_queued_count_ = 0;
+
+ return true;
+}
+
+void V4L2VideoDecodeAccelerator::StartResolutionChange() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_NE(decoder_state_, kUninitialized);
+ DCHECK_NE(decoder_state_, kResetting);
+
+ VLOGF(2) << "Initiate resolution change";
+
+ if (!(StopDevicePoll() && StopOutputStream()))
+ return;
+
+ decoder_state_ = kChangingResolution;
+ SendPictureReady(); // Send all pending PictureReady.
+
+ if (!DestroyOutputBuffers()) {
+ VLOGF(1) << "Failed destroying output buffers.";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ FinishResolutionChange();
+}
+
+void V4L2VideoDecodeAccelerator::FinishResolutionChange() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kChangingResolution);
+ VLOGF(2);
+
+ if (decoder_state_ == kError) {
+ VLOGF(2) << "early out: kError state";
+ return;
+ }
+
+ struct v4l2_format format;
+ bool again;
+ Size visible_size;
+ bool ret = GetFormatInfo(&format, &visible_size, &again);
+ if (!ret || again) {
+ VLOGF(1) << "Couldn't get format information after resolution change";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ if (!CreateBuffersForFormat(format, visible_size)) {
+ VLOGF(1) << "Couldn't reallocate buffers after resolution change";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ if (!StartDevicePoll())
+ return;
+}
+
+void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) {
+ DVLOGF(4);
+ DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread());
+
+ bool event_pending = false;
+
+ if (!device_->Poll(poll_device, &event_pending)) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ // All processing should happen on ServiceDeviceTask(), since we shouldn't
+ // touch decoder state from this thread.
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ServiceDeviceTask,
+ base::Unretained(this), event_pending));
+}
+
+void V4L2VideoDecodeAccelerator::NotifyError(Error error) {
+ VLOGF(1);
+
+ if (!child_task_runner_->BelongsToCurrentThread()) {
+ child_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::NotifyError,
+ weak_this_, error));
+ return;
+ }
+
+ if (client_) {
+ client_->NotifyError(error);
+ client_ptr_factory_.reset();
+ }
+}
+
+void V4L2VideoDecodeAccelerator::SetErrorState(Error error) {
+ // We can touch decoder_state_ only if this is the decoder thread or the
+ // decoder thread isn't running.
+ if (decoder_thread_.task_runner() &&
+ !decoder_thread_.task_runner()->BelongsToCurrentThread()) {
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::SetErrorState,
+ base::Unretained(this), error));
+ return;
+ }
+
+ // Post NotifyError only if we are already initialized, as the API does
+ // not allow doing so before that.
+ if (decoder_state_ != kError && decoder_state_ != kUninitialized)
+ NotifyError(error);
+
+ decoder_state_ = kError;
+}
+
+bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format,
+ Size* visible_size,
+ bool* again) {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ *again = false;
+ memset(format, 0, sizeof(*format));
+ format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ if (device_->Ioctl(VIDIOC_G_FMT, format) != 0) {
+ if (errno == EINVAL) {
+ // EINVAL means we haven't seen sufficient stream to decode the format.
+ *again = true;
+ return true;
+ } else {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_G_FMT";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ }
+
+ // Make sure we are still getting the format we set on initialization.
+ if (format->fmt.pix_mp.pixelformat != output_format_fourcc_) {
+ VLOGF(1) << "Unexpected format from G_FMT on output";
+ return false;
+ }
+
+ Size coded_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
+ if (visible_size != nullptr)
+ *visible_size = GetVisibleSize(coded_size);
+
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat(
+ const struct v4l2_format& format,
+ const Size& visible_size) {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ output_planes_count_ = format.fmt.pix_mp.num_planes;
+ coded_size_.SetSize(format.fmt.pix_mp.width, format.fmt.pix_mp.height);
+ visible_size_ = visible_size;
+
+ VLOGF(2) << "new resolution: " << coded_size_.ToString()
+ << ", visible size: " << visible_size_.ToString()
+ << ", decoder output planes count: " << output_planes_count_;
+
+ return CreateOutputBuffers();
+}
+
+Size V4L2VideoDecodeAccelerator::GetVisibleSize(
+ const Size& coded_size) {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ struct v4l2_rect* visible_rect;
+ struct v4l2_selection selection_arg;
+ memset(&selection_arg, 0, sizeof(selection_arg));
+ selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ selection_arg.target = V4L2_SEL_TGT_COMPOSE;
+
+ if (device_->Ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
+ VLOGF(2) << "VIDIOC_G_SELECTION is supported";
+ visible_rect = &selection_arg.r;
+ } else {
+ VLOGF(2) << "Fallback to VIDIOC_G_CROP";
+ struct v4l2_crop crop_arg;
+ memset(&crop_arg, 0, sizeof(crop_arg));
+ crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ if (device_->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
+ VPLOGF(1) << "ioctl() VIDIOC_G_CROP failed";
+ return coded_size;
+ }
+ visible_rect = &crop_arg.c;
+ }
+
+ Rect rect(visible_rect->left, visible_rect->top, visible_rect->width,
+ visible_rect->height);
+ VLOGF(2) << "visible rectangle is " << rect.ToString();
+ if (!Rect(coded_size).Contains(rect)) {
+ DVLOGF(3) << "visible rectangle " << rect.ToString()
+ << " is not inside coded size " << coded_size.ToString();
+ return coded_size;
+ }
+ if (rect.IsEmpty()) {
+ VLOGF(1) << "visible size is empty";
+ return coded_size;
+ }
+
+  // Chrome assumes the picture frame is coded at (0, 0).
+ if (rect.x() != 0 || rect.y() != 0) {
+ VLOGF(1) << "Unexpected visible rectangle " << rect.ToString()
+ << ", top-left is not origin";
+ return coded_size;
+ }
+
+ return rect.size();
+}
+
+bool V4L2VideoDecodeAccelerator::CreateInputBuffers() {
+ VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ // We always run this as we prepare to initialize.
+ DCHECK_EQ(decoder_state_, kInitialized);
+ DCHECK(!input_streamon_);
+ DCHECK(input_buffer_map_.empty());
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = kInputBufferCount;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
+ if (reqbufs.count < kInputBufferCount) {
+ VLOGF(1) << "Could not allocate enough output buffers";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+ input_buffer_map_.resize(reqbufs.count);
+ free_input_buffers_.resize(reqbufs.count);
+ std::iota(free_input_buffers_.begin(), free_input_buffers_.end(), 0);
+ return true;
+}
+
+static bool IsSupportedOutputFormat(uint32_t v4l2_format) {
+ // Only support V4L2_PIX_FMT_NV12 output format for now.
+  // TODO(johnylin): add more supported formats if necessary.
+ uint32_t kSupportedOutputFmtFourcc[] = { V4L2_PIX_FMT_NV12 };
+ return std::find(
+ kSupportedOutputFmtFourcc,
+ kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc),
+ v4l2_format) !=
+ kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc);
+}
+
+bool V4L2VideoDecodeAccelerator::SetupFormats() {
+ // We always run this as we prepare to initialize.
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(decoder_state_, kUninitialized);
+ DCHECK(!input_streamon_);
+ DCHECK(!output_streamon_);
+
+ size_t input_size;
+ Size max_resolution, min_resolution;
+ device_->GetSupportedResolution(input_format_fourcc_, &min_resolution,
+ &max_resolution);
+ if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
+ input_size = kInputBufferMaxSizeFor4k;
+ else
+ input_size = kInputBufferMaxSizeFor1080p;
+
+ struct v4l2_fmtdesc fmtdesc;
+ memset(&fmtdesc, 0, sizeof(fmtdesc));
+ fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ bool is_format_supported = false;
+ while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+ if (fmtdesc.pixelformat == input_format_fourcc_) {
+ is_format_supported = true;
+ break;
+ }
+ ++fmtdesc.index;
+ }
+
+ if (!is_format_supported) {
+ VLOGF(1) << "Input fourcc " << input_format_fourcc_
+ << " not supported by device.";
+ return false;
+ }
+
+ struct v4l2_format format;
+ memset(&format, 0, sizeof(format));
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = input_format_fourcc_;
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
+ format.fmt.pix_mp.num_planes = 1;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+  // The V4L2 driver may adjust the size of the input buffer it will access.
+  // Store the adjusted size in order to specify it in QBUF later.
+  input_buffer_size_ = format.fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ // We have to set up the format for output, because the driver may not allow
+ // changing it once we start streaming; whether it can support our chosen
+ // output format or not may depend on the input format.
+ memset(&fmtdesc, 0, sizeof(fmtdesc));
+ fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+ if (IsSupportedOutputFormat(fmtdesc.pixelformat)) {
+ output_format_fourcc_ = fmtdesc.pixelformat;
+ break;
+ }
+ ++fmtdesc.index;
+ }
+
+ if (output_format_fourcc_ == 0) {
+ VLOGF(2) << "Image processor not available";
+ return false;
+ }
+ VLOGF(2) << "Output format=" << output_format_fourcc_;
+
+ // Just set the fourcc for output; resolution, etc., will come from the
+ // driver once it extracts it from the stream.
+ memset(&format, 0, sizeof(format));
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = output_format_fourcc_;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+ return true;
+}
+
+bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
+ VLOGF(2);
+ DCHECK(decoder_state_ == kInitialized ||
+ decoder_state_ == kChangingResolution);
+ DCHECK(!output_streamon_);
+ DCHECK(output_buffer_map_.empty());
+ DCHECK_EQ(output_mode_, Config::OutputMode::IMPORT);
+
+ // Number of output buffers we need.
+ struct v4l2_control ctrl;
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_CTRL, &ctrl);
+ output_dpb_size_ = ctrl.value;
+
+ // Output format setup in Initialize().
+
+ uint32_t buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
+
+ VideoPixelFormat pixel_format =
+ V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
+
+ child_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_,
+ buffer_count, pixel_format, coded_size_));
+
+  // Go into kAwaitingPictureBuffers to prevent us from doing any more
+  // decoding or event handling while we wait for AssignPictureBuffers(). Even
+  // without Pictures available we could otherwise still make some decoding
+  // progress, e.g. in the case of H.264 we could decode further non-slice
+  // NALUs and could even get another resolution change before we were done
+  // with this one. After we get the buffers, we will resume event processing
+  // and eventually go back into kDecoding once no more events are pending
+  // (if any).
+ decoder_state_ = kAwaitingPictureBuffers;
+
+ return true;
+}
+
+void V4L2VideoDecodeAccelerator::DestroyInputBuffers() {
+ VLOGF(2);
+ DCHECK(!decoder_thread_.IsRunning() ||
+ decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK(!input_streamon_);
+
+ if (input_buffer_map_.empty())
+ return;
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = 0;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
+
+ input_buffer_map_.clear();
+ free_input_buffers_.clear();
+}
+
+bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
+ VLOGF(2);
+ DCHECK(!decoder_thread_.IsRunning() ||
+ decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK(!output_streamon_);
+ bool success = true;
+
+ if (output_buffer_map_.empty())
+ return true;
+
+ for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+ OutputRecord& output_record = output_buffer_map_[i];
+
+ DVLOGF(3) << "dismissing PictureBuffer id=" << output_record.picture_id;
+ child_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::DismissPictureBuffer, client_,
+ output_record.picture_id));
+ }
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = 0;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_REQBUFS";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ success = false;
+ }
+
+ output_buffer_map_.clear();
+ while (!free_output_buffers_.empty())
+ free_output_buffers_.pop_front();
+ output_buffer_queued_count_ = 0;
+ // The client may still hold some buffers. The texture holds a reference to
+ // the buffer. It is OK to free the buffer and destroy EGLImage here.
+ decoder_frames_at_client_ = 0;
+
+ return success;
+}
+
+void V4L2VideoDecodeAccelerator::SendPictureReady() {
+ DVLOGF(4);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ bool send_now = (decoder_state_ == kChangingResolution ||
+ decoder_state_ == kResetting || decoder_flushing_);
+  while (!pending_picture_ready_.empty()) {
+ bool cleared = pending_picture_ready_.front().cleared;
+ const Picture& picture = pending_picture_ready_.front().picture;
+ if (cleared && picture_clearing_count_ == 0) {
+ // This picture is cleared. It can be posted to a thread different than
+ // the main GPU thread to reduce latency. This should be the case after
+ // all pictures are cleared at the beginning.
+ decode_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::PictureReady, decode_client_, picture));
+ pending_picture_ready_.pop();
+ } else if (!cleared || send_now) {
+ DVLOGF(4) << "cleared=" << pending_picture_ready_.front().cleared
+ << ", decoder_state_=" << decoder_state_
+ << ", decoder_flushing_=" << decoder_flushing_
+ << ", picture_clearing_count_=" << picture_clearing_count_;
+ // If the picture is not cleared, post it to the child thread because it
+ // has to be cleared in the child thread. A picture only needs to be
+ // cleared once. If the decoder is changing resolution, resetting or
+ // flushing, send all pictures to ensure PictureReady arrive before
+ // ProvidePictureBuffers, NotifyResetDone, or NotifyFlushDone.
+ child_task_runner_->PostTaskAndReply(
+ FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
+          // Unretained is safe: if Client::PictureReady gets to run, |this|
+          // is still alive, because Destroy() waits for the decoder thread to
+          // finish.
+ base::Bind(&V4L2VideoDecodeAccelerator::PictureCleared,
+ base::Unretained(this)));
+ picture_clearing_count_++;
+ pending_picture_ready_.pop();
+ } else {
+ // This picture is cleared. But some pictures are about to be cleared on
+ // the child thread. To preserve the order, do not send this until those
+ // pictures are cleared.
+ break;
+ }
+ }
+}
+
+void V4L2VideoDecodeAccelerator::PictureCleared() {
+ DVLOGF(4) << "clearing count=" << picture_clearing_count_;
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK_GT(picture_clearing_count_, 0);
+ picture_clearing_count_--;
+ SendPictureReady();
+}
+
+} // namespace media
diff --git a/accel/v4l2_video_decode_accelerator.h b/accel/v4l2_video_decode_accelerator.h
new file mode 100644
index 0000000..99076ed
--- /dev/null
+++ b/accel/v4l2_video_decode_accelerator.h
@@ -0,0 +1,497 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of VideoDecodeAccelerator
+// that utilizes hardware video decoders, which expose Video4Linux 2 API
+// (http://linuxtv.org/downloads/v4l-dvb-apis/).
+// Note: ported from Chromium commit head: 85fdf90
+// Note: image processor is not ported.
+
+#ifndef MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "picture.h"
+#include "size.h"
+#include "v4l2_device.h"
+#include "video_decode_accelerator.h"
+
+namespace media {
+
+// This class handles video decode acceleration directly through a V4L2
+// device exported by the hardware blocks.
+//
+// The threading model of this class is driven by the fact that it needs to
+// interface two fundamentally different event queues -- the one Chromium
+// provides through MessageLoop, and the one driven by the V4L2 devices which
+// is waited on with epoll(). There are three threads involved in this class:
+//
+// * The child thread, which is the main GPU process thread which calls the
+// VideoDecodeAccelerator entry points. Calls from this thread
+// generally do not block (with the exception of Initialize() and Destroy()).
+// They post tasks to the decoder_thread_, which actually services the task
+// and calls back when complete through the
+// VideoDecodeAccelerator::Client interface.
+// * The decoder_thread_, owned by this class. It services API tasks, through
+// the *Task() routines, as well as V4L2 device events, through
+// ServiceDeviceTask(). Almost all state modification is done on this thread
+// (this doesn't include buffer (re)allocation sequence, see below).
+// * The device_poll_thread_, owned by this class. All it does is epoll() on
+// the V4L2 in DevicePollTask() and schedule a ServiceDeviceTask() on the
+// decoder_thread_ when something interesting happens.
+// TODO(sheu): replace this thread with a TYPE_IO decoder_thread_.
+//
+// Note that this class has (almost) no locks, apart from the pictures_assigned_
+// WaitableEvent. Everything (apart from buffer (re)allocation) is serviced on
+// the decoder_thread_, so there are no synchronization issues.
+// ... well, there are, but it's a matter of getting messages posted in the
+// right order, not fiddling with locks.
+// Buffer creation is a two-step process that is serviced partially on the
+// Child thread, because we need to wait for the client to provide textures
+// for the buffers we allocate. We cannot keep the decoder thread running while
+// the client allocates Pictures for us, because we need to REQBUFS first to get
+// the required number of output buffers from the device and that cannot be done
+// unless we free the previous set of buffers, leaving the decoding in an
+// inoperable state for the duration of the wait for Pictures. So to prevent
+// subtle races (esp. if we get Reset() in the meantime), we block the decoder
+// thread while we wait for AssignPictureBuffers from the client.
+//
+// V4L2VideoDecodeAccelerator may use an image processor to convert the
+// output. There are three cases:
+// Flush: V4L2VDA should wait until the image processor returns all processed
+// frames.
+// Reset: V4L2VDA doesn't need to wait for the image processor. When the
+// image processor returns an old frame, drop it.
+// Resolution change: V4L2VDA destroys the image processor when destroying
+// output buffers. We cannot drop any frame during a resolution change, so
+// V4L2VDA should destroy output buffers only after the image processor has
+// returned all frames.
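+//
+// A rough usage sketch (an editor's illustration, not part of the original
+// change; V4L2Device::Create() is assumed to exist as the device factory):
+//
+//   scoped_refptr<V4L2Device> device = V4L2Device::Create();
+//   auto vda = std::make_unique<V4L2VideoDecodeAccelerator>(device);
+//   if (vda->Initialize(config, client)) {  // Synchronous.
+//     vda->Decode(bitstream_buffer);        // Posts DecodeTask().
+//     // ... the client receives ProvidePictureBuffers() and answers with
+//     // ImportBufferForPicture() for each output buffer ...
+//     vda->Flush();                         // NotifyFlushDone() comes later.
+//   }
+//   vda->Destroy();                         // Synchronous teardown.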
+class V4L2VideoDecodeAccelerator
+ : public VideoDecodeAccelerator {
+ public:
+ V4L2VideoDecodeAccelerator(
+ const scoped_refptr<V4L2Device>& device);
+ ~V4L2VideoDecodeAccelerator() override;
+
+ // VideoDecodeAccelerator implementation.
+ // Note: Initialize() and Destroy() are synchronous.
+ bool Initialize(const Config& config, Client* client) override;
+ void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
+ void ImportBufferForPicture(
+ int32_t picture_buffer_id,
+ VideoPixelFormat pixel_format,
+ const NativePixmapHandle& native_pixmap_handle) override;
+ void ReusePictureBuffer(int32_t picture_buffer_id) override;
+ void Flush() override;
+ void Reset() override;
+ void Destroy() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
+
+ static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
+
+ private:
+ // These are rather subjectively tuned.
+ enum {
+ kInputBufferCount = 8,
+ // TODO(posciak): determine input buffer size based on level limits.
+ // See http://crbug.com/255116.
+ // Input bitstream buffer size for up to 1080p streams.
+ kInputBufferMaxSizeFor1080p = 1024 * 1024,
+ // Input bitstream buffer size for up to 4k streams.
+ kInputBufferMaxSizeFor4k = 4 * kInputBufferMaxSizeFor1080p,
+ // This is originally from media/base/limits.h in Chromium.
+ kMaxVideoFrames = 4,
+ // Number of output buffers to use for each VDA stage above what's required
+ // by the decoder (e.g. DPB size, in H264). We need
+ // limits::kMaxVideoFrames to fill up the GpuVideoDecode pipeline,
+ // and +1 for a frame in transit.
+ kDpbOutputBufferExtraCount = kMaxVideoFrames + 1,
+ // Number of extra output buffers if image processor is used.
+ kDpbOutputBufferExtraCountForImageProcessor = 1,
+ };
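+
+  // A worked example (an editor's illustration; the DPB size is an assumed
+  // value): for an H.264 stream with a DPB size of 4, CreateOutputBuffers()
+  // requests 4 + kDpbOutputBufferExtraCount = 4 + (kMaxVideoFrames + 1) = 9
+  // picture buffers from the client.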
+
+ // Internal state of the decoder.
+ enum State {
+ kUninitialized, // Initialize() not yet called.
+ kInitialized, // Initialize() returned true; ready to start decoding.
+ kDecoding, // DecodeBufferInitial() successful; decoding frames.
+ kResetting, // Presently resetting.
+ // Performing resolution change and waiting for image processor to return
+ // all frames.
+ kChangingResolution,
+ // Requested new PictureBuffers via ProvidePictureBuffers(), awaiting
+ // AssignPictureBuffers().
+ kAwaitingPictureBuffers,
+ kError, // Error in kDecoding state.
+ };
+
+ enum OutputRecordState {
+ kFree, // Ready to be queued to the device.
+ kAtDevice, // Held by device.
+ kAtProcessor, // Held by image processor.
+ kAtClient, // Held by client of V4L2VideoDecodeAccelerator.
+ };
+
+ enum BufferId {
+ kFlushBufferId = -2 // Buffer id for flush buffer, queued by FlushTask().
+ };
+
+ // Auto-destruction reference for BitstreamBuffer, for message-passing from
+ // Decode() to DecodeTask().
+ struct BitstreamBufferRef;
+
+ // Record for decoded pictures that can be sent to PictureReady.
+ struct PictureRecord {
+ PictureRecord(bool cleared, const Picture& picture);
+ ~PictureRecord();
+ bool cleared; // Whether the texture is cleared and safe to render from.
+ Picture picture; // The decoded picture.
+ };
+
+ // Record for input buffers.
+ struct InputRecord {
+ bool at_device = false; // held by device.
+ std::unique_ptr<BitstreamBufferRef> bitstream_buffer;
+ };
+
+ // Record for output buffers.
+ struct OutputRecord {
+ OutputRecord();
+ OutputRecord(OutputRecord&&) = default;
+ ~OutputRecord();
+ OutputRecordState state;
+ int32_t picture_id; // picture buffer id as returned to PictureReady().
+ bool cleared; // Whether the texture is cleared and safe to render
+ // from. See TextureManager for details.
+ // Output fds of the decoded frame.
+ std::vector<base::ScopedFD> output_fds;
+ // offsets of each decoded frame from each fd in |output_fds|.
+ std::vector<size_t> offsets;
+ };
+
+ //
+  // Decoding tasks, to be run on decoder_thread_.
+ //
+
+ // Task to finish initialization on decoder_thread_.
+ void InitializeTask();
+
+ // Enqueue a BitstreamBuffer to decode. This will enqueue a buffer to the
+ // decoder_input_queue_, then queue a DecodeBufferTask() to actually decode
+ // the buffer.
+ void DecodeTask(const BitstreamBuffer& bitstream_buffer);
+
+ // Decode from the buffers queued in decoder_input_queue_. Calls
+ // DecodeBufferInitial() or DecodeBufferContinue() as appropriate.
+ void DecodeBufferTask();
+ // Schedule another DecodeBufferTask() if we're behind.
+ void ScheduleDecodeBufferTaskIfNeeded();
+
+ // Return true if we should continue to schedule DecodeBufferTask()s after
+ // completion.
+ bool DecodeBufferInitial();
+ bool DecodeBufferContinue();
+
+  // Try to submit one frame of input data to the device. Return false if no
+  // free input buffer is available.
+  bool TrySubmitInputFrame();
+
+ // Allocate V4L2 buffers and assign them to |buffers| provided by the client
+ // via AssignPictureBuffers() on decoder thread.
+ void AssignPictureBuffersTask(const std::vector<PictureBuffer>& buffers);
+
+ // Use buffer backed by dmabuf file descriptors in |dmabuf_fds| for the
+ // OutputRecord associated with |picture_buffer_id|, taking ownership of the
+ // file descriptors.
+ void ImportBufferForPictureTask(int32_t picture_buffer_id,
+ std::vector<size_t> offsets,
+ std::vector<base::ScopedFD> dmabuf_fds);
+
+  // Service I/O on the V4L2 devices. This task should only be scheduled from
+  // DevicePollTask(). If |event_pending| is true, one or more events on the
+  // file descriptor are pending.
+ void ServiceDeviceTask(bool event_pending);
+ // Handle the various device queues.
+ void Enqueue();
+ void Dequeue();
+  // Dequeue one input buffer. Return true on success.
+  bool DequeueInputBuffer();
+  // Dequeue one output buffer. Return true on success.
+  bool DequeueOutputBuffer();
+
+ // Return true if there is a resolution change event pending.
+ bool DequeueResolutionChangeEvent();
+
+ // Enqueue a buffer on the corresponding queue.
+ bool EnqueueInputRecord();
+ bool EnqueueOutputRecord();
+
+  // Process a ReusePictureBuffer() API call. The API call creates an EGLSync
+ // object on the main (GPU process) thread; we will record this object so we
+ // can wait on it before reusing the buffer.
+ void ReusePictureBufferTask(int32_t picture_buffer_id);
+
+ // Flush() task. Child thread should not submit any more buffers until it
+ // receives the NotifyFlushDone callback. This task will schedule an empty
+ // BitstreamBufferRef (with input_id == kFlushBufferId) to perform the flush.
+ void FlushTask();
+ // Notify the client of a flush completion, if required. This should be
+ // called any time a relevant queue could potentially be emptied: see
+ // function definition.
+ void NotifyFlushDoneIfNeeded();
+ // Returns true if VIDIOC_DECODER_CMD is supported.
+ bool IsDecoderCmdSupported();
+  // Send V4L2_DEC_CMD_STOP to the driver. Return true on success.
+ bool SendDecoderCmdStop();
+
+  // Reset() task. Drop all input buffers. If V4L2VDA is not in the middle of
+  // a resolution change or waiting for picture buffers, call FinishReset().
+ void ResetTask();
+  // Set the decoder state to kResetting so that all intervening tasks will
+  // drain, then schedule a ResetDoneTask() that sends the NotifyResetDone
+  // callback.
+ void FinishReset();
+ void ResetDoneTask();
+
+ // Device destruction task.
+ void DestroyTask();
+
+ // Start |device_poll_thread_|.
+ bool StartDevicePoll();
+
+ // Stop |device_poll_thread_|.
+ bool StopDevicePoll();
+
+ bool StopInputStream();
+ bool StopOutputStream();
+
+ void StartResolutionChange();
+ void FinishResolutionChange();
+
+ // Try to get output format and visible size, detected after parsing the
+ // beginning of the stream. Sets |again| to true if more parsing is needed.
+  // |visible_size| may be nullptr, in which case it is ignored.
+ bool GetFormatInfo(struct v4l2_format* format,
+ Size* visible_size,
+ bool* again);
+ // Create output buffers for the given |format| and |visible_size|.
+ bool CreateBuffersForFormat(const struct v4l2_format& format,
+ const Size& visible_size);
+
+  // Try to get |visible_size|. Return the visible size, or, if querying it
+  // is not supported or produces an invalid size, return |coded_size|
+  // instead.
+ Size GetVisibleSize(const Size& coded_size);
+
+ //
+ // Device tasks, to be run on device_poll_thread_.
+ //
+
+ // The device task.
+ void DevicePollTask(bool poll_device);
+
+ //
+ // Safe from any thread.
+ //
+
+ // Error notification (using PostTask() to child thread, if necessary).
+ void NotifyError(Error error);
+
+ // Set the decoder_state_ to kError and notify the client (if necessary).
+ void SetErrorState(Error error);
+
+ //
+ // Other utility functions. Called on decoder_thread_, unless
+ // decoder_thread_ is not yet started, in which case the child thread can call
+ // these (e.g. in Initialize() or Destroy()).
+ //
+
+ // Create the buffers we need.
+ bool CreateInputBuffers();
+ bool CreateOutputBuffers();
+
+ // Destroy buffers.
+ void DestroyInputBuffers();
+  // In contrast to DestroyInputBuffers(), which is called only on
+  // destruction, DestroyOutputBuffers() is also called during playback, on
+  // resolution change. Even if something fails along the way, we still want
+  // to continue and clean up as much as possible; in that case we return
+  // false so the caller can error out on resolution change.
+ bool DestroyOutputBuffers();
+
+ // Set input and output formats before starting decode.
+ bool SetupFormats();
+
+ //
+ // Methods run on child thread.
+ //
+
+ // Send decoded pictures to PictureReady.
+ void SendPictureReady();
+
+ // Callback that indicates a picture has been cleared.
+ void PictureCleared();
+
+ // Our original calling task runner for the child thread.
+ scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
+
+ // Task runner Decode() and PictureReady() run on.
+ scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
+
+ // WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
+ // device worker threads back to the child thread. Because the worker threads
+  // are members of this class, any task running on those threads can safely
+  // assume this object is still alive. As a result, tasks posted from the child
+ // thread to the decoder or device thread should use base::Unretained(this),
+ // and tasks posted the other way should use |weak_this_|.
+ base::WeakPtr<V4L2VideoDecodeAccelerator> weak_this_;
+
+ // To expose client callbacks from VideoDecodeAccelerator.
+ // NOTE: all calls to these objects *MUST* be executed on
+ // child_task_runner_.
+ std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
+ base::WeakPtr<Client> client_;
+ // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+ base::WeakPtr<Client> decode_client_;
+
+ //
+ // Decoder state, owned and operated by decoder_thread_.
+ // Before decoder_thread_ has started, the decoder state is managed by
+ // the child (main) thread. After decoder_thread_ has started, the decoder
+ // thread should be the only one managing these.
+ //
+
+ // This thread services tasks posted from the VDA API entry points by the
+ // child thread and device service callbacks posted from the device thread.
+ base::Thread decoder_thread_;
+ // Decoder state machine state.
+ State decoder_state_;
+
+ Config::OutputMode output_mode_;
+
+ // BitstreamBuffer we're presently reading.
+ std::unique_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_;
+ // The V4L2Device this class is operating upon.
+ scoped_refptr<V4L2Device> device_;
+ // FlushTask() and ResetTask() should not affect buffers that have been
+ // queued afterwards. For flushing or resetting the pipeline then, we will
+ // delay these buffers until after the flush or reset completes.
+ int decoder_delay_bitstream_buffer_id_;
+ // We track the number of buffer decode tasks we have scheduled, since each
+ // task execution should complete one buffer. If we fall behind (due to
+ // resource backpressure, etc.), we'll have to schedule more to catch up.
+ int decoder_decode_buffer_tasks_scheduled_;
+ // Picture buffers held by the client.
+ int decoder_frames_at_client_;
+
+ // Are we flushing?
+ bool decoder_flushing_;
+ // True if VIDIOC_DECODER_CMD is supported.
+ bool decoder_cmd_supported_;
+ // True if flushing is waiting for last output buffer. After
+ // VIDIOC_DECODER_CMD is sent to the driver, this flag will be set to true to
+ // wait for the last output buffer. When this flag is true, flush done will
+ // not be sent. After an output buffer that has the flag V4L2_BUF_FLAG_LAST is
+ // received, this is set to false.
+ bool flush_awaiting_last_output_buffer_;
+
+  // Got a reset request while we were performing a resolution change or
+  // waiting for picture buffers.
+ bool reset_pending_;
+ // Input queue for decoder_thread_: BitstreamBuffers in.
+ std::queue<std::unique_ptr<BitstreamBufferRef>> decoder_input_queue_;
+
+ //
+ // Hardware state and associated queues. Since decoder_thread_ services
+ // the hardware, decoder_thread_ owns these too.
+ // output_buffer_map_, free_output_buffers_ and output_planes_count_ are an
+ // exception during the buffer (re)allocation sequence, when the
+ // decoder_thread_ is blocked briefly while the Child thread manipulates
+ // them.
+ //
+
+  // Input buffers that have been filled with bitstream data and are ready to
+  // be enqueued to the device.
+ std::queue<int> input_ready_queue_;
+
+ // Input buffer state.
+ bool input_streamon_;
+ // Input buffers enqueued to device.
+ int input_buffer_queued_count_;
+ // Input buffers ready to use, as a LIFO since we don't care about ordering.
+ std::vector<int> free_input_buffers_;
+ // Mapping of int index to input buffer record.
+ std::vector<InputRecord> input_buffer_map_;
+  // The size of an input buffer, i.e. the largest bitstream buffer chunk that
+  // can be copied into it.
+ size_t input_buffer_size_;
+
+ // Output buffer state.
+ bool output_streamon_;
+ // Output buffers enqueued to device.
+ int output_buffer_queued_count_;
+ // Output buffers ready to use, as a FIFO since we want oldest-first to hide
+ // synchronization latency with GL.
+ std::list<int> free_output_buffers_;
+ // Mapping of int index to output buffer record.
+ std::vector<OutputRecord> output_buffer_map_;
+ // Required size of DPB for decoding.
+ int output_dpb_size_;
+
+ // Number of planes (i.e. separate memory buffers) for output.
+ size_t output_planes_count_;
+
+ // Pictures that are ready but not sent to PictureReady yet.
+ std::queue<PictureRecord> pending_picture_ready_;
+
+  // The number of pictures sent to PictureReady that are waiting to be cleared.
+ int picture_clearing_count_;
+
+ // Output picture coded size.
+ Size coded_size_;
+
+ // Output picture visible size.
+ Size visible_size_;
+
+ //
+ // The device polling thread handles notifications of V4L2 device changes.
+ //
+
+ // The thread.
+ base::Thread device_poll_thread_;
+
+ //
+ // Other state, held by the child (main) thread.
+ //
+
+ // The codec we'll be decoding for.
+ VideoCodecProfile video_profile_;
+ // Chosen input format for video_profile_.
+ uint32_t input_format_fourcc_;
+ // Chosen output format.
+ uint32_t output_format_fourcc_;
+
+ // Input format V4L2 fourccs this class supports.
+ static const uint32_t supported_input_fourccs_[];
+
+ // The WeakPtrFactory for |weak_this_|.
+ base::WeakPtrFactory<V4L2VideoDecodeAccelerator> weak_this_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2VideoDecodeAccelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
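The threading comments above imply a simple ownership rule worth spelling out: the decoder and device threads are members of this class, so tasks posted to them can safely bind base::Unretained(this), while tasks posted back to the child thread must bind |weak_this_| so they become no-ops after destruction. A minimal sketch of the convention, with hypothetical task names, assuming the libchrome PostTask/BindOnce API:

    // Child thread -> decoder thread: |this| outlives decoder_thread_, so
    // base::Unretained is safe.
    decoder_thread_.task_runner()->PostTask(
        FROM_HERE,
        base::BindOnce(&V4L2VideoDecodeAccelerator::DecodeTask,
                       base::Unretained(this), std::move(buffer_ref)));

    // Decoder thread -> child thread: the child thread can outlive |this|,
    // so the task is bound to |weak_this_| and is dropped after destruction.
    child_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&V4L2VideoDecodeAccelerator::NotifyError, weak_this_,
                       PLATFORM_FAILURE));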
diff --git a/accel/video_codecs.cc b/accel/video_codecs.cc
new file mode 100644
index 0000000..8e4d4a7
--- /dev/null
+++ b/accel/video_codecs.cc
@@ -0,0 +1,80 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 23236dc929bc
+// Note: only necessary functions are ported.
+
+#include "video_codecs.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+std::string GetProfileName(VideoCodecProfile profile) {
+ switch (profile) {
+ case VIDEO_CODEC_PROFILE_UNKNOWN:
+ return "unknown";
+ case H264PROFILE_BASELINE:
+ return "h264 baseline";
+ case H264PROFILE_MAIN:
+ return "h264 main";
+ case H264PROFILE_EXTENDED:
+ return "h264 extended";
+ case H264PROFILE_HIGH:
+ return "h264 high";
+ case H264PROFILE_HIGH10PROFILE:
+ return "h264 high 10";
+ case H264PROFILE_HIGH422PROFILE:
+ return "h264 high 4:2:2";
+ case H264PROFILE_HIGH444PREDICTIVEPROFILE:
+ return "h264 high 4:4:4 predictive";
+ case H264PROFILE_SCALABLEBASELINE:
+ return "h264 scalable baseline";
+ case H264PROFILE_SCALABLEHIGH:
+ return "h264 scalable high";
+ case H264PROFILE_STEREOHIGH:
+ return "h264 stereo high";
+ case H264PROFILE_MULTIVIEWHIGH:
+ return "h264 multiview high";
+ case HEVCPROFILE_MAIN:
+ return "hevc main";
+ case HEVCPROFILE_MAIN10:
+ return "hevc main 10";
+ case HEVCPROFILE_MAIN_STILL_PICTURE:
+ return "hevc main still-picture";
+ case VP8PROFILE_ANY:
+ return "vp8";
+ case VP9PROFILE_PROFILE0:
+ return "vp9 profile0";
+ case VP9PROFILE_PROFILE1:
+ return "vp9 profile1";
+ case VP9PROFILE_PROFILE2:
+ return "vp9 profile2";
+ case VP9PROFILE_PROFILE3:
+ return "vp9 profile3";
+ case DOLBYVISION_PROFILE0:
+ return "dolby vision profile 0";
+ case DOLBYVISION_PROFILE4:
+ return "dolby vision profile 4";
+ case DOLBYVISION_PROFILE5:
+ return "dolby vision profile 5";
+ case DOLBYVISION_PROFILE7:
+ return "dolby vision profile 7";
+ case DOLBYVISION_PROFILE8:
+ return "dolby vision profile 8";
+ case DOLBYVISION_PROFILE9:
+ return "dolby vision profile 9";
+ case THEORAPROFILE_ANY:
+ return "theora";
+ case AV1PROFILE_PROFILE_MAIN:
+ return "av1 profile main";
+ case AV1PROFILE_PROFILE_HIGH:
+ return "av1 profile high";
+ case AV1PROFILE_PROFILE_PRO:
+ return "av1 profile pro";
+ }
+ NOTREACHED();
+ return "";
+}
+
+} // namespace media
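A trivial usage sketch of GetProfileName(), relying only on the logging header this file already includes:

    media::VideoCodecProfile profile = media::VP9PROFILE_PROFILE2;
    LOG(INFO) << "decoding profile: " << media::GetProfileName(profile);
    // Logs: "decoding profile: vp9 profile2"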
diff --git a/accel/video_codecs.h b/accel/video_codecs.h
new file mode 100644
index 0000000..44d631b
--- /dev/null
+++ b/accel/video_codecs.h
@@ -0,0 +1,99 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: becc5bbb0aa6
+// Note: only necessary functions are ported.
+
+#ifndef VIDEO_CODECS_H_
+#define VIDEO_CODECS_H_
+
+#include <string>
+
+namespace media {
+
+// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.media
+enum VideoCodec {
+ // These values are histogrammed over time; do not change their ordinal
+ // values. When deleting a codec replace it with a dummy value; when adding a
+ // codec, do so at the bottom (and update kVideoCodecMax).
+ kUnknownVideoCodec = 0,
+ kCodecH264,
+ kCodecVC1,
+ kCodecMPEG2,
+ kCodecMPEG4,
+ kCodecTheora,
+ kCodecVP8,
+ kCodecVP9,
+ kCodecHEVC,
+ kCodecDolbyVision,
+ kCodecAV1,
+ // DO NOT ADD RANDOM VIDEO CODECS!
+ //
+ // The only acceptable time to add a new codec is if there is production code
+ // that uses said codec in the same CL.
+
+ kVideoCodecMax = kCodecAV1, // Must equal the last "real" codec above.
+};
+
+// Video codec profiles. Keep in sync with mojo::VideoCodecProfile (see
+// media/mojo/mojom/media_types.mojom), gpu::VideoCodecProfile (see
+// gpu/config/gpu_info.h), and PP_VideoDecoder_Profile (translation is performed
+// in content/renderer/pepper/ppb_video_decoder_impl.cc).
+// NOTE: These values are histogrammed over time in UMA so the values must never
+// ever change (add new values to tools/metrics/histograms/histograms.xml)
+// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.media
+enum VideoCodecProfile {
+ // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
+ // for example), and keep the values for a particular format grouped
+ // together for clarity.
+ VIDEO_CODEC_PROFILE_UNKNOWN = -1,
+ VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
+ H264PROFILE_MIN = 0,
+ H264PROFILE_BASELINE = H264PROFILE_MIN,
+ H264PROFILE_MAIN = 1,
+ H264PROFILE_EXTENDED = 2,
+ H264PROFILE_HIGH = 3,
+ H264PROFILE_HIGH10PROFILE = 4,
+ H264PROFILE_HIGH422PROFILE = 5,
+ H264PROFILE_HIGH444PREDICTIVEPROFILE = 6,
+ H264PROFILE_SCALABLEBASELINE = 7,
+ H264PROFILE_SCALABLEHIGH = 8,
+ H264PROFILE_STEREOHIGH = 9,
+ H264PROFILE_MULTIVIEWHIGH = 10,
+ H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
+ VP8PROFILE_MIN = 11,
+ VP8PROFILE_ANY = VP8PROFILE_MIN,
+ VP8PROFILE_MAX = VP8PROFILE_ANY,
+ VP9PROFILE_MIN = 12,
+ VP9PROFILE_PROFILE0 = VP9PROFILE_MIN,
+ VP9PROFILE_PROFILE1 = 13,
+ VP9PROFILE_PROFILE2 = 14,
+ VP9PROFILE_PROFILE3 = 15,
+ VP9PROFILE_MAX = VP9PROFILE_PROFILE3,
+ HEVCPROFILE_MIN = 16,
+ HEVCPROFILE_MAIN = HEVCPROFILE_MIN,
+ HEVCPROFILE_MAIN10 = 17,
+ HEVCPROFILE_MAIN_STILL_PICTURE = 18,
+ HEVCPROFILE_MAX = HEVCPROFILE_MAIN_STILL_PICTURE,
+ DOLBYVISION_PROFILE0 = 19,
+ DOLBYVISION_PROFILE4 = 20,
+ DOLBYVISION_PROFILE5 = 21,
+ DOLBYVISION_PROFILE7 = 22,
+ THEORAPROFILE_MIN = 23,
+ THEORAPROFILE_ANY = THEORAPROFILE_MIN,
+ THEORAPROFILE_MAX = THEORAPROFILE_ANY,
+ AV1PROFILE_MIN = 24,
+ AV1PROFILE_PROFILE_MAIN = AV1PROFILE_MIN,
+ AV1PROFILE_PROFILE_HIGH = 25,
+ AV1PROFILE_PROFILE_PRO = 26,
+ AV1PROFILE_MAX = AV1PROFILE_PROFILE_PRO,
+ DOLBYVISION_PROFILE8 = 27,
+ DOLBYVISION_PROFILE9 = 28,
+ VIDEO_CODEC_PROFILE_MAX = DOLBYVISION_PROFILE9,
+};
+
+std::string GetProfileName(VideoCodecProfile profile);
+
+} // namespace media
+
+#endif // VIDEO_CODECS_H_
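Because these enum values are frozen for UMA, each codec's profiles occupy a fixed numeric range bounded by the *_MIN/*_MAX aliases, so callers can classify a profile with simple range checks. One wrinkle: DOLBYVISION_PROFILE8 and DOLBYVISION_PROFILE9 had to be appended at the end (27, 28) rather than next to the other Dolby Vision values, so a Dolby Vision check must cover both ranges. A sketch (the helper itself is hypothetical, not part of this header):

    namespace media {

    VideoCodec CodecFromProfile(VideoCodecProfile profile) {
      if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX)
        return kCodecH264;
      if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX)
        return kCodecVP8;
      if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX)
        return kCodecVP9;
      if (profile >= HEVCPROFILE_MIN && profile <= HEVCPROFILE_MAX)
        return kCodecHEVC;
      // Dolby Vision spans two non-contiguous ranges; see note above.
      if ((profile >= DOLBYVISION_PROFILE0 &&
           profile <= DOLBYVISION_PROFILE7) ||
          profile == DOLBYVISION_PROFILE8 || profile == DOLBYVISION_PROFILE9)
        return kCodecDolbyVision;
      if (profile >= THEORAPROFILE_MIN && profile <= THEORAPROFILE_MAX)
        return kCodecTheora;
      if (profile >= AV1PROFILE_MIN && profile <= AV1PROFILE_MAX)
        return kCodecAV1;
      return kUnknownVideoCodec;
    }

    }  // namespace media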
diff --git a/accel/video_decode_accelerator.cc b/accel/video_decode_accelerator.cc
new file mode 100644
index 0000000..e74d1ec
--- /dev/null
+++ b/accel/video_decode_accelerator.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
+
+#include "base/logging.h"
+
+#include "video_decode_accelerator.h"
+
+namespace media {
+
+VideoDecodeAccelerator::Config::Config() = default;
+VideoDecodeAccelerator::Config::Config(const Config& config) = default;
+
+VideoDecodeAccelerator::Config::Config(VideoCodecProfile video_codec_profile)
+ : profile(video_codec_profile) {}
+
+VideoDecodeAccelerator::Config::~Config() = default;
+
+std::string VideoDecodeAccelerator::Config::AsHumanReadableString() const {
+ std::ostringstream s;
+ s << "profile: " << GetProfileName(profile);
+ return s.str();
+}
+
+void VideoDecodeAccelerator::Client::NotifyInitializationComplete(
+ bool success) {
+ NOTREACHED() << "By default deferred initialization is not supported.";
+}
+
+VideoDecodeAccelerator::~VideoDecodeAccelerator() = default;
+
+bool VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ // Implementations in the process that VDA runs in must override this.
+ LOG(FATAL) << "This may only be called in the same process as VDA impl.";
+ return false;
+}
+
+void VideoDecodeAccelerator::ImportBufferForPicture(
+ int32_t picture_buffer_id,
+ VideoPixelFormat pixel_format,
+ const NativePixmapHandle& native_pixmap_handle) {
+ NOTREACHED() << "Buffer import not supported.";
+}
+
+VideoDecodeAccelerator::SupportedProfile::SupportedProfile()
+ : profile(VIDEO_CODEC_PROFILE_UNKNOWN), encrypted_only(false) {}
+
+VideoDecodeAccelerator::SupportedProfile::~SupportedProfile() = default;
+
+VideoDecodeAccelerator::Capabilities::Capabilities() : flags(NO_FLAGS) {}
+
+VideoDecodeAccelerator::Capabilities::Capabilities(const Capabilities& other) =
+ default;
+
+VideoDecodeAccelerator::Capabilities::~Capabilities() = default;
+
+std::string VideoDecodeAccelerator::Capabilities::AsHumanReadableString()
+ const {
+ std::ostringstream s;
+ s << "[";
+ for (const SupportedProfile& sp : supported_profiles) {
+ s << " " << GetProfileName(sp.profile) << ": " << sp.min_resolution.width()
+ << "x" << sp.min_resolution.height() << "->" << sp.max_resolution.width()
+ << "x" << sp.max_resolution.height();
+ }
+ s << "]";
+ return s.str();
+}
+
+} // namespace media
+
+namespace std {
+
+void default_delete<media::VideoDecodeAccelerator>::operator()(
+ media::VideoDecodeAccelerator* vda) const {
+ vda->Destroy();
+}
+
+} // namespace std
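The std::default_delete specialization above pairs with the declaration in the header that follows: a VDA held in std::unique_ptr is torn down via Destroy() instead of its protected destructor. A usage sketch with a hypothetical factory function:

    // CreateVda() is hypothetical; any function handing out a raw
    // media::VideoDecodeAccelerator* behaves the same.
    std::unique_ptr<media::VideoDecodeAccelerator> vda(CreateVda());
    // ... use the decoder ...
    vda.reset();  // Calls vda->Destroy(), which deletes the object itself.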
diff --git a/accel/video_decode_accelerator.h b/accel/video_decode_accelerator.h
new file mode 100644
index 0000000..10601be
--- /dev/null
+++ b/accel/video_decode_accelerator.h
@@ -0,0 +1,348 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
+
+#ifndef VIDEO_DECODE_ACCELERATOR_H_
+#define VIDEO_DECODE_ACCELERATOR_H_
+
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+
+#include "bitstream_buffer.h"
+#include "native_pixmap_handle.h"
+#include "picture.h"
+#include "size.h"
+#include "video_codecs.h"
+#include "video_pixel_format.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+// Video decoder interface.
+// This interface is extended by the various components that ultimately
+// implement the backend of PPB_VideoDecoder_Dev.
+class VideoDecodeAccelerator {
+ public:
+  // Specification of a decoding profile supported by a decoder.
+ // |max_resolution| and |min_resolution| are inclusive.
+ struct SupportedProfile {
+ SupportedProfile();
+ ~SupportedProfile();
+ VideoCodecProfile profile;
+ Size max_resolution;
+ Size min_resolution;
+ bool encrypted_only;
+ };
+ using SupportedProfiles = std::vector<SupportedProfile>;
+
+ struct Capabilities {
+ Capabilities();
+ Capabilities(const Capabilities& other);
+ ~Capabilities();
+
+ std::string AsHumanReadableString() const;
+
+ // Flags that can be associated with a VDA.
+ enum Flags {
+ NO_FLAGS = 0,
+
+ // Normally, the VDA is required to be able to provide all PictureBuffers
+ // to the client via PictureReady(), even if the client does not return
+ // any of them via ReusePictureBuffer(). The client is only required to
+ // return PictureBuffers when it holds all of them, if it wants to get
+ // more decoded output. See VideoDecoder::CanReadWithoutStalling for
+ // more context.
+ // If this flag is set, then the VDA does not make this guarantee. The
+ // client must return PictureBuffers to be sure that new frames will be
+ // provided via PictureReady.
+ NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE = 1 << 0,
+
+ // Whether the VDA supports being configured with an output surface for
+ // it to render frames to. For example, SurfaceViews on Android.
+ SUPPORTS_EXTERNAL_OUTPUT_SURFACE = 1 << 1,
+
+ // If set, the VDA will use deferred initialization if the config
+ // indicates that the client supports it as well. Refer to
+ // NotifyInitializationComplete for more details.
+ SUPPORTS_DEFERRED_INITIALIZATION = 1 << 2,
+
+      // If set, video frames will have the COPY_REQUIRED flag, which will
+      // cause an extra texture copy during composition.
+ REQUIRES_TEXTURE_COPY = 1 << 3,
+
+ // Whether the VDA supports encrypted streams or not.
+ SUPPORTS_ENCRYPTED_STREAMS = 1 << 4,
+
+      // If set, the decoder does not require a restart in order to switch to
+      // using an external output surface.
+ SUPPORTS_SET_EXTERNAL_OUTPUT_SURFACE = 1 << 5,
+ };
+
+ SupportedProfiles supported_profiles;
+ uint32_t flags;
+ };
+
+ // Enumeration of potential errors generated by the API.
+ // Note: Keep these in sync with PP_VideoDecodeError_Dev. Also do not
+ // rearrange, reuse or remove values as they are used for gathering UMA
+ // statistics.
+ enum Error {
+ // An operation was attempted during an incompatible decoder state.
+ ILLEGAL_STATE = 1,
+ // Invalid argument was passed to an API method.
+ INVALID_ARGUMENT,
+ // Encoded input is unreadable.
+ UNREADABLE_INPUT,
+ // A failure occurred at the browser layer or one of its dependencies.
+ // Examples of such failures include GPU hardware failures, GPU driver
+ // failures, GPU library failures, browser programming errors, and so on.
+ PLATFORM_FAILURE,
+ // Largest used enum. This should be adjusted when new errors are added.
+ ERROR_MAX = PLATFORM_FAILURE,
+ };
+
+ // Config structure contains parameters required for the VDA initialization.
+ struct Config {
+ // Specifies the allocation and handling mode for output PictureBuffers.
+ // When set to ALLOCATE, the VDA is expected to allocate backing memory
+ // for PictureBuffers at the time of AssignPictureBuffers() call.
+ // When set to IMPORT, the VDA will not allocate, but after receiving
+ // AssignPictureBuffers() call, it will expect a call to
+ // ImportBufferForPicture() for each PictureBuffer before use.
+ enum class OutputMode {
+ ALLOCATE,
+ IMPORT,
+ };
+
+ Config();
+ Config(const Config& config);
+
+ explicit Config(VideoCodecProfile profile);
+
+ ~Config();
+
+ std::string AsHumanReadableString() const;
+
+ // The video codec and profile.
+ VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
+
+ // Whether the client supports deferred initialization.
+ bool is_deferred_initialization_allowed = false;
+
+    // Hint of the coded size of the video frames; subject to change.
+ Size initial_expected_coded_size = Size(320, 240);
+
+ OutputMode output_mode = OutputMode::ALLOCATE;
+
+ // The list of picture buffer formats that the client knows how to use. An
+ // empty list means any format is supported.
+ std::vector<VideoPixelFormat> supported_output_formats;
+
+ // The H264 SPS and PPS configuration data. Not all clients populate these
+ // fields, so they should be parsed from the bitstream instead, if required.
+ // Each SPS and PPS is prefixed with the Annex B framing bytes: 0, 0, 0, 1.
+ std::vector<uint8_t> sps;
+ std::vector<uint8_t> pps;
+ };
+
+  // Interface for collaborating with the picture interface to provide memory
+  // for output pictures and to blit them. These callbacks will not be made
+  // unless Initialize() has returned successfully.
+ // This interface is extended by the various layers that relay messages back
+ // to the plugin, through the PPP_VideoDecoder_Dev interface the plugin
+ // implements.
+ class Client {
+ public:
+ // Notify the client that deferred initialization has completed successfully
+ // or not. This is required if and only if deferred initialization is
+ // supported by the VDA (see Capabilities), and it is supported by the
+ // client (see Config::is_deferred_initialization_allowed), and the initial
+ // call to VDA::Initialize returns true.
+ // The default implementation is a NOTREACHED, since deferred initialization
+ // is not supported by default.
+ virtual void NotifyInitializationComplete(bool success);
+
+ // Callback to tell client how many and what size of buffers to provide.
+ // Note that the actual count provided through AssignPictureBuffers() can be
+ // larger than the value requested.
+ // |format| indicates what format the decoded frames will be produced in
+ // by the VDA, or PIXEL_FORMAT_UNKNOWN if the underlying platform handles
+ // this transparently.
+ virtual void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ VideoPixelFormat format,
+ const Size& dimensions) = 0;
+
+ // Callback to dismiss picture buffer that was assigned earlier.
+ virtual void DismissPictureBuffer(int32_t picture_buffer_id) = 0;
+
+ // Callback to deliver decoded pictures ready to be displayed.
+ virtual void PictureReady(const Picture& picture) = 0;
+
+    // Callback to notify that the decoder has decoded the end of the current
+    // bitstream buffer.
+ virtual void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) = 0;
+
+ // Flush completion callback.
+ virtual void NotifyFlushDone() = 0;
+
+ // Reset completion callback.
+ virtual void NotifyResetDone() = 0;
+
+ // Callback to notify about decoding errors. Note that errors in
+ // Initialize() will not be reported here, but will instead be indicated by
+ // a false return value there.
+ virtual void NotifyError(Error error) = 0;
+
+ protected:
+ virtual ~Client() {}
+ };
+
+ // Video decoder functions.
+
+ // Initializes the video decoder with specific configuration. Called once per
+ // decoder construction. This call is synchronous and returns true iff
+ // initialization is successful, unless deferred initialization is used.
+ //
+ // By default, deferred initialization is not used. However, if Config::
+ // is_deferred_initialization_allowed is set by the client, and if
+ // Capabilities::Flags::SUPPORTS_DEFERRED_INITIALIZATION is set by the VDA,
+ // and if VDA::Initialize returns true, then the client can expect a call to
+ // NotifyInitializationComplete with the actual success / failure of
+ // initialization. Note that a return value of false from VDA::Initialize
+ // indicates that initialization definitely failed, and no callback is needed.
+ //
+  // For encrypted video, only deferred initialization is supported and
+  // |config| must contain a valid |cdm_id| (a field of the full Chromium
+  // Config that is not part of this port).
+ //
+ // Parameters:
+ // |config| contains the initialization parameters.
+ // |client| is the client of this video decoder. Does not take ownership of
+ // |client| which must be valid until Destroy() is called.
+ virtual bool Initialize(const Config& config, Client* client) = 0;
+
+ // Decodes given bitstream buffer that contains at most one frame. Once
+ // decoder is done with processing |bitstream_buffer| it will call
+ // NotifyEndOfBitstreamBuffer() with the bitstream buffer id.
+ // Parameters:
+ // |bitstream_buffer| is the input bitstream that is sent for decoding.
+ virtual void Decode(const BitstreamBuffer& bitstream_buffer) = 0;
+
+ // Assigns a set of texture-backed picture buffers to the video decoder.
+ //
+ // Ownership of each picture buffer remains with the client, but the client
+ // is not allowed to deallocate the buffer before the DismissPictureBuffer
+ // callback has been initiated for a given buffer.
+ //
+ // Parameters:
+ // |buffers| contains the allocated picture buffers for the output. Note
+ // that the count of buffers may be larger than the count requested through
+ // the call to Client::ProvidePictureBuffers().
+ virtual void AssignPictureBuffers(
+ const std::vector<PictureBuffer>& buffers) = 0;
+
+  // Imports |native_pixmap_handle|, pointing to a buffer in |pixel_format|,
+  // as backing memory for the picture buffer associated with
+  // |picture_buffer_id|. This can only be used if the VDA has been
+  // Initialize()d with config.output_mode = IMPORT, and should be preceded by
+  // a call to AssignPictureBuffers() to set up the number of PictureBuffers
+  // and their details.
+  // The |pixel_format| used here may be different from the |pixel_format|
+  // required in ProvidePictureBuffers(). If the buffer cannot be imported, an
+  // error should be notified via NotifyError().
+  // After this call, the VDA becomes the owner of the file descriptors in the
+  // handle, and is responsible for closing them after use, including on
+  // import failure.
+ virtual void ImportBufferForPicture(
+ int32_t picture_buffer_id,
+ VideoPixelFormat pixel_format,
+ const NativePixmapHandle& native_pixmap_handle);
+
+  // Sends picture buffers to be reused by the decoder. This needs to be called
+  // for each buffer that has been processed, so that the decoder knows which
+  // picture buffers it can write output to.
+ //
+ // Parameters:
+ // |picture_buffer_id| id of the picture buffer that is to be reused.
+ virtual void ReusePictureBuffer(int32_t picture_buffer_id) = 0;
+
+ // Flushes the decoder: all pending inputs will be decoded and pictures handed
+ // back to the client, followed by NotifyFlushDone() being called on the
+ // client. Can be used to implement "end of stream" notification.
+ virtual void Flush() = 0;
+
+ // Resets the decoder: all pending inputs are dropped immediately and the
+ // decoder returned to a state ready for further Decode()s, followed by
+ // NotifyResetDone() being called on the client. Can be used to implement
+ // "seek". After Flush is called, it is OK to call Reset before receiving
+ // NotifyFlushDone() and VDA should cancel the flush. Note NotifyFlushDone()
+ // may be on the way to the client. If client gets NotifyFlushDone(), it
+ // should be before NotifyResetDone().
+ virtual void Reset() = 0;
+
+ // Destroys the decoder: all pending inputs are dropped immediately and the
+  // component is freed. This call may asynchronously free system resources,
+ // but its client-visible effects are synchronous. After this method returns
+ // no more callbacks will be made on the client. Deletes |this|
+ // unconditionally, so make sure to drop all pointers to it!
+ virtual void Destroy() = 0;
+
+ // TO BE CALLED IN THE SAME PROCESS AS THE VDA IMPLEMENTATION ONLY.
+ //
+ // A decode "task" is a sequence that includes a Decode() call from Client,
+ // as well as corresponding callbacks to return the input BitstreamBuffer
+ // after use, and the resulting output Picture(s).
+ //
+ // If the Client can support running these three calls on a separate thread,
+ // it may call this method to try to set up the VDA implementation to do so.
+ // If the VDA can support this as well, return true, otherwise return false.
+ // If true is returned, the client may submit each Decode() call (but no other
+ // calls) on |decode_task_runner|, and should then expect that
+ // NotifyEndOfBitstreamBuffer() and PictureReady() callbacks may come on
+ // |decode_task_runner| as well, called on |decode_client|, instead of client
+ // provided to Initialize().
+ //
+ // This method may be called at any time.
+ //
+  // NOTE 1: some callbacks may still have to come on the main thread, so the
+  // Client should handle callbacks arriving on both the main thread and
+  // |decode_task_runner|.
+ //
+ // NOTE 2: VDA implementations of Decode() must return as soon as possible and
+ // never block, as |decode_task_runner| may be a latency critical thread
+ // (such as the GPU IO thread).
+ //
+ // One application of this is offloading the GPU Child thread. In general,
+ // calls to VDA in GPU process have to be done on the GPU Child thread, as
+ // they may require GL context to be current. However, some VDAs may be able
+ // to run decode operations without GL context, which helps reduce latency and
+ // offloads the GPU Child thread.
+ virtual bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner);
+
+ protected:
+  // Do not delete directly; use Destroy() or own it with a std::unique_ptr,
+  // which will Destroy() it properly by default thanks to the default_delete
+  // specialization below.
+ virtual ~VideoDecodeAccelerator();
+};
+
+} // namespace media
+
+namespace std {
+
+// Specialize std::default_delete so that
+// std::unique_ptr<VideoDecodeAccelerator> uses "Destroy()" instead of trying to
+// use the destructor.
+template <>
+struct default_delete<media::VideoDecodeAccelerator> {
+ void operator()(media::VideoDecodeAccelerator* vda) const;
+};
+
+} // namespace std
+
+#endif // VIDEO_DECODE_ACCELERATOR_H_
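To make the initialization contract above concrete, here is a sketch of a client driving (possibly deferred) initialization. The class name, the trivial callback bodies, and StartDecoder() are illustrative only:

    class SketchClient : public media::VideoDecodeAccelerator::Client {
     public:
      void NotifyInitializationComplete(bool success) override {
        // Reached only when deferred initialization was negotiated.
        initialized_ = success;
      }
      void ProvidePictureBuffers(uint32_t, media::VideoPixelFormat,
                                 const media::Size&) override {}
      void DismissPictureBuffer(int32_t) override {}
      void PictureReady(const media::Picture&) override {}
      void NotifyEndOfBitstreamBuffer(int32_t) override {}
      void NotifyFlushDone() override {}
      void NotifyResetDone() override {}
      void NotifyError(media::VideoDecodeAccelerator::Error) override {}

     private:
      bool initialized_ = false;
    };

    bool StartDecoder(media::VideoDecodeAccelerator* vda,
                      SketchClient* client) {
      media::VideoDecodeAccelerator::Config config(media::H264PROFILE_MAIN);
      config.is_deferred_initialization_allowed = true;
      if (!vda->Initialize(config, client))
        return false;  // Definite failure; no callback will follow.
      // Either initialization already succeeded synchronously, or (if the
      // VDA advertises SUPPORTS_DEFERRED_INITIALIZATION) the real outcome
      // arrives later via NotifyInitializationComplete().
      return true;
    }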
diff --git a/accel/video_encode_accelerator.cc b/accel/video_encode_accelerator.cc
new file mode 100644
index 0000000..f35f4b2
--- /dev/null
+++ b/accel/video_encode_accelerator.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 9e40822e3a3d
+// Note: only necessary functions are ported.
+
+#include "video_encode_accelerator.h"
+
+namespace media {
+
+Vp8Metadata::Vp8Metadata()
+ : non_reference(false), temporal_idx(0), layer_sync(false) {}
+Vp8Metadata::Vp8Metadata(const Vp8Metadata& other) = default;
+Vp8Metadata::Vp8Metadata(Vp8Metadata&& other) = default;
+Vp8Metadata::~Vp8Metadata() = default;
+
+BitstreamBufferMetadata::BitstreamBufferMetadata()
+ : payload_size_bytes(0), key_frame(false) {}
+BitstreamBufferMetadata::BitstreamBufferMetadata(
+ BitstreamBufferMetadata&& other) = default;
+BitstreamBufferMetadata::BitstreamBufferMetadata(size_t payload_size_bytes,
+ bool key_frame,
+ base::TimeDelta timestamp)
+ : payload_size_bytes(payload_size_bytes),
+ key_frame(key_frame),
+ timestamp(timestamp) {}
+BitstreamBufferMetadata::~BitstreamBufferMetadata() = default;
+
+VideoEncodeAccelerator::SupportedProfile::SupportedProfile()
+ : profile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+ max_framerate_numerator(0),
+ max_framerate_denominator(0) {}
+
+VideoEncodeAccelerator::SupportedProfile::SupportedProfile(
+ VideoCodecProfile profile,
+ const Size& max_resolution,
+ uint32_t max_framerate_numerator,
+ uint32_t max_framerate_denominator)
+ : profile(profile),
+ max_resolution(max_resolution),
+ max_framerate_numerator(max_framerate_numerator),
+ max_framerate_denominator(max_framerate_denominator) {}
+
+VideoEncodeAccelerator::SupportedProfile::~SupportedProfile() = default;
+
+} // namespace media
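The Vp8Metadata fields documented in the header below describe a temporal-layer structure. As a sketch, in a simple two-layer pattern where odd frames form an enhancement layer that references only base-layer frames, frames could be tagged like this (the helper is hypothetical):

    media::Vp8Metadata TagVp8Frame(int frame_index) {
      media::Vp8Metadata meta;
      meta.temporal_idx = frame_index % 2;  // Even frames: base layer (L0).
      // L1 frames update no reference buffer, so dropping them is safe.
      meta.non_reference = (meta.temporal_idx == 1);
      // In this pattern every L1 frame references only L0 frames, so each
      // one is a layer sync point.
      meta.layer_sync = (meta.temporal_idx == 1);
      return meta;
    }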
diff --git a/accel/video_encode_accelerator.h b/accel/video_encode_accelerator.h
new file mode 100644
index 0000000..200930a
--- /dev/null
+++ b/accel/video_encode_accelerator.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 9e40822e3a3d
+// Note: only necessary functions are ported.
+
+#ifndef MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/time/time.h"
+
+#include "size.h"
+#include "video_codecs.h"
+
+namespace media {
+
+// class BitstreamBuffer;
+// class VideoFrame;
+
+// Metadata for a VP8 bitstream buffer.
+// |non_reference| is true iff this frame does not update any reference buffer,
+// meaning dropping this frame still results in a decodable
+// stream.
+// |temporal_idx| indicates the temporal index for this frame.
+// |layer_sync| is true iff this frame has |temporal_idx| > 0 and does NOT
+// reference any reference buffer containing a frame with
+// temporal_idx > 0.
+struct Vp8Metadata final {
+ Vp8Metadata();
+ Vp8Metadata(const Vp8Metadata& other);
+ Vp8Metadata(Vp8Metadata&& other);
+ ~Vp8Metadata();
+ bool non_reference;
+ uint8_t temporal_idx;
+ bool layer_sync;
+};
+
+// Metadata associated with a bitstream buffer.
+// |payload_size_bytes| is the byte size of the used portion of the buffer.
+// |key_frame| is true if this delivered frame is a keyframe.
+// |timestamp| is the same timestamp as in VideoFrame passed to Encode().
+// |vp8|, if set, contains metadata specific to VP8. See above.
+struct BitstreamBufferMetadata final {
+ BitstreamBufferMetadata();
+ BitstreamBufferMetadata(BitstreamBufferMetadata&& other);
+ BitstreamBufferMetadata(size_t payload_size_bytes,
+ bool key_frame,
+ base::TimeDelta timestamp);
+ ~BitstreamBufferMetadata();
+ size_t payload_size_bytes;
+ bool key_frame;
+ base::TimeDelta timestamp;
+ base::Optional<Vp8Metadata> vp8;
+};
+
+// Video encoder interface.
+class VideoEncodeAccelerator {
+ public:
+ // Specification of an encoding profile supported by an encoder.
+ struct SupportedProfile {
+ SupportedProfile();
+ SupportedProfile(VideoCodecProfile profile,
+ const Size& max_resolution,
+ uint32_t max_framerate_numerator = 0u,
+ uint32_t max_framerate_denominator = 1u);
+ ~SupportedProfile();
+
+ VideoCodecProfile profile;
+ Size min_resolution;
+ Size max_resolution;
+ uint32_t max_framerate_numerator;
+ uint32_t max_framerate_denominator;
+ };
+ using SupportedProfiles = std::vector<SupportedProfile>;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
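SupportedProfile expresses the maximum frame rate as a rational (numerator over denominator). A construction sketch, assuming the ported Size(width, height) constructor:

    // A profile advertising H.264 Main up to 1080p at 30/1 fps.
    media::VideoEncodeAccelerator::SupportedProfile profile(
        media::H264PROFILE_MAIN, media::Size(1920, 1080),
        /*max_framerate_numerator=*/30, /*max_framerate_denominator=*/1);

    const double max_fps =
        static_cast<double>(profile.max_framerate_numerator) /
        profile.max_framerate_denominator;  // 30.0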
diff --git a/accel/video_frame.cc b/accel/video_frame.cc
new file mode 100644
index 0000000..2b1c7a6
--- /dev/null
+++ b/accel/video_frame.cc
@@ -0,0 +1,821 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 602bc8fa60fa
+// Note: only necessary functions are ported.
+// Note: some shared memory-related functionality here is no longer present in
+// Chromium.
+
+#include "video_frame.h"
+
+#include <algorithm>
+#include <climits>
+#include <limits>
+#include <numeric>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/bind.h"
+#include "base/bits.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/stl_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "media_limits.h"
+
+namespace media {
+
+// Note: moved from Chromium media/base/timestamp_constants.h
+// Indicates an invalid or missing timestamp.
+constexpr base::TimeDelta kNoTimestamp =
+ base::TimeDelta::FromMicroseconds(std::numeric_limits<int64_t>::min());
+
+namespace {
+
+// Helper to provide Rect::Intersect() as an expression.
+Rect Intersection(Rect a, const Rect& b) {
+ a.Intersect(b);
+ return a;
+}
+
+// Note: moved from Chromium base/bits.h which is not included in libchrome.
+// Round down |size| to a multiple of alignment, which must be a power of two.
+size_t AlignDown(size_t size, size_t alignment) {
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
+ return size & ~(alignment - 1);
+}
+
+} // namespace
+
+// File-scoped atomic counter used to generate a unique id for each VideoFrame.
+static base::AtomicSequenceNumber g_unique_id_generator;
+
+static std::string StorageTypeToString(
+ const VideoFrame::StorageType storage_type) {
+ switch (storage_type) {
+ case VideoFrame::STORAGE_UNKNOWN:
+ return "UNKNOWN";
+ case VideoFrame::STORAGE_OPAQUE:
+ return "OPAQUE";
+ case VideoFrame::STORAGE_UNOWNED_MEMORY:
+ return "UNOWNED_MEMORY";
+ case VideoFrame::STORAGE_OWNED_MEMORY:
+ return "OWNED_MEMORY";
+ case VideoFrame::STORAGE_SHMEM:
+ return "SHMEM";
+ case VideoFrame::STORAGE_DMABUFS:
+ return "DMABUFS";
+ case VideoFrame::STORAGE_MOJO_SHARED_BUFFER:
+ return "MOJO_SHARED_BUFFER";
+ }
+
+ NOTREACHED() << "Invalid StorageType provided: " << storage_type;
+ return "INVALID";
+}
+
+// static
+bool VideoFrame::IsStorageTypeMappable(VideoFrame::StorageType storage_type) {
+ return
+ // This is not strictly needed but makes explicit that, at VideoFrame
+ // level, DmaBufs are not mappable from userspace.
+ storage_type != VideoFrame::STORAGE_DMABUFS &&
+ (storage_type == VideoFrame::STORAGE_UNOWNED_MEMORY ||
+ storage_type == VideoFrame::STORAGE_OWNED_MEMORY ||
+ storage_type == VideoFrame::STORAGE_SHMEM ||
+ storage_type == VideoFrame::STORAGE_MOJO_SHARED_BUFFER);
+}
+
+// Returns true if frames of pixel |format| must be allocated with overall
+// dimensions aligned to a multiple of two.
+static bool RequiresEvenSizeAllocation(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ case PIXEL_FORMAT_BGRA:
+ return false;
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_P016LE:
+ return true;
+ case PIXEL_FORMAT_UNKNOWN:
+ break;
+ }
+ NOTREACHED() << "Unsupported video frame format: " << format;
+ return false;
+}
+
+// Creates a VideoFrameLayout for a tightly packed frame.
+static base::Optional<VideoFrameLayout> GetDefaultLayout(
+ VideoPixelFormat format,
+ const Size& coded_size) {
+ std::vector<ColorPlaneLayout> planes;
+
+ switch (format) {
+ case PIXEL_FORMAT_I420: {
+ int uv_width = (coded_size.width() + 1) / 2;
+ int uv_height = (coded_size.height() + 1) / 2;
+ int uv_stride = uv_width;
+ int uv_size = uv_stride * uv_height;
+ planes = std::vector<ColorPlaneLayout>{
+ ColorPlaneLayout(coded_size.width(), 0, coded_size.GetArea()),
+ ColorPlaneLayout(uv_stride, coded_size.GetArea(), uv_size),
+ ColorPlaneLayout(uv_stride, coded_size.GetArea() + uv_size, uv_size),
+ };
+ break;
+ }
+
+ case PIXEL_FORMAT_Y16:
+ planes = std::vector<ColorPlaneLayout>{ColorPlaneLayout(
+ coded_size.width() * 2, 0, coded_size.GetArea() * 2)};
+ break;
+
+ case PIXEL_FORMAT_ARGB:
+ planes = std::vector<ColorPlaneLayout>{ColorPlaneLayout(
+ coded_size.width() * 4, 0, coded_size.GetArea() * 4)};
+ break;
+
+ case PIXEL_FORMAT_NV12: {
+ int uv_width = (coded_size.width() + 1) / 2;
+ int uv_height = (coded_size.height() + 1) / 2;
+ int uv_stride = uv_width * 2;
+ int uv_size = uv_stride * uv_height;
+ planes = std::vector<ColorPlaneLayout>{
+ ColorPlaneLayout(coded_size.width(), 0, coded_size.GetArea()),
+ ColorPlaneLayout(uv_stride, coded_size.GetArea(), uv_size),
+ };
+ break;
+ }
+
+ default:
+ // TODO(miu): This function should support any pixel format.
+ // http://crbug.com/555909 .
+ DLOG(ERROR)
+ << "Only PIXEL_FORMAT_I420, PIXEL_FORMAT_Y16, PIXEL_FORMAT_NV12, "
+ "and PIXEL_FORMAT_ARGB formats are supported: "
+ << VideoPixelFormatToString(format);
+ return base::nullopt;
+ }
+
+ return VideoFrameLayout::CreateWithPlanes(format, coded_size, planes);
+}
+
+// static
+bool VideoFrame::IsValidConfig(VideoPixelFormat format,
+ StorageType storage_type,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size) {
+ // Check maximum limits for all formats.
+ int coded_size_area = coded_size.GetCheckedArea().ValueOrDefault(INT_MAX);
+ int natural_size_area = natural_size.GetCheckedArea().ValueOrDefault(INT_MAX);
+ static_assert(limits::kMaxCanvas < INT_MAX, "");
+ if (coded_size_area > limits::kMaxCanvas ||
+ coded_size.width() > limits::kMaxDimension ||
+ coded_size.height() > limits::kMaxDimension || visible_rect.x() < 0 ||
+ visible_rect.y() < 0 || visible_rect.right() > coded_size.width() ||
+ visible_rect.bottom() > coded_size.height() ||
+ natural_size_area > limits::kMaxCanvas ||
+ natural_size.width() > limits::kMaxDimension ||
+ natural_size.height() > limits::kMaxDimension) {
+ return false;
+ }
+
+ // TODO(mcasas): Remove parameter |storage_type| when the opaque storage types
+ // comply with the checks below. Right now we skip them.
+ if (!IsStorageTypeMappable(storage_type))
+ return true;
+
+ // Make sure new formats are properly accounted for in the method.
+ static_assert(PIXEL_FORMAT_MAX == 32,
+ "Added pixel format, please review IsValidConfig()");
+
+ if (format == PIXEL_FORMAT_UNKNOWN) {
+ return coded_size.IsEmpty() && visible_rect.IsEmpty() &&
+ natural_size.IsEmpty();
+ }
+
+ // Check that software-allocated buffer formats are not empty.
+ return !coded_size.IsEmpty() && !visible_rect.IsEmpty() &&
+ !natural_size.IsEmpty();
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateFrame(VideoPixelFormat format,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp) {
+ return CreateFrameInternal(format, coded_size, visible_rect, natural_size,
+ timestamp, false);
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ uint8_t* data,
+ size_t data_size,
+ base::SharedMemoryHandle handle,
+ size_t data_offset,
+ base::TimeDelta timestamp) {
+ auto layout = GetDefaultLayout(format, coded_size);
+ if (!layout)
+ return nullptr;
+ return WrapExternalStorage(STORAGE_SHMEM, *layout, visible_rect, natural_size,
+ data, data_size, timestamp, nullptr, nullptr,
+ handle, data_offset);
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateEOSFrame() {
+ auto layout = VideoFrameLayout::Create(PIXEL_FORMAT_UNKNOWN, Size());
+ if (!layout) {
+ DLOG(ERROR) << "Invalid layout.";
+ return nullptr;
+ }
+ scoped_refptr<VideoFrame> frame =
+ new VideoFrame(*layout, STORAGE_UNKNOWN, Rect(), Size(), kNoTimestamp);
+ frame->metadata()->SetBoolean(VideoFrameMetadata::END_OF_STREAM, true);
+ return frame;
+}
+
+// static
+size_t VideoFrame::NumPlanes(VideoPixelFormat format) {
+ return VideoFrameLayout::NumPlanes(format);
+}
+
+// static
+size_t VideoFrame::AllocationSize(VideoPixelFormat format,
+ const Size& coded_size) {
+ size_t total = 0;
+ for (size_t i = 0; i < NumPlanes(format); ++i)
+ total += PlaneSize(format, i, coded_size).GetArea();
+ return total;
+}
+
+// static
+Size VideoFrame::PlaneSize(VideoPixelFormat format,
+ size_t plane,
+ const Size& coded_size) {
+ DCHECK(IsValidPlane(plane, format));
+
+ int width = coded_size.width();
+ int height = coded_size.height();
+ if (RequiresEvenSizeAllocation(format)) {
+ // Align to multiple-of-two size overall. This ensures that non-subsampled
+ // planes can be addressed by pixel with the same scaling as the subsampled
+ // planes.
+ width = base::bits::Align(width, 2);
+ height = base::bits::Align(height, 2);
+ }
+
+ const Size subsample = SampleSize(format, plane);
+ DCHECK(width % subsample.width() == 0);
+ DCHECK(height % subsample.height() == 0);
+ return Size(BytesPerElement(format, plane) * width / subsample.width(),
+ height / subsample.height());
+}
+
+// static
+int VideoFrame::PlaneHorizontalBitsPerPixel(VideoPixelFormat format,
+ size_t plane) {
+ DCHECK(IsValidPlane(plane, format));
+ const int bits_per_element = 8 * BytesPerElement(format, plane);
+ const int horiz_pixels_per_element = SampleSize(format, plane).width();
+ DCHECK_EQ(bits_per_element % horiz_pixels_per_element, 0);
+ return bits_per_element / horiz_pixels_per_element;
+}
+
+// static
+int VideoFrame::PlaneBitsPerPixel(VideoPixelFormat format, size_t plane) {
+ DCHECK(IsValidPlane(plane, format));
+ return PlaneHorizontalBitsPerPixel(format, plane) /
+ SampleSize(format, plane).height();
+}
+
+// static
+size_t VideoFrame::RowBytes(size_t plane, VideoPixelFormat format, int width) {
+ DCHECK(IsValidPlane(plane, format));
+ return BytesPerElement(format, plane) * Columns(plane, format, width);
+}
+
+// static
+int VideoFrame::BytesPerElement(VideoPixelFormat format, size_t plane) {
+ DCHECK(IsValidPlane(format, plane));
+ switch (format) {
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_BGRA:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ return 4;
+ case PIXEL_FORMAT_RGB24:
+ return 3;
+ case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_P016LE:
+ return 2;
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21: {
+ static const int bytes_per_element[] = {1, 2};
+ DCHECK_LT(plane, base::size(bytes_per_element));
+ return bytes_per_element[plane];
+ }
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_I444:
+ return 1;
+ case PIXEL_FORMAT_MJPEG:
+ return 0;
+ case PIXEL_FORMAT_UNKNOWN:
+ break;
+ }
+ NOTREACHED();
+ return 0;
+}
+
+// static
+std::vector<int32_t> VideoFrame::ComputeStrides(VideoPixelFormat format,
+ const Size& coded_size) {
+ std::vector<int32_t> strides;
+ const size_t num_planes = NumPlanes(format);
+ if (num_planes == 1) {
+ strides.push_back(RowBytes(0, format, coded_size.width()));
+ } else {
+ for (size_t plane = 0; plane < num_planes; ++plane) {
+ strides.push_back(base::bits::Align(
+ RowBytes(plane, format, coded_size.width()), kFrameAddressAlignment));
+ }
+ }
+ return strides;
+}
+
+// static
+size_t VideoFrame::Rows(size_t plane, VideoPixelFormat format, int height) {
+ DCHECK(IsValidPlane(plane, format));
+ const int sample_height = SampleSize(format, plane).height();
+ return base::bits::Align(height, sample_height) / sample_height;
+}
+
+// static
+size_t VideoFrame::Columns(size_t plane, VideoPixelFormat format, int width) {
+ DCHECK(IsValidPlane(plane, format));
+ const int sample_width = SampleSize(format, plane).width();
+ return base::bits::Align(width, sample_width) / sample_width;
+}
+
+bool VideoFrame::IsMappable() const {
+ return IsStorageTypeMappable(storage_type_);
+}
+
+int VideoFrame::row_bytes(size_t plane) const {
+ return RowBytes(plane, format(), coded_size().width());
+}
+
+int VideoFrame::rows(size_t plane) const {
+ return Rows(plane, format(), coded_size().height());
+}
+
+const uint8_t* VideoFrame::visible_data(size_t plane) const {
+ DCHECK(IsValidPlane(plane, format()));
+ DCHECK(IsMappable());
+
+ // Calculate an offset that is properly aligned for all planes.
+ const Size alignment = CommonAlignment(format());
+ const int offset_x = AlignDown(visible_rect_.x(), alignment.width());
+ const int offset_y = AlignDown(visible_rect_.y(), alignment.height());
+
+ const Size subsample = SampleSize(format(), plane);
+ DCHECK(offset_x % subsample.width() == 0);
+ DCHECK(offset_y % subsample.height() == 0);
+ return data(plane) +
+ stride(plane) * (offset_y / subsample.height()) + // Row offset.
+ BytesPerElement(format(), plane) * // Column offset.
+ (offset_x / subsample.width());
+}
+
+uint8_t* VideoFrame::visible_data(size_t plane) {
+ return const_cast<uint8_t*>(
+ static_cast<const VideoFrame*>(this)->visible_data(plane));
+}
+
+base::ReadOnlySharedMemoryRegion* VideoFrame::read_only_shared_memory_region()
+ const {
+ DCHECK_EQ(storage_type_, STORAGE_SHMEM);
+ DCHECK(read_only_shared_memory_region_ &&
+ read_only_shared_memory_region_->IsValid());
+ return read_only_shared_memory_region_;
+}
+
+base::UnsafeSharedMemoryRegion* VideoFrame::unsafe_shared_memory_region()
+ const {
+ DCHECK_EQ(storage_type_, STORAGE_SHMEM);
+ DCHECK(unsafe_shared_memory_region_ &&
+ unsafe_shared_memory_region_->IsValid());
+ return unsafe_shared_memory_region_;
+}
+
+base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
+ DCHECK_EQ(storage_type_, STORAGE_SHMEM);
+ DCHECK(shared_memory_handle_.IsValid());
+ return shared_memory_handle_;
+}
+
+size_t VideoFrame::shared_memory_offset() const {
+ DCHECK_EQ(storage_type_, STORAGE_SHMEM);
+ DCHECK((read_only_shared_memory_region_ &&
+ read_only_shared_memory_region_->IsValid()) ||
+ (unsafe_shared_memory_region_ &&
+ unsafe_shared_memory_region_->IsValid()) ||
+ shared_memory_handle_.IsValid());
+ return shared_memory_offset_;
+}
+
+const std::vector<base::ScopedFD>& VideoFrame::DmabufFds() const {
+ DCHECK_EQ(storage_type_, STORAGE_DMABUFS);
+
+ return dmabuf_fds_;
+}
+
+bool VideoFrame::HasDmaBufs() const {
+ return !dmabuf_fds_.empty();
+}
+
+void VideoFrame::AddReadOnlySharedMemoryRegion(
+ base::ReadOnlySharedMemoryRegion* region) {
+ storage_type_ = STORAGE_SHMEM;
+ DCHECK(SharedMemoryUninitialized());
+ DCHECK(region && region->IsValid());
+ read_only_shared_memory_region_ = region;
+}
+
+void VideoFrame::AddUnsafeSharedMemoryRegion(
+ base::UnsafeSharedMemoryRegion* region) {
+ storage_type_ = STORAGE_SHMEM;
+ DCHECK(SharedMemoryUninitialized());
+ DCHECK(region && region->IsValid());
+ unsafe_shared_memory_region_ = region;
+}
+
+void VideoFrame::AddSharedMemoryHandle(base::SharedMemoryHandle handle) {
+ storage_type_ = STORAGE_SHMEM;
+ DCHECK(SharedMemoryUninitialized());
+ shared_memory_handle_ = handle;
+}
+
+void VideoFrame::AddDestructionObserver(base::OnceClosure callback) {
+ DCHECK(!callback.is_null());
+ done_callbacks_.push_back(std::move(callback));
+}
+
+std::string VideoFrame::AsHumanReadableString() {
+ if (metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM))
+ return "end of stream";
+
+ std::ostringstream s;
+ s << ConfigToString(format(), storage_type_, coded_size(), visible_rect_,
+ natural_size_)
+ << " timestamp:" << timestamp_.InMicroseconds();
+ return s.str();
+}
+
+size_t VideoFrame::BitDepth() const {
+ return media::BitDepth(format());
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalStorage(
+ StorageType storage_type,
+ const VideoFrameLayout& layout,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ uint8_t* data,
+ size_t data_size,
+ base::TimeDelta timestamp,
+ base::ReadOnlySharedMemoryRegion* read_only_region,
+ base::UnsafeSharedMemoryRegion* unsafe_region,
+ base::SharedMemoryHandle handle,
+ size_t data_offset) {
+ DCHECK(IsStorageTypeMappable(storage_type));
+
+ if (!IsValidConfig(layout.format(), storage_type, layout.coded_size(),
+ visible_rect, natural_size)) {
+ DLOG(ERROR) << __func__ << " Invalid config."
+ << ConfigToString(layout.format(), storage_type,
+ layout.coded_size(), visible_rect,
+ natural_size);
+ return nullptr;
+ }
+
+ scoped_refptr<VideoFrame> frame = new VideoFrame(
+ layout, storage_type, visible_rect, natural_size, timestamp);
+
+ for (size_t i = 0; i < layout.planes().size(); ++i) {
+ frame->data_[i] = data + layout.planes()[i].offset;
+ }
+
+ if (storage_type == STORAGE_SHMEM) {
+ if (read_only_region || unsafe_region) {
+ DCHECK(!handle.IsValid());
+ DCHECK_NE(!!read_only_region, !!unsafe_region)
+ << "Expected exactly one read-only or unsafe region for "
+ << "STORAGE_SHMEM VideoFrame";
+ if (read_only_region) {
+ frame->read_only_shared_memory_region_ = read_only_region;
+ DCHECK(frame->read_only_shared_memory_region_->IsValid());
+ } else if (unsafe_region) {
+ frame->unsafe_shared_memory_region_ = unsafe_region;
+ DCHECK(frame->unsafe_shared_memory_region_->IsValid());
+ }
+ frame->shared_memory_offset_ = data_offset;
+ } else {
+ frame->AddSharedMemoryHandle(handle);
+ frame->shared_memory_offset_ = data_offset;
+ }
+ }
+
+ return frame;
+}
+
+VideoFrame::VideoFrame(const VideoFrameLayout& layout,
+ StorageType storage_type,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp)
+ : layout_(layout),
+ storage_type_(storage_type),
+ visible_rect_(Intersection(visible_rect, Rect(layout.coded_size()))),
+ natural_size_(natural_size),
+ shared_memory_offset_(0),
+ timestamp_(timestamp),
+ unique_id_(g_unique_id_generator.GetNext()) {
+ DCHECK(IsValidConfig(format(), storage_type, coded_size(), visible_rect_,
+ natural_size_));
+ DCHECK(visible_rect_ == visible_rect)
+ << "visible_rect " << visible_rect.ToString() << " exceeds coded_size "
+ << coded_size().ToString();
+ memset(&data_, 0, sizeof(data_));
+}
+
+VideoFrame::~VideoFrame() {
+ for (auto& callback : done_callbacks_)
+ std::move(callback).Run();
+}
+
+// static
+std::string VideoFrame::ConfigToString(const VideoPixelFormat format,
+ const StorageType storage_type,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size) {
+ return base::StringPrintf(
+ "format:%s storage_type:%s coded_size:%s visible_rect:%s natural_size:%s",
+ VideoPixelFormatToString(format).c_str(),
+ StorageTypeToString(storage_type).c_str(), coded_size.ToString().c_str(),
+ visible_rect.ToString().c_str(), natural_size.ToString().c_str());
+}
+
+// static
+bool VideoFrame::IsValidPlane(size_t plane, VideoPixelFormat format) {
+ DCHECK_LE(NumPlanes(format), static_cast<size_t>(kMaxPlanes));
+ return (plane < NumPlanes(format));
+}
+
+// static
+Size VideoFrame::DetermineAlignedSize(VideoPixelFormat format,
+ const Size& dimensions) {
+ const Size alignment = CommonAlignment(format);
+ const Size adjusted =
+ Size(base::bits::Align(dimensions.width(), alignment.width()),
+ base::bits::Align(dimensions.height(), alignment.height()));
+ DCHECK((adjusted.width() % alignment.width() == 0) &&
+ (adjusted.height() % alignment.height() == 0));
+ return adjusted;
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateFrameInternal(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp,
+ bool zero_initialize_memory) {
+  // Since we're creating a new frame (and allocating memory for it ourselves),
+  // we can pad the requested |coded_size| if it does not line up on sample
+  // boundaries. See discussion at http://crrev.com/1240833003
+ const Size new_coded_size = DetermineAlignedSize(format, coded_size);
+ auto layout = VideoFrameLayout::CreateWithStrides(
+ format, new_coded_size, ComputeStrides(format, coded_size));
+ if (!layout) {
+ DLOG(ERROR) << "Invalid layout.";
+ return nullptr;
+ }
+
+ return CreateFrameWithLayout(*layout, visible_rect, natural_size, timestamp,
+ zero_initialize_memory);
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateFrameWithLayout(
+ const VideoFrameLayout& layout,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp,
+ bool zero_initialize_memory) {
+ const StorageType storage = STORAGE_OWNED_MEMORY;
+ if (!IsValidConfig(layout.format(), storage, layout.coded_size(),
+ visible_rect, natural_size)) {
+ DLOG(ERROR) << __func__ << " Invalid config."
+ << ConfigToString(layout.format(), storage, layout.coded_size(),
+ visible_rect, natural_size);
+ return nullptr;
+ }
+
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ std::move(layout), storage, visible_rect, natural_size, timestamp));
+ frame->AllocateMemory(zero_initialize_memory);
+ return frame;
+}
+
+bool VideoFrame::SharedMemoryUninitialized() {
+ return !read_only_shared_memory_region_ && !unsafe_shared_memory_region_ &&
+ !shared_memory_handle_.IsValid();
+}
+
+// static
+bool VideoFrame::IsValidPlane(VideoPixelFormat format, size_t plane) {
+ DCHECK_LE(NumPlanes(format), static_cast<size_t>(kMaxPlanes));
+ return plane < NumPlanes(format);
+}
+
+// static
+Size VideoFrame::SampleSize(VideoPixelFormat format, size_t plane) {
+ DCHECK(IsValidPlane(format, plane));
+
+ switch (plane) {
+ case kYPlane: // and kARGBPlane:
+ case kAPlane:
+ return Size(1, 1);
+
+ case kUPlane: // and kUVPlane:
+ case kVPlane:
+ switch (format) {
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_Y16:
+ return Size(1, 1);
+
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV422P12:
+ return Size(2, 1);
+
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_P016LE:
+ return Size(2, 2);
+
+ case PIXEL_FORMAT_UNKNOWN:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ case PIXEL_FORMAT_BGRA:
+ break;
+ }
+ }
+ NOTREACHED();
+ return Size();
+}
+
+// static
+Size VideoFrame::CommonAlignment(VideoPixelFormat format) {
+ int max_sample_width = 0;
+ int max_sample_height = 0;
+ for (size_t plane = 0; plane < NumPlanes(format); ++plane) {
+ const Size sample_size = SampleSize(format, plane);
+ max_sample_width = std::max(max_sample_width, sample_size.width());
+ max_sample_height = std::max(max_sample_height, sample_size.height());
+ }
+ return Size(max_sample_width, max_sample_height);
+}
+
+void VideoFrame::AllocateMemory(bool zero_initialize_memory) {
+ DCHECK_EQ(storage_type_, STORAGE_OWNED_MEMORY);
+ static_assert(0 == kYPlane, "y plane data must be index 0");
+
+ std::vector<size_t> plane_size = CalculatePlaneSize();
+  // Accumulate into size_t explicitly; an unsigned int initial value would
+  // compute the sum in a narrower type on LP64 platforms.
+  const size_t total_buffer_size =
+      std::accumulate(plane_size.begin(), plane_size.end(), size_t{0});
+
+ uint8_t* data = reinterpret_cast<uint8_t*>(
+ base::AlignedAlloc(total_buffer_size, layout_.buffer_addr_align()));
+ if (zero_initialize_memory) {
+ memset(data, 0, total_buffer_size);
+ }
+ AddDestructionObserver(base::BindOnce(&base::AlignedFree, data));
+
+  // Note that if plane sizes are specified in the layout, the color planes'
+  // layout matches the buffers'. See CalculatePlaneSize() for details.
+ for (size_t plane = 0, offset = 0; plane < NumPlanes(format()); ++plane) {
+ data_[plane] = data + offset;
+ offset += plane_size[plane];
+ }
+}
+
+std::vector<size_t> VideoFrame::CalculatePlaneSize() const {
+  // We have two cases for plane size mapping:
+  // 1) If every plane's size is specified in the layout: use those sizes.
+  // 2) Otherwise (some plane size is zero): use the legacy formula below.
+
+ const size_t num_planes = NumPlanes(format());
+ const auto& planes = layout_.planes();
+ std::vector<size_t> plane_size(num_planes);
+ bool plane_size_assigned = true;
+ DCHECK_EQ(planes.size(), num_planes);
+ for (size_t i = 0; i < num_planes; ++i) {
+ plane_size[i] = planes[i].size;
+ plane_size_assigned &= plane_size[i] != 0;
+ }
+
+ if (plane_size_assigned)
+ return plane_size;
+
+ // Reset plane size.
+ std::fill(plane_size.begin(), plane_size.end(), 0u);
+ for (size_t plane = 0; plane < num_planes; ++plane) {
+ // These values were chosen to mirror ffmpeg's get_video_buffer().
+ // TODO(dalecurtis): This should be configurable; eventually ffmpeg wants
+ // us to use av_cpu_max_align(), but... for now, they just hard-code 32.
+ const size_t height =
+ base::bits::Align(rows(plane), kFrameAddressAlignment);
+ const size_t width = std::abs(stride(plane));
+ plane_size[plane] = width * height;
+ }
+
+ if (num_planes > 1) {
+ // The extra line of UV being allocated is because h264 chroma MC
+ // overreads by one line in some cases, see libavcodec/utils.c:
+ // avcodec_align_dimensions2() and libavcodec/x86/h264_chromamc.asm:
+ // put_h264_chroma_mc4_ssse3().
+ DCHECK(IsValidPlane(format(), kUPlane));
+ plane_size.back() += std::abs(stride(kUPlane)) + kFrameSizePadding;
+ }
+ return plane_size;
+}
+
+} // namespace media
diff --git a/accel/video_frame.h b/accel/video_frame.h
new file mode 100644
index 0000000..468fe9e
--- /dev/null
+++ b/accel/video_frame.h
@@ -0,0 +1,425 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 602bc8fa60fa
+// Note: only necessary functions are ported.
+// Note: some OS-specific defines have been removed
+// Note: WrapExternalSharedMemory() has been removed in Chromium, but is still
+// present here. Porting the code to a newer version of VideoFrame is not
+// useful, as this is only a temporary step and all usage of VideoFrame will
+// be removed.
+
+#ifndef VIDEO_FRAME_H_
+#define VIDEO_FRAME_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/optional.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "base/unguessable_token.h"
+#include "rect.h"
+#include "size.h"
+#include "video_frame_layout.h"
+#include "video_frame_metadata.h"
+#include "video_pixel_format.h"
+
+#include "base/files/scoped_file.h"
+
+namespace media {
+
+class VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
+ public:
+ enum {
+ kFrameSizeAlignment = 16,
+ kFrameSizePadding = 16,
+
+ kFrameAddressAlignment = VideoFrameLayout::kBufferAddressAlignment
+ };
+
+ enum {
+ kMaxPlanes = 4,
+
+ kYPlane = 0,
+ kARGBPlane = kYPlane,
+ kUPlane = 1,
+ kUVPlane = kUPlane,
+ kVPlane = 2,
+ kAPlane = 3,
+ };
+
+ // Defines the pixel storage type. Differentiates between directly accessible
+ // |data_| and pixels that are only indirectly accessible and not via mappable
+ // memory.
+ // Note that VideoFrames of any StorageType can also have Texture backing,
+ // with "classical" GPU Driver-only textures identified as STORAGE_OPAQUE.
+ enum StorageType {
+ STORAGE_UNKNOWN = 0,
+ STORAGE_OPAQUE = 1, // We don't know how VideoFrame's pixels are stored.
+ STORAGE_UNOWNED_MEMORY = 2, // External, non owned data pointers.
+ STORAGE_OWNED_MEMORY = 3, // VideoFrame has allocated its own data buffer.
+ STORAGE_SHMEM = 4, // Pixels are backed by Shared Memory.
+ // TODO(mcasas): Consider turning this type into STORAGE_NATIVE
+ // based on the idea of using this same enum value for both DMA
+ // buffers on Linux and CVPixelBuffers on Mac (which currently use
+ // STORAGE_UNOWNED_MEMORY) and handle it appropriately in all cases.
+ STORAGE_DMABUFS = 5, // Each plane is stored into a DmaBuf.
+ STORAGE_MOJO_SHARED_BUFFER = 6,
+ STORAGE_LAST = STORAGE_MOJO_SHARED_BUFFER,
+ };
+
+ // Call prior to CreateFrame to ensure validity of frame configuration. Called
+ // automatically by VideoDecoderConfig::IsValidConfig().
+ static bool IsValidConfig(VideoPixelFormat format,
+ StorageType storage_type,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size);
+
+ // Creates a new frame in system memory with given parameters. Buffers for the
+ // frame are allocated but not initialized. The caller must not make
+ // assumptions about the actual underlying size(s), but check the returned
+ // VideoFrame instead.
+ static scoped_refptr<VideoFrame> CreateFrame(VideoPixelFormat format,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp);
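+  //
+  // A minimal usage sketch (illustrative; the argument values are examples):
+  //   scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
+  //       PIXEL_FORMAT_I420, Size(320, 240), Rect(0, 0, 320, 240),
+  //       Size(320, 240), base::TimeDelta());
+  //   // Strides may exceed the requested width; query the returned frame
+  //   // rather than assuming tight packing.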
+
+ // Creates a new frame in system memory with given parameters. Buffers for the
+ // frame are allocated but not initialized. The caller should specify the
+ // physical buffer size and strides if needed in |layout| parameter.
+ static scoped_refptr<VideoFrame> CreateFrameWithLayout(
+ const VideoFrameLayout& layout,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp,
+ bool zero_initialize_memory);
+
+ // Legacy wrapping of old SharedMemoryHandle objects. Deprecated, use one of
+ // the shared memory region wrappers above instead.
+ static scoped_refptr<VideoFrame> WrapExternalSharedMemory(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ uint8_t* data,
+ size_t data_size,
+ base::SharedMemoryHandle handle,
+ size_t shared_memory_offset,
+ base::TimeDelta timestamp);
+
+ // Creates a frame which indicates end-of-stream.
+ static scoped_refptr<VideoFrame> CreateEOSFrame();
+
+ static size_t NumPlanes(VideoPixelFormat format);
+
+ // Returns the required allocation size for a (tightly packed) frame of the
+ // given coded size and format.
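+  // For example, a tightly packed 320x240 I420 frame takes
+  // 320*240 (Y) + 2 * 160*120 (U and V) = 115200 bytes.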
+ static size_t AllocationSize(VideoPixelFormat format, const Size& coded_size);
+
+ // Returns the plane Size (in bytes) for a plane of the given coded size
+ // and format.
+ static Size PlaneSize(VideoPixelFormat format,
+ size_t plane,
+ const Size& coded_size);
+
+ // Returns horizontal bits per pixel for given |plane| and |format|.
+ static int PlaneHorizontalBitsPerPixel(VideoPixelFormat format, size_t plane);
+
+ // Returns bits per pixel for given |plane| and |format|.
+ static int PlaneBitsPerPixel(VideoPixelFormat format, size_t plane);
+
+ // Returns the number of bytes per row for the given plane, format, and width.
+ // The width may be aligned to format requirements.
+ static size_t RowBytes(size_t plane, VideoPixelFormat format, int width);
+
+ // Returns the number of bytes per element for given |plane| and |format|.
+ static int BytesPerElement(VideoPixelFormat format, size_t plane);
+
+ // Calculates strides for each plane based on |format| and |coded_size|.
+ static std::vector<int32_t> ComputeStrides(VideoPixelFormat format,
+ const Size& coded_size);
+
+ // Returns the number of rows for the given plane, format, and height.
+ // The height may be aligned to format requirements.
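+  // e.g. Rows(kUPlane, PIXEL_FORMAT_I420, 240) returns 120, because the
+  // chroma planes are subsampled 2x2.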
+ static size_t Rows(size_t plane, VideoPixelFormat format, int height);
+
+ // Returns the number of columns for the given plane, format, and width.
+ // The width may be aligned to format requirements.
+ static size_t Columns(size_t plane, VideoPixelFormat format, int width);
+
+  // Returns true if frames of the given |storage_type| are mapped and
+  // accessible in the VideoFrame memory space.
+  static bool IsStorageTypeMappable(VideoFrame::StorageType storage_type);
+
+ // Returns true if |frame| is accessible and mapped in the VideoFrame memory
+ // space. If false, clients should refrain from accessing data(),
+ // visible_data() etc.
+ bool IsMappable() const;
+
+ const VideoFrameLayout& layout() const { return layout_; }
+
+ VideoPixelFormat format() const { return layout_.format(); }
+ StorageType storage_type() const { return storage_type_; }
+
+ // The full dimensions of the video frame data.
+ const Size& coded_size() const { return layout_.coded_size(); }
+  // A subsection of [0, 0, coded_size().width(), coded_size().height()]. This
+ // can be set to "soft-apply" a cropping. It determines the pointers into
+ // the data returned by visible_data().
+ const Rect& visible_rect() const { return visible_rect_; }
+ // Specifies that the |visible_rect| section of the frame is supposed to be
+ // scaled to this size when being presented. This can be used to represent
+ // anamorphic frames, or to "soft-apply" any custom scaling.
+ const Size& natural_size() const { return natural_size_; }
+
+ int stride(size_t plane) const {
+ DCHECK(IsValidPlane(plane, format()));
+ DCHECK_LT(plane, layout_.num_planes());
+ return layout_.planes()[plane].stride;
+ }
+
+ // Returns the number of bytes per row and number of rows for a given plane.
+ //
+ // As opposed to stride(), row_bytes() refers to the bytes representing
+ // frame data scanlines (coded_size.width() pixels, without stride padding).
+ int row_bytes(size_t plane) const;
+ int rows(size_t plane) const;
+
+ // Returns pointer to the buffer for a given plane, if this is an
+ // IsMappable() frame type. The memory is owned by VideoFrame object and must
+ // not be freed by the caller.
+ const uint8_t* data(size_t plane) const {
+ DCHECK(IsValidPlane(plane, format()));
+ DCHECK(IsMappable());
+ return data_[plane];
+ }
+ uint8_t* data(size_t plane) {
+ DCHECK(IsValidPlane(plane, format()));
+ DCHECK(IsMappable());
+ return data_[plane];
+ }
+
+ // Returns pointer to the data in the visible region of the frame, for
+  // IsMappable() storage types. The returned pointer is offset into the
+  // plane buffer by visible_rect().origin(). Memory is owned by the
+  // VideoFrame object and must not be freed by the caller.
+ const uint8_t* visible_data(size_t plane) const;
+ uint8_t* visible_data(size_t plane);
+
+ // Returns a pointer to the read-only shared-memory region, if present.
+ base::ReadOnlySharedMemoryRegion* read_only_shared_memory_region() const;
+
+ // Returns a pointer to the unsafe shared memory handle, if present.
+ base::UnsafeSharedMemoryRegion* unsafe_shared_memory_region() const;
+
+ // Returns the legacy SharedMemoryHandle, if present.
+ base::SharedMemoryHandle shared_memory_handle() const;
+
+ // Returns the offset into the shared memory where the frame data begins.
+ size_t shared_memory_offset() const;
+
+  // Returns a vector containing the backing DmaBufs for this frame. The number
+  // of returned DmaBufs will be equal to or less than the number of planes of
+  // the frame. If there are fewer, the last FD contains the remaining planes.
+ // Note that the returned FDs are still owned by the VideoFrame. This means
+ // that the caller shall not close them, or use them after the VideoFrame is
+ // destroyed. For such use cases, use media::DuplicateFDs() to obtain your
+ // own copy of the FDs.
+ const std::vector<base::ScopedFD>& DmabufFds() const;
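+  //
+  // A minimal iteration sketch (illustrative only; |frame| must outlive any
+  // use of the returned file descriptors):
+  //   for (const base::ScopedFD& fd : frame->DmabufFds()) {
+  //     // Pass fd.get() to the kernel API; do not close or take ownership.
+  //   }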
+
+ // Returns true if |frame| has DmaBufs.
+ bool HasDmaBufs() const;
+
+ void AddReadOnlySharedMemoryRegion(base::ReadOnlySharedMemoryRegion* region);
+ void AddUnsafeSharedMemoryRegion(base::UnsafeSharedMemoryRegion* region);
+
+ // Legacy, use one of the Add*SharedMemoryRegion methods above instead.
+ void AddSharedMemoryHandle(base::SharedMemoryHandle handle);
+
+ // Adds a callback to be run when the VideoFrame is about to be destroyed.
+ // The callback may be run from ANY THREAD, and so it is up to the client to
+ // ensure thread safety. Although read-only access to the members of this
+ // VideoFrame is permitted while the callback executes (including
+ // VideoFrameMetadata), clients should not assume the data pointers are
+ // valid.
+ void AddDestructionObserver(base::OnceClosure callback);
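+  //
+  // e.g., as used by VideoFrame::AllocateMemory() for owned memory:
+  //   AddDestructionObserver(base::BindOnce(&base::AlignedFree, data));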
+
+ // Returns a dictionary of optional metadata. This contains information
+ // associated with the frame that downstream clients might use for frame-level
+ // logging, quality/performance optimizations, signaling, etc.
+ //
+ // TODO(miu): Move some of the "extra" members of VideoFrame (below) into
+ // here as a later clean-up step.
+ const VideoFrameMetadata* metadata() const { return &metadata_; }
+ VideoFrameMetadata* metadata() { return &metadata_; }
+
+ // The time span between the current frame and the first frame of the stream.
+ // This is the media timestamp, and not the reference time.
+ // See VideoFrameMetadata::REFERENCE_TIME for details.
+ base::TimeDelta timestamp() const { return timestamp_; }
+ void set_timestamp(base::TimeDelta timestamp) { timestamp_ = timestamp; }
+
+ // Returns a human-readable string describing |*this|.
+ std::string AsHumanReadableString();
+
+ // Unique identifier for this video frame; generated at construction time and
+ // guaranteed to be unique within a single process.
+ int unique_id() const { return unique_id_; }
+
+ // Returns the number of bits per channel.
+ size_t BitDepth() const;
+
+ protected:
+ friend class base::RefCountedThreadSafe<VideoFrame>;
+
+ // Clients must use the static factory/wrapping methods to create a new frame.
+ // Derived classes should create their own factory/wrapping methods, and use
+ // this constructor to do basic initialization.
+ VideoFrame(const VideoFrameLayout& layout,
+ StorageType storage_type,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp);
+
+ virtual ~VideoFrame();
+
+ // Creates a summary of the configuration settings provided as parameters.
+ static std::string ConfigToString(const VideoPixelFormat format,
+ const VideoFrame::StorageType storage_type,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size);
+
+ // Returns true if |plane| is a valid plane index for the given |format|.
+ static bool IsValidPlane(size_t plane, VideoPixelFormat format);
+
+ // Returns |dimensions| adjusted to appropriate boundaries based on |format|.
+ static Size DetermineAlignedSize(VideoPixelFormat format,
+ const Size& dimensions);
+
+ void set_data(size_t plane, uint8_t* ptr) {
+ DCHECK(IsValidPlane(plane, format()));
+ DCHECK(ptr);
+ data_[plane] = ptr;
+ }
+
+ private:
+ static scoped_refptr<VideoFrame> WrapExternalStorage(
+ StorageType storage_type,
+ const VideoFrameLayout& layout,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ uint8_t* data,
+ size_t data_size,
+ base::TimeDelta timestamp,
+ base::ReadOnlySharedMemoryRegion* read_only_region,
+ base::UnsafeSharedMemoryRegion* unsafe_region,
+ base::SharedMemoryHandle handle,
+ size_t data_offset);
+
+ static scoped_refptr<VideoFrame> CreateFrameInternal(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ const Rect& visible_rect,
+ const Size& natural_size,
+ base::TimeDelta timestamp,
+ bool zero_initialize_memory);
+
+ bool SharedMemoryUninitialized();
+
+ // Returns true if |plane| is a valid plane index for the given |format|.
+ static bool IsValidPlane(VideoPixelFormat format, size_t plane);
+
+ // Returns the pixel size of each subsample for a given |plane| and |format|.
+ // E.g. 2x2 for the U-plane in PIXEL_FORMAT_I420.
+ static Size SampleSize(VideoPixelFormat format, size_t plane);
+
+ // Return the alignment for the whole frame, calculated as the max of the
+ // alignment for each individual plane.
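+  // e.g. Size(2, 2) for PIXEL_FORMAT_NV12, whose UV plane is 2x2 subsampled.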
+ static Size CommonAlignment(VideoPixelFormat format);
+
+ void AllocateMemory(bool zero_initialize_memory);
+
+  // Calculates plane sizes.
+  // It first considers the buffer sizes that the |layout_| object provides. If
+  // the layout's number of buffers equals the number of planes, and all buffer
+  // sizes are assigned (non-zero), it returns the buffers' sizes.
+  // Otherwise, it uses the first (num_buffers - 1) assigned buffers' sizes as
+  // plane sizes, and for the remaining unassigned planes computes their sizes
+  // from the format, coded size and stride of each plane.
+ std::vector<size_t> CalculatePlaneSize() const;
+
+  // VideoFrameLayout (includes format, coded_size, and strides).
+ const VideoFrameLayout layout_;
+
+ // Storage type for the different planes.
+ StorageType storage_type_; // TODO(mcasas): make const
+
+ // Width, height, and offsets of the visible portion of the video frame. Must
+ // be a subrect of |coded_size_|. Can be odd with respect to the sample
+ // boundaries, e.g. for formats with subsampled chroma.
+ const Rect visible_rect_;
+
+ // Width and height of the visible portion of the video frame
+ // (|visible_rect_.size()|) with aspect ratio taken into account.
+ const Size natural_size_;
+
+ // Array of data pointers to each plane.
+ // TODO(mcasas): we don't know on ctor if we own |data_| or not. Change
+ // to std::unique_ptr<uint8_t, AlignedFreeDeleter> after refactoring
+ // VideoFrame.
+ uint8_t* data_[kMaxPlanes];
+
+ // Shared memory handle and associated offset inside it, if this frame is a
+ // STORAGE_SHMEM one. Pointers to unowned shared memory regions. At most one
+ // of the memory regions will be set.
+ base::ReadOnlySharedMemoryRegion* read_only_shared_memory_region_ = nullptr;
+ base::UnsafeSharedMemoryRegion* unsafe_shared_memory_region_ = nullptr;
+
+ // Legacy handle.
+ base::SharedMemoryHandle shared_memory_handle_;
+
+ // If this is a STORAGE_SHMEM frame, the offset of the data within the shared
+ // memory.
+ size_t shared_memory_offset_;
+
+ class DmabufHolder;
+
+  // Dmabufs for the frame, used when storage is STORAGE_DMABUFS. Their number
+  // is equal to or less than the number of planes of the frame. If it is less,
+  // then the memory area represented by the last FD contains the remaining
+  // planes.
+ std::vector<base::ScopedFD> dmabuf_fds_;
+
+ std::vector<base::OnceClosure> done_callbacks_;
+
+ base::TimeDelta timestamp_;
+
+ VideoFrameMetadata metadata_;
+
+ // Generated at construction time.
+ const int unique_id_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
+};
+
+} // namespace media
+
+#endif // VIDEO_FRAME_H_
diff --git a/accel/video_frame_layout.cc b/accel/video_frame_layout.cc
new file mode 100644
index 0000000..3f38314
--- /dev/null
+++ b/accel/video_frame_layout.cc
@@ -0,0 +1,179 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 3b7ce92816e2
+
+#include "video_frame_layout.h"
+
+#include <string.h>
+#include <numeric>
+#include <sstream>
+
+#include "base/logging.h"
+
+namespace media {
+
+namespace {
+
+template <class T>
+std::string VectorToString(const std::vector<T>& vec) {
+ std::ostringstream result;
+ std::string delim;
+ result << "[";
+ for (auto& v : vec) {
+ result << delim;
+ result << v;
+ if (delim.size() == 0)
+ delim = ", ";
+ }
+ result << "]";
+ return result.str();
+}
+
+std::vector<ColorPlaneLayout> PlanesFromStrides(
+ const std::vector<int32_t> strides) {
+ std::vector<ColorPlaneLayout> planes(strides.size());
+ for (size_t i = 0; i < strides.size(); i++) {
+ planes[i].stride = strides[i];
+ }
+ return planes;
+}
+
+} // namespace
+
+// static
+size_t VideoFrameLayout::NumPlanes(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_BGRA:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ return 1;
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_P016LE:
+ return 2;
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ return 3;
+ case PIXEL_FORMAT_I420A:
+ return 4;
+ case PIXEL_FORMAT_UNKNOWN:
+ // Note: PIXEL_FORMAT_UNKNOWN is used for end-of-stream frame.
+ // Set its NumPlanes() to zero to avoid NOTREACHED().
+ return 0;
+ }
+ NOTREACHED() << "Unsupported video frame format: " << format;
+ return 0;
+}
+
+// static
+base::Optional<VideoFrameLayout> VideoFrameLayout::Create(
+ VideoPixelFormat format,
+ const Size& coded_size) {
+ return CreateWithStrides(format, coded_size,
+ std::vector<int32_t>(NumPlanes(format), 0));
+}
+
+// static
+base::Optional<VideoFrameLayout> VideoFrameLayout::CreateWithStrides(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<int32_t> strides) {
+ return CreateWithPlanes(format, coded_size, PlanesFromStrides(strides));
+}
+
+// static
+base::Optional<VideoFrameLayout> VideoFrameLayout::CreateWithPlanes(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<ColorPlaneLayout> planes,
+ size_t buffer_addr_align,
+ uint64_t modifier) {
+ // NOTE: Even if format is UNKNOWN, it is valid if coded_sizes is not Empty().
+ // TODO(crbug.com/896135): Return base::nullopt,
+ // if (format != PIXEL_FORMAT_UNKNOWN || !coded_sizes.IsEmpty())
+ // TODO(crbug.com/896135): Return base::nullopt,
+ // if (planes.size() != NumPlanes(format))
+ return VideoFrameLayout(format, coded_size, std::move(planes),
+ false /*is_multi_planar */, buffer_addr_align,
+ modifier);
+}
+
+base::Optional<VideoFrameLayout> VideoFrameLayout::CreateMultiPlanar(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<ColorPlaneLayout> planes,
+ size_t buffer_addr_align,
+ uint64_t modifier) {
+ // NOTE: Even if format is UNKNOWN, it is valid if coded_sizes is not Empty().
+ // TODO(crbug.com/896135): Return base::nullopt,
+ // if (format != PIXEL_FORMAT_UNKNOWN || !coded_sizes.IsEmpty())
+ // TODO(crbug.com/896135): Return base::nullopt,
+ // if (planes.size() != NumPlanes(format))
+ return VideoFrameLayout(format, coded_size, std::move(planes),
+ true /*is_multi_planar */, buffer_addr_align,
+ modifier);
+}
+
+VideoFrameLayout::VideoFrameLayout(VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<ColorPlaneLayout> planes,
+ bool is_multi_planar,
+ size_t buffer_addr_align,
+ uint64_t modifier)
+ : format_(format),
+ coded_size_(coded_size),
+ planes_(std::move(planes)),
+ is_multi_planar_(is_multi_planar),
+ buffer_addr_align_(buffer_addr_align),
+ modifier_(modifier) {}
+
+VideoFrameLayout::~VideoFrameLayout() = default;
+VideoFrameLayout::VideoFrameLayout(const VideoFrameLayout&) = default;
+VideoFrameLayout::VideoFrameLayout(VideoFrameLayout&&) = default;
+VideoFrameLayout& VideoFrameLayout::operator=(const VideoFrameLayout&) =
+ default;
+
+bool VideoFrameLayout::operator==(const VideoFrameLayout& rhs) const {
+ return format_ == rhs.format_ && coded_size_ == rhs.coded_size_ &&
+ planes_ == rhs.planes_ && is_multi_planar_ == rhs.is_multi_planar_ &&
+ buffer_addr_align_ == rhs.buffer_addr_align_ &&
+ modifier_ == rhs.modifier_;
+}
+
+bool VideoFrameLayout::operator!=(const VideoFrameLayout& rhs) const {
+ return !(*this == rhs);
+}
+
+std::ostream& operator<<(std::ostream& ostream,
+ const VideoFrameLayout& layout) {
+ ostream << "VideoFrameLayout(format: " << layout.format()
+ << ", coded_size: " << layout.coded_size().ToString()
+ << ", planes (stride, offset, size): "
+ << VectorToString(layout.planes())
+ << ", is_multi_planar: " << layout.is_multi_planar()
+ << ", buffer_addr_align: " << layout.buffer_addr_align()
+ << ", modifier: " << layout.modifier() << ")";
+ return ostream;
+}
+
+} // namespace media
diff --git a/accel/video_frame_layout.h b/accel/video_frame_layout.h
new file mode 100644
index 0000000..1713abd
--- /dev/null
+++ b/accel/video_frame_layout.h
@@ -0,0 +1,160 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 61df9350f6de
+// Note: Added kNoModifier define to remove dependency on native pixmap handle.
+
+#ifndef VIDEO_FRAME_LAYOUT_H_
+#define VIDEO_FRAME_LAYOUT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ostream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/optional.h"
+#include "color_plane_layout.h"
+#include "size.h"
+#include "video_pixel_format.h"
+
+namespace media {
+
+// Copied from native_pixmap_handle.h:
+// This is the same value as DRM_FORMAT_MOD_INVALID, which is not a valid
+// modifier. We use this to indicate that layout information
+// (tiling/compression), if any, will be communicated out of band.
+static constexpr uint64_t kNoModifier = 0x00ffffffffffffffULL;
+
+// A class that describes how a physical buffer is allocated for a video
+// frame. It stores the format, the coded size of the frame, and the sizes of
+// the physical buffers, which can be used to allocate the buffer(s) the
+// hardware expects.
+// It also stores the stride (bytes per line) and offset of each color plane
+// as a Plane. The stride is used to compute each color plane's size (note
+// that a buffer may contain multiple color planes), and the offset describes
+// the start point of each plane relative to the buffer's dmabuf fd.
+// Note that this class is copyable.
+class VideoFrameLayout {
+ public:
+ // Default alignment for buffers.
+ // Note: This value is dependent on what's used by ffmpeg, do not change
+ // without inspecting av_frame_get_buffer() first.
+ static constexpr size_t kBufferAddressAlignment = 32;
+
+ // Factory functions.
+ // |format| and |coded_size| must always be specified.
+  // |planes| info is optional, but useful to represent the layout of a
+  // video frame buffer correctly. When omitted, its information is all set
+  // to zero, so clients should take care not to rely on it.
+ // |buffer_addr_align| can be specified to request a specific buffer memory
+ // alignment.
+  // |modifier| is additional information about |format|. It will be some
+  // value other than kNoModifier when the underlying buffer format differs
+  // from the standard |format| due to tiling.
+ // The returned base::Optional will be base::nullopt if the configured values
+ // are invalid.
+
+  // Create a layout suitable for |format| at |coded_size|. The strides,
+  // offsets and sizes of all planes are set to 0, since that information
+  // cannot reliably be inferred from the arguments.
+ static base::Optional<VideoFrameLayout> Create(VideoPixelFormat format,
+ const Size& coded_size);
+
+ // Create a layout suitable for |format| at |coded_size|, with the |strides|
+ // for each plane specified. The offsets and size of all planes are set to 0.
+ // The size of |strides| must be equal to NumPlanes(|format|).
+ static base::Optional<VideoFrameLayout> CreateWithStrides(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<int32_t> strides);
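+  //
+  // A minimal sketch (illustrative; the stride values are example numbers,
+  // and NV12 has two planes, so two strides are required):
+  //   base::Optional<VideoFrameLayout> layout =
+  //       VideoFrameLayout::CreateWithStrides(PIXEL_FORMAT_NV12,
+  //                                           Size(640, 480), {640, 640});
+  //   if (layout)
+  //     DCHECK_EQ(2u, layout->num_planes());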
+
+ // Create a layout suitable for |format| at |coded_size|, with the |planes|
+ // fully provided.
+ // The size of |planes| must be equal to NumPlanes(|format|).
+ static base::Optional<VideoFrameLayout> CreateWithPlanes(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<ColorPlaneLayout> planes,
+ size_t buffer_addr_align = kBufferAddressAlignment,
+ uint64_t modifier = kNoModifier);
+
+ // This constructor should be called for situations where the frames using
+ // this format are backed by multiple physical buffers, instead of having each
+ // plane at different offsets of the same buffer. Currently only used by V4L2.
+ static base::Optional<VideoFrameLayout> CreateMultiPlanar(
+ VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<ColorPlaneLayout> planes,
+ size_t buffer_addr_align = kBufferAddressAlignment,
+ uint64_t modifier = kNoModifier);
+
+ VideoFrameLayout() = delete;
+ VideoFrameLayout(const VideoFrameLayout&);
+ VideoFrameLayout(VideoFrameLayout&&);
+ VideoFrameLayout& operator=(const VideoFrameLayout&);
+ ~VideoFrameLayout();
+
+ static size_t NumPlanes(VideoPixelFormat format);
+
+ VideoPixelFormat format() const { return format_; }
+ const Size& coded_size() const { return coded_size_; }
+
+  // Returns the number of planes. Note that num_planes >= num_buffers.
+ size_t num_planes() const { return planes_.size(); }
+
+ const std::vector<ColorPlaneLayout>& planes() const { return planes_; }
+
+ bool operator==(const VideoFrameLayout& rhs) const;
+ bool operator!=(const VideoFrameLayout& rhs) const;
+
+ // Return true when a format uses multiple backing buffers to store its
+ // planes.
+ bool is_multi_planar() const { return is_multi_planar_; }
+ // Returns the required memory alignment for buffers.
+ size_t buffer_addr_align() const { return buffer_addr_align_; }
+ // Return the modifier of buffers.
+ uint64_t modifier() const { return modifier_; }
+
+ private:
+ VideoFrameLayout(VideoPixelFormat format,
+ const Size& coded_size,
+ std::vector<ColorPlaneLayout> planes,
+ bool is_multi_planar,
+ size_t buffer_addr_align,
+ uint64_t modifier);
+
+ VideoPixelFormat format_;
+
+ // Width and height of the video frame in pixels. This must include pixel
+ // data for the whole image; i.e. for YUV formats with subsampled chroma
+ // planes, in the case that the visible portion of the image does not line up
+ // on a sample boundary, |coded_size_| must be rounded up appropriately and
+ // the pixel data provided for the odd pixels.
+ Size coded_size_;
+
+ // Layout property for each color planes, e.g. stride and buffer offset.
+ std::vector<ColorPlaneLayout> planes_;
+
+ // Set to true when a format uses multiple backing buffers to store its
+ // planes. Used by code for V4L2 API at the moment.
+ bool is_multi_planar_;
+
+ // Memory address alignment of the buffers. This is only relevant when
+ // allocating physical memory for the buffer, so it doesn't need to be
+ // serialized when frames are passed through Mojo.
+ size_t buffer_addr_align_;
+
+ // Modifier of buffers. The modifier is retrieved from GBM library. This
+ // can be a different value from kNoModifier only if the VideoFrame is created
+ // by using NativePixmap.
+ uint64_t modifier_;
+};
+
+// Outputs VideoFrameLayout to stream.
+std::ostream& operator<<(std::ostream& ostream, const VideoFrameLayout& layout);
+
+} // namespace media
+
+#endif // VIDEO_FRAME_LAYOUT_H_
diff --git a/accel/video_frame_metadata.cc b/accel/video_frame_metadata.cc
new file mode 100644
index 0000000..c6c6601
--- /dev/null
+++ b/accel/video_frame_metadata.cc
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 19cd1babcaff
+// Note: only functions related to END_OF_STREAM are ported.
+
+#include "video_frame_metadata.h"
+
+#include <stdint.h>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace media {
+
+namespace {
+
+// Map enum key to internal std::string key used by base::DictionaryValue.
+inline std::string ToInternalKey(VideoFrameMetadata::Key key) {
+ DCHECK_LT(key, VideoFrameMetadata::NUM_KEYS);
+ return base::NumberToString(static_cast<int>(key));
+}
+
+} // namespace
+
+VideoFrameMetadata::VideoFrameMetadata() = default;
+
+VideoFrameMetadata::~VideoFrameMetadata() = default;
+
+bool VideoFrameMetadata::HasKey(Key key) const {
+ return dictionary_.HasKey(ToInternalKey(key));
+}
+
+void VideoFrameMetadata::SetBoolean(Key key, bool value) {
+ dictionary_.SetKey(ToInternalKey(key), base::Value(value));
+}
+
+bool VideoFrameMetadata::GetBoolean(Key key, bool* value) const {
+ DCHECK(value);
+ return dictionary_.GetBooleanWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+bool VideoFrameMetadata::IsTrue(Key key) const {
+ bool value = false;
+ return GetBoolean(key, &value) && value;
+}
+
+} // namespace media
diff --git a/accel/video_frame_metadata.h b/accel/video_frame_metadata.h
new file mode 100644
index 0000000..a6ac6e5
--- /dev/null
+++ b/accel/video_frame_metadata.h
@@ -0,0 +1,195 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 514536171be3
+// Note: only functions related to END_OF_STREAM are ported.
+
+#ifndef VIDEO_FRAME_METADATA_H_
+#define VIDEO_FRAME_METADATA_H_
+
+#include "base/values.h"
+
+namespace media {
+
+class VideoFrameMetadata {
+ public:
+ enum Key {
+ // Sources of VideoFrames use this marker to indicate that the associated
+ // VideoFrame can be overlayed, case in which its contents do not need to be
+ // further composited but displayed directly. Use Get/SetBoolean() for
+ // this Key.
+ ALLOW_OVERLAY,
+
+ // Video capture begin/end timestamps. Consumers can use these values for
+ // dynamic optimizations, logging stats, etc. Use Get/SetTimeTicks() for
+ // these keys.
+ CAPTURE_BEGIN_TIME,
+ CAPTURE_END_TIME,
+
+ // A counter that is increased by the producer of video frames each time
+ // it pushes out a new frame. By looking for gaps in this counter, clients
+ // can determine whether or not any frames have been dropped on the way from
+ // the producer between two consecutively received frames. Note that the
+ // counter may start at arbitrary values, so the absolute value of it has no
+ // meaning.
+ CAPTURE_COUNTER,
+
+ // A base::ListValue containing 4 integers representing x, y, width, height
+ // of the rectangular region of the frame that has changed since the frame
+ // with the directly preceding CAPTURE_COUNTER. If that frame was not
+ // received, typically because it was dropped during transport from the
+ // producer, clients must assume that the entire frame has changed.
+ // The rectangle is relative to the full frame data, i.e. [0, 0,
+ // coded_size().width(), coded_size().height()]. It does not have to be
+ // fully contained within visible_rect().
+ CAPTURE_UPDATE_RECT,
+
+ // Indicates that this frame must be copied to a new texture before use,
+ // rather than being used directly. Specifically this is required for
+ // WebView because of limitations about sharing surface textures between GL
+ // contexts.
+ COPY_REQUIRED,
+
+ // Indicates if the current frame is the End of its current Stream. Use
+ // Get/SetBoolean() for this Key.
+ END_OF_STREAM,
+
+ // The estimated duration of this frame (i.e., the amount of time between
+ // the media timestamp of this frame and the next). Note that this is not
+ // the same information provided by FRAME_RATE as the FRAME_DURATION can
+ // vary unpredictably for every frame. Consumers can use this to optimize
+ // playback scheduling, make encoding quality decisions, and/or compute
+ // frame-level resource utilization stats. Use Get/SetTimeDelta() for this
+ // key.
+ FRAME_DURATION,
+
+ // Represents either the fixed frame rate, or the maximum frame rate to
+ // expect from a variable-rate source. This value generally remains the
+ // same for all frames in the same session. Use Get/SetDouble() for this
+ // key.
+ FRAME_RATE,
+
+ // This is a boolean that signals that the video capture engine detects
+ // interactive content. One possible optimization that this signal can help
+ // with is remote content: adjusting end-to-end latency down to help the
+ // user better coordinate their actions.
+ //
+ // Use Get/SetBoolean for this key.
+ INTERACTIVE_CONTENT,
+
+ // This field represents the local time at which either: 1) the frame was
+ // generated, if it was done so locally; or 2) the targeted play-out time
+ // of the frame, if it was generated from a remote source. This value is NOT
+ // a high-resolution timestamp, and so it should not be used as a
+ // presentation time; but, instead, it should be used for buffering playback
+ // and for A/V synchronization purposes.
+ // Use Get/SetTimeTicks() for this key.
+ REFERENCE_TIME,
+
+ // A feedback signal that indicates the fraction of the tolerable maximum
+ // amount of resources that were utilized to process this frame. A producer
+ // can check this value after-the-fact, usually via a VideoFrame destruction
+ // observer, to determine whether the consumer can handle more or less data
+ // volume, and achieve the right quality versus performance trade-off.
+ //
+ // Use Get/SetDouble() for this key. Values are interpreted as follows:
+ // Less than 0.0 is meaningless and should be ignored. 1.0 indicates a
+ // maximum sustainable utilization. Greater than 1.0 indicates the consumer
+ // is likely to stall or drop frames if the data volume is not reduced.
+ //
+ // Example: In a system that encodes and transmits video frames over the
+ // network, this value can be used to indicate whether sufficient CPU
+ // is available for encoding and/or sufficient bandwidth is available for
+ // transmission over the network. The maximum of the two utilization
+ // measurements would be used as feedback.
+ RESOURCE_UTILIZATION,
+
+ // Sources of VideoFrames use this marker to indicate that an instance of
+ // VideoFrameExternalResources produced from the associated video frame
+ // should use read lock fences.
+ READ_LOCK_FENCES_ENABLED,
+
+ // Indicates that the frame is rotated.
+ ROTATION,
+
+ // Android only: if set, then this frame is not suitable for overlay, even
+ // if ALLOW_OVERLAY is set. However, it allows us to process the overlay
+ // to see if it would have been promoted, if it were backed by a SurfaceView
+ // instead. This lets us figure out when SurfaceViews are appropriate.
+ TEXTURE_OWNER,
+
+ // Android only: if set, then this frame's resource would like to be
+ // notified about its promotability to an overlay.
+ WANTS_PROMOTION_HINT,
+
+ // This video frame comes from protected content.
+ PROTECTED_VIDEO,
+
+ // This video frame is protected by hardware. This option is valid only if
+ // PROTECTED_VIDEO is also set to true.
+ HW_PROTECTED,
+
+ // An UnguessableToken that identifies VideoOverlayFactory that created
+ // this VideoFrame. It's used by Cast to help with video hole punch.
+ // Use Get/SetUnguessableToken() for this key.
+ OVERLAY_PLANE_ID,
+
+ // Whether this frame was decoded in a power efficient way.
+ POWER_EFFICIENT,
+
+ // CompositorFrameMetadata variables associated with this frame. Used for
+ // remote debugging.
+ // Use Get/SetDouble() for these keys.
+ // TODO(crbug.com/832220): Use a customized dictionary value instead of
+ // using these keys directly.
+ DEVICE_SCALE_FACTOR,
+ PAGE_SCALE_FACTOR,
+ ROOT_SCROLL_OFFSET_X,
+ ROOT_SCROLL_OFFSET_Y,
+ TOP_CONTROLS_VISIBLE_HEIGHT,
+
+ // If present, this field represents the local time at which the VideoFrame
+ // was decoded from whichever format it was encoded in. Sometimes only
+ // DECODE_END_TIME will be present. Use Get/SetTimeTicks() for this key.
+ DECODE_BEGIN_TIME,
+ DECODE_END_TIME,
+
+ // If present, this field represents the elapsed time from the submission of
+ // the encoded packet with the same PTS as this frame to the decoder until
+ // the decoded frame was ready for presentation. Stored as base::TimeDelta.
+ PROCESSING_TIME,
+
+ // The RTP timestamp associated with this video frame. Stored as a double
+ // since base::DictionaryValue doesn't have a uint32_t type.
+ //
+ // https://w3c.github.io/webrtc-pc/#dom-rtcrtpcontributingsource
+ RTP_TIMESTAMP,
+
+ NUM_KEYS
+ };
+
+ VideoFrameMetadata();
+ ~VideoFrameMetadata();
+
+ bool HasKey(Key key) const;
+
+ void Clear() { dictionary_.Clear(); }
+
+ // Setters. Overwrites existing value, if present.
+ void SetBoolean(Key key, bool value);
+
+ // Getters. Returns true if |key| is present, and its value has been set.
+ bool GetBoolean(Key key, bool* value) const WARN_UNUSED_RESULT;
+
+ // Convenience method that returns true if |key| exists and is set to true.
+ bool IsTrue(Key key) const WARN_UNUSED_RESULT;
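+  //
+  // A minimal end-of-stream sketch (illustrative only):
+  //   frame->metadata()->SetBoolean(VideoFrameMetadata::END_OF_STREAM, true);
+  //   bool eos = frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM);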
+
+ private:
+ base::DictionaryValue dictionary_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameMetadata);
+};
+
+} // namespace media
+
+#endif // VIDEO_FRAME_METADATA_H_
diff --git a/accel/video_pixel_format.cc b/accel/video_pixel_format.cc
new file mode 100644
index 0000000..20b8537
--- /dev/null
+++ b/accel/video_pixel_format.cc
@@ -0,0 +1,134 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 3b7ce92816e2
+// Note: only necessary functions are ported from video_types.cc
+
+#include "video_pixel_format.h"
+
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+
+namespace media {
+
+std::string VideoPixelFormatToString(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_UNKNOWN:
+ return "PIXEL_FORMAT_UNKNOWN";
+ case PIXEL_FORMAT_I420:
+ return "PIXEL_FORMAT_I420";
+ case PIXEL_FORMAT_YV12:
+ return "PIXEL_FORMAT_YV12";
+ case PIXEL_FORMAT_I422:
+ return "PIXEL_FORMAT_I422";
+ case PIXEL_FORMAT_I420A:
+ return "PIXEL_FORMAT_I420A";
+ case PIXEL_FORMAT_I444:
+ return "PIXEL_FORMAT_I444";
+ case PIXEL_FORMAT_NV12:
+ return "PIXEL_FORMAT_NV12";
+ case PIXEL_FORMAT_NV21:
+ return "PIXEL_FORMAT_NV21";
+ case PIXEL_FORMAT_YUY2:
+ return "PIXEL_FORMAT_YUY2";
+ case PIXEL_FORMAT_ARGB:
+ return "PIXEL_FORMAT_ARGB";
+ case PIXEL_FORMAT_XRGB:
+ return "PIXEL_FORMAT_XRGB";
+ case PIXEL_FORMAT_RGB24:
+ return "PIXEL_FORMAT_RGB24";
+ case PIXEL_FORMAT_MJPEG:
+ return "PIXEL_FORMAT_MJPEG";
+ case PIXEL_FORMAT_YUV420P9:
+ return "PIXEL_FORMAT_YUV420P9";
+ case PIXEL_FORMAT_YUV420P10:
+ return "PIXEL_FORMAT_YUV420P10";
+ case PIXEL_FORMAT_YUV422P9:
+ return "PIXEL_FORMAT_YUV422P9";
+ case PIXEL_FORMAT_YUV422P10:
+ return "PIXEL_FORMAT_YUV422P10";
+ case PIXEL_FORMAT_YUV444P9:
+ return "PIXEL_FORMAT_YUV444P9";
+ case PIXEL_FORMAT_YUV444P10:
+ return "PIXEL_FORMAT_YUV444P10";
+ case PIXEL_FORMAT_YUV420P12:
+ return "PIXEL_FORMAT_YUV420P12";
+ case PIXEL_FORMAT_YUV422P12:
+ return "PIXEL_FORMAT_YUV422P12";
+ case PIXEL_FORMAT_YUV444P12:
+ return "PIXEL_FORMAT_YUV444P12";
+ case PIXEL_FORMAT_Y16:
+ return "PIXEL_FORMAT_Y16";
+ case PIXEL_FORMAT_ABGR:
+ return "PIXEL_FORMAT_ABGR";
+ case PIXEL_FORMAT_XBGR:
+ return "PIXEL_FORMAT_XBGR";
+ case PIXEL_FORMAT_P016LE:
+ return "PIXEL_FORMAT_P016LE";
+ case PIXEL_FORMAT_XR30:
+ return "PIXEL_FORMAT_XR30";
+ case PIXEL_FORMAT_XB30:
+ return "PIXEL_FORMAT_XB30";
+ case PIXEL_FORMAT_BGRA:
+ return "PIXEL_FORMAT_BGRA";
+ }
+ NOTREACHED() << "Invalid VideoPixelFormat provided: " << format;
+ return "";
+}
+
+std::string FourccToString(uint32_t fourcc) {
+ std::string result = "0000";
+ for (size_t i = 0; i < 4; ++i, fourcc >>= 8) {
+ const char c = static_cast<char>(fourcc & 0xFF);
+ if (c <= 0x1f || c >= 0x7f)
+ return base::StringPrintf("0x%x", fourcc);
+ result[i] = c;
+ }
+ return result;
+}
+
+size_t BitDepth(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_UNKNOWN:
+ NOTREACHED();
+ FALLTHROUGH;
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_BGRA:
+ return 8;
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV444P9:
+ return 9;
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_XR30:
+ case PIXEL_FORMAT_XB30:
+ return 10;
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ return 12;
+ case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_P016LE:
+ return 16;
+ }
+ NOTREACHED();
+ return 0;
+}
+
+} // namespace media
+
diff --git a/accel/video_pixel_format.h b/accel/video_pixel_format.h
new file mode 100644
index 0000000..8d80731
--- /dev/null
+++ b/accel/video_pixel_format.h
@@ -0,0 +1,95 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 3b7ce92816e2
+// Note: only necessary functions are ported from video_types.h
+
+#ifndef VIDEO_PIXEL_FORMAT_H_
+#define VIDEO_PIXEL_FORMAT_H_
+
+#include <string>
+
+namespace media {
+
+// Pixel formats roughly based on FOURCC labels, see:
+// http://www.fourcc.org/rgb.php and http://www.fourcc.org/yuv.php
+// Logged to UMA, so never reuse values. Leave gaps if necessary.
+// Ordered as planar, semi-planar, YUV-packed, and RGB formats.
+// When a VideoFrame is backed by native textures, VideoPixelFormat describes
+// how those textures should be sampled and combined to produce the final
+// pixels.
+enum VideoPixelFormat {
+ PIXEL_FORMAT_UNKNOWN = 0, // Unknown or unspecified format value.
+ PIXEL_FORMAT_I420 =
+ 1, // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
+
+ // Note: Chrome does not actually support YVU compositing, so you probably
+ // don't actually want to use this. See http://crbug.com/784627.
+ PIXEL_FORMAT_YV12 = 2, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
+
+ PIXEL_FORMAT_I422 = 3, // 16bpp YUV planar 1x1 Y, 2x1 UV samples.
+ PIXEL_FORMAT_I420A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 UV, 1x1 A samples.
+ PIXEL_FORMAT_I444 = 5, // 24bpp YUV planar, no subsampling.
+ PIXEL_FORMAT_NV12 =
+ 6, // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
+ PIXEL_FORMAT_NV21 =
+ 7, // 12bpp with Y plane followed by a 2x2 interleaved VU plane.
+ /* PIXEL_FORMAT_UYVY = 8, Deprecated */
+ PIXEL_FORMAT_YUY2 =
+ 9, // 16bpp interleaved 1x1 Y, 2x1 U, 1x1 Y, 2x1 V samples.
+ PIXEL_FORMAT_ARGB = 10, // 32bpp BGRA (byte-order), 1 plane.
+ PIXEL_FORMAT_XRGB = 11, // 24bpp BGRX (byte-order), 1 plane.
+ PIXEL_FORMAT_RGB24 = 12, // 24bpp BGR (byte-order), 1 plane.
+
+ /* PIXEL_FORMAT_RGB32 = 13, Deprecated */
+ PIXEL_FORMAT_MJPEG = 14, // MJPEG compressed.
+ /* PIXEL_FORMAT_MT21 = 15, Deprecated */
+
+ // The P* in the formats below designates the number of bits per pixel
+ // component. I.e. P9 is 9-bits per pixel component, P10 is 10-bits per pixel
+ // component, etc.
+ PIXEL_FORMAT_YUV420P9 = 16,
+ PIXEL_FORMAT_YUV420P10 = 17,
+ PIXEL_FORMAT_YUV422P9 = 18,
+ PIXEL_FORMAT_YUV422P10 = 19,
+ PIXEL_FORMAT_YUV444P9 = 20,
+ PIXEL_FORMAT_YUV444P10 = 21,
+ PIXEL_FORMAT_YUV420P12 = 22,
+ PIXEL_FORMAT_YUV422P12 = 23,
+ PIXEL_FORMAT_YUV444P12 = 24,
+
+ /* PIXEL_FORMAT_Y8 = 25, Deprecated */
+ PIXEL_FORMAT_Y16 = 26, // single 16bpp plane.
+
+ PIXEL_FORMAT_ABGR = 27, // 32bpp RGBA (byte-order), 1 plane.
+ PIXEL_FORMAT_XBGR = 28, // 24bpp RGBX (byte-order), 1 plane.
+
+ PIXEL_FORMAT_P016LE = 29, // 24bpp NV12, 16 bits per channel
+
+ PIXEL_FORMAT_XR30 =
+ 30, // 32bpp BGRX, 10 bits per channel, 2 bits ignored, 1 plane
+ PIXEL_FORMAT_XB30 =
+ 31, // 32bpp RGBX, 10 bits per channel, 2 bits ignored, 1 plane
+
+ PIXEL_FORMAT_BGRA = 32, // 32bpp ARGB (byte-order), 1 plane.
+
+ // Please update UMA histogram enumeration when adding new formats here.
+ PIXEL_FORMAT_MAX =
+ PIXEL_FORMAT_BGRA, // Must always be equal to largest entry logged.
+};
+
+// Returns the name of a Format as a string.
+std::string VideoPixelFormatToString(VideoPixelFormat format);
+
+// Returns human readable fourcc string.
+// If any of the four characters is non-printable, it outputs
+// "0x<32-bit integer in hex>", e.g. FourccToString(0x66616b00) returns
+// "0x66616b00".
+std::string FourccToString(uint32_t fourcc);
+
+// Returns the number of significant bits per channel.
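+// e.g. BitDepth(PIXEL_FORMAT_I420) returns 8 and
+// BitDepth(PIXEL_FORMAT_YUV420P10) returns 10.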
+size_t BitDepth(VideoPixelFormat format);
+
+} // namespace media
+
+#endif // VIDEO_PIXEL_FORMAT_H_
diff --git a/accel/vp8_bool_decoder.cc b/accel/vp8_bool_decoder.cc
new file mode 100644
index 0000000..68f06d0
--- /dev/null
+++ b/accel/vp8_bool_decoder.cc
@@ -0,0 +1,209 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Note: ported from Chromium commit head: 9b6f429
+
+/*
+ * Copyright (c) 2010, The WebM Project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google, nor the WebM Project, nor the names
+ * of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file is modified from the dboolhuff.{c,h} from the WebM's libvpx
+// project. (http://www.webmproject.org/code)
+// It is used to decode bits from a vp8 stream.
+
+#include <limits.h>
+
+#include <algorithm>
+
+#include "base/numerics/safe_conversions.h"
+#include "vp8_bool_decoder.h"
+
+namespace media {
+
+#define VP8_BD_VALUE_BIT \
+ static_cast<int>(sizeof(Vp8BoolDecoder::value_) * CHAR_BIT)
+
+static const int kDefaultProbability = 0x80; // 0x80 / 256 = 0.5
+
+// This is meant to be a large, positive constant that can still be efficiently
+// loaded as an immediate (on platforms like ARM, for example). Even relatively
+// modest values like 100 would work fine.
+#define VP8_LOTS_OF_BITS (0x40000000)
+
+// The number of leading zeros.
+static const unsigned char kVp8Norm[256] = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+Vp8BoolDecoder::Vp8BoolDecoder()
+ : user_buffer_(NULL),
+ user_buffer_end_(NULL),
+ value_(0),
+ count_(-8),
+ range_(255) {
+}
+
+bool Vp8BoolDecoder::Initialize(const uint8_t* data, size_t size) {
+ if (data == NULL || size == 0)
+ return false;
+ user_buffer_start_ = data;
+ user_buffer_ = data;
+ user_buffer_end_ = data + size;
+ value_ = 0;
+ count_ = -8;
+ range_ = 255;
+ return true;
+}
+
+void Vp8BoolDecoder::FillDecoder() {
+ DCHECK(user_buffer_ != NULL);
+ int shift = VP8_BD_VALUE_BIT - CHAR_BIT - (count_ + CHAR_BIT);
+ size_t bytes_left = user_buffer_end_ - user_buffer_;
+ size_t bits_left = bytes_left * CHAR_BIT;
+ int x = shift + CHAR_BIT - static_cast<int>(bits_left);
+ int loop_end = 0;
+
+ if (x >= 0) {
+ count_ += VP8_LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left) {
+ while (shift >= loop_end) {
+ count_ += CHAR_BIT;
+ value_ |= static_cast<size_t>(*user_buffer_) << shift;
+ ++user_buffer_;
+ shift -= CHAR_BIT;
+ }
+ }
+}
+
+int Vp8BoolDecoder::ReadBit(int probability) {
+ int bit = 0;
+ size_t split = 1 + (((range_ - 1) * probability) >> 8);
+ if (count_ < 0)
+ FillDecoder();
+ size_t bigsplit = static_cast<size_t>(split) << (VP8_BD_VALUE_BIT - 8);
+
+ if (value_ >= bigsplit) {
+ range_ -= split;
+ value_ -= bigsplit;
+ bit = 1;
+ } else {
+ range_ = split;
+ }
+
+ size_t shift = kVp8Norm[range_];
+ range_ <<= shift;
+ value_ <<= shift;
+ count_ -= static_cast<int>(shift);
+
+ DCHECK_EQ(1U, (range_ >> 7)); // In the range [128, 255].
+
+ return bit;
+}
+
+bool Vp8BoolDecoder::ReadLiteral(size_t num_bits, int* out) {
+ DCHECK_LE(num_bits, sizeof(int) * CHAR_BIT);
+ *out = 0;
+ for (; num_bits > 0; --num_bits)
+ *out = (*out << 1) | ReadBit(kDefaultProbability);
+ return !OutOfBuffer();
+}
+
+bool Vp8BoolDecoder::ReadBool(bool* out, uint8_t probability) {
+ *out = !!ReadBit(probability);
+ return !OutOfBuffer();
+}
+
+bool Vp8BoolDecoder::ReadBool(bool* out) {
+ return ReadBool(out, kDefaultProbability);
+}
+
+bool Vp8BoolDecoder::ReadLiteralWithSign(size_t num_bits, int* out) {
+ ReadLiteral(num_bits, out);
+ // Read sign.
+ if (ReadBit(kDefaultProbability))
+ *out = -*out;
+ return !OutOfBuffer();
+}
+
+size_t Vp8BoolDecoder::BitOffset() {
+ int bit_count = count_ + 8;
+ if (bit_count > VP8_BD_VALUE_BIT)
+ // Capped at 0 to ignore buffer underrun.
+ bit_count = std::max(0, bit_count - VP8_LOTS_OF_BITS);
+ return (user_buffer_ - user_buffer_start_) * 8 - bit_count;
+}
+
+uint8_t Vp8BoolDecoder::GetRange() {
+ return base::checked_cast<uint8_t>(range_);
+}
+
+uint8_t Vp8BoolDecoder::GetBottom() {
+ if (count_ < 0)
+ FillDecoder();
+ return static_cast<uint8_t>(value_ >> (VP8_BD_VALUE_BIT - 8));
+}
+
+inline bool Vp8BoolDecoder::OutOfBuffer() {
+ // Check if we have reached the end of the buffer.
+ //
+ // Variable |count_| stores the number of bits in the |value_| buffer, minus
+ // 8. The top byte is part of the algorithm and the remainder is buffered to
+ // be shifted into it. So, if |count_| == 8, the top 16 bits of |value_| are
+ // occupied, 8 for the algorithm and 8 in the buffer.
+ //
+ // When reading a byte from the user's buffer, |count_| is filled with 8 and
+ // one byte is filled into the |value_| buffer. When we reach the end of the
+ // data, |count_| is additionally filled with VP8_LOTS_OF_BITS. So when
+ // |count_| == VP8_LOTS_OF_BITS - 1, the user's data has been exhausted.
+ return (count_ > VP8_BD_VALUE_BIT) && (count_ < VP8_LOTS_OF_BITS);
+}
+
+} // namespace media
diff --git a/accel/vp8_bool_decoder.h b/accel/vp8_bool_decoder.h
new file mode 100644
index 0000000..4b8e3a5
--- /dev/null
+++ b/accel/vp8_bool_decoder.h
@@ -0,0 +1,135 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Note: ported from Chromium commit head: 1323b9c
+
+/*
+ * Copyright (c) 2010, The WebM Project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google, nor the WebM Project, nor the names
+ * of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file is modified from the dboolhuff.{c,h} from the WebM's libvpx
+// project. (http://www.webmproject.org/code)
+// It is used to decode bits from a vp8 stream.
+
+#ifndef VP8_BOOL_DECODER_H_
+#define VP8_BOOL_DECODER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace media {
+
+// A class to decode the VP8's boolean entropy coded stream. It's a variant of
+// arithmetic coding. See RFC 6386 - Chapter 7. Boolean Entropy Decoder.
+class Vp8BoolDecoder {
+ public:
+ Vp8BoolDecoder();
+
+ // Initializes the decoder to start decoding |data|, |size| being size
+ // of |data| in bytes. Returns false if |data| is NULL or empty.
+ bool Initialize(const uint8_t* data, size_t size);
+
+  // Reads a boolean from the coded stream. Returns false if it has reached
+  // the end of |data| and failed to read the boolean. The probability of
+  // |out| being true is |probability| / 256; e.g., when |probability| is
+  // 0x80, the chance is 1/2 (i.e., 0x80 / 256).
+ bool ReadBool(bool* out, uint8_t probability);
+
+ // Reads a boolean from the coded stream with the default probability 1/2.
+ // Returns false if it has reached the end of |data| and failed to read the
+ // boolean.
+ bool ReadBool(bool* out);
+
+ // Reads a "literal", that is, a "num_bits"-wide unsigned value whose bits
+ // come high- to low-order, with each bit encoded at probability 1/2.
+ // Returns false if it has reached the end of |data| and failed to read the
+ // literal.
+ bool ReadLiteral(size_t num_bits, int* out);
+
+  // Reads a literal with sign from the coded stream. This is similar to
+  // ReadLiteral(): it first reads a "num_bits"-wide unsigned value, and then
+  // reads an extra bit as the sign of the literal. Returns false if it has
+  // reached the end of |data| and failed to read the literal or the sign.
+ // This is different from the "read_signed_literal(d, n)" defined in RFC 6386.
+ bool ReadLiteralWithSign(size_t num_bits, int* out);
+
+ // The following methods are used to get the internal states of the decoder.
+
+ // Returns the bit offset to the current top bit of the coded stream. It is
+ // also the number of bits that have been written in the corresponding
+ // encoding state. More specifically, we have the following constraint:
+ // w + (bottom * S) <= v < w + (bottom + range) * S,
+ // where "w" is for the bits already written,
+ // "v" is for the possible values of the coded number.
+ // "S" is the scale for the current bit position,
+ // i.e., S = pow(2, -(n + 8)), where "n" is the bit number of "w".
+ // BitOffset() returns the bit count of "w", i.e., "n".
+ size_t BitOffset();
+
+ // Gets the "bottom" of the current coded value. See BitOffset() for
+ // more details.
+ uint8_t GetBottom();
+
+ // Gets the "range" of the current coded value. See BitOffset() for
+ // more details.
+ uint8_t GetRange();
+
+ private:
+ // Reads the next bit from the coded stream. The probability of the bit to
+ // be one is |probability| / 256.
+ int ReadBit(int probability);
+
+ // Fills more bits from |user_buffer_| to |value_|. We shall keep at least 8
+ // bits of the current |user_buffer_| in |value_|.
+ void FillDecoder();
+
+  // Returns true iff we have run out of bits.
+ bool OutOfBuffer();
+
+ const uint8_t* user_buffer_;
+ const uint8_t* user_buffer_start_;
+ const uint8_t* user_buffer_end_;
+ size_t value_;
+ int count_;
+ size_t range_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8BoolDecoder);
+};
+
+} // namespace media
+
+#endif // VP8_BOOL_DECODER_H_
diff --git a/accel/vp8_decoder.cc b/accel/vp8_decoder.cc
new file mode 100644
index 0000000..cd2d58b
--- /dev/null
+++ b/accel/vp8_decoder.cc
@@ -0,0 +1,197 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 7441087
+
+#include "vp8_decoder.h"
+
+namespace media {
+
+VP8Decoder::VP8Accelerator::VP8Accelerator() {}
+
+VP8Decoder::VP8Accelerator::~VP8Accelerator() {}
+
+VP8Decoder::VP8Decoder(VP8Accelerator* accelerator)
+ : state_(kNeedStreamMetadata),
+ curr_frame_start_(nullptr),
+ frame_size_(0),
+ accelerator_(accelerator) {
+ DCHECK(accelerator_);
+}
+
+VP8Decoder::~VP8Decoder() {}
+
+bool VP8Decoder::Flush() {
+ DVLOG(2) << "Decoder flush";
+ Reset();
+ return true;
+}
+
+void VP8Decoder::SetStream(const uint8_t* ptr, size_t size) {
+ DCHECK(ptr);
+ DCHECK(size);
+
+ curr_frame_start_ = ptr;
+ frame_size_ = size;
+ DVLOG(4) << "New input stream at: " << (void*)ptr << " size: " << size;
+}
+
+void VP8Decoder::Reset() {
+ curr_pic_ = nullptr;
+ curr_frame_hdr_ = nullptr;
+ curr_frame_start_ = nullptr;
+ frame_size_ = 0;
+
+ last_frame_ = nullptr;
+ golden_frame_ = nullptr;
+ alt_frame_ = nullptr;
+
+ if (state_ == kDecoding)
+ state_ = kAfterReset;
+}
+
+VP8Decoder::DecodeResult VP8Decoder::Decode() {
+ if (!curr_frame_start_ || frame_size_ == 0)
+ return kRanOutOfStreamData;
+
+ if (!curr_frame_hdr_) {
+ curr_frame_hdr_.reset(new Vp8FrameHeader());
+ if (!parser_.ParseFrame(curr_frame_start_, frame_size_,
+ curr_frame_hdr_.get())) {
+ DVLOG(1) << "Error during decode";
+ state_ = kError;
+ return kDecodeError;
+ }
+ }
+
+ if (curr_frame_hdr_->IsKeyframe()) {
+ Size new_pic_size(curr_frame_hdr_->width, curr_frame_hdr_->height);
+ if (new_pic_size.IsEmpty())
+ return kDecodeError;
+
+ if (new_pic_size != pic_size_) {
+ DVLOG(2) << "New resolution: " << new_pic_size.ToString();
+ pic_size_ = new_pic_size;
+
+ DCHECK(!curr_pic_);
+ last_frame_ = nullptr;
+ golden_frame_ = nullptr;
+ alt_frame_ = nullptr;
+
+ return kAllocateNewSurfaces;
+ }
+
+ state_ = kDecoding;
+ } else {
+ if (state_ != kDecoding) {
+ // Need a resume point.
+ curr_frame_hdr_.reset();
+ return kRanOutOfStreamData;
+ }
+ }
+
+ curr_pic_ = accelerator_->CreateVP8Picture();
+ if (!curr_pic_)
+ return kRanOutOfSurfaces;
+
+ curr_pic_->visible_rect = Rect(pic_size_);
+ if (!DecodeAndOutputCurrentFrame())
+ return kDecodeError;
+
+ return kRanOutOfStreamData;
+}
+
+void VP8Decoder::RefreshReferenceFrames() {
+ if (curr_frame_hdr_->IsKeyframe()) {
+ last_frame_ = curr_pic_;
+ golden_frame_ = curr_pic_;
+ alt_frame_ = curr_pic_;
+ return;
+ }
+
+ // Save current golden since we overwrite it here,
+ // but may have to use it to update alt below.
+ scoped_refptr<VP8Picture> curr_golden = golden_frame_;
+
+ if (curr_frame_hdr_->refresh_golden_frame) {
+ golden_frame_ = curr_pic_;
+ } else {
+ switch (curr_frame_hdr_->copy_buffer_to_golden) {
+ case Vp8FrameHeader::COPY_LAST_TO_GOLDEN:
+ DCHECK(last_frame_);
+ golden_frame_ = last_frame_;
+ break;
+
+ case Vp8FrameHeader::COPY_ALT_TO_GOLDEN:
+ DCHECK(alt_frame_);
+ golden_frame_ = alt_frame_;
+ break;
+ }
+ }
+
+ if (curr_frame_hdr_->refresh_alternate_frame) {
+ alt_frame_ = curr_pic_;
+ } else {
+ switch (curr_frame_hdr_->copy_buffer_to_alternate) {
+ case Vp8FrameHeader::COPY_LAST_TO_ALT:
+ DCHECK(last_frame_);
+ alt_frame_ = last_frame_;
+ break;
+
+ case Vp8FrameHeader::COPY_GOLDEN_TO_ALT:
+ DCHECK(curr_golden);
+ alt_frame_ = curr_golden;
+ break;
+ }
+ }
+
+ if (curr_frame_hdr_->refresh_last)
+ last_frame_ = curr_pic_;
+}
+
+bool VP8Decoder::DecodeAndOutputCurrentFrame() {
+ DCHECK(!pic_size_.IsEmpty());
+ DCHECK(curr_pic_);
+ DCHECK(curr_frame_hdr_);
+
+ if (curr_frame_hdr_->IsKeyframe()) {
+ horizontal_scale_ = curr_frame_hdr_->horizontal_scale;
+ vertical_scale_ = curr_frame_hdr_->vertical_scale;
+ } else {
+ // Populate fields from decoder state instead.
+ curr_frame_hdr_->width = pic_size_.width();
+ curr_frame_hdr_->height = pic_size_.height();
+ curr_frame_hdr_->horizontal_scale = horizontal_scale_;
+ curr_frame_hdr_->vertical_scale = vertical_scale_;
+ }
+
+ if (!accelerator_->SubmitDecode(curr_pic_, curr_frame_hdr_.get(), last_frame_,
+ golden_frame_, alt_frame_))
+ return false;
+
+ if (curr_frame_hdr_->show_frame)
+ if (!accelerator_->OutputPicture(curr_pic_))
+ return false;
+
+ RefreshReferenceFrames();
+
+ curr_pic_ = nullptr;
+ curr_frame_hdr_ = nullptr;
+ curr_frame_start_ = nullptr;
+ frame_size_ = 0;
+ return true;
+}
+
+Size VP8Decoder::GetPicSize() const {
+ return pic_size_;
+}
+
+size_t VP8Decoder::GetRequiredNumOfPictures() const {
+ const size_t kVP8NumFramesActive = 4;
+ // TODO(johnylin): see if we could get rid of kMaxVideoFrames.
+ const size_t kMaxVideoFrames = 4;
+ const size_t kPicsInPipeline = kMaxVideoFrames + 2;
+ return kVP8NumFramesActive + kPicsInPipeline;
+}
+
+} // namespace media
diff --git a/accel/vp8_decoder.h b/accel/vp8_decoder.h
new file mode 100644
index 0000000..58211f6
--- /dev/null
+++ b/accel/vp8_decoder.h
@@ -0,0 +1,114 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
+
+#ifndef VP8_DECODER_H_
+#define VP8_DECODER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "accelerated_video_decoder.h"
+#include "size.h"
+#include "vp8_parser.h"
+#include "vp8_picture.h"
+
+namespace media {
+
+// Clients of this class are expected to pass a raw VP8 stream and to provide
+// an implementation of VP8Accelerator for offloading the final steps of the
+// decoding process.
+//
+// This class must be created, called and destroyed on a single thread, and
+// does nothing internally on any other thread.
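+//
+// A typical driving loop looks roughly like this (illustrative sketch only;
+// |accelerator|, |frame_data| and surface management belong to the caller):
+//   VP8Decoder decoder(accelerator);
+//   decoder.SetStream(frame_data, frame_size);
+//   for (;;) {
+//     switch (decoder.Decode()) {
+//       case VP8Decoder::kAllocateNewSurfaces:
+//         // Provision surfaces for decoder.GetPicSize() and
+//         // decoder.GetRequiredNumOfPictures(), then call Decode() again.
+//         break;
+//       case VP8Decoder::kRanOutOfSurfaces:
+//         // Wait for pictures to be returned, then call Decode() again.
+//         break;
+//       case VP8Decoder::kRanOutOfStreamData:
+//         // Feed the next frame via SetStream() when it is available.
+//         return;
+//       case VP8Decoder::kDecodeError:
+//         // Unrecoverable error; Reset() or tear down.
+//         return;
+//     }
+//   }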
+class VP8Decoder : public AcceleratedVideoDecoder {
+ public:
+ class VP8Accelerator {
+ public:
+ VP8Accelerator();
+ virtual ~VP8Accelerator();
+
+    // Creates a new VP8Picture that the decoder client can use for decoding
+    // and pass back to this accelerator for decoding or reference.
+    // When the picture is no longer needed by the decoder, it will simply
+    // drop its reference to it, and it may do so at any time.
+    // Note that this may return nullptr if the accelerator is not able to
+    // provide any new pictures at the given time. The decoder is expected to
+    // handle this situation as normal and return from Decode() with
+    // kRanOutOfSurfaces.
+ virtual scoped_refptr<VP8Picture> CreateVP8Picture() = 0;
+
+    // Submits a decode of |pic|, taking as arguments |frame_hdr| with parsed
+    // VP8 frame header information for the current frame, and using
+    // |last_frame|, |golden_frame| and |alt_frame| as references, as per the
+    // VP8 specification. Note that this runs the decode in hardware.
+    // Returns true if successful.
+ virtual bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
+ const Vp8FrameHeader* frame_hdr,
+ const scoped_refptr<VP8Picture>& last_frame,
+ const scoped_refptr<VP8Picture>& golden_frame,
+ const scoped_refptr<VP8Picture>& alt_frame) = 0;
+
+    // Schedules output (display) of |pic|. Note that returning from this
+    // method does not mean that |pic| has already been output (displayed),
+    // but it guarantees that all pictures will be output in the same order
+    // as this method was called for them. The decoder may drop its reference
+    // to |pic| after calling this method.
+    // Returns true if successful.
+ virtual bool OutputPicture(const scoped_refptr<VP8Picture>& pic) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VP8Accelerator);
+ };
+
+ VP8Decoder(VP8Accelerator* accelerator);
+ ~VP8Decoder() override;
+
+ // AcceleratedVideoDecoder implementation.
+ bool Flush() override WARN_UNUSED_RESULT;
+ void Reset() override;
+ void SetStream(const uint8_t* ptr, size_t size) override;
+ DecodeResult Decode() override WARN_UNUSED_RESULT;
+ Size GetPicSize() const override;
+ size_t GetRequiredNumOfPictures() const override;
+
+ private:
+ bool DecodeAndOutputCurrentFrame();
+ void RefreshReferenceFrames();
+
+ enum State {
+ kNeedStreamMetadata, // After initialization, need a keyframe.
+ kDecoding, // Ready to decode from any point.
+ kAfterReset, // After Reset(), need a resume point.
+ kError, // Error in decode, can't continue.
+ };
+
+ State state_;
+
+ Vp8Parser parser_;
+
+ std::unique_ptr<Vp8FrameHeader> curr_frame_hdr_;
+ scoped_refptr<VP8Picture> curr_pic_;
+ scoped_refptr<VP8Picture> last_frame_;
+ scoped_refptr<VP8Picture> golden_frame_;
+ scoped_refptr<VP8Picture> alt_frame_;
+
+ const uint8_t* curr_frame_start_;
+ size_t frame_size_;
+
+ Size pic_size_;
+ int horizontal_scale_;
+ int vertical_scale_;
+
+ VP8Accelerator* accelerator_;
+
+ DISALLOW_COPY_AND_ASSIGN(VP8Decoder);
+};
+
+} // namespace media
+
+#endif // VP8_DECODER_H_
diff --git a/accel/vp8_parser.cc b/accel/vp8_parser.cc
new file mode 100644
index 0000000..5367545
--- /dev/null
+++ b/accel/vp8_parser.cc
@@ -0,0 +1,877 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP8 raw stream parser,
+// as defined in RFC 6386.
+// Note: ported from Chromium commit head: 2de6929
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "vp8_parser.h"
+
+namespace media {
+
+#define ERROR_RETURN(what) \
+ do { \
+ DVLOG(1) << "Error while trying to read " #what; \
+ return false; \
+ } while (0)
+
+#define BD_READ_BOOL_OR_RETURN(out) \
+ do { \
+ if (!bd_.ReadBool(out)) \
+ ERROR_RETURN(out); \
+ } while (0)
+
+#define BD_READ_BOOL_WITH_PROB_OR_RETURN(out, prob) \
+ do { \
+ if (!bd_.ReadBool(out, prob)) \
+ ERROR_RETURN(out); \
+ } while (0)
+
+#define BD_READ_UNSIGNED_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!bd_.ReadLiteral(num_bits, &_out)) \
+ ERROR_RETURN(out); \
+ *out = _out; \
+ } while (0)
+
+#define BD_READ_SIGNED_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!bd_.ReadLiteralWithSign(num_bits, &_out)) \
+ ERROR_RETURN(out); \
+ *out = _out; \
+ } while (0)
+
+Vp8FrameHeader::Vp8FrameHeader() {
+ memset(this, 0, sizeof(*this));
+}
+
+Vp8Parser::Vp8Parser() : stream_(nullptr), bytes_left_(0) {
+}
+
+Vp8Parser::~Vp8Parser() = default;
+
+bool Vp8Parser::ParseFrame(const uint8_t* ptr,
+ size_t frame_size,
+ Vp8FrameHeader* fhdr) {
+ stream_ = ptr;
+ bytes_left_ = frame_size;
+
+ memset(fhdr, 0, sizeof(*fhdr));
+ fhdr->data = stream_;
+ fhdr->frame_size = bytes_left_;
+
+ if (!ParseFrameTag(fhdr))
+ return false;
+
+ fhdr->first_part_offset = stream_ - fhdr->data;
+
+ if (!ParseFrameHeader(fhdr))
+ return false;
+
+ if (!ParsePartitions(fhdr))
+ return false;
+
+ DVLOG(4) << "Frame parsed, start: " << static_cast<const void*>(ptr)
+ << ", size: " << frame_size
+ << ", offsets: to first_part=" << fhdr->first_part_offset
+ << ", to macroblock data (in bits)=" << fhdr->macroblock_bit_offset;
+
+ return true;
+}
+
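+// For example, GetBitsAt(0b101101, 1, 3) extracts 3 bits starting at bit 1
+// (counting from the LSB) and yields 0b110 (illustrative).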
+static inline uint32_t GetBitsAt(uint32_t data, size_t shift, size_t num_bits) {
+ return ((data >> shift) & ((1 << num_bits) - 1));
+}
+
+bool Vp8Parser::ParseFrameTag(Vp8FrameHeader* fhdr) {
+ const size_t kFrameTagSize = 3;
+ if (bytes_left_ < kFrameTagSize)
+ return false;
+
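+  // The 3-byte frame tag is assembled little-endian; counting from the LSB
+  // it holds 1 bit of key_frame, 2 bits of version, 1 bit of is_experimental
+  // (the top bit of RFC 6386's 3-bit version field), 1 bit of show_frame and
+  // 19 bits of first_part_size.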
+ uint32_t frame_tag = (stream_[2] << 16) | (stream_[1] << 8) | stream_[0];
+ fhdr->key_frame =
+ static_cast<Vp8FrameHeader::FrameType>(GetBitsAt(frame_tag, 0, 1));
+ fhdr->version = GetBitsAt(frame_tag, 1, 2);
+ fhdr->is_experimental = !!GetBitsAt(frame_tag, 3, 1);
+  fhdr->show_frame = !!GetBitsAt(frame_tag, 4, 1);
+ fhdr->first_part_size = GetBitsAt(frame_tag, 5, 19);
+
+ stream_ += kFrameTagSize;
+ bytes_left_ -= kFrameTagSize;
+
+ if (fhdr->IsKeyframe()) {
+ const size_t kKeyframeTagSize = 7;
+ if (bytes_left_ < kKeyframeTagSize)
+ return false;
+
+ static const uint8_t kVp8StartCode[] = {0x9d, 0x01, 0x2a};
+ if (memcmp(stream_, kVp8StartCode, sizeof(kVp8StartCode)) != 0)
+ return false;
+
+ stream_ += sizeof(kVp8StartCode);
+ bytes_left_ -= sizeof(kVp8StartCode);
+
+ uint16_t data = (stream_[1] << 8) | stream_[0];
+ fhdr->width = data & 0x3fff;
+ fhdr->horizontal_scale = data >> 14;
+
+ data = (stream_[3] << 8) | stream_[2];
+ fhdr->height = data & 0x3fff;
+ fhdr->vertical_scale = data >> 14;
+
+ stream_ += 4;
+ bytes_left_ -= 4;
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseFrameHeader(Vp8FrameHeader* fhdr) {
+ if (!bd_.Initialize(stream_, bytes_left_))
+ return false;
+
+ bool keyframe = fhdr->IsKeyframe();
+ if (keyframe) {
+ unsigned int data;
+ BD_READ_UNSIGNED_OR_RETURN(1, &data); // color_space
+ BD_READ_UNSIGNED_OR_RETURN(1, &data); // clamping_type
+ }
+
+ if (!ParseSegmentationHeader(keyframe))
+ return false;
+
+ fhdr->segmentation_hdr = curr_segmentation_hdr_;
+
+ if (!ParseLoopFilterHeader(keyframe))
+ return false;
+
+ fhdr->loopfilter_hdr = curr_loopfilter_hdr_;
+
+ int log2_nbr_of_dct_partitions;
+ BD_READ_UNSIGNED_OR_RETURN(2, &log2_nbr_of_dct_partitions);
+ fhdr->num_of_dct_partitions = static_cast<size_t>(1)
+ << log2_nbr_of_dct_partitions;
+
+ if (!ParseQuantizationHeader(&fhdr->quantization_hdr))
+ return false;
+
+ if (keyframe) {
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_entropy_probs);
+ } else {
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_golden_frame);
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_alternate_frame);
+
+ int refresh_mode;
+ if (!fhdr->refresh_golden_frame) {
+ BD_READ_UNSIGNED_OR_RETURN(2, &refresh_mode);
+ fhdr->copy_buffer_to_golden =
+ static_cast<Vp8FrameHeader::GoldenRefreshMode>(refresh_mode);
+ }
+
+ if (!fhdr->refresh_alternate_frame) {
+ BD_READ_UNSIGNED_OR_RETURN(2, &refresh_mode);
+ fhdr->copy_buffer_to_alternate =
+ static_cast<Vp8FrameHeader::AltRefreshMode>(refresh_mode);
+ }
+
+ BD_READ_UNSIGNED_OR_RETURN(1, &fhdr->sign_bias_golden);
+ BD_READ_UNSIGNED_OR_RETURN(1, &fhdr->sign_bias_alternate);
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_entropy_probs);
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_last);
+ }
+
+ if (keyframe)
+ ResetProbs();
+
+ fhdr->entropy_hdr = curr_entropy_hdr_;
+
+ if (!ParseTokenProbs(&fhdr->entropy_hdr, fhdr->refresh_entropy_probs))
+ return false;
+
+ BD_READ_BOOL_OR_RETURN(&fhdr->mb_no_skip_coeff);
+ if (fhdr->mb_no_skip_coeff)
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_skip_false);
+
+ if (!keyframe) {
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_intra);
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_last);
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_gf);
+ }
+
+ if (!ParseIntraProbs(&fhdr->entropy_hdr, fhdr->refresh_entropy_probs,
+ keyframe))
+ return false;
+
+ if (!keyframe) {
+ if (!ParseMVProbs(&fhdr->entropy_hdr, fhdr->refresh_entropy_probs))
+ return false;
+ }
+
+ fhdr->macroblock_bit_offset = bd_.BitOffset();
+ fhdr->bool_dec_range = bd_.GetRange();
+ fhdr->bool_dec_value = bd_.GetBottom();
+ fhdr->bool_dec_count = 7 - (bd_.BitOffset() + 7) % 8;
+
+ return true;
+}
+
+bool Vp8Parser::ParseSegmentationHeader(bool keyframe) {
+ Vp8SegmentationHeader* shdr = &curr_segmentation_hdr_;
+
+ if (keyframe)
+ memset(shdr, 0, sizeof(*shdr));
+
+ BD_READ_BOOL_OR_RETURN(&shdr->segmentation_enabled);
+ if (!shdr->segmentation_enabled)
+ return true;
+
+ BD_READ_BOOL_OR_RETURN(&shdr->update_mb_segmentation_map);
+ BD_READ_BOOL_OR_RETURN(&shdr->update_segment_feature_data);
+ if (shdr->update_segment_feature_data) {
+ int mode;
+ BD_READ_UNSIGNED_OR_RETURN(1, &mode);
+ shdr->segment_feature_mode =
+ static_cast<Vp8SegmentationHeader::SegmentFeatureMode>(mode);
+
+ for (size_t i = 0; i < kMaxMBSegments; ++i) {
+ bool quantizer_update;
+ BD_READ_BOOL_OR_RETURN(&quantizer_update);
+ if (quantizer_update)
+ BD_READ_SIGNED_OR_RETURN(7, &shdr->quantizer_update_value[i]);
+ else
+ shdr->quantizer_update_value[i] = 0;
+ }
+
+ for (size_t i = 0; i < kMaxMBSegments; ++i) {
+ bool loop_filter_update;
+ BD_READ_BOOL_OR_RETURN(&loop_filter_update);
+ if (loop_filter_update)
+ BD_READ_SIGNED_OR_RETURN(6, &shdr->lf_update_value[i]);
+ else
+ shdr->lf_update_value[i] = 0;
+ }
+ }
+
+ if (shdr->update_mb_segmentation_map) {
+ for (size_t i = 0; i < kNumMBFeatureTreeProbs; ++i) {
+ bool segment_prob_update;
+ BD_READ_BOOL_OR_RETURN(&segment_prob_update);
+ if (segment_prob_update)
+ BD_READ_UNSIGNED_OR_RETURN(8, &shdr->segment_prob[i]);
+ else
+ shdr->segment_prob[i] = Vp8SegmentationHeader::kDefaultSegmentProb;
+ }
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseLoopFilterHeader(bool keyframe) {
+ Vp8LoopFilterHeader* lfhdr = &curr_loopfilter_hdr_;
+
+ if (keyframe)
+ memset(lfhdr, 0, sizeof(*lfhdr));
+
+ int type;
+ BD_READ_UNSIGNED_OR_RETURN(1, &type);
+ lfhdr->type = static_cast<Vp8LoopFilterHeader::Type>(type);
+ BD_READ_UNSIGNED_OR_RETURN(6, &lfhdr->level);
+ BD_READ_UNSIGNED_OR_RETURN(3, &lfhdr->sharpness_level);
+ BD_READ_BOOL_OR_RETURN(&lfhdr->loop_filter_adj_enable);
+
+ if (lfhdr->loop_filter_adj_enable) {
+ BD_READ_BOOL_OR_RETURN(&lfhdr->mode_ref_lf_delta_update);
+ if (lfhdr->mode_ref_lf_delta_update) {
+ for (size_t i = 0; i < kNumBlockContexts; ++i) {
+ bool ref_frame_delta_update_flag;
+ BD_READ_BOOL_OR_RETURN(&ref_frame_delta_update_flag);
+ if (ref_frame_delta_update_flag)
+ BD_READ_SIGNED_OR_RETURN(6, &lfhdr->ref_frame_delta[i]);
+ }
+
+ for (size_t i = 0; i < kNumBlockContexts; ++i) {
+ bool mb_mode_delta_update_flag;
+ BD_READ_BOOL_OR_RETURN(&mb_mode_delta_update_flag);
+ if (mb_mode_delta_update_flag)
+ BD_READ_SIGNED_OR_RETURN(6, &lfhdr->mb_mode_delta[i]);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseQuantizationHeader(Vp8QuantizationHeader* qhdr) {
+ // If any of the delta values is not present, the delta should be zero.
+ memset(qhdr, 0, sizeof(*qhdr));
+
+ BD_READ_UNSIGNED_OR_RETURN(7, &qhdr->y_ac_qi);
+
+ bool delta_present;
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->y_dc_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->y2_dc_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->y2_ac_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->uv_dc_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->uv_ac_delta);
+
+ return true;
+}
+
+// See spec for details on these values.
+const uint8_t kCoeffUpdateProbs[kNumBlockTypes][kNumCoeffBands]
+ [kNumPrevCoeffContexts][kNumEntropyNodes] = {
+ {
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255},
+ {249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255},
+ {234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255},
+ {250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ {
+ {
+ {217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255},
+ {234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255},
+ },
+ {
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ {
+ {
+ {186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255},
+ {234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255},
+ {251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ {
+ {
+ {248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255},
+ {248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255},
+ {248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+};
+
+const uint8_t kKeyframeYModeProbs[kNumYModeProbs] = {145, 156, 163, 128};
+const uint8_t kKeyframeUVModeProbs[kNumUVModeProbs] = {142, 114, 183};
+
+const uint8_t kDefaultYModeProbs[kNumYModeProbs] = {112, 86, 140, 37};
+const uint8_t kDefaultUVModeProbs[kNumUVModeProbs] = {162, 101, 204};
+
+const uint8_t kDefaultCoeffProbs[kNumBlockTypes][kNumCoeffBands]
+ [kNumPrevCoeffContexts][kNumEntropyNodes] = {
+ {
+ {
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ {253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128},
+ {189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128},
+ {106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128},
+ {181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128},
+ { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128},
+ },
+ {
+ { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128},
+ {184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128},
+ { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128},
+ {170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128},
+ { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128},
+ {207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128},
+ {102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128},
+ {177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128},
+ { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ }
+ },
+ {
+ {
+ {198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62},
+ {131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1},
+ { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128},
+ },
+ {
+ { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128},
+ {184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128},
+ { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128},
+ },
+ {
+ { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128},
+ { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128},
+ { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128},
+ },
+ {
+ { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128},
+ {109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128},
+ { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128},
+ { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128},
+ { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128},
+ },
+ {
+ { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128},
+ {124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128},
+ { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128},
+ },
+ {
+ { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128},
+ {121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128},
+ { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128},
+ },
+ {
+ { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128},
+ {203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128},
+ {137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128},
+ }
+ },
+ {
+ {
+ {253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128},
+ {175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128},
+ { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128},
+ },
+ {
+ { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128},
+ {239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128},
+ {155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128},
+ {201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128},
+ { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128},
+ },
+ {
+ { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128},
+ {223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128},
+ {141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128},
+ {190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128},
+ {149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128},
+ {213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128},
+ { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ }
+ },
+ {
+ {
+ {202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255},
+ {126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128},
+ { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128},
+ },
+ {
+ { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128},
+ {166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128},
+ { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128},
+ },
+ {
+ { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128},
+ {124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128},
+ { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128},
+ },
+ {
+ { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128},
+ {149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128},
+ { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128}
+ },
+ {
+ { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128},
+ {123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128},
+ { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128},
+ },
+ {
+ { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128},
+ {168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128},
+ { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128},
+ {141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128},
+ { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128},
+ },
+ {
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ },
+};
+
+const uint8_t kMVUpdateProbs[kNumMVContexts][kNumMVProbs] =
+{
+ {
+ 237, 246, 253, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 250, 250, 252, 254, 254,
+ },
+ {
+ 231, 243, 245, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 251, 251, 254, 254, 254,
+ },
+};
+
+const uint8_t kDefaultMVProbs[kNumMVContexts][kNumMVProbs] = {
+ {
+ 162, 128, 225, 146, 172, 147, 214, 39, 156,
+ 128, 129, 132, 75, 145, 178, 206, 239, 254, 254,
+ },
+ {
+ 164, 128, 204, 170, 119, 235, 140, 230, 228,
+ 128, 130, 130, 74, 148, 180, 203, 236, 254, 254,
+ },
+};
+
+void Vp8Parser::ResetProbs() {
+ static_assert(
+ sizeof(curr_entropy_hdr_.coeff_probs) == sizeof(kDefaultCoeffProbs),
+ "coeff_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.coeff_probs, kDefaultCoeffProbs,
+ sizeof(curr_entropy_hdr_.coeff_probs));
+
+ static_assert(sizeof(curr_entropy_hdr_.mv_probs) == sizeof(kDefaultMVProbs),
+ "mv_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.mv_probs, kDefaultMVProbs,
+ sizeof(curr_entropy_hdr_.mv_probs));
+
+ static_assert(
+ sizeof(curr_entropy_hdr_.y_mode_probs) == sizeof(kDefaultYModeProbs),
+ "y_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.y_mode_probs, kDefaultYModeProbs,
+ sizeof(curr_entropy_hdr_.y_mode_probs));
+
+ static_assert(
+ sizeof(curr_entropy_hdr_.uv_mode_probs) == sizeof(kDefaultUVModeProbs),
+ "uv_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.uv_mode_probs, kDefaultUVModeProbs,
+ sizeof(curr_entropy_hdr_.uv_mode_probs));
+}
+
+bool Vp8Parser::ParseTokenProbs(Vp8EntropyHeader* ehdr,
+ bool update_curr_probs) {
+ for (size_t i = 0; i < kNumBlockTypes; ++i) {
+ for (size_t j = 0; j < kNumCoeffBands; ++j) {
+ for (size_t k = 0; k < kNumPrevCoeffContexts; ++k) {
+ for (size_t l = 0; l < kNumEntropyNodes; ++l) {
+ bool coeff_prob_update_flag;
+ BD_READ_BOOL_WITH_PROB_OR_RETURN(&coeff_prob_update_flag,
+ kCoeffUpdateProbs[i][j][k][l]);
+ if (coeff_prob_update_flag)
+ BD_READ_UNSIGNED_OR_RETURN(8, &ehdr->coeff_probs[i][j][k][l]);
+ }
+ }
+ }
+ }
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.coeff_probs, ehdr->coeff_probs,
+ sizeof(curr_entropy_hdr_.coeff_probs));
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseIntraProbs(Vp8EntropyHeader* ehdr,
+ bool update_curr_probs,
+ bool keyframe) {
+ if (keyframe) {
+ static_assert(
+ sizeof(ehdr->y_mode_probs) == sizeof(kKeyframeYModeProbs),
+ "y_probs_arrays_must_be_of_correct_size");
+ memcpy(ehdr->y_mode_probs, kKeyframeYModeProbs,
+ sizeof(ehdr->y_mode_probs));
+
+ static_assert(
+ sizeof(ehdr->uv_mode_probs) == sizeof(kKeyframeUVModeProbs),
+ "uv_probs_arrays_must_be_of_correct_size");
+ memcpy(ehdr->uv_mode_probs, kKeyframeUVModeProbs,
+ sizeof(ehdr->uv_mode_probs));
+ } else {
+ bool intra_16x16_prob_update_flag;
+ BD_READ_BOOL_OR_RETURN(&intra_16x16_prob_update_flag);
+ if (intra_16x16_prob_update_flag) {
+ for (size_t i = 0; i < kNumYModeProbs; ++i)
+ BD_READ_UNSIGNED_OR_RETURN(8, &ehdr->y_mode_probs[i]);
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.y_mode_probs, ehdr->y_mode_probs,
+ sizeof(curr_entropy_hdr_.y_mode_probs));
+ }
+ }
+
+ bool intra_chroma_prob_update_flag;
+ BD_READ_BOOL_OR_RETURN(&intra_chroma_prob_update_flag);
+ if (intra_chroma_prob_update_flag) {
+ for (size_t i = 0; i < kNumUVModeProbs; ++i)
+ BD_READ_UNSIGNED_OR_RETURN(8, &ehdr->uv_mode_probs[i]);
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.uv_mode_probs, ehdr->uv_mode_probs,
+ sizeof(curr_entropy_hdr_.uv_mode_probs));
+ }
+ }
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseMVProbs(Vp8EntropyHeader* ehdr, bool update_curr_probs) {
+ for (size_t mv_ctx = 0; mv_ctx < kNumMVContexts; ++mv_ctx) {
+ for (size_t p = 0; p < kNumMVProbs; ++p) {
+ bool mv_prob_update_flag;
+ BD_READ_BOOL_WITH_PROB_OR_RETURN(&mv_prob_update_flag,
+ kMVUpdateProbs[mv_ctx][p]);
+ if (mv_prob_update_flag) {
+ uint8_t prob;
+ BD_READ_UNSIGNED_OR_RETURN(7, &prob);
+ ehdr->mv_probs[mv_ctx][p] = prob ? (prob << 1) : 1;
+ }
+ }
+ }
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.mv_probs, ehdr->mv_probs,
+ sizeof(curr_entropy_hdr_.mv_probs));
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParsePartitions(Vp8FrameHeader* fhdr) {
+ CHECK_GE(fhdr->num_of_dct_partitions, 1u);
+ CHECK_LE(fhdr->num_of_dct_partitions, kMaxDCTPartitions);
+
+ // DCT partitions start after the first partition and partition size values
+ // that follow it. There are num_of_dct_partitions - 1 sizes stored in the
+  // stream after the first partition, each 3 bytes long. The size of the last
+  // DCT partition is not stored in the stream, but is instead calculated as
+  // the remainder of the frame size after the penultimate DCT partition.
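+  //
+  // Worked example (illustrative numbers): for a 100-byte frame with
+  // first_part_offset = 10, first_part_size = 20 and 4 DCT partitions,
+  // three 3-byte size fields follow the first partition, so
+  // first_dct_pos = 10 + 20 + 3 * 3 = 39 and 61 bytes remain for the four
+  // DCT partitions themselves.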
+ size_t first_dct_pos = fhdr->first_part_offset + fhdr->first_part_size +
+ (fhdr->num_of_dct_partitions - 1) * 3;
+
+ // Make sure we have enough data for the first partition and partition sizes.
+ if (fhdr->frame_size < first_dct_pos)
+ return false;
+
+ // Total size of all DCT partitions.
+ size_t bytes_left = fhdr->frame_size - first_dct_pos;
+
+ // Position ourselves at the beginning of partition size values.
+ const uint8_t* ptr =
+ fhdr->data + fhdr->first_part_offset + fhdr->first_part_size;
+
+ // Read sizes from the stream (if present).
+ for (size_t i = 0; i < fhdr->num_of_dct_partitions - 1; ++i) {
+ fhdr->dct_partition_sizes[i] = (ptr[2] << 16) | (ptr[1] << 8) | ptr[0];
+
+    // Make sure we have enough data in the stream for the ith partition and
+    // subtract its size from the total.
+ if (bytes_left < fhdr->dct_partition_sizes[i])
+ return false;
+
+ bytes_left -= fhdr->dct_partition_sizes[i];
+
+ // Move to the position of the next partition size value.
+ ptr += 3;
+ }
+
+ // The remainder of the data belongs to the last DCT partition.
+ fhdr->dct_partition_sizes[fhdr->num_of_dct_partitions - 1] = bytes_left;
+
+ DVLOG(4) << "Control part size: " << fhdr->first_part_size;
+ for (size_t i = 0; i < fhdr->num_of_dct_partitions; ++i)
+ DVLOG(4) << "DCT part " << i << " size: " << fhdr->dct_partition_sizes[i];
+
+ return true;
+}
+
+} // namespace media
diff --git a/accel/vp8_parser.h b/accel/vp8_parser.h
new file mode 100644
index 0000000..c75e6cc
--- /dev/null
+++ b/accel/vp8_parser.h
@@ -0,0 +1,199 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP8 raw stream parser,
+// as defined in RFC 6386.
+// Note: ported from Chromium commit head: 1323b9c
+
+#ifndef VP8_PARSER_H_
+#define VP8_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "vp8_bool_decoder.h"
+
+namespace media {
+
+// See spec for definitions of values/fields.
+const size_t kMaxMBSegments = 4;
+const size_t kNumMBFeatureTreeProbs = 3;
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct Vp8SegmentationHeader {
+ enum SegmentFeatureMode { FEATURE_MODE_DELTA = 0, FEATURE_MODE_ABSOLUTE = 1 };
+
+ bool segmentation_enabled;
+ bool update_mb_segmentation_map;
+ bool update_segment_feature_data;
+ SegmentFeatureMode segment_feature_mode;
+
+ int8_t quantizer_update_value[kMaxMBSegments];
+ int8_t lf_update_value[kMaxMBSegments];
+ static const int kDefaultSegmentProb = 255;
+ uint8_t segment_prob[kNumMBFeatureTreeProbs];
+};
+
+const size_t kNumBlockContexts = 4;
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct Vp8LoopFilterHeader {
+ enum Type { LOOP_FILTER_TYPE_NORMAL = 0, LOOP_FILTER_TYPE_SIMPLE = 1 };
+ Type type;
+ uint8_t level;
+ uint8_t sharpness_level;
+ bool loop_filter_adj_enable;
+ bool mode_ref_lf_delta_update;
+
+ int8_t ref_frame_delta[kNumBlockContexts];
+ int8_t mb_mode_delta[kNumBlockContexts];
+};
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct Vp8QuantizationHeader {
+ uint8_t y_ac_qi;
+ int8_t y_dc_delta;
+ int8_t y2_dc_delta;
+ int8_t y2_ac_delta;
+ int8_t uv_dc_delta;
+ int8_t uv_ac_delta;
+};
+
+const size_t kNumBlockTypes = 4;
+const size_t kNumCoeffBands = 8;
+const size_t kNumPrevCoeffContexts = 3;
+const size_t kNumEntropyNodes = 11;
+
+const size_t kNumMVContexts = 2;
+const size_t kNumMVProbs = 19;
+
+const size_t kNumYModeProbs = 4;
+const size_t kNumUVModeProbs = 3;
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct Vp8EntropyHeader {
+ uint8_t coeff_probs[kNumBlockTypes][kNumCoeffBands][kNumPrevCoeffContexts]
+ [kNumEntropyNodes];
+
+ uint8_t y_mode_probs[kNumYModeProbs];
+ uint8_t uv_mode_probs[kNumUVModeProbs];
+
+ uint8_t mv_probs[kNumMVContexts][kNumMVProbs];
+};
+
+const size_t kMaxDCTPartitions = 8;
+
+struct Vp8FrameHeader {
+ Vp8FrameHeader();
+
+ enum FrameType { KEYFRAME = 0, INTERFRAME = 1 };
+ bool IsKeyframe() const { return key_frame == KEYFRAME; }
+
+ enum GoldenRefreshMode {
+ COPY_LAST_TO_GOLDEN = 1,
+ COPY_ALT_TO_GOLDEN = 2,
+ };
+
+ enum AltRefreshMode {
+ COPY_LAST_TO_ALT = 1,
+ COPY_GOLDEN_TO_ALT = 2,
+ };
+
+ FrameType key_frame;
+ uint8_t version;
+ bool is_experimental;
+ bool show_frame;
+ size_t first_part_size;
+
+ uint16_t width;
+ uint8_t horizontal_scale;
+ uint16_t height;
+ uint8_t vertical_scale;
+
+ Vp8SegmentationHeader segmentation_hdr;
+ Vp8LoopFilterHeader loopfilter_hdr;
+ Vp8QuantizationHeader quantization_hdr;
+
+ size_t num_of_dct_partitions;
+
+ Vp8EntropyHeader entropy_hdr;
+
+ bool refresh_entropy_probs;
+ bool refresh_golden_frame;
+ bool refresh_alternate_frame;
+ GoldenRefreshMode copy_buffer_to_golden;
+ AltRefreshMode copy_buffer_to_alternate;
+ uint8_t sign_bias_golden;
+ uint8_t sign_bias_alternate;
+ bool refresh_last;
+
+ bool mb_no_skip_coeff;
+ uint8_t prob_skip_false;
+ uint8_t prob_intra;
+ uint8_t prob_last;
+ uint8_t prob_gf;
+
+ const uint8_t* data;
+ size_t frame_size;
+
+ size_t dct_partition_sizes[kMaxDCTPartitions];
+ // Offset in bytes from data.
+ off_t first_part_offset;
+ // Offset in bits from first_part_offset.
+ off_t macroblock_bit_offset;
+
+ // Bool decoder state
+ uint8_t bool_dec_range;
+ uint8_t bool_dec_value;
+ uint8_t bool_dec_count;
+};
+
+// A parser for raw VP8 streams as specified in RFC 6386.
+class Vp8Parser {
+ public:
+ Vp8Parser();
+ ~Vp8Parser();
+
+  // Tries to parse exactly one VP8 frame starting at |ptr| and of size
+  // |size|, filling the parsed data in |fhdr|. Returns true on success.
+  // |size| has to be exactly the size of the frame and must be provided by
+  // the caller, who needs to acquire it from elsewhere (normally from a
+  // container).
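+  //
+  // Illustrative use (|frame_data| and |frame_size| come from the caller):
+  //   Vp8Parser parser;
+  //   Vp8FrameHeader fhdr;
+  //   if (parser.ParseFrame(frame_data, frame_size, &fhdr) &&
+  //       fhdr.IsKeyframe()) {
+  //     // fhdr.width and fhdr.height are now populated.
+  //   }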
+ bool ParseFrame(const uint8_t* ptr, size_t size, Vp8FrameHeader* fhdr);
+
+ private:
+ bool ParseFrameTag(Vp8FrameHeader* fhdr);
+ bool ParseFrameHeader(Vp8FrameHeader* fhdr);
+
+ bool ParseSegmentationHeader(bool keyframe);
+ bool ParseLoopFilterHeader(bool keyframe);
+ bool ParseQuantizationHeader(Vp8QuantizationHeader* qhdr);
+ bool ParseTokenProbs(Vp8EntropyHeader* ehdr, bool update_curr_probs);
+ bool ParseIntraProbs(Vp8EntropyHeader* ehdr,
+ bool update_curr_probs,
+ bool keyframe);
+ bool ParseMVProbs(Vp8EntropyHeader* ehdr, bool update_curr_probs);
+ bool ParsePartitions(Vp8FrameHeader* fhdr);
+ void ResetProbs();
+
+ // These persist across calls to ParseFrame() and may be used and/or updated
+ // for subsequent frames if the stream instructs us to do so.
+ Vp8SegmentationHeader curr_segmentation_hdr_;
+ Vp8LoopFilterHeader curr_loopfilter_hdr_;
+ Vp8EntropyHeader curr_entropy_hdr_;
+
+ const uint8_t* stream_;
+ size_t bytes_left_;
+ Vp8BoolDecoder bd_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8Parser);
+};
+
+} // namespace media
+
+#endif // VP8_PARSER_H_
diff --git a/accel/vp8_picture.cc b/accel/vp8_picture.cc
new file mode 100644
index 0000000..b9030ce
--- /dev/null
+++ b/accel/vp8_picture.cc
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 6e70beb
+
+#include "vp8_picture.h"
+
+namespace media {
+
+VP8Picture::VP8Picture() {}
+
+VP8Picture::~VP8Picture() {}
+
+V4L2VP8Picture* VP8Picture::AsV4L2VP8Picture() {
+ return nullptr;
+}
+
+} // namespace media
diff --git a/accel/vp8_picture.h b/accel/vp8_picture.h
new file mode 100644
index 0000000..bd04ec7
--- /dev/null
+++ b/accel/vp8_picture.h
@@ -0,0 +1,35 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 70340ce
+
+#ifndef VP8_PICTURE_H_
+#define VP8_PICTURE_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "rect.h"
+
+namespace media {
+
+class V4L2VP8Picture;
+
+class VP8Picture : public base::RefCountedThreadSafe<VP8Picture> {
+ public:
+ VP8Picture();
+
+ virtual V4L2VP8Picture* AsV4L2VP8Picture();
+
+ // The visible size of picture.
+ Rect visible_rect;
+
+ protected:
+ friend class base::RefCountedThreadSafe<VP8Picture>;
+ virtual ~VP8Picture();
+
+ DISALLOW_COPY_AND_ASSIGN(VP8Picture);
+};
+
+} // namespace media
+
+#endif // VP8_PICTURE_H_
diff --git a/accel/vp9_bool_decoder.cc b/accel/vp9_bool_decoder.cc
new file mode 100644
index 0000000..1d2b6f4
--- /dev/null
+++ b/accel/vp9_bool_decoder.cc
@@ -0,0 +1,165 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
+
+#include "vp9_bool_decoder.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "bit_reader.h"
+
+namespace media {
+
+namespace {
+
+// This is an optimization lookup table for the loop in spec 9.2.2.
+// while BoolRange <= 128:
+// read 1 bit
+// BoolRange *= 2
+// This table indicates how many iterations to run for a given BoolRange. So
+// the loop could be reduced to
+// read (kCountToShiftTo128[BoolRange]) bits
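+// For example, kCountToShiftTo128[48] == 2, since 48 must be doubled twice
+// (48 -> 96 -> 192) to reach at least 128.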
+const int kCountToShiftTo128[256] = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+} // namespace
+
+Vp9BoolDecoder::Vp9BoolDecoder() = default;
+
+Vp9BoolDecoder::~Vp9BoolDecoder() = default;
+
+// 9.2.1 Initialization process for Boolean decoder
+bool Vp9BoolDecoder::Initialize(const uint8_t* data, size_t size) {
+ DCHECK(data);
+ if (size < 1) {
+ DVLOG(1) << "input size of bool decoder shall be at least 1";
+ valid_ = false;
+ return false;
+ }
+
+ reader_.reset(new BitReader(data, size));
+ valid_ = true;
+
+ bool_value_ = 0;
+ count_to_fill_ = 8;
+ bool_range_ = 255;
+ if (ReadLiteral(1) != 0) {
+ DVLOG(1) << "marker bit should be 0";
+ valid_ = false;
+ return false;
+ }
+ return true;
+}
+
+// Fills at least |count_to_fill_| bits and prefills the remaining bits of
+// |bool_value_| if enough data is available.
+bool Vp9BoolDecoder::Fill() {
+ DCHECK_GE(count_to_fill_, 0);
+
+ int bits_left = reader_->bits_available();
+ if (bits_left < count_to_fill_) {
+ valid_ = false;
+ DVLOG(1) << "Vp9BoolDecoder reads beyond the end of stream";
+ return false;
+ }
+
+ DCHECK_LE(count_to_fill_, kBoolSize);
+ int max_bits_to_read = kBigBoolBitSize - kBoolSize + count_to_fill_;
+ int bits_to_read = std::min(max_bits_to_read, bits_left);
+
+ BigBool data;
+ reader_->ReadBits(bits_to_read, &data);
+ bool_value_ |= data << (max_bits_to_read - bits_to_read);
+ count_to_fill_ -= bits_to_read;
+
+ return true;
+}
+
+// 9.2.2 Boolean decoding process
+bool Vp9BoolDecoder::ReadBool(int prob) {
+ DCHECK(reader_);
+
+ if (count_to_fill_ > 0) {
+ if (!Fill())
+ return false;
+ }
+
+ unsigned int split = (bool_range_ * prob + (256 - prob)) >> kBoolSize;
+ BigBool big_split = static_cast<BigBool>(split)
+ << (kBigBoolBitSize - kBoolSize);
+
+ bool bit;
+ if (bool_value_ < big_split) {
+ bool_range_ = split;
+ bit = false;
+ } else {
+ bool_range_ -= split;
+ bool_value_ -= big_split;
+ bit = true;
+ }
+
+ // Need to fill |count| bits next time in order to make |bool_range_| >=
+ // 128.
+ DCHECK_LT(bool_range_, arraysize(kCountToShiftTo128));
+ DCHECK_GT(bool_range_, 0u);
+ int count = kCountToShiftTo128[bool_range_];
+ bool_range_ <<= count;
+ bool_value_ <<= count;
+ count_to_fill_ += count;
+
+ return bit;
+}
+
+// 9.2.4 Parsing process for read_literal
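+// Bits are accumulated most-significant first; e.g. reading the bits 1, 0, 1
+// as a 3-bit literal yields 0b101 == 5.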
+uint8_t Vp9BoolDecoder::ReadLiteral(int bits) {
+ DCHECK_LT(static_cast<size_t>(bits), sizeof(uint8_t) * 8);
+ DCHECK(reader_);
+
+ uint8_t x = 0;
+ for (int i = 0; i < bits; i++)
+ x = 2 * x + ReadBool(128);
+
+ return x;
+}
+
+bool Vp9BoolDecoder::ConsumePaddingBits() {
+ DCHECK(reader_);
+
+ if (count_to_fill_ > reader_->bits_available()) {
+    // 9.2.2 Boolean decoding process
+    // Although we don't actually use the value, the spec says the bitstream
+    // should have enough bits to fill the bool range, so this should never
+    // happen.
+ DVLOG(2) << "not enough bits in bitstream to fill bool range";
+ return false;
+ }
+
+ if (bool_value_ != 0) {
+ DVLOG(1) << "prefilled padding bits are not zero";
+ return false;
+ }
+ while (reader_->bits_available() > 0) {
+ int data;
+ int size_to_read =
+ std::min(reader_->bits_available(), static_cast<int>(sizeof(data) * 8));
+ reader_->ReadBits(size_to_read, &data);
+ if (data != 0) {
+ DVLOG(1) << "padding bits are not zero";
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace media
diff --git a/accel/vp9_bool_decoder.h b/accel/vp9_bool_decoder.h
new file mode 100644
index 0000000..50c386f
--- /dev/null
+++ b/accel/vp9_bool_decoder.h
@@ -0,0 +1,73 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
+
+#ifndef VP9_BOOL_DECODER_H_
+#define VP9_BOOL_DECODER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+
+namespace media {
+
+class BitReader;
+
+class Vp9BoolDecoder {
+ public:
+ Vp9BoolDecoder();
+ ~Vp9BoolDecoder();
+
+ // |data| is the input buffer with |size| bytes.
+  // Returns true if the first marker bit was read successfully.
+ bool Initialize(const uint8_t* data, size_t size);
+
+ // Returns true if none of the reads since the last Initialize() call has
+ // gone beyond the end of available data.
+ bool IsValid() const { return valid_; }
+
+ // Reads one bit. B(p).
+  // If the read goes beyond the end of the buffer, the return value is
+  // undefined.
+ bool ReadBool(int prob);
+
+ // Reads a literal. L(n).
+  // If the read goes beyond the end of the buffer, the return value is
+  // undefined.
+ uint8_t ReadLiteral(int bits);
+
+ // Consumes padding bits up to end of data. Returns true if no
+ // padding bits or they are all zero.
+ bool ConsumePaddingBits();
+
+ private:
+  // The highest 8 bits of BigBool hold the actual "bool value". The remaining
+  // bits form a prefill buffer, kept as an optimization.
+ using BigBool = size_t;
+ // The size of "bool value" used for boolean decoding defined in spec.
+ const int kBoolSize = 8;
+ const int kBigBoolBitSize = sizeof(BigBool) * 8;
+
+ bool Fill();
+
+ std::unique_ptr<BitReader> reader_;
+
+ // Indicates if none of the reads since the last Initialize() call has gone
+ // beyond the end of available data.
+ bool valid_ = true;
+
+ BigBool bool_value_ = 0;
+
+  // Need to fill at least |count_to_fill_| bits. A negative value means extra
+  // bits have been pre-filled.
+ int count_to_fill_ = 0;
+ unsigned int bool_range_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp9BoolDecoder);
+};
+
+} // namespace media
+
+#endif // VP9_BOOL_DECODER_H_
diff --git a/accel/vp9_compressed_header_parser.cc b/accel/vp9_compressed_header_parser.cc
new file mode 100644
index 0000000..524472f
--- /dev/null
+++ b/accel/vp9_compressed_header_parser.cc
@@ -0,0 +1,294 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "vp9_compressed_header_parser.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+namespace {
+
+// 6.3.6 Inv recenter nonneg syntax, inv_recenter_nonneg().
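+// For example, InvRecenterNonneg(3, 10) returns 10 - ((3 + 1) >> 1) == 8,
+// while InvRecenterNonneg(25, 10) returns 25, since 25 > 2 * 10.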
+int InvRecenterNonneg(int v, int m) {
+ DCHECK_LE(m, kVp9MaxProb / 2);
+ if (v > 2 * m)
+ return v;
+
+ if (v & 1)
+ return m - ((v + 1) >> 1);
+ return m + (v >> 1);
+}
+
+// 6.3.5 Inv remap prob syntax, inv_remap_prob().
+Vp9Prob InvRemapProb(uint8_t delta_prob, uint8_t prob) {
+ static uint8_t inv_map_table[kVp9MaxProb] = {
+ 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
+ 189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 253};
+ uint8_t m = prob;
+ uint8_t v = delta_prob;
+ DCHECK_GE(m, 1);
+ DCHECK_LE(m, kVp9MaxProb);
+ DCHECK_LT(v, arraysize(inv_map_table));
+ v = inv_map_table[v];
+ m--;
+ if ((m << 1) <= kVp9MaxProb) {
+ return 1 + InvRecenterNonneg(v, m);
+ } else {
+ return kVp9MaxProb - InvRecenterNonneg(v, kVp9MaxProb - 1 - m);
+ }
+}
+
+} // namespace
+
+Vp9CompressedHeaderParser::Vp9CompressedHeaderParser() = default;
+
+// 6.3.1 Tx mode syntax
+void Vp9CompressedHeaderParser::ReadTxMode(Vp9FrameHeader* fhdr) {
+ int tx_mode;
+ if (fhdr->quant_params.IsLossless()) {
+ tx_mode = Vp9CompressedHeader::ONLY_4X4;
+ } else {
+ tx_mode = reader_.ReadLiteral(2);
+ if (tx_mode == Vp9CompressedHeader::ALLOW_32X32)
+ tx_mode += reader_.ReadLiteral(1);
+ }
+ fhdr->compressed_header.tx_mode =
+ static_cast<Vp9CompressedHeader::Vp9TxMode>(tx_mode);
+}
+
+// 6.3.4 Decode term subexp syntax
+uint8_t Vp9CompressedHeaderParser::DecodeTermSubexp() {
+ if (reader_.ReadLiteral(1) == 0)
+ return reader_.ReadLiteral(4);
+ if (reader_.ReadLiteral(1) == 0)
+ return reader_.ReadLiteral(4) + 16;
+ if (reader_.ReadLiteral(1) == 0)
+ return reader_.ReadLiteral(5) + 32;
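+  // Final range: a 7-bit value below 65 maps directly to 64..128; larger
+  // values take one extra bit to cover 129..254.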
+ uint8_t v = reader_.ReadLiteral(7);
+ if (v < 65)
+ return v + 64;
+ return (v << 1) - 1 + reader_.ReadLiteral(1);
+}
+
+// 6.3.3 Diff update prob syntax
+void Vp9CompressedHeaderParser::DiffUpdateProb(Vp9Prob* prob) {
+ const Vp9Prob kUpdateProb = 252;
+ if (reader_.ReadBool(kUpdateProb)) {
+ uint8_t delta_prob = DecodeTermSubexp();
+ *prob = InvRemapProb(delta_prob, *prob);
+ }
+}
+
+// Helper function to DiffUpdateProb an array of probs.
+template <int N>
+void Vp9CompressedHeaderParser::DiffUpdateProbArray(Vp9Prob (&prob_array)[N]) {
+ for (auto& x : prob_array) {
+ DiffUpdateProb(&x);
+ }
+}
+
+// 6.3.2 Tx mode probs syntax
+void Vp9CompressedHeaderParser::ReadTxModeProbs(
+ Vp9FrameContext* frame_context) {
+ for (auto& a : frame_context->tx_probs_8x8) {
+ DiffUpdateProbArray(a);
+ }
+ for (auto& a : frame_context->tx_probs_16x16) {
+ DiffUpdateProbArray(a);
+ }
+ for (auto& a : frame_context->tx_probs_32x32) {
+ DiffUpdateProbArray(a);
+ }
+}
+
+// 6.3.7 Coef probs syntax
+void Vp9CompressedHeaderParser::ReadCoefProbs(Vp9FrameHeader* fhdr) {
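+  // Indexed by Vp9TxMode; both ALLOW_32X32 and TX_MODE_SELECT permit
+  // transforms up to 32x32 (the largest tx_size index, 3).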
+ const int tx_mode_to_biggest_tx_size[Vp9CompressedHeader::TX_MODES] = {
+ 0, 1, 2, 3, 3,
+ };
+ const int max_tx_size =
+ tx_mode_to_biggest_tx_size[fhdr->compressed_header.tx_mode];
+ for (int tx_size = 0; tx_size <= max_tx_size; tx_size++) {
+ if (reader_.ReadLiteral(1) == 0)
+ continue;
+
+ for (auto& ai : fhdr->frame_context.coef_probs[tx_size]) {
+ for (auto& aj : ai) {
+ for (auto& ak : aj) {
+ int max_l = (ak == aj[0]) ? 3 : 6;
+ for (int l = 0; l < max_l; l++) {
+ DiffUpdateProbArray(ak[l]);
+ }
+ }
+ }
+ }
+ }
+}
+
+// 6.3.8 Skip probs syntax
+void Vp9CompressedHeaderParser::ReadSkipProb(Vp9FrameContext* frame_context) {
+ DiffUpdateProbArray(frame_context->skip_prob);
+}
+
+// 6.3.9 Inter mode probs syntax
+void Vp9CompressedHeaderParser::ReadInterModeProbs(
+ Vp9FrameContext* frame_context) {
+ for (auto& a : frame_context->inter_mode_probs)
+ DiffUpdateProbArray(a);
+}
+
+// 6.3.10 Interp filter probs syntax
+void Vp9CompressedHeaderParser::ReadInterpFilterProbs(
+ Vp9FrameContext* frame_context) {
+ for (auto& a : frame_context->interp_filter_probs)
+ DiffUpdateProbArray(a);
+}
+
+// 6.3.11 Intra inter probs syntax
+void Vp9CompressedHeaderParser::ReadIsInterProbs(
+ Vp9FrameContext* frame_context) {
+ DiffUpdateProbArray(frame_context->is_inter_prob);
+}
+
+// 6.3.12 Frame reference mode syntax
+void Vp9CompressedHeaderParser::ReadFrameReferenceMode(Vp9FrameHeader* fhdr) {
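+  // Compound prediction is only possible when at least two of the reference
+  // frames have opposite sign biases.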
+ bool compound_reference_allowed = false;
+ for (int i = VP9_FRAME_LAST + 1; i < VP9_FRAME_MAX; i++)
+    if (fhdr->ref_frame_sign_bias[i] !=
+        fhdr->ref_frame_sign_bias[VP9_FRAME_LAST])
+ compound_reference_allowed = true;
+
+ if (compound_reference_allowed && reader_.ReadLiteral(1)) {
+ fhdr->compressed_header.reference_mode =
+ reader_.ReadLiteral(1) ? REFERENCE_MODE_SELECT : COMPOUND_REFERENCE;
+ } else {
+ fhdr->compressed_header.reference_mode = SINGLE_REFERENCE;
+ }
+}
+
+// 6.3.13 Frame reference mode probs syntax
+void Vp9CompressedHeaderParser::ReadFrameReferenceModeProbs(
+ Vp9FrameHeader* fhdr) {
+ Vp9FrameContext* frame_context = &fhdr->frame_context;
+ if (fhdr->compressed_header.reference_mode == REFERENCE_MODE_SELECT)
+ DiffUpdateProbArray(frame_context->comp_mode_prob);
+
+ if (fhdr->compressed_header.reference_mode != COMPOUND_REFERENCE)
+ for (auto& a : frame_context->single_ref_prob)
+ DiffUpdateProbArray(a);
+
+ if (fhdr->compressed_header.reference_mode != SINGLE_REFERENCE)
+ DiffUpdateProbArray(frame_context->comp_ref_prob);
+}
+
+// 6.3.14 Y mode probs syntax
+void Vp9CompressedHeaderParser::ReadYModeProbs(Vp9FrameContext* frame_context) {
+ for (auto& a : frame_context->y_mode_probs)
+ DiffUpdateProbArray(a);
+}
+
+// 6.3.15 Partition probs syntax
+void Vp9CompressedHeaderParser::ReadPartitionProbs(
+ Vp9FrameContext* frame_context) {
+ for (auto& a : frame_context->partition_probs)
+ DiffUpdateProbArray(a);
+}
+
+// 6.3.16 MV probs syntax
+void Vp9CompressedHeaderParser::ReadMvProbs(bool allow_high_precision_mv,
+ Vp9FrameContext* frame_context) {
+ UpdateMvProbArray(frame_context->mv_joint_probs);
+
+ for (int i = 0; i < 2; i++) {
+ UpdateMvProb(&frame_context->mv_sign_prob[i]);
+ UpdateMvProbArray(frame_context->mv_class_probs[i]);
+ UpdateMvProb(&frame_context->mv_class0_bit_prob[i]);
+ UpdateMvProbArray(frame_context->mv_bits_prob[i]);
+ }
+
+ for (int i = 0; i < 2; i++) {
+ for (auto& a : frame_context->mv_class0_fr_probs[i])
+ UpdateMvProbArray(a);
+ UpdateMvProbArray(frame_context->mv_fr_probs[i]);
+ }
+
+ if (allow_high_precision_mv) {
+ for (int i = 0; i < 2; i++) {
+ UpdateMvProb(&frame_context->mv_class0_hp_prob[i]);
+ UpdateMvProb(&frame_context->mv_hp_prob[i]);
+ }
+ }
+}
+
+// 6.3.17 Update mv prob syntax
+void Vp9CompressedHeaderParser::UpdateMvProb(Vp9Prob* prob) {
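+  // An update follows with probability 252/256; the 7 read bits form the high
+  // bits and the low bit is fixed to 1, keeping the prob odd and nonzero.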
+ if (reader_.ReadBool(252))
+ *prob = reader_.ReadLiteral(7) << 1 | 1;
+}
+
+// Helper function to UpdateMvProb an array of probs.
+template <int N>
+void Vp9CompressedHeaderParser::UpdateMvProbArray(Vp9Prob (&prob_array)[N]) {
+ for (auto& x : prob_array) {
+ UpdateMvProb(&x);
+ }
+}
+
+// 6.3 Compressed header syntax
+bool Vp9CompressedHeaderParser::Parse(const uint8_t* stream,
+ off_t frame_size,
+ Vp9FrameHeader* fhdr) {
+ DVLOG(2) << "Vp9CompressedHeaderParser::Parse";
+ if (!reader_.Initialize(stream, frame_size))
+ return false;
+
+ ReadTxMode(fhdr);
+ if (fhdr->compressed_header.tx_mode == Vp9CompressedHeader::TX_MODE_SELECT)
+ ReadTxModeProbs(&fhdr->frame_context);
+
+ ReadCoefProbs(fhdr);
+ ReadSkipProb(&fhdr->frame_context);
+
+ if (!fhdr->IsIntra()) {
+ ReadInterModeProbs(&fhdr->frame_context);
+ if (fhdr->interpolation_filter == SWITCHABLE)
+ ReadInterpFilterProbs(&fhdr->frame_context);
+ ReadIsInterProbs(&fhdr->frame_context);
+ ReadFrameReferenceMode(fhdr);
+ ReadFrameReferenceModeProbs(fhdr);
+ ReadYModeProbs(&fhdr->frame_context);
+ ReadPartitionProbs(&fhdr->frame_context);
+ ReadMvProbs(fhdr->allow_high_precision_mv, &fhdr->frame_context);
+ }
+
+ if (!reader_.IsValid()) {
+ DVLOG(1) << "parser reads beyond the end of buffer";
+ return false;
+ }
+ if (!reader_.ConsumePaddingBits()) {
+ DVLOG(1) << "padding bits are not zero";
+ return false;
+ }
+ return true;
+}
+
+} // namespace media
diff --git a/accel/vp9_compressed_header_parser.h b/accel/vp9_compressed_header_parser.h
new file mode 100644
index 0000000..5f5ff56
--- /dev/null
+++ b/accel/vp9_compressed_header_parser.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
+
+#ifndef VP9_COMPRESSED_HEADER_PARSER_H_
+#define VP9_COMPRESSED_HEADER_PARSER_H_
+
+#include "vp9_bool_decoder.h"
+#include "vp9_parser.h"
+
+namespace media {
+
+class Vp9CompressedHeaderParser {
+ public:
+ Vp9CompressedHeaderParser();
+
+  // Parses the VP9 compressed header in |stream|, of size |frame_size| bytes,
+  // into |fhdr|. Returns true if there was no error.
+ bool Parse(const uint8_t* stream, off_t frame_size, Vp9FrameHeader* fhdr);
+
+ private:
+ void ReadTxMode(Vp9FrameHeader* fhdr);
+ uint8_t DecodeTermSubexp();
+ void DiffUpdateProb(Vp9Prob* prob);
+ template <int N>
+ void DiffUpdateProbArray(Vp9Prob (&prob_array)[N]);
+ void ReadTxModeProbs(Vp9FrameContext* frame_context);
+ void ReadCoefProbs(Vp9FrameHeader* fhdr);
+ void ReadSkipProb(Vp9FrameContext* frame_context);
+ void ReadInterModeProbs(Vp9FrameContext* frame_context);
+ void ReadInterpFilterProbs(Vp9FrameContext* frame_context);
+ void ReadIsInterProbs(Vp9FrameContext* frame_context);
+ void ReadFrameReferenceMode(Vp9FrameHeader* fhdr);
+ void ReadFrameReferenceModeProbs(Vp9FrameHeader* fhdr);
+ void ReadYModeProbs(Vp9FrameContext* frame_context);
+ void ReadPartitionProbs(Vp9FrameContext* frame_context);
+ void ReadMvProbs(bool allow_high_precision_mv,
+ Vp9FrameContext* frame_context);
+ void UpdateMvProb(Vp9Prob* prob);
+ template <int N>
+ void UpdateMvProbArray(Vp9Prob (&prob_array)[N]);
+
+ // Bool decoder for compressed frame header.
+ Vp9BoolDecoder reader_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp9CompressedHeaderParser);
+};
+
+} // namespace media
+
+#endif // VP9_COMPRESSED_HEADER_PARSER_H_
diff --git a/accel/vp9_decoder.cc b/accel/vp9_decoder.cc
new file mode 100644
index 0000000..d8af03d
--- /dev/null
+++ b/accel/vp9_decoder.cc
@@ -0,0 +1,227 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 7441087
+
+#include "rect.h"
+#include "vp9_decoder.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+namespace media {
+
+VP9Decoder::VP9Accelerator::VP9Accelerator() {}
+
+VP9Decoder::VP9Accelerator::~VP9Accelerator() {}
+
+VP9Decoder::VP9Decoder(VP9Accelerator* accelerator)
+ : state_(kNeedStreamMetadata),
+ accelerator_(accelerator),
+ parser_(accelerator->IsFrameContextRequired()) {
+ ref_frames_.resize(kVp9NumRefFrames);
+}
+
+VP9Decoder::~VP9Decoder() {}
+
+void VP9Decoder::SetStream(const uint8_t* ptr, size_t size) {
+ DCHECK(ptr);
+ DCHECK(size);
+
+ DVLOG(4) << "New input stream at: " << (void*)ptr << " size: " << size;
+ parser_.SetStream(ptr, size);
+}
+
+bool VP9Decoder::Flush() {
+ DVLOG(2) << "Decoder flush";
+ Reset();
+ return true;
+}
+
+void VP9Decoder::Reset() {
+ curr_frame_hdr_ = nullptr;
+ for (auto& ref_frame : ref_frames_)
+ ref_frame = nullptr;
+
+ parser_.Reset();
+
+ if (state_ == kDecoding)
+ state_ = kAfterReset;
+}
+
+VP9Decoder::DecodeResult VP9Decoder::Decode() {
+ while (1) {
+ // Read a new frame header if one is not awaiting decoding already.
+ if (!curr_frame_hdr_) {
+ std::unique_ptr<Vp9FrameHeader> hdr(new Vp9FrameHeader());
+ Vp9Parser::Result res = parser_.ParseNextFrame(hdr.get());
+ switch (res) {
+ case Vp9Parser::kOk:
+ curr_frame_hdr_ = std::move(hdr);
+ break;
+
+ case Vp9Parser::kEOStream:
+ return kRanOutOfStreamData;
+
+ case Vp9Parser::kInvalidStream:
+ DVLOG(1) << "Error parsing stream";
+ SetError();
+ return kDecodeError;
+
+ case Vp9Parser::kAwaitingRefresh:
+ DVLOG(4) << "Awaiting context update";
+ return kNeedContextUpdate;
+ }
+ }
+
+ if (state_ != kDecoding) {
+      // Not kDecoding, so we need a resume point (a keyframe), as we are
+      // either after a reset or at the beginning of the stream. Drop anything
+      // that is not a keyframe in that case, and continue looking for one.
+ if (curr_frame_hdr_->IsKeyframe()) {
+ state_ = kDecoding;
+ } else {
+ curr_frame_hdr_.reset();
+ continue;
+ }
+ }
+
+ if (curr_frame_hdr_->show_existing_frame) {
+      // This frame header only instructs us to display one of the previously
+      // decoded frames, but has no frame data otherwise. Display it and
+      // continue decoding subsequent frames.
+ size_t frame_to_show = curr_frame_hdr_->frame_to_show_map_idx;
+ if (frame_to_show >= ref_frames_.size() || !ref_frames_[frame_to_show]) {
+ DVLOG(1) << "Request to show an invalid frame";
+ SetError();
+ return kDecodeError;
+ }
+
+ if (!accelerator_->OutputPicture(ref_frames_[frame_to_show])) {
+ SetError();
+ return kDecodeError;
+ }
+
+ curr_frame_hdr_.reset();
+ continue;
+ }
+
+ Size new_pic_size(curr_frame_hdr_->frame_width,
+ curr_frame_hdr_->frame_height);
+ DCHECK(!new_pic_size.IsEmpty());
+
+ if (new_pic_size != pic_size_) {
+ DVLOG(1) << "New resolution: " << new_pic_size.ToString();
+
+ if (!curr_frame_hdr_->IsKeyframe()) {
+ // TODO(posciak): This is doable, but requires a few modifications to
+ // VDA implementations to allow multiple picture buffer sets in flight.
+ DVLOG(1) << "Resolution change currently supported for keyframes only";
+ SetError();
+ return kDecodeError;
+ }
+
+ // TODO(posciak): This requires us to be on a keyframe (see above) and is
+ // required, because VDA clients expect all surfaces to be returned before
+ // they can cycle surface sets after receiving kAllocateNewSurfaces.
+ // This is only an implementation detail of VDAs and can be improved.
+ for (auto& ref_frame : ref_frames_)
+ ref_frame = nullptr;
+
+ pic_size_ = new_pic_size;
+ return kAllocateNewSurfaces;
+ }
+
+ scoped_refptr<VP9Picture> pic = accelerator_->CreateVP9Picture();
+ if (!pic)
+ return kRanOutOfSurfaces;
+
+ Rect new_render_rect(curr_frame_hdr_->render_width,
+ curr_frame_hdr_->render_height);
+    // For safety, validate the render size; if invalid, leave it as (0, 0).
+ if (!Rect(pic_size_).Contains(new_render_rect)) {
+ DVLOG(1) << "Render size exceeds picture size. render size: "
+ << new_render_rect.ToString()
+ << ", picture size: " << pic_size_.ToString();
+ new_render_rect = Rect();
+ }
+ DVLOG(2) << "Render resolution: " << new_render_rect.ToString();
+
+ pic->visible_rect = new_render_rect;
+    pic->frame_hdr = std::move(curr_frame_hdr_);
+
+ if (!DecodeAndOutputPicture(pic)) {
+ SetError();
+ return kDecodeError;
+ }
+ }
+}
+
+void VP9Decoder::RefreshReferenceFrames(const scoped_refptr<VP9Picture>& pic) {
+ for (size_t i = 0; i < kVp9NumRefFrames; ++i) {
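+    // Keyframes are required to refresh all reference frame slots.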
+ DCHECK(!pic->frame_hdr->IsKeyframe() || pic->frame_hdr->RefreshFlag(i));
+ if (pic->frame_hdr->RefreshFlag(i))
+ ref_frames_[i] = pic;
+ }
+}
+
+void VP9Decoder::UpdateFrameContext(
+ const scoped_refptr<VP9Picture>& pic,
+ const base::Callback<void(const Vp9FrameContext&)>& context_refresh_cb) {
+ DCHECK(!context_refresh_cb.is_null());
+ Vp9FrameContext frame_ctx;
+ memset(&frame_ctx, 0, sizeof(frame_ctx));
+
+ if (!accelerator_->GetFrameContext(pic, &frame_ctx)) {
+ SetError();
+ return;
+ }
+
+ context_refresh_cb.Run(frame_ctx);
+}
+
+bool VP9Decoder::DecodeAndOutputPicture(scoped_refptr<VP9Picture> pic) {
+ DCHECK(!pic_size_.IsEmpty());
+ DCHECK(pic->frame_hdr);
+
+ base::Closure done_cb;
+ const auto& context_refresh_cb =
+ parser_.GetContextRefreshCb(pic->frame_hdr->frame_context_idx);
+ if (!context_refresh_cb.is_null())
+ done_cb = base::Bind(&VP9Decoder::UpdateFrameContext,
+ base::Unretained(this), pic, context_refresh_cb);
+
+ const Vp9Parser::Context& context = parser_.context();
+ if (!accelerator_->SubmitDecode(pic, context.segmentation(),
+ context.loop_filter(), ref_frames_, done_cb))
+ return false;
+
+ if (pic->frame_hdr->show_frame) {
+ if (!accelerator_->OutputPicture(pic))
+ return false;
+ }
+
+ RefreshReferenceFrames(pic);
+ return true;
+}
+
+void VP9Decoder::SetError() {
+ Reset();
+ state_ = kError;
+}
+
+Size VP9Decoder::GetPicSize() const {
+ return pic_size_;
+}
+
+size_t VP9Decoder::GetRequiredNumOfPictures() const {
+  // kMaxVideoFrames to keep the higher-level media pipeline populated, +2 for
+  // the pictures currently being parsed and decoded.
+ // TODO(johnylin): see if we could get rid of kMaxVideoFrames.
+ const size_t kMaxVideoFrames = 4;
+ return kMaxVideoFrames + kVp9NumRefFrames + 2;
+}
+
+} // namespace media
diff --git a/accel/vp9_decoder.h b/accel/vp9_decoder.h
new file mode 100644
index 0000000..cdbcd69
--- /dev/null
+++ b/accel/vp9_decoder.h
@@ -0,0 +1,154 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 77118c9
+
+#ifndef VP9_DECODER_H_
+#define VP9_DECODER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "accelerated_video_decoder.h"
+#include "vp9_parser.h"
+#include "vp9_picture.h"
+
+namespace media {
+
+// This class implements an AcceleratedVideoDecoder for VP9 decoding.
+// Clients of this class are expected to pass a raw VP9 stream and to provide
+// an implementation of VP9Accelerator for offloading the final steps of the
+// decoding process.
+//
+// This class must be created, called and destroyed on a single thread, and
+// does nothing internally on any other thread.
+class VP9Decoder : public AcceleratedVideoDecoder {
+ public:
+ class VP9Accelerator {
+ public:
+ VP9Accelerator();
+ virtual ~VP9Accelerator();
+
+ // Create a new VP9Picture that the decoder client can use for initial
+ // stages of the decoding process and pass back to this accelerator for
+ // final, accelerated stages of it, or for reference when decoding other
+ // pictures.
+ //
+    // When a picture is no longer needed, the decoder will simply drop its
+    // reference to it, and may do so at any time.
+ //
+ // Note that this may return nullptr if the accelerator is not able to
+ // provide any new pictures at the given time. The decoder must handle this
+ // case and treat it as normal, returning kRanOutOfSurfaces from Decode().
+ virtual scoped_refptr<VP9Picture> CreateVP9Picture() = 0;
+
+    // Submit decode of |pic| to be run in the accelerator, taking as
+    // arguments information contained in it, as well as the current
+    // segmentation and loop filter state in |segm_params| and |lf_params|,
+    // respectively, and using pictures in |ref_pictures| for reference.
+    // If |done_cb| is not null, it will be run once decode is done in
+    // hardware.
+ //
+ // Note that returning from this method does not mean that the decode
+ // process is finished, but the caller may drop its references to |pic|
+ // and |ref_pictures| immediately, and the data in |segm_params| and
+ // |lf_params| does not need to remain valid after this method returns.
+ //
+ // Return true when successful, false otherwise.
+ virtual bool SubmitDecode(
+ const scoped_refptr<VP9Picture>& pic,
+ const Vp9SegmentationParams& segm_params,
+ const Vp9LoopFilterParams& lf_params,
+ const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
+ const base::Closure& done_cb) = 0;
+
+ // Schedule output (display) of |pic|.
+ //
+ // Note that returning from this method does not mean that |pic| has already
+ // been outputted (displayed), but guarantees that all pictures will be
+ // outputted in the same order as this method was called for them, and that
+ // they are decoded before outputting (assuming SubmitDecode() has been
+    // called for them beforehand). The decoder may drop its references to
+    // |pic| immediately after calling this method.
+ //
+ // Return true when successful, false otherwise.
+ virtual bool OutputPicture(const scoped_refptr<VP9Picture>& pic) = 0;
+
+ // Return true if the accelerator requires the client to provide frame
+ // context in order to decode. If so, the Vp9FrameHeader provided by the
+ // client must contain a valid compressed header and frame context data.
+ virtual bool IsFrameContextRequired() const = 0;
+
+ // Set |frame_ctx| to the state after decoding |pic|, returning true on
+ // success, false otherwise.
+ virtual bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+ Vp9FrameContext* frame_ctx) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VP9Accelerator);
+ };
+
+ explicit VP9Decoder(VP9Accelerator* accelerator);
+ ~VP9Decoder() override;
+
+ // AcceleratedVideoDecoder implementation.
+ void SetStream(const uint8_t* ptr, size_t size) override;
+ bool Flush() override WARN_UNUSED_RESULT;
+ void Reset() override;
+ DecodeResult Decode() override WARN_UNUSED_RESULT;
+ Size GetPicSize() const override;
+ size_t GetRequiredNumOfPictures() const override;
+
+ private:
+ // Update ref_frames_ based on the information in current frame header.
+ void RefreshReferenceFrames(const scoped_refptr<VP9Picture>& pic);
+
+ // Decode and possibly output |pic| (if the picture is to be shown).
+ // Return true on success, false otherwise.
+ bool DecodeAndOutputPicture(scoped_refptr<VP9Picture> pic);
+
+  // Get the frame context state from the accelerator after decoding |pic|,
+  // and call |context_refresh_cb| with the acquired state.
+ void UpdateFrameContext(
+ const scoped_refptr<VP9Picture>& pic,
+ const base::Callback<void(const Vp9FrameContext&)>& context_refresh_cb);
+
+ // Called on error, when decoding cannot continue. Sets state_ to kError and
+ // releases current state.
+ void SetError();
+
+ enum State {
+ kNeedStreamMetadata, // After initialization, need a keyframe.
+ kDecoding, // Ready to decode from any point.
+ kAfterReset, // After Reset(), need a resume point.
+ kError, // Error in decode, can't continue.
+ };
+
+ // Current decoder state.
+ State state_;
+
+ // Current frame header to be used in decoding the next picture.
+ std::unique_ptr<Vp9FrameHeader> curr_frame_hdr_;
+
+ // Reference frames currently in use.
+ std::vector<scoped_refptr<VP9Picture>> ref_frames_;
+
+ // Current coded resolution.
+ Size pic_size_;
+
+ // VP9Accelerator instance owned by the client.
+ VP9Accelerator* accelerator_;
+
+ Vp9Parser parser_;
+
+ DISALLOW_COPY_AND_ASSIGN(VP9Decoder);
+};
+
+} // namespace media
+
+#endif // VP9_DECODER_H_
diff --git a/accel/vp9_parser.cc b/accel/vp9_parser.cc
new file mode 100644
index 0000000..bbd90b9
--- /dev/null
+++ b/accel/vp9_parser.cc
@@ -0,0 +1,676 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP9 bitstream parser.
+//
+// VERBOSE level:
+// 1 something wrong in bitstream
+// 2 parsing steps
+// 3 parsed values (selected)
+// Note: ported from Chromium commit head: 2de6929
+
+#include "vp9_parser.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "vp9_compressed_header_parser.h"
+#include "vp9_uncompressed_header_parser.h"
+
+namespace media {
+
+namespace {
+
+// Coefficients extracted verbatim from "VP9 Bitstream & Decoding Process
+// Specification" Version 0.6, Sec 8.6.1 Dequantization functions, see:
+// https://www.webmproject.org/vp9/#draft-vp9-bitstream-and-decoding-process-specification
+constexpr size_t kQIndexRange = 256;
+// clang-format off
+// libva is the only user of high bit depth VP9 formats and only supports
+// 10 bits per component, see https://github.com/01org/libva/issues/137.
+// TODO(mcasas): Add the 12 bit versions of these tables.
+const int16_t kDcQLookup[][kQIndexRange] = {
+ {
+ 4, 8, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16,
+ 17, 18, 19, 19, 20, 21, 22, 23, 24, 25, 26, 26,
+ 27, 28, 29, 30, 31, 32, 32, 33, 34, 35, 36, 37,
+ 38, 38, 39, 40, 41, 42, 43, 43, 44, 45, 46, 47,
+ 48, 48, 49, 50, 51, 52, 53, 53, 54, 55, 56, 57,
+ 57, 58, 59, 60, 61, 62, 62, 63, 64, 65, 66, 66,
+ 67, 68, 69, 70, 70, 71, 72, 73, 74, 74, 75, 76,
+ 77, 78, 78, 79, 80, 81, 81, 82, 83, 84, 85, 85,
+ 87, 88, 90, 92, 93, 95, 96, 98, 99, 101, 102, 104,
+ 105, 107, 108, 110, 111, 113, 114, 116, 117, 118, 120, 121,
+ 123, 125, 127, 129, 131, 134, 136, 138, 140, 142, 144, 146,
+ 148, 150, 152, 154, 156, 158, 161, 164, 166, 169, 172, 174,
+ 177, 180, 182, 185, 187, 190, 192, 195, 199, 202, 205, 208,
+ 211, 214, 217, 220, 223, 226, 230, 233, 237, 240, 243, 247,
+ 250, 253, 257, 261, 265, 269, 272, 276, 280, 284, 288, 292,
+ 296, 300, 304, 309, 313, 317, 322, 326, 330, 335, 340, 344,
+ 349, 354, 359, 364, 369, 374, 379, 384, 389, 395, 400, 406,
+ 411, 417, 423, 429, 435, 441, 447, 454, 461, 467, 475, 482,
+ 489, 497, 505, 513, 522, 530, 539, 549, 559, 569, 579, 590,
+ 602, 614, 626, 640, 654, 668, 684, 700, 717, 736, 755, 775,
+ 796, 819, 843, 869, 896, 925, 955, 988, 1022, 1058, 1098, 1139,
+ 1184, 1232, 1282, 1336,
+ },
+ {
+ 4, 9, 10, 13, 15, 17, 20, 22, 25, 28, 31, 34,
+ 37, 40, 43, 47, 50, 53, 57, 60, 64, 68, 71, 75,
+ 78, 82, 86, 90, 93, 97, 101, 105, 109, 113, 116, 120,
+ 124, 128, 132, 136, 140, 143, 147, 151, 155, 159, 163, 166,
+ 170, 174, 178, 182, 185, 189, 193, 197, 200, 204, 208, 212,
+ 215, 219, 223, 226, 230, 233, 237, 241, 244, 248, 251, 255,
+ 259, 262, 266, 269, 273, 276, 280, 283, 287, 290, 293, 297,
+ 300, 304, 307, 310, 314, 317, 321, 324, 327, 331, 334, 337,
+ 343, 350, 356, 362, 369, 375, 381, 387, 394, 400, 406, 412,
+ 418, 424, 430, 436, 442, 448, 454, 460, 466, 472, 478, 484,
+ 490, 499, 507, 516, 525, 533, 542, 550, 559, 567, 576, 584,
+ 592, 601, 609, 617, 625, 634, 644, 655, 666, 676, 687, 698,
+ 708, 718, 729, 739, 749, 759, 770, 782, 795, 807, 819, 831,
+ 844, 856, 868, 880, 891, 906, 920, 933, 947, 961, 975, 988,
+ 1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105, 1120, 1137, 1153, 1170,
+ 1186, 1202, 1218, 1236, 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379,
+ 1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537, 1559, 1580, 1601, 1624,
+ 1647, 1670, 1692, 1717, 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929,
+ 1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197, 2236, 2276, 2319, 2363,
+ 2410, 2458, 2508, 2561, 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102,
+ 3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953, 4089, 4236, 4394, 4559,
+ 4737, 4929, 5130, 5347
+ }
+};
+
+const int16_t kAcQLookup[][kQIndexRange] = {
+ {
+ 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126,
+ 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150,
+ 152, 155, 158, 161, 164, 167, 170, 173, 176, 179, 182, 185,
+ 188, 191, 194, 197, 200, 203, 207, 211, 215, 219, 223, 227,
+ 231, 235, 239, 243, 247, 251, 255, 260, 265, 270, 275, 280,
+ 285, 290, 295, 300, 305, 311, 317, 323, 329, 335, 341, 347,
+ 353, 359, 366, 373, 380, 387, 394, 401, 408, 416, 424, 432,
+ 440, 448, 456, 465, 474, 483, 492, 501, 510, 520, 530, 540,
+ 550, 560, 571, 582, 593, 604, 615, 627, 639, 651, 663, 676,
+ 689, 702, 715, 729, 743, 757, 771, 786, 801, 816, 832, 848,
+ 864, 881, 898, 915, 933, 951, 969, 988, 1007, 1026, 1046, 1066,
+ 1087, 1108, 1129, 1151, 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
+ 1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567, 1597, 1628, 1660, 1692,
+ 1725, 1759, 1793, 1828,
+ },
+ {
+ 4, 9, 11, 13, 16, 18, 21, 24, 27, 30, 33, 37,
+ 40, 44, 48, 51, 55, 59, 63, 67, 71, 75, 79, 83,
+ 88, 92, 96, 100, 105, 109, 114, 118, 122, 127, 131, 136,
+ 140, 145, 149, 154, 158, 163, 168, 172, 177, 181, 186, 190,
+ 195, 199, 204, 208, 213, 217, 222, 226, 231, 235, 240, 244,
+ 249, 253, 258, 262, 267, 271, 275, 280, 284, 289, 293, 297,
+ 302, 306, 311, 315, 319, 324, 328, 332, 337, 341, 345, 349,
+ 354, 358, 362, 367, 371, 375, 379, 384, 388, 392, 396, 401,
+ 409, 417, 425, 433, 441, 449, 458, 466, 474, 482, 490, 498,
+ 506, 514, 523, 531, 539, 547, 555, 563, 571, 579, 588, 596,
+ 604, 616, 628, 640, 652, 664, 676, 688, 700, 713, 725, 737,
+ 749, 761, 773, 785, 797, 809, 825, 841, 857, 873, 889, 905,
+ 922, 938, 954, 970, 986, 1002, 1018, 1038, 1058, 1078, 1098, 1118,
+ 1138, 1158, 1178, 1198, 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386,
+ 1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603, 1631, 1663, 1695, 1727,
+ 1759, 1791, 1823, 1859, 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159,
+ 2199, 2239, 2283, 2327, 2371, 2415, 2459, 2507, 2555, 2603, 2651, 2703,
+ 2755, 2807, 2859, 2915, 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391,
+ 3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952, 4028, 4104, 4184, 4264,
+ 4348, 4432, 4516, 4604, 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372,
+ 5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268, 6388, 6512, 6640, 6768,
+ 6900, 7036, 7172, 7312
+ }
+};
+// clang-format on
+
+static_assert(arraysize(kDcQLookup[0]) == arraysize(kAcQLookup[0]),
+ "quantizer lookup arrays of incorrect size");
+
+size_t ClampQ(size_t q) {
+ return std::min(q, kQIndexRange - 1);
+}
+
+int ClampLf(int lf) {
+ const int kMaxLoopFilterLevel = 63;
+ return std::min(std::max(0, lf), kMaxLoopFilterLevel);
+}
+
+} // namespace
+
+bool Vp9FrameHeader::IsKeyframe() const {
+ // When show_existing_frame is true, the frame header does not precede an
+ // actual frame to be decoded, so frame_type does not apply (and is not read
+ // from the stream).
+ return !show_existing_frame && frame_type == KEYFRAME;
+}
+
+bool Vp9FrameHeader::IsIntra() const {
+ return !show_existing_frame && (frame_type == KEYFRAME || intra_only);
+}
+
+Vp9Parser::FrameInfo::FrameInfo(const uint8_t* ptr, off_t size)
+ : ptr(ptr), size(size) {}
+
+bool Vp9FrameContext::IsValid() const {
+  // Probs should be in the [1, 255] range.
+  static_assert(sizeof(Vp9Prob) == 1,
+                "the following checks assume Vp9Prob is a single byte");
+ if (memchr(tx_probs_8x8, 0, sizeof(tx_probs_8x8)))
+ return false;
+ if (memchr(tx_probs_16x16, 0, sizeof(tx_probs_16x16)))
+ return false;
+ if (memchr(tx_probs_32x32, 0, sizeof(tx_probs_32x32)))
+ return false;
+
+ for (auto& a : coef_probs) {
+ for (auto& ai : a) {
+ for (auto& aj : ai) {
+ for (auto& ak : aj) {
+ int max_l = (ak == aj[0]) ? 3 : 6;
+ for (int l = 0; l < max_l; l++) {
+ for (auto& x : ak[l]) {
+ if (x == 0)
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (memchr(skip_prob, 0, sizeof(skip_prob)))
+ return false;
+ if (memchr(inter_mode_probs, 0, sizeof(inter_mode_probs)))
+ return false;
+ if (memchr(interp_filter_probs, 0, sizeof(interp_filter_probs)))
+ return false;
+ if (memchr(is_inter_prob, 0, sizeof(is_inter_prob)))
+ return false;
+ if (memchr(comp_mode_prob, 0, sizeof(comp_mode_prob)))
+ return false;
+ if (memchr(single_ref_prob, 0, sizeof(single_ref_prob)))
+ return false;
+ if (memchr(comp_ref_prob, 0, sizeof(comp_ref_prob)))
+ return false;
+ if (memchr(y_mode_probs, 0, sizeof(y_mode_probs)))
+ return false;
+ if (memchr(uv_mode_probs, 0, sizeof(uv_mode_probs)))
+ return false;
+ if (memchr(partition_probs, 0, sizeof(partition_probs)))
+ return false;
+ if (memchr(mv_joint_probs, 0, sizeof(mv_joint_probs)))
+ return false;
+ if (memchr(mv_sign_prob, 0, sizeof(mv_sign_prob)))
+ return false;
+ if (memchr(mv_class_probs, 0, sizeof(mv_class_probs)))
+ return false;
+ if (memchr(mv_class0_bit_prob, 0, sizeof(mv_class0_bit_prob)))
+ return false;
+ if (memchr(mv_bits_prob, 0, sizeof(mv_bits_prob)))
+ return false;
+ if (memchr(mv_class0_fr_probs, 0, sizeof(mv_class0_fr_probs)))
+ return false;
+ if (memchr(mv_fr_probs, 0, sizeof(mv_fr_probs)))
+ return false;
+ if (memchr(mv_class0_hp_prob, 0, sizeof(mv_class0_hp_prob)))
+ return false;
+ if (memchr(mv_hp_prob, 0, sizeof(mv_hp_prob)))
+ return false;
+
+ return true;
+}
+
+Vp9Parser::Context::Vp9FrameContextManager::Vp9FrameContextManager()
+ : weak_ptr_factory_(this) {}
+
+Vp9Parser::Context::Vp9FrameContextManager::~Vp9FrameContextManager() = default;
+
+const Vp9FrameContext&
+Vp9Parser::Context::Vp9FrameContextManager::frame_context() const {
+ DCHECK(initialized_);
+ DCHECK(!needs_client_update_);
+ return frame_context_;
+}
+
+void Vp9Parser::Context::Vp9FrameContextManager::Reset() {
+ initialized_ = false;
+ needs_client_update_ = false;
+ weak_ptr_factory_.InvalidateWeakPtrs();
+}
+
+void Vp9Parser::Context::Vp9FrameContextManager::SetNeedsClientUpdate() {
+ DCHECK(!needs_client_update_);
+ initialized_ = true;
+ needs_client_update_ = true;
+}
+
+Vp9Parser::ContextRefreshCallback
+Vp9Parser::Context::Vp9FrameContextManager::GetUpdateCb() {
+ if (needs_client_update_)
+ return base::Bind(&Vp9FrameContextManager::UpdateFromClient,
+ weak_ptr_factory_.GetWeakPtr());
+ else
+ return Vp9Parser::ContextRefreshCallback();
+}
+
+void Vp9Parser::Context::Vp9FrameContextManager::Update(
+ const Vp9FrameContext& frame_context) {
+ // DCHECK because we can trust values from our parser.
+ DCHECK(frame_context.IsValid());
+ initialized_ = true;
+ frame_context_ = frame_context;
+
+  // The frame context we are updating may still be awaiting a previous
+  // ContextRefreshCallback. Because we overwrite the context value here, the
+  // previous callback no longer matters; invalidate the weak ptr to prevent
+  // the previous callback from running. With this optimization, we may be
+  // able to parse more frames while the previous ones are still decoding.
+ weak_ptr_factory_.InvalidateWeakPtrs();
+ needs_client_update_ = false;
+}
+
+void Vp9Parser::Context::Vp9FrameContextManager::UpdateFromClient(
+ const Vp9FrameContext& frame_context) {
+ DVLOG(2) << "Got external frame_context update";
+ DCHECK(needs_client_update_);
+ if (!frame_context.IsValid()) {
+ DLOG(ERROR) << "Invalid prob value in frame_context";
+ return;
+ }
+ needs_client_update_ = false;
+ initialized_ = true;
+ frame_context_ = frame_context;
+}
+
+void Vp9Parser::Context::Reset() {
+ memset(&segmentation_, 0, sizeof(segmentation_));
+ memset(&loop_filter_, 0, sizeof(loop_filter_));
+ memset(&ref_slots_, 0, sizeof(ref_slots_));
+ for (auto& manager : frame_context_managers_)
+ manager.Reset();
+}
+
+void Vp9Parser::Context::MarkFrameContextForUpdate(size_t frame_context_idx) {
+ DCHECK_LT(frame_context_idx, arraysize(frame_context_managers_));
+ frame_context_managers_[frame_context_idx].SetNeedsClientUpdate();
+}
+
+void Vp9Parser::Context::UpdateFrameContext(
+ size_t frame_context_idx,
+ const Vp9FrameContext& frame_context) {
+ DCHECK_LT(frame_context_idx, arraysize(frame_context_managers_));
+ frame_context_managers_[frame_context_idx].Update(frame_context);
+}
+
+const Vp9Parser::ReferenceSlot& Vp9Parser::Context::GetRefSlot(
+ size_t ref_type) const {
+ DCHECK_LT(ref_type, arraysize(ref_slots_));
+ return ref_slots_[ref_type];
+}
+
+void Vp9Parser::Context::UpdateRefSlot(
+ size_t ref_type,
+ const Vp9Parser::ReferenceSlot& ref_slot) {
+ DCHECK_LT(ref_type, arraysize(ref_slots_));
+ ref_slots_[ref_type] = ref_slot;
+}
+
+Vp9Parser::Vp9Parser(bool parsing_compressed_header)
+ : parsing_compressed_header_(parsing_compressed_header) {
+ Reset();
+}
+
+Vp9Parser::~Vp9Parser() = default;
+
+void Vp9Parser::SetStream(const uint8_t* stream, off_t stream_size) {
+ DCHECK(stream);
+ stream_ = stream;
+ bytes_left_ = stream_size;
+ frames_.clear();
+}
+
+void Vp9Parser::Reset() {
+ stream_ = nullptr;
+ bytes_left_ = 0;
+ frames_.clear();
+ curr_frame_info_.Reset();
+
+ context_.Reset();
+}
+
+bool Vp9Parser::ParseUncompressedHeader(const FrameInfo& frame_info,
+ Vp9FrameHeader* fhdr,
+ Result* result) {
+ memset(&curr_frame_header_, 0, sizeof(curr_frame_header_));
+ *result = kInvalidStream;
+
+ Vp9UncompressedHeaderParser uncompressed_parser(&context_);
+ if (!uncompressed_parser.Parse(frame_info.ptr, frame_info.size,
+ &curr_frame_header_)) {
+ *result = kInvalidStream;
+ return true;
+ }
+
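+  // A compressed header size of zero means there is no compressed header to
+  // parse (e.g. for show_existing_frame), so the rest of the buffer must be
+  // zero padding.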
+ if (curr_frame_header_.header_size_in_bytes == 0) {
+ // Verify padding bits are zero.
+ for (off_t i = curr_frame_header_.uncompressed_header_size;
+ i < frame_info.size; i++) {
+ if (frame_info.ptr[i] != 0) {
+ DVLOG(1) << "Padding bits are not zeros.";
+ *result = kInvalidStream;
+ return true;
+ }
+ }
+ *fhdr = curr_frame_header_;
+ *result = kOk;
+ return true;
+ }
+ if (curr_frame_header_.uncompressed_header_size +
+ curr_frame_header_.header_size_in_bytes >
+ base::checked_cast<size_t>(frame_info.size)) {
+ DVLOG(1) << "header_size_in_bytes="
+ << curr_frame_header_.header_size_in_bytes
+ << " is larger than bytes left in buffer: "
+ << frame_info.size - curr_frame_header_.uncompressed_header_size;
+ *result = kInvalidStream;
+ return true;
+ }
+
+ return false;
+}
+
+bool Vp9Parser::ParseCompressedHeader(const FrameInfo& frame_info,
+ Result* result) {
+ *result = kInvalidStream;
+ size_t frame_context_idx = curr_frame_header_.frame_context_idx;
+ const Context::Vp9FrameContextManager& context_to_load =
+ context_.frame_context_managers_[frame_context_idx];
+ if (!context_to_load.initialized()) {
+    // 8.2 Frame order constraints: an initialized set of probabilities must
+    // be loaded.
+ DVLOG(1) << "loading uninitialized frame context, index="
+ << frame_context_idx;
+ *result = kInvalidStream;
+ return true;
+ }
+ if (context_to_load.needs_client_update()) {
+ DVLOG(3) << "waiting frame_context_idx=" << frame_context_idx
+ << " to update";
+ curr_frame_info_ = frame_info;
+ *result = kAwaitingRefresh;
+ return true;
+ }
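+  // Load both the initial and the working frame context from the selected
+  // slot; compressed header parsing then updates |frame_context| in place.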
+ curr_frame_header_.initial_frame_context = curr_frame_header_.frame_context =
+ context_to_load.frame_context();
+
+ Vp9CompressedHeaderParser compressed_parser;
+ if (!compressed_parser.Parse(
+ frame_info.ptr + curr_frame_header_.uncompressed_header_size,
+ curr_frame_header_.header_size_in_bytes, &curr_frame_header_)) {
+ *result = kInvalidStream;
+ return true;
+ }
+
+ if (curr_frame_header_.refresh_frame_context) {
+ // In frame parallel mode, we can refresh the context without decoding
+ // tile data.
+ if (curr_frame_header_.frame_parallel_decoding_mode) {
+ context_.UpdateFrameContext(frame_context_idx,
+ curr_frame_header_.frame_context);
+ } else {
+ context_.MarkFrameContextForUpdate(frame_context_idx);
+ }
+ }
+ return false;
+}
+
+Vp9Parser::Result Vp9Parser::ParseNextFrame(Vp9FrameHeader* fhdr) {
+ DCHECK(fhdr);
+ DVLOG(2) << "ParseNextFrame";
+ FrameInfo frame_info;
+ Result result;
+
+  // If |curr_frame_info_| is valid, the uncompressed header was parsed into
+  // |curr_frame_header_| and we are awaiting a context update to proceed with
+  // compressed header parsing.
+ if (curr_frame_info_.IsValid()) {
+ DCHECK(parsing_compressed_header_);
+ frame_info = curr_frame_info_;
+ curr_frame_info_.Reset();
+ } else {
+ if (frames_.empty()) {
+ // No frames to be decoded, if there is no more stream, request more.
+ if (!stream_)
+ return kEOStream;
+
+      // New stream to be parsed; parse it and fill |frames_|.
+ frames_ = ParseSuperframe();
+ if (frames_.empty()) {
+ DVLOG(1) << "Failed parsing superframes";
+ return kInvalidStream;
+ }
+ }
+
+ frame_info = frames_.front();
+ frames_.pop_front();
+
+ if (ParseUncompressedHeader(frame_info, fhdr, &result))
+ return result;
+ }
+
+ if (parsing_compressed_header_) {
+ if (ParseCompressedHeader(frame_info, &result)) {
+ DCHECK(result != kAwaitingRefresh || curr_frame_info_.IsValid());
+ return result;
+ }
+ }
+
+ if (!SetupSegmentationDequant())
+ return kInvalidStream;
+ SetupLoopFilter();
+ UpdateSlots();
+
+ *fhdr = curr_frame_header_;
+ return kOk;
+}
+
+Vp9Parser::ContextRefreshCallback Vp9Parser::GetContextRefreshCb(
+ size_t frame_context_idx) {
+ DCHECK_LT(frame_context_idx, arraysize(context_.frame_context_managers_));
+ auto& frame_context_manager =
+ context_.frame_context_managers_[frame_context_idx];
+
+ return frame_context_manager.GetUpdateCb();
+}
+
+// Annex B Superframes
+std::deque<Vp9Parser::FrameInfo> Vp9Parser::ParseSuperframe() {
+ const uint8_t* stream = stream_;
+ off_t bytes_left = bytes_left_;
+
+ // Make sure we don't parse stream_ more than once.
+ stream_ = nullptr;
+ bytes_left_ = 0;
+
+ if (bytes_left < 1)
+ return std::deque<FrameInfo>();
+
+ // If this is a superframe, the last byte in the stream will contain the
+ // superframe marker. If not, the whole buffer contains a single frame.
+ uint8_t marker = *(stream + bytes_left - 1);
+ if ((marker & 0xe0) != 0xc0) {
+ return {FrameInfo(stream, bytes_left)};
+ }
+
+ DVLOG(1) << "Parsing a superframe";
+
+  // The bytes immediately before the superframe marker constitute the
+  // superframe index, which stores the size of each frame in the superframe.
+  // Calculate its size and set index_ptr to the beginning of it.
+ size_t num_frames = (marker & 0x7) + 1;
+ size_t mag = ((marker >> 3) & 0x3) + 1;
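+  // The index is bracketed by two copies of the marker byte, one at each end,
+  // hence the extra 2 bytes.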
+ off_t index_size = 2 + mag * num_frames;
+
+ if (bytes_left < index_size)
+ return std::deque<FrameInfo>();
+
+ const uint8_t* index_ptr = stream + bytes_left - index_size;
+ if (marker != *index_ptr)
+ return std::deque<FrameInfo>();
+
+ ++index_ptr;
+ bytes_left -= index_size;
+
+  // Parse the frame information contained in the index and add the pointer to
+  // and size of each frame to |frames|.
+ std::deque<FrameInfo> frames;
+ for (size_t i = 0; i < num_frames; ++i) {
+ uint32_t size = 0;
+ for (size_t j = 0; j < mag; ++j) {
+ size |= *index_ptr << (j * 8);
+ ++index_ptr;
+ }
+
+ if (base::checked_cast<off_t>(size) > bytes_left) {
+ DVLOG(1) << "Not enough data in the buffer for frame " << i;
+ return std::deque<FrameInfo>();
+ }
+
+ frames.push_back(FrameInfo(stream, size));
+ stream += size;
+ bytes_left -= size;
+
+ DVLOG(1) << "Frame " << i << ", size: " << size;
+ }
+
+ return frames;
+}
+
+// 8.6.1 Dequantization functions
+size_t Vp9Parser::GetQIndex(const Vp9QuantizationParams& quant,
+ size_t segid) const {
+ const Vp9SegmentationParams& segmentation = context_.segmentation();
+
+ if (segmentation.FeatureEnabled(segid,
+ Vp9SegmentationParams::SEG_LVL_ALT_Q)) {
+ int16_t feature_data =
+ segmentation.FeatureData(segid, Vp9SegmentationParams::SEG_LVL_ALT_Q);
+ size_t q_index = segmentation.abs_or_delta_update
+ ? feature_data
+ : quant.base_q_idx + feature_data;
+ return ClampQ(q_index);
+ }
+
+ return quant.base_q_idx;
+}
+
+// 8.6.1 Dequantization functions
+bool Vp9Parser::SetupSegmentationDequant() {
+ const Vp9QuantizationParams& quant = curr_frame_header_.quant_params;
+ Vp9SegmentationParams& segmentation = context_.segmentation_;
+
+ if (curr_frame_header_.bit_depth > 10) {
+ DLOG(ERROR) << "bit_depth > 10 is not supported yet, kDcQLookup and "
+ "kAcQLookup need to be extended";
+ return false;
+ }
+ const size_t bit_depth_index = (curr_frame_header_.bit_depth == 8) ? 0 : 1;
+
+ if (segmentation.enabled) {
+ for (size_t i = 0; i < Vp9SegmentationParams::kNumSegments; ++i) {
+ const size_t q_index = GetQIndex(quant, i);
+ segmentation.y_dequant[i][0] =
+ kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
+ segmentation.y_dequant[i][1] =
+ kAcQLookup[bit_depth_index][ClampQ(q_index)];
+ segmentation.uv_dequant[i][0] =
+ kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_dc)];
+ segmentation.uv_dequant[i][1] =
+ kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
+ }
+ } else {
+ const size_t q_index = quant.base_q_idx;
+ segmentation.y_dequant[0][0] =
+ kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
+ segmentation.y_dequant[0][1] = kAcQLookup[bit_depth_index][ClampQ(q_index)];
+ segmentation.uv_dequant[0][0] =
+ kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_dc)];
+ segmentation.uv_dequant[0][1] =
+ kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
+ }
+ return true;
+}
+
+// 8.8.1 Loop filter frame init process
+void Vp9Parser::SetupLoopFilter() {
+ Vp9LoopFilterParams& loop_filter = context_.loop_filter_;
+ if (!loop_filter.level)
+ return;
+
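+  // Reference and mode deltas are applied at double strength once the filter
+  // level reaches 32.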
+ int scale = loop_filter.level < 32 ? 1 : 2;
+
+ for (size_t i = 0; i < Vp9SegmentationParams::kNumSegments; ++i) {
+ int level = loop_filter.level;
+ const Vp9SegmentationParams& segmentation = context_.segmentation();
+
+ if (segmentation.FeatureEnabled(i, Vp9SegmentationParams::SEG_LVL_ALT_LF)) {
+ int feature_data =
+ segmentation.FeatureData(i, Vp9SegmentationParams::SEG_LVL_ALT_LF);
+ level = ClampLf(segmentation.abs_or_delta_update ? feature_data
+ : level + feature_data);
+ }
+
+ if (!loop_filter.delta_enabled) {
+ memset(loop_filter.lvl[i], level, sizeof(loop_filter.lvl[i]));
+ } else {
+ loop_filter.lvl[i][Vp9RefType::VP9_FRAME_INTRA][0] = ClampLf(
+ level + loop_filter.ref_deltas[Vp9RefType::VP9_FRAME_INTRA] * scale);
+ loop_filter.lvl[i][Vp9RefType::VP9_FRAME_INTRA][1] = 0;
+
+ for (size_t type = Vp9RefType::VP9_FRAME_LAST;
+ type < Vp9RefType::VP9_FRAME_MAX; ++type) {
+ for (size_t mode = 0; mode < Vp9LoopFilterParams::kNumModeDeltas;
+ ++mode) {
+ loop_filter.lvl[i][type][mode] =
+ ClampLf(level + loop_filter.ref_deltas[type] * scale +
+ loop_filter.mode_deltas[mode] * scale);
+ }
+ }
+ }
+ }
+}
+
+void Vp9Parser::UpdateSlots() {
+ // 8.10 Reference frame update process
+ for (size_t i = 0; i < kVp9NumRefFrames; i++) {
+ if (curr_frame_header_.RefreshFlag(i)) {
+ ReferenceSlot ref_slot;
+ ref_slot.initialized = true;
+
+ ref_slot.frame_width = curr_frame_header_.frame_width;
+ ref_slot.frame_height = curr_frame_header_.frame_height;
+ ref_slot.subsampling_x = curr_frame_header_.subsampling_x;
+ ref_slot.subsampling_y = curr_frame_header_.subsampling_y;
+ ref_slot.bit_depth = curr_frame_header_.bit_depth;
+
+ ref_slot.profile = curr_frame_header_.profile;
+ ref_slot.color_space = curr_frame_header_.color_space;
+ context_.UpdateRefSlot(i, ref_slot);
+ }
+ }
+}
+
+} // namespace media
diff --git a/accel/vp9_parser.h b/accel/vp9_parser.h
new file mode 100644
index 0000000..ab1fa57
--- /dev/null
+++ b/accel/vp9_parser.h
@@ -0,0 +1,448 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP9 bitstream parser. The main
+// purpose of this parser is to support hardware decode acceleration. Some
+// accelerators, e.g. libva which implements VA-API, require the caller
+// (chrome) to feed them parsed VP9 frame header.
+//
+// See media::VP9Decoder for example usage.
+//
+// Note: ported from Chromium commit head: ec6c6e0
+#ifndef VP9_PARSER_H_
+#define VP9_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <deque>
+#include <memory>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+
+namespace media {
+
+const int kVp9MaxProfile = 4;
+const int kVp9NumRefFramesLog2 = 3;
+const size_t kVp9NumRefFrames = 1 << kVp9NumRefFramesLog2;
+const uint8_t kVp9MaxProb = 255;
+const size_t kVp9NumRefsPerFrame = 3;
+const size_t kVp9NumFrameContextsLog2 = 2;
+const size_t kVp9NumFrameContexts = 1 << kVp9NumFrameContextsLog2;
+
+using Vp9Prob = uint8_t;
+
+enum class Vp9ColorSpace {
+ UNKNOWN = 0,
+ BT_601 = 1,
+ BT_709 = 2,
+ SMPTE_170 = 3,
+ SMPTE_240 = 4,
+ BT_2020 = 5,
+ RESERVED = 6,
+ SRGB = 7,
+};
+
+enum Vp9InterpolationFilter {
+ EIGHTTAP = 0,
+ EIGHTTAP_SMOOTH = 1,
+ EIGHTTAP_SHARP = 2,
+ BILINEAR = 3,
+ SWITCHABLE = 4,
+};
+
+enum Vp9RefType {
+ VP9_FRAME_INTRA = 0,
+ VP9_FRAME_LAST = 1,
+ VP9_FRAME_GOLDEN = 2,
+ VP9_FRAME_ALTREF = 3,
+ VP9_FRAME_MAX = 4,
+};
+
+enum Vp9ReferenceMode {
+ SINGLE_REFERENCE = 0,
+ COMPOUND_REFERENCE = 1,
+ REFERENCE_MODE_SELECT = 2,
+};
+
+struct Vp9SegmentationParams {
+ static const size_t kNumSegments = 8;
+ static const size_t kNumTreeProbs = kNumSegments - 1;
+ static const size_t kNumPredictionProbs = 3;
+ enum SegmentLevelFeature {
+ SEG_LVL_ALT_Q = 0,
+ SEG_LVL_ALT_LF = 1,
+ SEG_LVL_REF_FRAME = 2,
+ SEG_LVL_SKIP = 3,
+ SEG_LVL_MAX
+ };
+
+ bool enabled;
+
+ bool update_map;
+ uint8_t tree_probs[kNumTreeProbs];
+ bool temporal_update;
+ uint8_t pred_probs[kNumPredictionProbs];
+
+ bool update_data;
+ bool abs_or_delta_update;
+ bool feature_enabled[kNumSegments][SEG_LVL_MAX];
+ int16_t feature_data[kNumSegments][SEG_LVL_MAX];
+
+ int16_t y_dequant[kNumSegments][2];
+ int16_t uv_dequant[kNumSegments][2];
+
+ bool FeatureEnabled(size_t seg_id, SegmentLevelFeature feature) const {
+ return feature_enabled[seg_id][feature];
+ }
+
+ int16_t FeatureData(size_t seg_id, SegmentLevelFeature feature) const {
+ return feature_data[seg_id][feature];
+ }
+};
+
+struct Vp9LoopFilterParams {
+ static const size_t kNumModeDeltas = 2;
+
+ uint8_t level;
+ uint8_t sharpness;
+
+ bool delta_enabled;
+ bool delta_update;
+ bool update_ref_deltas[VP9_FRAME_MAX];
+ int8_t ref_deltas[VP9_FRAME_MAX];
+ bool update_mode_deltas[kNumModeDeltas];
+ int8_t mode_deltas[kNumModeDeltas];
+
+ // Calculated from above fields.
+ uint8_t lvl[Vp9SegmentationParams::kNumSegments][VP9_FRAME_MAX]
+ [kNumModeDeltas];
+};
+
+// Members of Vp9FrameHeader will be 0-initialized by Vp9Parser::ParseNextFrame.
+struct Vp9QuantizationParams {
+ bool IsLossless() const {
+ return base_q_idx == 0 && delta_q_y_dc == 0 && delta_q_uv_dc == 0 &&
+ delta_q_uv_ac == 0;
+ }
+
+ uint8_t base_q_idx;
+ int8_t delta_q_y_dc;
+ int8_t delta_q_uv_dc;
+ int8_t delta_q_uv_ac;
+};
+
+// Entropy context for frame parsing
+struct Vp9FrameContext {
+ bool IsValid() const;
+
+ Vp9Prob tx_probs_8x8[2][1];
+ Vp9Prob tx_probs_16x16[2][2];
+ Vp9Prob tx_probs_32x32[2][3];
+
+ Vp9Prob coef_probs[4][2][2][6][6][3];
+ Vp9Prob skip_prob[3];
+ Vp9Prob inter_mode_probs[7][3];
+ Vp9Prob interp_filter_probs[4][2];
+ Vp9Prob is_inter_prob[4];
+
+ Vp9Prob comp_mode_prob[5];
+ Vp9Prob single_ref_prob[5][2];
+ Vp9Prob comp_ref_prob[5];
+
+ Vp9Prob y_mode_probs[4][9];
+ Vp9Prob uv_mode_probs[10][9];
+ Vp9Prob partition_probs[16][3];
+
+ Vp9Prob mv_joint_probs[3];
+ Vp9Prob mv_sign_prob[2];
+ Vp9Prob mv_class_probs[2][10];
+ Vp9Prob mv_class0_bit_prob[2];
+ Vp9Prob mv_bits_prob[2][10];
+ Vp9Prob mv_class0_fr_probs[2][2][3];
+ Vp9Prob mv_fr_probs[2][3];
+ Vp9Prob mv_class0_hp_prob[2];
+ Vp9Prob mv_hp_prob[2];
+};
+
+struct Vp9CompressedHeader {
+ enum Vp9TxMode {
+ ONLY_4X4 = 0,
+ ALLOW_8X8 = 1,
+ ALLOW_16X16 = 2,
+ ALLOW_32X32 = 3,
+ TX_MODE_SELECT = 4,
+ TX_MODES = 5,
+ };
+
+ Vp9TxMode tx_mode;
+ Vp9ReferenceMode reference_mode;
+};
+
+// VP9 frame header.
+struct Vp9FrameHeader {
+ enum FrameType {
+ KEYFRAME = 0,
+ INTERFRAME = 1,
+ };
+
+ bool IsKeyframe() const;
+ bool IsIntra() const;
+ bool RefreshFlag(size_t i) const {
+ return !!(refresh_frame_flags & (1u << i));
+ }
+
+ uint8_t profile;
+
+ bool show_existing_frame;
+ uint8_t frame_to_show_map_idx;
+
+ FrameType frame_type;
+
+ bool show_frame;
+ bool error_resilient_mode;
+
+ uint8_t bit_depth;
+ Vp9ColorSpace color_space;
+ bool color_range;
+ uint8_t subsampling_x;
+ uint8_t subsampling_y;
+
+ // The range of frame_width and frame_height is 1..2^16.
+ uint32_t frame_width;
+ uint32_t frame_height;
+ uint32_t render_width;
+ uint32_t render_height;
+
+ bool intra_only;
+ uint8_t reset_frame_context;
+ uint8_t refresh_frame_flags;
+ uint8_t ref_frame_idx[kVp9NumRefsPerFrame];
+ bool ref_frame_sign_bias[Vp9RefType::VP9_FRAME_MAX];
+ bool allow_high_precision_mv;
+ Vp9InterpolationFilter interpolation_filter;
+
+ bool refresh_frame_context;
+ bool frame_parallel_decoding_mode;
+ uint8_t frame_context_idx;
+ // |frame_context_idx_to_save_probs| is to be used by save_probs() only, and
+ // |frame_context_idx| otherwise.
+ uint8_t frame_context_idx_to_save_probs;
+
+ Vp9QuantizationParams quant_params;
+
+ uint8_t tile_cols_log2;
+ uint8_t tile_rows_log2;
+
+  // Pointer to the beginning of frame data. It is the responsibility of the
+ // client of the Vp9Parser to maintain validity of this data while it is
+ // being used outside of that class.
+ const uint8_t* data;
+
+ // Size of |data| in bytes.
+ size_t frame_size;
+
+ // Size of compressed header in bytes.
+ size_t header_size_in_bytes;
+
+ // Size of uncompressed header in bytes.
+ size_t uncompressed_header_size;
+
+ Vp9CompressedHeader compressed_header;
+ // Initial frame entropy context after load_probs2(frame_context_idx).
+ Vp9FrameContext initial_frame_context;
+ // Current frame entropy context after header parsing.
+ Vp9FrameContext frame_context;
+};
+
+// A parser for VP9 bitstream.
+class Vp9Parser {
+ public:
+  // If a context update is needed after decoding a frame, the client must
+ // execute this callback, passing the updated context state.
+ using ContextRefreshCallback = base::Callback<void(const Vp9FrameContext&)>;
+
+ // ParseNextFrame() return values. See documentation for ParseNextFrame().
+ enum Result {
+ kOk,
+ kInvalidStream,
+ kEOStream,
+ kAwaitingRefresh,
+ };
+
+ // The parsing context to keep track of references.
+ struct ReferenceSlot {
+ bool initialized;
+ uint32_t frame_width;
+ uint32_t frame_height;
+ uint8_t subsampling_x;
+ uint8_t subsampling_y;
+ uint8_t bit_depth;
+
+ // More fields for consistency checking.
+ uint8_t profile;
+ Vp9ColorSpace color_space;
+ };
+
+ // The parsing context that persists across frames.
+ class Context {
+ public:
+ class Vp9FrameContextManager {
+ public:
+ Vp9FrameContextManager();
+ ~Vp9FrameContextManager();
+ bool initialized() const { return initialized_; }
+ bool needs_client_update() const { return needs_client_update_; }
+ const Vp9FrameContext& frame_context() const;
+
+ // Resets to uninitialized state.
+ void Reset();
+
+      // Marks this context as requiring an update from the parser's client.
+ void SetNeedsClientUpdate();
+
+ // Updates frame context.
+ void Update(const Vp9FrameContext& frame_context);
+
+      // Returns a callback with which to update the frame context at a later
+      // time.
+ ContextRefreshCallback GetUpdateCb();
+
+ private:
+      // Updates frame context from the parser's client.
+ void UpdateFromClient(const Vp9FrameContext& frame_context);
+
+ bool initialized_ = false;
+ bool needs_client_update_ = false;
+ Vp9FrameContext frame_context_;
+
+ base::WeakPtrFactory<Vp9FrameContextManager> weak_ptr_factory_;
+ };
+
+ void Reset();
+
+    // Mark |frame_context_idx| as requiring an update from the client.
+ void MarkFrameContextForUpdate(size_t frame_context_idx);
+
+ // Update frame context at |frame_context_idx| with the contents of
+ // |frame_context|.
+ void UpdateFrameContext(size_t frame_context_idx,
+ const Vp9FrameContext& frame_context);
+
+ // Return ReferenceSlot for frame at |ref_idx|.
+ const ReferenceSlot& GetRefSlot(size_t ref_idx) const;
+
+ // Update contents of ReferenceSlot at |ref_idx| with the contents of
+ // |ref_slot|.
+ void UpdateRefSlot(size_t ref_idx, const ReferenceSlot& ref_slot);
+
+ const Vp9SegmentationParams& segmentation() const { return segmentation_; }
+
+ const Vp9LoopFilterParams& loop_filter() const { return loop_filter_; }
+
+ private:
+ friend class Vp9UncompressedHeaderParser;
+ friend class Vp9Parser;
+
+ // Segmentation and loop filter state.
+ Vp9SegmentationParams segmentation_;
+ Vp9LoopFilterParams loop_filter_;
+
+ // Frame references.
+ ReferenceSlot ref_slots_[kVp9NumRefFrames];
+
+ Vp9FrameContextManager frame_context_managers_[kVp9NumFrameContexts];
+ };
+
+ // The constructor. See ParseNextFrame() for comments for
+ // |parsing_compressed_header|.
+ explicit Vp9Parser(bool parsing_compressed_header);
+ ~Vp9Parser();
+
+ // Set a new stream buffer to read from, starting at |stream| and of size
+ // |stream_size| in bytes. |stream| must point to the beginning of a single
+  // frame or a single superframe, is owned by the caller, and must remain
+  // valid until the next call to SetStream().
+ void SetStream(const uint8_t* stream, off_t stream_size);
+
+ // Parse the next frame in the current stream buffer, filling |fhdr| with
+ // the parsed frame header and updating current segmentation and loop filter
+ // state.
+ // Return kOk if a frame has successfully been parsed,
+ // kEOStream if there is no more data in the current stream buffer,
+  // kAwaitingRefresh if this frame is awaiting a frame context update, or
+ // kInvalidStream on error.
+ Result ParseNextFrame(Vp9FrameHeader* fhdr);
+
+ // Return current parsing context.
+ const Context& context() const { return context_; }
+
+ // Return a ContextRefreshCallback, which, if not null, has to be called with
+ // the new context state after the frame associated with |frame_context_idx|
+ // is decoded.
+ ContextRefreshCallback GetContextRefreshCb(size_t frame_context_idx);
+
+ // Clear parser state and return to an initialized state.
+ void Reset();
+
+ private:
+ // Stores start pointer and size of each frame within the current superframe.
+ struct FrameInfo {
+ FrameInfo() = default;
+ FrameInfo(const uint8_t* ptr, off_t size);
+ bool IsValid() const { return ptr != nullptr; }
+ void Reset() { ptr = nullptr; }
+
+ // Starting address of the frame.
+ const uint8_t* ptr = nullptr;
+
+ // Size of the frame in bytes.
+ off_t size = 0;
+ };
+
+ std::deque<FrameInfo> ParseSuperframe();
+
+  // Returns true and populates |result| with the parsing result if parsing of
+  // the current frame is finished (possibly unsuccessfully). |fhdr| will only
+  // be populated and valid if |result| is kOk. Otherwise returns false,
+  // indicating that the compressed header must be parsed next.
+ bool ParseUncompressedHeader(const FrameInfo& frame_info,
+ Vp9FrameHeader* fhdr,
+ Result* result);
+
+  // Returns true if parsing of the current frame is finished, in which case
+  // |result| is populated with the parsing result. Otherwise, setup of the
+  // current frame needs to continue.
+ bool ParseCompressedHeader(const FrameInfo& frame_info, Result* result);
+
+ size_t GetQIndex(const Vp9QuantizationParams& quant, size_t segid) const;
+ // Returns true if the setup succeeded.
+ bool SetupSegmentationDequant();
+ void SetupLoopFilter();
+ void UpdateSlots();
+
+ // Current address in the bitstream buffer.
+ const uint8_t* stream_;
+
+ // Remaining bytes in stream_.
+ off_t bytes_left_;
+
+ const bool parsing_compressed_header_;
+
+ // FrameInfo for the remaining frames in the current superframe to be parsed.
+ std::deque<FrameInfo> frames_;
+
+ Context context_;
+
+ FrameInfo curr_frame_info_;
+ Vp9FrameHeader curr_frame_header_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp9Parser);
+};
+
+} // namespace media
+
+#endif // VP9_PARSER_H_
diff --git a/accel/vp9_picture.cc b/accel/vp9_picture.cc
new file mode 100644
index 0000000..df2c3b0
--- /dev/null
+++ b/accel/vp9_picture.cc
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 6e70beb
+
+#include "vp9_picture.h"
+
+namespace media {
+
+VP9Picture::VP9Picture() {}
+
+VP9Picture::~VP9Picture() {}
+
+V4L2VP9Picture* VP9Picture::AsV4L2VP9Picture() {
+ return nullptr;
+}
+
+} // namespace media
diff --git a/accel/vp9_picture.h b/accel/vp9_picture.h
new file mode 100644
index 0000000..efff37b
--- /dev/null
+++ b/accel/vp9_picture.h
@@ -0,0 +1,42 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 70340ce
+
+#ifndef VP9_PICTURE_H_
+#define VP9_PICTURE_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "rect.h"
+#include "vp9_parser.h"
+
+namespace media {
+
+class V4L2VP9Picture;
+
+class VP9Picture : public base::RefCountedThreadSafe<VP9Picture> {
+ public:
+ VP9Picture();
+
+ virtual V4L2VP9Picture* AsV4L2VP9Picture();
+
+ std::unique_ptr<Vp9FrameHeader> frame_hdr;
+
+  // The visible size of the picture. This is either parsed from the frame
+  // header, or set to Rect(0, 0) to indicate that the value is invalid or
+  // unavailable.
+ Rect visible_rect;
+
+ protected:
+ friend class base::RefCountedThreadSafe<VP9Picture>;
+ virtual ~VP9Picture();
+
+ DISALLOW_COPY_AND_ASSIGN(VP9Picture);
+};
+
+} // namespace media
+
+#endif // VP9_PICTURE_H_
diff --git a/accel/vp9_raw_bits_reader.cc b/accel/vp9_raw_bits_reader.cc
new file mode 100644
index 0000000..dea06e0
--- /dev/null
+++ b/accel/vp9_raw_bits_reader.cc
@@ -0,0 +1,62 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
+
+#include "vp9_raw_bits_reader.h"
+
+#include <limits.h>
+
+#include "base/logging.h"
+#include "bit_reader.h"
+
+namespace media {
+
+Vp9RawBitsReader::Vp9RawBitsReader() : valid_(true) {}
+
+Vp9RawBitsReader::~Vp9RawBitsReader() = default;
+
+void Vp9RawBitsReader::Initialize(const uint8_t* data, size_t size) {
+ DCHECK(data);
+ reader_.reset(new BitReader(data, size));
+ valid_ = true;
+}
+
+bool Vp9RawBitsReader::ReadBool() {
+ DCHECK(reader_);
+ if (!valid_)
+ return false;
+
+ int value = 0;
+ valid_ = reader_->ReadBits(1, &value);
+ return valid_ ? value == 1 : false;
+}
+
+int Vp9RawBitsReader::ReadLiteral(int bits) {
+ DCHECK(reader_);
+ if (!valid_)
+ return 0;
+
+ int value = 0;
+ DCHECK_LT(static_cast<size_t>(bits), sizeof(value) * 8);
+ valid_ = reader_->ReadBits(bits, &value);
+ return valid_ ? value : 0;
+}
+
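+// Sign-magnitude decoding: the magnitude is read first, then one sign bit.
+// Illustrative example (not from the original source): with |bits| = 4, a
+// literal of 5 followed by a set sign bit decodes to -5.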
+int Vp9RawBitsReader::ReadSignedLiteral(int bits) {
+ int value = ReadLiteral(bits);
+ return ReadBool() ? -value : value;
+}
+
+size_t Vp9RawBitsReader::GetBytesRead() const {
+ DCHECK(reader_);
+ return (reader_->bits_read() + 7) / 8;
+}
+
+bool Vp9RawBitsReader::ConsumeTrailingBits() {
+ DCHECK(reader_);
+ int bits_left = GetBytesRead() * 8 - reader_->bits_read();
+ return ReadLiteral(bits_left) == 0;
+}
+
+} // namespace media
diff --git a/accel/vp9_raw_bits_reader.h b/accel/vp9_raw_bits_reader.h
new file mode 100644
index 0000000..04ad413
--- /dev/null
+++ b/accel/vp9_raw_bits_reader.h
@@ -0,0 +1,67 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
+
+#ifndef VP9_RAW_BITS_READER_H_
+#define VP9_RAW_BITS_READER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+
+namespace media {
+
+class BitReader;
+
+// A class to read a raw bitstream. See the VP9 spec, "RAW-BITS DECODING"
+// section, for details.
+class Vp9RawBitsReader {
+ public:
+ Vp9RawBitsReader();
+ ~Vp9RawBitsReader();
+
+ // |data| is the input buffer with |size| bytes.
+ void Initialize(const uint8_t* data, size_t size);
+
+ // Returns true if none of the reads since the last Initialize() call has
+ // gone beyond the end of available data.
+ bool IsValid() const { return valid_; }
+
+ // Returns how many bytes were read since the last Initialize() call.
+ // Partial bytes will be counted as one byte. For example, it will return 1
+ // if 3 bits were read.
+ size_t GetBytesRead() const;
+
+  // Reads one bit.
+  // If the read goes beyond the end of the buffer, the return value is
+  // undefined.
+  bool ReadBool();
+
+  // Reads a literal with |bits| bits.
+  // If the read goes beyond the end of the buffer, the return value is
+  // undefined.
+  int ReadLiteral(int bits);
+
+  // Reads a signed literal with |bits| bits (not including the sign bit).
+  // If the read goes beyond the end of the buffer, the return value is
+  // undefined.
+  int ReadSignedLiteral(int bits);
+
+  // Consumes trailing bits up to the next byte boundary. Returns true if
+  // there are no trailing bits or if they are all zero.
+  bool ConsumeTrailingBits();
+
+ private:
+ std::unique_ptr<BitReader> reader_;
+
+ // Indicates if none of the reads since the last Initialize() call has gone
+ // beyond the end of available data.
+ bool valid_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp9RawBitsReader);
+};
+
+} // namespace media
+
+#endif // VP9_RAW_BITS_READER_H_
diff --git a/accel/vp9_uncompressed_header_parser.cc b/accel/vp9_uncompressed_header_parser.cc
new file mode 100644
index 0000000..f6dc2eb
--- /dev/null
+++ b/accel/vp9_uncompressed_header_parser.cc
@@ -0,0 +1,1103 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: f06caa0
+
+#include "vp9_uncompressed_header_parser.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+namespace {
+
+// 10.5 Default probability tables
+Vp9FrameContext kVp9DefaultFrameContext = {
+ // tx_probs_8x8
+ {{100}, {66}},
+ // tx_probs_16x16
+ {{20, 152}, {15, 101}},
+ // tx_probs_32x32
+ {{3, 136, 37}, {5, 52, 13}},
+ // coef_probs
+ {// 4x4
+ {{{{{195, 29, 183}, {84, 49, 136}, {8, 42, 71}},
+ {{31, 107, 169},
+ {35, 99, 159},
+ {17, 82, 140},
+ {8, 66, 114},
+ {2, 44, 76},
+ {1, 19, 32}},
+ {{40, 132, 201},
+ {29, 114, 187},
+ {13, 91, 157},
+ {7, 75, 127},
+ {3, 58, 95},
+ {1, 28, 47}},
+ {{69, 142, 221},
+ {42, 122, 201},
+ {15, 91, 159},
+ {6, 67, 121},
+ {1, 42, 77},
+ {1, 17, 31}},
+ {{102, 148, 228},
+ {67, 117, 204},
+ {17, 82, 154},
+ {6, 59, 114},
+ {2, 39, 75},
+ {1, 15, 29}},
+ {{156, 57, 233},
+ {119, 57, 212},
+ {58, 48, 163},
+ {29, 40, 124},
+ {12, 30, 81},
+ {3, 12, 31}}},
+ {{{191, 107, 226}, {124, 117, 204}, {25, 99, 155}},
+ {{29, 148, 210},
+ {37, 126, 194},
+ {8, 93, 157},
+ {2, 68, 118},
+ {1, 39, 69},
+ {1, 17, 33}},
+ {{41, 151, 213},
+ {27, 123, 193},
+ {3, 82, 144},
+ {1, 58, 105},
+ {1, 32, 60},
+ {1, 13, 26}},
+ {{59, 159, 220},
+ {23, 126, 198},
+ {4, 88, 151},
+ {1, 66, 114},
+ {1, 38, 71},
+ {1, 18, 34}},
+ {{114, 136, 232},
+ {51, 114, 207},
+ {11, 83, 155},
+ {3, 56, 105},
+ {1, 33, 65},
+ {1, 17, 34}},
+ {{149, 65, 234},
+ {121, 57, 215},
+ {61, 49, 166},
+ {28, 36, 114},
+ {12, 25, 76},
+ {3, 16, 42}}}},
+ {{{{214, 49, 220}, {132, 63, 188}, {42, 65, 137}},
+ {{85, 137, 221},
+ {104, 131, 216},
+ {49, 111, 192},
+ {21, 87, 155},
+ {2, 49, 87},
+ {1, 16, 28}},
+ {{89, 163, 230},
+ {90, 137, 220},
+ {29, 100, 183},
+ {10, 70, 135},
+ {2, 42, 81},
+ {1, 17, 33}},
+ {{108, 167, 237},
+ {55, 133, 222},
+ {15, 97, 179},
+ {4, 72, 135},
+ {1, 45, 85},
+ {1, 19, 38}},
+ {{124, 146, 240},
+ {66, 124, 224},
+ {17, 88, 175},
+ {4, 58, 122},
+ {1, 36, 75},
+ {1, 18, 37}},
+ {{141, 79, 241},
+ {126, 70, 227},
+ {66, 58, 182},
+ {30, 44, 136},
+ {12, 34, 96},
+ {2, 20, 47}}},
+ {{{229, 99, 249}, {143, 111, 235}, {46, 109, 192}},
+ {{82, 158, 236},
+ {94, 146, 224},
+ {25, 117, 191},
+ {9, 87, 149},
+ {3, 56, 99},
+ {1, 33, 57}},
+ {{83, 167, 237},
+ {68, 145, 222},
+ {10, 103, 177},
+ {2, 72, 131},
+ {1, 41, 79},
+ {1, 20, 39}},
+ {{99, 167, 239},
+ {47, 141, 224},
+ {10, 104, 178},
+ {2, 73, 133},
+ {1, 44, 85},
+ {1, 22, 47}},
+ {{127, 145, 243},
+ {71, 129, 228},
+ {17, 93, 177},
+ {3, 61, 124},
+ {1, 41, 84},
+ {1, 21, 52}},
+ {{157, 78, 244},
+ {140, 72, 231},
+ {69, 58, 184},
+ {31, 44, 137},
+ {14, 38, 105},
+ {8, 23, 61}}}}},
+ // 8x8
+ {{{{{125, 34, 187}, {52, 41, 133}, {6, 31, 56}},
+ {{37, 109, 153},
+ {51, 102, 147},
+ {23, 87, 128},
+ {8, 67, 101},
+ {1, 41, 63},
+ {1, 19, 29}},
+ {{31, 154, 185},
+ {17, 127, 175},
+ {6, 96, 145},
+ {2, 73, 114},
+ {1, 51, 82},
+ {1, 28, 45}},
+ {{23, 163, 200},
+ {10, 131, 185},
+ {2, 93, 148},
+ {1, 67, 111},
+ {1, 41, 69},
+ {1, 14, 24}},
+ {{29, 176, 217},
+ {12, 145, 201},
+ {3, 101, 156},
+ {1, 69, 111},
+ {1, 39, 63},
+ {1, 14, 23}},
+ {{57, 192, 233},
+ {25, 154, 215},
+ {6, 109, 167},
+ {3, 78, 118},
+ {1, 48, 69},
+ {1, 21, 29}}},
+ {{{202, 105, 245}, {108, 106, 216}, {18, 90, 144}},
+ {{33, 172, 219},
+ {64, 149, 206},
+ {14, 117, 177},
+ {5, 90, 141},
+ {2, 61, 95},
+ {1, 37, 57}},
+ {{33, 179, 220},
+ {11, 140, 198},
+ {1, 89, 148},
+ {1, 60, 104},
+ {1, 33, 57},
+ {1, 12, 21}},
+ {{30, 181, 221},
+ {8, 141, 198},
+ {1, 87, 145},
+ {1, 58, 100},
+ {1, 31, 55},
+ {1, 12, 20}},
+ {{32, 186, 224},
+ {7, 142, 198},
+ {1, 86, 143},
+ {1, 58, 100},
+ {1, 31, 55},
+ {1, 12, 22}},
+ {{57, 192, 227},
+ {20, 143, 204},
+ {3, 96, 154},
+ {1, 68, 112},
+ {1, 42, 69},
+ {1, 19, 32}}}},
+ {{{{212, 35, 215}, {113, 47, 169}, {29, 48, 105}},
+ {{74, 129, 203},
+ {106, 120, 203},
+ {49, 107, 178},
+ {19, 84, 144},
+ {4, 50, 84},
+ {1, 15, 25}},
+ {{71, 172, 217},
+ {44, 141, 209},
+ {15, 102, 173},
+ {6, 76, 133},
+ {2, 51, 89},
+ {1, 24, 42}},
+ {{64, 185, 231},
+ {31, 148, 216},
+ {8, 103, 175},
+ {3, 74, 131},
+ {1, 46, 81},
+ {1, 18, 30}},
+ {{65, 196, 235},
+ {25, 157, 221},
+ {5, 105, 174},
+ {1, 67, 120},
+ {1, 38, 69},
+ {1, 15, 30}},
+ {{65, 204, 238},
+ {30, 156, 224},
+ {7, 107, 177},
+ {2, 70, 124},
+ {1, 42, 73},
+ {1, 18, 34}}},
+ {{{225, 86, 251}, {144, 104, 235}, {42, 99, 181}},
+ {{85, 175, 239},
+ {112, 165, 229},
+ {29, 136, 200},
+ {12, 103, 162},
+ {6, 77, 123},
+ {2, 53, 84}},
+ {{75, 183, 239},
+ {30, 155, 221},
+ {3, 106, 171},
+ {1, 74, 128},
+ {1, 44, 76},
+ {1, 17, 28}},
+ {{73, 185, 240},
+ {27, 159, 222},
+ {2, 107, 172},
+ {1, 75, 127},
+ {1, 42, 73},
+ {1, 17, 29}},
+ {{62, 190, 238},
+ {21, 159, 222},
+ {2, 107, 172},
+ {1, 72, 122},
+ {1, 40, 71},
+ {1, 18, 32}},
+ {{61, 199, 240},
+ {27, 161, 226},
+ {4, 113, 180},
+ {1, 76, 129},
+ {1, 46, 80},
+ {1, 23, 41}}}}},
+ // 16x16
+ {{{{{7, 27, 153}, {5, 30, 95}, {1, 16, 30}},
+ {{50, 75, 127},
+ {57, 75, 124},
+ {27, 67, 108},
+ {10, 54, 86},
+ {1, 33, 52},
+ {1, 12, 18}},
+ {{43, 125, 151},
+ {26, 108, 148},
+ {7, 83, 122},
+ {2, 59, 89},
+ {1, 38, 60},
+ {1, 17, 27}},
+ {{23, 144, 163},
+ {13, 112, 154},
+ {2, 75, 117},
+ {1, 50, 81},
+ {1, 31, 51},
+ {1, 14, 23}},
+ {{18, 162, 185},
+ {6, 123, 171},
+ {1, 78, 125},
+ {1, 51, 86},
+ {1, 31, 54},
+ {1, 14, 23}},
+ {{15, 199, 227},
+ {3, 150, 204},
+ {1, 91, 146},
+ {1, 55, 95},
+ {1, 30, 53},
+ {1, 11, 20}}},
+ {{{19, 55, 240}, {19, 59, 196}, {3, 52, 105}},
+ {{41, 166, 207},
+ {104, 153, 199},
+ {31, 123, 181},
+ {14, 101, 152},
+ {5, 72, 106},
+ {1, 36, 52}},
+ {{35, 176, 211},
+ {12, 131, 190},
+ {2, 88, 144},
+ {1, 60, 101},
+ {1, 36, 60},
+ {1, 16, 28}},
+ {{28, 183, 213},
+ {8, 134, 191},
+ {1, 86, 142},
+ {1, 56, 96},
+ {1, 30, 53},
+ {1, 12, 20}},
+ {{20, 190, 215},
+ {4, 135, 192},
+ {1, 84, 139},
+ {1, 53, 91},
+ {1, 28, 49},
+ {1, 11, 20}},
+ {{13, 196, 216},
+ {2, 137, 192},
+ {1, 86, 143},
+ {1, 57, 99},
+ {1, 32, 56},
+ {1, 13, 24}}}},
+ {{{{211, 29, 217}, {96, 47, 156}, {22, 43, 87}},
+ {{78, 120, 193},
+ {111, 116, 186},
+ {46, 102, 164},
+ {15, 80, 128},
+ {2, 49, 76},
+ {1, 18, 28}},
+ {{71, 161, 203},
+ {42, 132, 192},
+ {10, 98, 150},
+ {3, 69, 109},
+ {1, 44, 70},
+ {1, 18, 29}},
+ {{57, 186, 211},
+ {30, 140, 196},
+ {4, 93, 146},
+ {1, 62, 102},
+ {1, 38, 65},
+ {1, 16, 27}},
+ {{47, 199, 217},
+ {14, 145, 196},
+ {1, 88, 142},
+ {1, 57, 98},
+ {1, 36, 62},
+ {1, 15, 26}},
+ {{26, 219, 229},
+ {5, 155, 207},
+ {1, 94, 151},
+ {1, 60, 104},
+ {1, 36, 62},
+ {1, 16, 28}}},
+ {{{233, 29, 248}, {146, 47, 220}, {43, 52, 140}},
+ {{100, 163, 232},
+ {179, 161, 222},
+ {63, 142, 204},
+ {37, 113, 174},
+ {26, 89, 137},
+ {18, 68, 97}},
+ {{85, 181, 230},
+ {32, 146, 209},
+ {7, 100, 164},
+ {3, 71, 121},
+ {1, 45, 77},
+ {1, 18, 30}},
+ {{65, 187, 230},
+ {20, 148, 207},
+ {2, 97, 159},
+ {1, 68, 116},
+ {1, 40, 70},
+ {1, 14, 29}},
+ {{40, 194, 227},
+ {8, 147, 204},
+ {1, 94, 155},
+ {1, 65, 112},
+ {1, 39, 66},
+ {1, 14, 26}},
+ {{16, 208, 228},
+ {3, 151, 207},
+ {1, 98, 160},
+ {1, 67, 117},
+ {1, 41, 74},
+ {1, 17, 31}}}}},
+ // 32x32
+ {{{{{17, 38, 140}, {7, 34, 80}, {1, 17, 29}},
+ {{37, 75, 128},
+ {41, 76, 128},
+ {26, 66, 116},
+ {12, 52, 94},
+ {2, 32, 55},
+ {1, 10, 16}},
+ {{50, 127, 154},
+ {37, 109, 152},
+ {16, 82, 121},
+ {5, 59, 85},
+ {1, 35, 54},
+ {1, 13, 20}},
+ {{40, 142, 167},
+ {17, 110, 157},
+ {2, 71, 112},
+ {1, 44, 72},
+ {1, 27, 45},
+ {1, 11, 17}},
+ {{30, 175, 188},
+ {9, 124, 169},
+ {1, 74, 116},
+ {1, 48, 78},
+ {1, 30, 49},
+ {1, 11, 18}},
+ {{10, 222, 223},
+ {2, 150, 194},
+ {1, 83, 128},
+ {1, 48, 79},
+ {1, 27, 45},
+ {1, 11, 17}}},
+ {{{36, 41, 235}, {29, 36, 193}, {10, 27, 111}},
+ {{85, 165, 222},
+ {177, 162, 215},
+ {110, 135, 195},
+ {57, 113, 168},
+ {23, 83, 120},
+ {10, 49, 61}},
+ {{85, 190, 223},
+ {36, 139, 200},
+ {5, 90, 146},
+ {1, 60, 103},
+ {1, 38, 65},
+ {1, 18, 30}},
+ {{72, 202, 223},
+ {23, 141, 199},
+ {2, 86, 140},
+ {1, 56, 97},
+ {1, 36, 61},
+ {1, 16, 27}},
+ {{55, 218, 225},
+ {13, 145, 200},
+ {1, 86, 141},
+ {1, 57, 99},
+ {1, 35, 61},
+ {1, 13, 22}},
+ {{15, 235, 212},
+ {1, 132, 184},
+ {1, 84, 139},
+ {1, 57, 97},
+ {1, 34, 56},
+ {1, 14, 23}}}},
+ {{{{181, 21, 201}, {61, 37, 123}, {10, 38, 71}},
+ {{47, 106, 172},
+ {95, 104, 173},
+ {42, 93, 159},
+ {18, 77, 131},
+ {4, 50, 81},
+ {1, 17, 23}},
+ {{62, 147, 199},
+ {44, 130, 189},
+ {28, 102, 154},
+ {18, 75, 115},
+ {2, 44, 65},
+ {1, 12, 19}},
+ {{55, 153, 210},
+ {24, 130, 194},
+ {3, 93, 146},
+ {1, 61, 97},
+ {1, 31, 50},
+ {1, 10, 16}},
+ {{49, 186, 223},
+ {17, 148, 204},
+ {1, 96, 142},
+ {1, 53, 83},
+ {1, 26, 44},
+ {1, 11, 17}},
+ {{13, 217, 212},
+ {2, 136, 180},
+ {1, 78, 124},
+ {1, 50, 83},
+ {1, 29, 49},
+ {1, 14, 23}}},
+ {{{197, 13, 247}, {82, 17, 222}, {25, 17, 162}},
+ {{126, 186, 247},
+ {234, 191, 243},
+ {176, 177, 234},
+ {104, 158, 220},
+ {66, 128, 186},
+ {55, 90, 137}},
+ {{111, 197, 242},
+ {46, 158, 219},
+ {9, 104, 171},
+ {2, 65, 125},
+ {1, 44, 80},
+ {1, 17, 91}},
+ {{104, 208, 245},
+ {39, 168, 224},
+ {3, 109, 162},
+ {1, 79, 124},
+ {1, 50, 102},
+ {1, 43, 102}},
+ {{84, 220, 246},
+ {31, 177, 231},
+ {2, 115, 180},
+ {1, 79, 134},
+ {1, 55, 77},
+ {1, 60, 79}},
+ {{43, 243, 240},
+ {8, 180, 217},
+ {1, 115, 166},
+ {1, 84, 121},
+ {1, 51, 67},
+ {1, 16, 6}}}}}},
+ // skip_prob
+ {192, 128, 64},
+ // inter_mode_probs
+ {{2, 173, 34},
+ {7, 145, 85},
+ {7, 166, 63},
+ {7, 94, 66},
+ {8, 64, 46},
+ {17, 81, 31},
+ {25, 29, 30}},
+ // interp_filter_probs
+ {{235, 162}, {36, 255}, {34, 3}, {149, 144}},
+ // is_inter_prob
+ {9, 102, 187, 225},
+ // comp_mode_prob
+ {239, 183, 119, 96, 41},
+ // single_ref_prob
+ {{33, 16}, {77, 74}, {142, 142}, {172, 170}, {238, 247}},
+ // comp_ref_prob
+ {50, 126, 123, 221, 226},
+ // y_mode_probs
+ {{65, 32, 18, 144, 162, 194, 41, 51, 98},
+ {132, 68, 18, 165, 217, 196, 45, 40, 78},
+ {173, 80, 19, 176, 240, 193, 64, 35, 46},
+ {221, 135, 38, 194, 248, 121, 96, 85, 29}},
+ // uv_mode_probs
+ {{120, 7, 76, 176, 208, 126, 28, 54, 103},
+ {48, 12, 154, 155, 139, 90, 34, 117, 119},
+ {67, 6, 25, 204, 243, 158, 13, 21, 96},
+ {97, 5, 44, 131, 176, 139, 48, 68, 97},
+ {83, 5, 42, 156, 111, 152, 26, 49, 152},
+ {80, 5, 58, 178, 74, 83, 33, 62, 145},
+ {86, 5, 32, 154, 192, 168, 14, 22, 163},
+ {85, 5, 32, 156, 216, 148, 19, 29, 73},
+ {77, 7, 64, 116, 132, 122, 37, 126, 120},
+ {101, 21, 107, 181, 192, 103, 19, 67, 125}},
+ // partition_probs
+ {{199, 122, 141},
+ {147, 63, 159},
+ {148, 133, 118},
+ {121, 104, 114},
+ {174, 73, 87},
+ {92, 41, 83},
+ {82, 99, 50},
+ {53, 39, 39},
+ {177, 58, 59},
+ {68, 26, 63},
+ {52, 79, 25},
+ {17, 14, 12},
+ {222, 34, 30},
+ {72, 16, 44},
+ {58, 32, 12},
+ {10, 7, 6}},
+ // mv_joint_probs
+ {32, 64, 96},
+ // mv_sign_prob
+ {128, 128},
+ // mv_class_probs
+ {{224, 144, 192, 168, 192, 176, 192, 198, 198, 245},
+ {216, 128, 176, 160, 176, 176, 192, 198, 198, 208}},
+ // mv_class0_bit_prob
+ {216, 208},
+ // mv_bits_prob
+ {{136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
+ {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}},
+ // mv_class0_fr_probs
+ {{{128, 128, 64}, {96, 112, 64}}, {{128, 128, 64}, {96, 112, 64}}},
+ // mv_fr_probs
+ {{64, 96, 64}, {64, 96, 64}},
+ // mv_class0_hp_prob
+ {160, 160},
+ // mv_hp_prob
+ {128, 128},
+};
+
+// Helper function for Vp9UncompressedHeaderParser::ReadTileInfo. Defined as
+// calc_min_log2_tile_cols in spec 6.2.14 Tile size calculation.
+int GetMinLog2TileCols(int sb64_cols) {
+ const int kMaxTileWidthB64 = 64;
+ int min_log2 = 0;
+ while ((kMaxTileWidthB64 << min_log2) < sb64_cols)
+ min_log2++;
+ return min_log2;
+}
+
+// Helper function for Vp9UncompressedHeaderParser::ReadTileInfo. Defined as
+// calc_max_log2_tile_cols in spec 6.2.14 Tile size calculation.
+int GetMaxLog2TileCols(int sb64_cols) {
+ const int kMinTileWidthB64 = 4;
+ int max_log2 = 1;
+ while ((sb64_cols >> max_log2) >= kMinTileWidthB64)
+ max_log2++;
+ return max_log2 - 1;
+}
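+
+// Worked example (illustrative, not from the original source): for a frame
+// 4096 pixels wide, sb64_cols is 64, so GetMinLog2TileCols(64) returns 0
+// (64 << 0 is already >= 64) and GetMaxLog2TileCols(64) returns 4
+// (64 >> 4 == 4, still >= kMinTileWidthB64).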
+
+} // namespace
+
+Vp9UncompressedHeaderParser::Vp9UncompressedHeaderParser(
+ Vp9Parser::Context* context)
+ : context_(context) {}
+
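+// Reads the 2-bit, LSB-first profile, plus an extra bit for profile 3.
+// Illustrative example (not from the original source): bits {1, 0} decode to
+// profile 1; bits {1, 1} decode to profile 3 and are followed by a reserved
+// bit which, if set, yields a value that the caller rejects against
+// kVp9MaxProfile.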
+uint8_t Vp9UncompressedHeaderParser::ReadProfile() {
+ uint8_t profile = 0;
+
+ // LSB first.
+ if (reader_.ReadBool())
+ profile |= 1;
+ if (reader_.ReadBool())
+ profile |= 2;
+ if (profile > 2 && reader_.ReadBool())
+ profile += 1;
+ return profile;
+}
+
+// 6.2.1 Frame sync syntax
+bool Vp9UncompressedHeaderParser::VerifySyncCode() {
+ const int kSyncCode = 0x498342;
+ if (reader_.ReadLiteral(8 * 3) != kSyncCode) {
+ DVLOG(1) << "Invalid frame sync code";
+ return false;
+ }
+ return true;
+}
+
+// 6.2.2 Color config syntax
+bool Vp9UncompressedHeaderParser::ReadColorConfig(Vp9FrameHeader* fhdr) {
+ if (fhdr->profile == 2 || fhdr->profile == 3) {
+ fhdr->bit_depth = reader_.ReadBool() ? 12 : 10;
+ } else {
+ fhdr->bit_depth = 8;
+ }
+
+ fhdr->color_space = static_cast<Vp9ColorSpace>(reader_.ReadLiteral(3));
+ if (fhdr->color_space != Vp9ColorSpace::SRGB) {
+ fhdr->color_range = reader_.ReadBool();
+ if (fhdr->profile == 1 || fhdr->profile == 3) {
+ fhdr->subsampling_x = reader_.ReadBool() ? 1 : 0;
+ fhdr->subsampling_y = reader_.ReadBool() ? 1 : 0;
+ if (fhdr->subsampling_x == 1 && fhdr->subsampling_y == 1) {
+ DVLOG(1) << "4:2:0 color not supported in profile 1 or 3";
+ return false;
+ }
+ bool reserved = reader_.ReadBool();
+ if (reserved) {
+ DVLOG(1) << "reserved bit set";
+ return false;
+ }
+ } else {
+ fhdr->subsampling_x = fhdr->subsampling_y = 1;
+ }
+ } else {
+ fhdr->color_range = true;
+ if (fhdr->profile == 1 || fhdr->profile == 3) {
+ fhdr->subsampling_x = fhdr->subsampling_y = 0;
+
+ bool reserved = reader_.ReadBool();
+ if (reserved) {
+ DVLOG(1) << "reserved bit set";
+ return false;
+ }
+ } else {
+ DVLOG(1) << "4:4:4 color not supported in profile 0 or 2";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// 6.2.3 Frame size syntax
+void Vp9UncompressedHeaderParser::ReadFrameSize(Vp9FrameHeader* fhdr) {
+ fhdr->frame_width = reader_.ReadLiteral(16) + 1;
+ fhdr->frame_height = reader_.ReadLiteral(16) + 1;
+}
+
+// 6.2.4 Render size syntax
+void Vp9UncompressedHeaderParser::ReadRenderSize(Vp9FrameHeader* fhdr) {
+ if (reader_.ReadBool()) {
+ fhdr->render_width = reader_.ReadLiteral(16) + 1;
+ fhdr->render_height = reader_.ReadLiteral(16) + 1;
+ } else {
+ fhdr->render_width = fhdr->frame_width;
+ fhdr->render_height = fhdr->frame_height;
+ }
+}
+
+// 6.2.5 Frame size with refs syntax
+bool Vp9UncompressedHeaderParser::ReadFrameSizeFromRefs(Vp9FrameHeader* fhdr) {
+ bool found_ref = false;
+ for (const auto& idx : fhdr->ref_frame_idx) {
+ found_ref = reader_.ReadBool();
+ if (found_ref) {
+ const Vp9Parser::ReferenceSlot& ref = context_->GetRefSlot(idx);
+ DCHECK(ref.initialized);
+ fhdr->frame_width = ref.frame_width;
+ fhdr->frame_height = ref.frame_height;
+
+ const unsigned kMaxDimension = 1u << 16;
+ DCHECK_LE(fhdr->frame_width, kMaxDimension);
+ DCHECK_LE(fhdr->frame_height, kMaxDimension);
+ break;
+ }
+ }
+
+ if (!found_ref)
+ ReadFrameSize(fhdr);
+
+ // 7.2.5 Frame size with refs semantics
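+  // (Illustrative reading of the constraint checked below: a reference is
+  // compatible when each dimension of the current frame is at least half of,
+  // and at most 16 times, the corresponding reference dimension.)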
+ bool has_valid_ref_frame = false;
+ for (const auto& idx : fhdr->ref_frame_idx) {
+ const Vp9Parser::ReferenceSlot& ref = context_->GetRefSlot(idx);
+ if (2 * fhdr->frame_width >= ref.frame_width &&
+ 2 * fhdr->frame_height >= ref.frame_height &&
+ fhdr->frame_width <= 16 * ref.frame_width &&
+ fhdr->frame_height <= 16 * ref.frame_height) {
+ has_valid_ref_frame = true;
+ break;
+ }
+ }
+ if (!has_valid_ref_frame) {
+ DVLOG(1) << "There should be at least one reference frame meeting "
+ << "size conditions.";
+ return false;
+ }
+
+ ReadRenderSize(fhdr);
+ return true;
+}
+
+// 6.2.7 Interpolation filter syntax
+Vp9InterpolationFilter Vp9UncompressedHeaderParser::ReadInterpolationFilter() {
+ if (reader_.ReadBool())
+ return Vp9InterpolationFilter::SWITCHABLE;
+
+  // The mapping table for the next two bits.
+ const Vp9InterpolationFilter table[] = {
+ Vp9InterpolationFilter::EIGHTTAP_SMOOTH, Vp9InterpolationFilter::EIGHTTAP,
+ Vp9InterpolationFilter::EIGHTTAP_SHARP, Vp9InterpolationFilter::BILINEAR,
+ };
+ return table[reader_.ReadLiteral(2)];
+}
+
+void Vp9UncompressedHeaderParser::SetupPastIndependence(Vp9FrameHeader* fhdr) {
+ memset(&context_->segmentation_, 0, sizeof(context_->segmentation_));
+ ResetLoopfilter();
+ fhdr->frame_context = kVp9DefaultFrameContext;
+ DCHECK(fhdr->frame_context.IsValid());
+}
+
+// 6.2.8 Loop filter params syntax
+void Vp9UncompressedHeaderParser::ReadLoopFilterParams() {
+ Vp9LoopFilterParams& loop_filter = context_->loop_filter_;
+
+ loop_filter.level = reader_.ReadLiteral(6);
+ loop_filter.sharpness = reader_.ReadLiteral(3);
+ loop_filter.delta_update = false;
+
+ loop_filter.delta_enabled = reader_.ReadBool();
+ if (loop_filter.delta_enabled) {
+ loop_filter.delta_update = reader_.ReadBool();
+ if (loop_filter.delta_update) {
+ for (size_t i = 0; i < Vp9RefType::VP9_FRAME_MAX; i++) {
+ loop_filter.update_ref_deltas[i] = reader_.ReadBool();
+ if (loop_filter.update_ref_deltas[i])
+ loop_filter.ref_deltas[i] = reader_.ReadSignedLiteral(6);
+ }
+
+ for (size_t i = 0; i < Vp9LoopFilterParams::kNumModeDeltas; i++) {
+ loop_filter.update_mode_deltas[i] = reader_.ReadBool();
+ if (loop_filter.update_mode_deltas[i])
+ loop_filter.mode_deltas[i] = reader_.ReadSignedLiteral(6);
+ }
+ }
+ }
+}
+
+// 6.2.9 Quantization params syntax
+void Vp9UncompressedHeaderParser::ReadQuantizationParams(
+ Vp9QuantizationParams* quants) {
+ quants->base_q_idx = reader_.ReadLiteral(8);
+
+ quants->delta_q_y_dc = ReadDeltaQ();
+ quants->delta_q_uv_dc = ReadDeltaQ();
+ quants->delta_q_uv_ac = ReadDeltaQ();
+}
+
+// 6.2.10 Delta quantizer syntax
+int8_t Vp9UncompressedHeaderParser::ReadDeltaQ() {
+ if (reader_.ReadBool())
+ return reader_.ReadSignedLiteral(4);
+ return 0;
+}
+
+// 6.2.11 Segmentation params syntax
+bool Vp9UncompressedHeaderParser::ReadSegmentationParams() {
+ Vp9SegmentationParams& segmentation = context_->segmentation_;
+ segmentation.update_map = false;
+ segmentation.update_data = false;
+
+ segmentation.enabled = reader_.ReadBool();
+ if (!segmentation.enabled)
+ return true;
+
+ segmentation.update_map = reader_.ReadBool();
+ if (segmentation.update_map) {
+ for (auto& tree_prob : segmentation.tree_probs) {
+ tree_prob = ReadProb();
+ }
+
+ segmentation.temporal_update = reader_.ReadBool();
+ for (auto& pred_prob : segmentation.pred_probs) {
+ pred_prob = segmentation.temporal_update ? ReadProb() : kVp9MaxProb;
+ }
+ }
+
+ segmentation.update_data = reader_.ReadBool();
+ if (segmentation.update_data) {
+ segmentation.abs_or_delta_update = reader_.ReadBool();
+
+ const int kFeatureDataBits[] = {8, 6, 2, 0};
+ const bool kFeatureDataSigned[] = {true, true, false, false};
+
+ for (size_t i = 0; i < Vp9SegmentationParams::kNumSegments; i++) {
+ for (size_t j = 0; j < Vp9SegmentationParams::SEG_LVL_MAX; j++) {
+ int16_t data = 0;
+ segmentation.feature_enabled[i][j] = reader_.ReadBool();
+ if (segmentation.feature_enabled[i][j]) {
+ data = reader_.ReadLiteral(kFeatureDataBits[j]);
+ if (kFeatureDataSigned[j])
+ if (reader_.ReadBool()) {
+ // 7.2.9
+ if (segmentation.abs_or_delta_update) {
+ DVLOG(1) << "feature_sign should be 0"
+ << " if abs_or_delta_update is 1";
+ return false;
+ }
+ data = -data;
+ }
+ }
+ segmentation.feature_data[i][j] = data;
+ }
+ }
+ }
+ return true;
+}
+
+// 6.2.12 Probability syntax
+uint8_t Vp9UncompressedHeaderParser::ReadProb() {
+ return reader_.ReadBool() ? reader_.ReadLiteral(8) : kVp9MaxProb;
+}
+
+// 6.2.13 Tile info syntax
+bool Vp9UncompressedHeaderParser::ReadTileInfo(Vp9FrameHeader* fhdr) {
+ int sb64_cols = (fhdr->frame_width + 63) / 64;
+
+ int min_log2_tile_cols = GetMinLog2TileCols(sb64_cols);
+ int max_log2_tile_cols = GetMaxLog2TileCols(sb64_cols);
+
+ int max_ones = max_log2_tile_cols - min_log2_tile_cols;
+ fhdr->tile_cols_log2 = min_log2_tile_cols;
+ while (max_ones-- && reader_.ReadBool())
+ fhdr->tile_cols_log2++;
+
+ fhdr->tile_rows_log2 = reader_.ReadBool() ? 1 : 0;
+ if (fhdr->tile_rows_log2 > 0 && reader_.ReadBool())
+ fhdr->tile_rows_log2++;
+
+ // 7.2.11 Tile info semantics
+ if (fhdr->tile_cols_log2 > 6) {
+ DVLOG(1) << "tile_cols_log2 should be <= 6";
+ return false;
+ }
+
+ return true;
+}
+
+void Vp9UncompressedHeaderParser::ResetLoopfilter() {
+ Vp9LoopFilterParams& loop_filter = context_->loop_filter_;
+
+ loop_filter.delta_enabled = true;
+ loop_filter.delta_update = true;
+
+ loop_filter.ref_deltas[VP9_FRAME_INTRA] = 1;
+ loop_filter.ref_deltas[VP9_FRAME_LAST] = 0;
+ loop_filter.ref_deltas[VP9_FRAME_GOLDEN] = -1;
+ loop_filter.ref_deltas[VP9_FRAME_ALTREF] = -1;
+
+ memset(loop_filter.mode_deltas, 0, sizeof(loop_filter.mode_deltas));
+}
+
+// 6.2 Uncompressed header syntax
+bool Vp9UncompressedHeaderParser::Parse(const uint8_t* stream,
+ off_t frame_size,
+ Vp9FrameHeader* fhdr) {
+ DVLOG(2) << "Vp9UncompressedHeaderParser::Parse";
+ reader_.Initialize(stream, frame_size);
+
+ fhdr->data = stream;
+ fhdr->frame_size = frame_size;
+
+ // frame marker
+ if (reader_.ReadLiteral(2) != 0x2) {
+ DVLOG(1) << "frame marker shall be equal to 2";
+ return false;
+ }
+
+ fhdr->profile = ReadProfile();
+ if (fhdr->profile >= kVp9MaxProfile) {
+ DVLOG(1) << "Unsupported bitstream profile";
+ return false;
+ }
+
+ fhdr->show_existing_frame = reader_.ReadBool();
+ if (fhdr->show_existing_frame) {
+ fhdr->frame_to_show_map_idx = reader_.ReadLiteral(3);
+ fhdr->show_frame = true;
+
+ if (!reader_.ConsumeTrailingBits()) {
+ DVLOG(1) << "trailing bits are not zero";
+ return false;
+ }
+ if (!reader_.IsValid()) {
+ DVLOG(1) << "parser reads beyond the end of buffer";
+ return false;
+ }
+ fhdr->uncompressed_header_size = reader_.GetBytesRead();
+ fhdr->header_size_in_bytes = 0;
+ return true;
+ }
+
+ fhdr->frame_type = static_cast<Vp9FrameHeader::FrameType>(reader_.ReadBool());
+ fhdr->show_frame = reader_.ReadBool();
+ fhdr->error_resilient_mode = reader_.ReadBool();
+
+ if (fhdr->IsKeyframe()) {
+ if (!VerifySyncCode())
+ return false;
+
+ if (!ReadColorConfig(fhdr))
+ return false;
+
+ ReadFrameSize(fhdr);
+ ReadRenderSize(fhdr);
+ fhdr->refresh_frame_flags = 0xff;
+ } else {
+ if (!fhdr->show_frame)
+ fhdr->intra_only = reader_.ReadBool();
+
+ if (!fhdr->error_resilient_mode)
+ fhdr->reset_frame_context = reader_.ReadLiteral(2);
+
+ if (fhdr->intra_only) {
+ if (!VerifySyncCode())
+ return false;
+
+ if (fhdr->profile > 0) {
+ if (!ReadColorConfig(fhdr))
+ return false;
+ } else {
+ fhdr->bit_depth = 8;
+ fhdr->color_space = Vp9ColorSpace::BT_601;
+ fhdr->subsampling_x = fhdr->subsampling_y = 1;
+ }
+
+ fhdr->refresh_frame_flags = reader_.ReadLiteral(8);
+
+ ReadFrameSize(fhdr);
+ ReadRenderSize(fhdr);
+ } else {
+ fhdr->refresh_frame_flags = reader_.ReadLiteral(8);
+
+ static_assert(arraysize(fhdr->ref_frame_sign_bias) >=
+ Vp9RefType::VP9_FRAME_LAST + kVp9NumRefsPerFrame,
+ "ref_frame_sign_bias is not big enough");
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ fhdr->ref_frame_idx[i] = reader_.ReadLiteral(kVp9NumRefFramesLog2);
+ fhdr->ref_frame_sign_bias[Vp9RefType::VP9_FRAME_LAST + i] =
+ reader_.ReadBool();
+
+ // 8.2 Frame order constraints
+ // ref_frame_idx[i] refers to an earlier decoded frame.
+ const Vp9Parser::ReferenceSlot& ref =
+ context_->GetRefSlot(fhdr->ref_frame_idx[i]);
+ if (!ref.initialized) {
+ DVLOG(1) << "ref_frame_idx[" << i
+ << "]=" << static_cast<int>(fhdr->ref_frame_idx[i])
+ << " refers to unused frame";
+ return false;
+ }
+
+ // 7.2 Uncompressed header semantics
+          // The selected reference frames must match the current frame in
+          // bit depth, profile, chroma subsampling, and color space.
+ if (ref.profile != fhdr->profile) {
+ DVLOG(1) << "profile of referenced frame mismatch";
+ return false;
+ }
+ if (i == 0) {
+            // The fields below are not specified for inter frames in the
+            // header, so copy them from the referenced frame.
+ fhdr->bit_depth = ref.bit_depth;
+ fhdr->color_space = ref.color_space;
+ fhdr->subsampling_x = ref.subsampling_x;
+ fhdr->subsampling_y = ref.subsampling_y;
+ } else {
+ if (fhdr->bit_depth != ref.bit_depth) {
+ DVLOG(1) << "bit_depth of referenced frame mismatch";
+ return false;
+ }
+ if (fhdr->color_space != ref.color_space) {
+ DVLOG(1) << "color_space of referenced frame mismatch";
+ return false;
+ }
+ if (fhdr->subsampling_x != ref.subsampling_x ||
+ fhdr->subsampling_y != ref.subsampling_y) {
+ DVLOG(1) << "chroma subsampling of referenced frame mismatch";
+ return false;
+ }
+ }
+ }
+
+ if (!ReadFrameSizeFromRefs(fhdr))
+ return false;
+
+ fhdr->allow_high_precision_mv = reader_.ReadBool();
+ fhdr->interpolation_filter = ReadInterpolationFilter();
+ }
+ }
+
+ if (fhdr->error_resilient_mode) {
+ fhdr->refresh_frame_context = false;
+ fhdr->frame_parallel_decoding_mode = true;
+ } else {
+ fhdr->refresh_frame_context = reader_.ReadBool();
+ fhdr->frame_parallel_decoding_mode = reader_.ReadBool();
+ }
+
+ fhdr->frame_context_idx_to_save_probs = fhdr->frame_context_idx =
+ reader_.ReadLiteral(kVp9NumFrameContextsLog2);
+
+ if (fhdr->IsIntra()) {
+ SetupPastIndependence(fhdr);
+ if (fhdr->IsKeyframe() || fhdr->error_resilient_mode ||
+ fhdr->reset_frame_context == 3) {
+ for (size_t i = 0; i < kVp9NumFrameContexts; ++i)
+ context_->UpdateFrameContext(i, fhdr->frame_context);
+ } else if (fhdr->reset_frame_context == 2) {
+ context_->UpdateFrameContext(fhdr->frame_context_idx,
+ fhdr->frame_context);
+ }
+ fhdr->frame_context_idx = 0;
+ }
+
+ ReadLoopFilterParams();
+ ReadQuantizationParams(&fhdr->quant_params);
+ if (!ReadSegmentationParams())
+ return false;
+
+ if (!ReadTileInfo(fhdr))
+ return false;
+
+ fhdr->header_size_in_bytes = reader_.ReadLiteral(16);
+ if (fhdr->header_size_in_bytes == 0) {
+ DVLOG(1) << "invalid header size";
+ return false;
+ }
+
+ if (!reader_.ConsumeTrailingBits()) {
+ DVLOG(1) << "trailing bits are not zero";
+ return false;
+ }
+ if (!reader_.IsValid()) {
+ DVLOG(1) << "parser reads beyond the end of buffer";
+ return false;
+ }
+ fhdr->uncompressed_header_size = reader_.GetBytesRead();
+
+ return true;
+}
+
+} // namespace media
diff --git a/accel/vp9_uncompressed_header_parser.h b/accel/vp9_uncompressed_header_parser.h
new file mode 100644
index 0000000..6780d38
--- /dev/null
+++ b/accel/vp9_uncompressed_header_parser.h
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
+
+#ifndef VP9_UNCOMPRESSED_HEADER_PARSER_H_
+#define VP9_UNCOMPRESSED_HEADER_PARSER_H_
+
+#include "vp9_parser.h"
+#include "vp9_raw_bits_reader.h"
+
+namespace media {
+
+class Vp9UncompressedHeaderParser {
+ public:
+ Vp9UncompressedHeaderParser(Vp9Parser::Context* context);
+
+  // Parses the VP9 uncompressed header in |stream|, of |frame_size| bytes,
+  // into |fhdr|. Returns true on success.
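+  //
+  // Illustrative usage sketch (assumes a Vp9Parser::Context |context| and a
+  // raw frame in |data| of |size| bytes; not part of the original
+  // documentation):
+  //   Vp9UncompressedHeaderParser uhdr_parser(&context);
+  //   Vp9FrameHeader fhdr;
+  //   if (!uhdr_parser.Parse(data, size, &fhdr))
+  //     return false;  // Invalid stream.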
+ bool Parse(const uint8_t* stream, off_t frame_size, Vp9FrameHeader* fhdr);
+
+ private:
+ uint8_t ReadProfile();
+ bool VerifySyncCode();
+ bool ReadColorConfig(Vp9FrameHeader* fhdr);
+ void ReadFrameSize(Vp9FrameHeader* fhdr);
+ bool ReadFrameSizeFromRefs(Vp9FrameHeader* fhdr);
+ void ReadRenderSize(Vp9FrameHeader* fhdr);
+ Vp9InterpolationFilter ReadInterpolationFilter();
+ void ResetLoopfilter();
+ void SetupPastIndependence(Vp9FrameHeader* fhdr);
+ void ReadLoopFilterParams();
+ void ReadQuantizationParams(Vp9QuantizationParams* quants);
+ int8_t ReadDeltaQ();
+ uint8_t ReadProb();
+ bool ReadSegmentationParams();
+ bool ReadTileInfo(Vp9FrameHeader* fhdr);
+
+  // Raw bits reader for the uncompressed frame header.
+ Vp9RawBitsReader reader_;
+
+ Vp9Parser::Context* context_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp9UncompressedHeaderParser);
+};
+
+} // namespace media
+
+#endif // VP9_UNCOMPRESSED_HEADER_PARSER_H_
diff --git a/common/Android.bp b/common/Android.bp
index e9c5fe3..45334d0 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -1,12 +1,3 @@
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "external_v4l2_codec2_license"
- // to get the below license kinds:
- // SPDX-license-identifier-BSD
- default_applicable_licenses: ["external_v4l2_codec2_license"],
-}
-
cc_library {
name: "libv4l2_codec2_common",
vendor: true,
@@ -16,16 +7,10 @@ cc_library {
],
srcs: [
- "Common.cpp",
"EncodeHelpers.cpp",
"FormatConverter.cpp",
- "Fourcc.cpp",
- "NalParser.cpp",
"V4L2ComponentCommon.cpp",
"VideoTypes.cpp",
- "V4L2Device.cpp",
- "V4L2DevicePoller.cpp",
- "VideoPixelFormat.cpp",
],
export_include_dirs: [
@@ -36,9 +21,9 @@ cc_library {
"libchrome",
"libcutils",
"liblog",
- "libstagefright_foundation",
"libui",
"libutils",
+ "libv4l2_codec2_accel"
],
static_libs: [
diff --git a/common/Common.cpp b/common/Common.cpp
deleted file mode 100644
index 79243ec..0000000
--- a/common/Common.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <v4l2_codec2/common/Common.h>
-
-#include <base/numerics/safe_math.h>
-
-namespace android {
-
-bool contains(const Rect& rect1, const Rect& rect2) {
- return (rect2.left >= rect1.left && rect2.right <= rect1.right && rect2.top >= rect1.top &&
- rect2.bottom <= rect1.bottom);
-}
-
-std::string toString(const Rect& rect) {
- return std::string("(") + std::to_string(rect.left) + "," + std::to_string(rect.top) + ") " +
- std::to_string(rect.width()) + "x" + std::to_string(rect.height());
-}
-
-std::optional<int> getArea(const ui::Size& size) {
- base::CheckedNumeric<int> checked_area = size.width;
- checked_area *= size.height;
- return checked_area.IsValid() ? std::optional<int>(checked_area.ValueOrDie()) : std::nullopt;
-}
-
-bool isEmpty(const ui::Size& size) {
- return !size.width || !size.height;
-}
-
-std::string toString(const ui::Size& size) {
- return std::to_string(size.width) + "x" + std::to_string(size.height);
-}
-
-} // namespace android
diff --git a/common/EncodeHelpers.cpp b/common/EncodeHelpers.cpp
index 4575197..757d064 100644
--- a/common/EncodeHelpers.cpp
+++ b/common/EncodeHelpers.cpp
@@ -7,51 +7,81 @@
#include <v4l2_codec2/common/EncodeHelpers.h>
-#include <linux/v4l2-controls.h>
+#include <string.h>
#include <C2AllocatorGralloc.h>
#include <cutils/native_handle.h>
#include <ui/GraphicBuffer.h>
#include <utils/Log.h>
-#include <v4l2_codec2/common/NalParser.h>
-
namespace android {
-uint8_t c2LevelToV4L2Level(C2Config::level_t level) {
+media::VideoCodecProfile c2ProfileToVideoCodecProfile(C2Config::profile_t profile) {
+ switch (profile) {
+ case C2Config::PROFILE_AVC_BASELINE:
+ return media::VideoCodecProfile::H264PROFILE_BASELINE;
+ case C2Config::PROFILE_AVC_MAIN:
+ return media::VideoCodecProfile::H264PROFILE_MAIN;
+ case C2Config::PROFILE_AVC_EXTENDED:
+ return media::VideoCodecProfile::H264PROFILE_EXTENDED;
+ case C2Config::PROFILE_AVC_HIGH:
+ return media::VideoCodecProfile::H264PROFILE_HIGH;
+ case C2Config::PROFILE_AVC_HIGH_10:
+ return media::VideoCodecProfile::H264PROFILE_HIGH10PROFILE;
+ case C2Config::PROFILE_AVC_HIGH_422:
+ return media::VideoCodecProfile::H264PROFILE_HIGH422PROFILE;
+ case C2Config::PROFILE_AVC_HIGH_444_PREDICTIVE:
+ return media::VideoCodecProfile::H264PROFILE_HIGH444PREDICTIVEPROFILE;
+ case C2Config::PROFILE_AVC_SCALABLE_BASELINE:
+ return media::VideoCodecProfile::H264PROFILE_SCALABLEBASELINE;
+ case C2Config::PROFILE_AVC_SCALABLE_HIGH:
+ return media::VideoCodecProfile::H264PROFILE_SCALABLEHIGH;
+ case C2Config::PROFILE_AVC_STEREO_HIGH:
+ return media::VideoCodecProfile::H264PROFILE_STEREOHIGH;
+ case C2Config::PROFILE_AVC_MULTIVIEW_HIGH:
+ return media::VideoCodecProfile::H264PROFILE_MULTIVIEWHIGH;
+ default:
+ ALOGE("Unrecognizable C2 profile (value = 0x%x)...", profile);
+ return media::VideoCodecProfile::VIDEO_CODEC_PROFILE_UNKNOWN;
+ }
+}
+
+uint8_t c2LevelToLevelIDC(C2Config::level_t level) {
switch (level) {
case C2Config::LEVEL_AVC_1:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ return 10;
case C2Config::LEVEL_AVC_1B:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ return 9;
case C2Config::LEVEL_AVC_1_1:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ return 11;
case C2Config::LEVEL_AVC_1_2:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
+ return 12;
case C2Config::LEVEL_AVC_1_3:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
+ return 13;
case C2Config::LEVEL_AVC_2:
- return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
+ return 20;
case C2Config::LEVEL_AVC_2_1:
- return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ return 21;
case C2Config::LEVEL_AVC_2_2:
- return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ return 22;
case C2Config::LEVEL_AVC_3:
- return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
+ return 30;
case C2Config::LEVEL_AVC_3_1:
- return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ return 31;
case C2Config::LEVEL_AVC_3_2:
- return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ return 32;
case C2Config::LEVEL_AVC_4:
- return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ return 40;
case C2Config::LEVEL_AVC_4_1:
- return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ return 41;
case C2Config::LEVEL_AVC_4_2:
- return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ return 42;
case C2Config::LEVEL_AVC_5:
- return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ return 50;
case C2Config::LEVEL_AVC_5_1:
- return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+ return 51;
+ case C2Config::LEVEL_AVC_5_2:
+ return 52;
default:
ALOGE("Unrecognizable C2 level (value = 0x%x)...", level);
return 0;
@@ -68,24 +98,20 @@ android_ycbcr getGraphicBlockInfo(const C2ConstGraphicBlock& block) {
height, format, 1, usage, stride);
native_handle_delete(grallocHandle);
- // Pass SW flag so that ARCVM returns the guest buffer dimensions instead
- // of the host buffer dimensions. This means we will have to convert the
- // return value from ptrs to buffer offsets ourselves.
android_ycbcr ycbcr = {};
- int32_t status = buf->lockYCbCr(GRALLOC_USAGE_SW_READ_OFTEN, &ycbcr);
+ // Usage flag without SW_READ/WRITE bits.
+ constexpr uint32_t kNonSWLockUsage = 0;
+ int32_t status = buf->lockYCbCr(kNonSWLockUsage, &ycbcr);
if (status != OK) ALOGE("lockYCbCr is failed: %d", (int)status);
buf->unlock();
-
- uintptr_t y = reinterpret_cast<uintptr_t>(ycbcr.y);
- ycbcr.y = nullptr;
- ycbcr.cb = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ycbcr.cb) - y);
- ycbcr.cr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ycbcr.cr) - y);
-
return ycbcr;
}
void extractCSDInfo(std::unique_ptr<C2StreamInitDataInfo::output>* const csd, const uint8_t* data,
size_t length) {
+ constexpr uint8_t kTypeSeqParamSet = 7;
+ constexpr uint8_t kTypePicParamSet = 8;
+
// Android frameworks needs 4 bytes start code.
constexpr uint8_t kStartCode[] = {0x00, 0x00, 0x00, 0x01};
constexpr int kStartCodeLength = 4;
@@ -101,9 +127,9 @@ void extractCSDInfo(std::unique_ptr<C2StreamInitDataInfo::output>* const csd, co
NalParser parser(data, length);
while (parser.locateNextNal()) {
if (parser.length() == 0) continue;
- uint8_t nalType = parser.type();
+ uint8_t nalType = *parser.data() & 0x1f;
ALOGV("find next NAL: type=%d, length=%zu", nalType, parser.length());
- if (nalType != NalParser::kSPSType && nalType != NalParser::kPPSType) continue;
+ if (nalType != kTypeSeqParamSet && nalType != kTypePicParamSet) continue;
if (tmpOutput + kStartCodeLength + parser.length() > tmpConfigDataEnd) {
ALOGE("Buffer overflow on extracting codec config data (length=%zu)", length);
@@ -121,4 +147,32 @@ void extractCSDInfo(std::unique_ptr<C2StreamInitDataInfo::output>* const csd, co
std::memcpy((*csd)->m.value, tmpConfigData.get(), configDataLength);
}
+NalParser::NalParser(const uint8_t* data, size_t length)
+ : mCurrNalDataPos(data), mDataEnd(data + length) {
+ mNextNalStartCodePos = findNextStartCodePos();
+}
+
+bool NalParser::locateNextNal() {
+ if (mNextNalStartCodePos == mDataEnd) return false;
+ mCurrNalDataPos = mNextNalStartCodePos + kNalStartCodeLength; // skip start code.
+ mNextNalStartCodePos = findNextStartCodePos();
+ return true;
+}
+
+const uint8_t* NalParser::data() const {
+ return mCurrNalDataPos;
+}
+
+size_t NalParser::length() const {
+ if (mNextNalStartCodePos == mDataEnd) return mDataEnd - mCurrNalDataPos;
+ size_t length = mNextNalStartCodePos - mCurrNalDataPos;
+ // The start code could be 3 or 4 bytes, i.e., 0x000001 or 0x00000001.
+ return *(mNextNalStartCodePos - 1) == 0x00 ? length - 1 : length;
+}
+
+const uint8_t* NalParser::findNextStartCodePos() const {
+ return std::search(mCurrNalDataPos, mDataEnd, kNalStartCode,
+ kNalStartCode + kNalStartCodeLength);
+}
+
} // namespace android
diff --git a/common/FormatConverter.cpp b/common/FormatConverter.cpp
index d694bd1..9ab9161 100644
--- a/common/FormatConverter.cpp
+++ b/common/FormatConverter.cpp
@@ -29,8 +29,9 @@ namespace android {
namespace {
// The constant expression of mapping the pixel format conversion pair (src, dst) to a unique
// integer.
-constexpr int convertMap(VideoPixelFormat src, VideoPixelFormat dst) {
- return static_cast<int>(src) * (static_cast<int>(VideoPixelFormat::UNKNOWN) + 1) +
+constexpr int convertMap(media::VideoPixelFormat src, media::VideoPixelFormat dst) {
+ return static_cast<int>(src) *
+ (static_cast<int>(media::VideoPixelFormat::PIXEL_FORMAT_MAX) + 1) +
static_cast<int>(dst);
}
@@ -92,11 +93,12 @@ std::unique_ptr<ImplDefinedToRGBXMap> ImplDefinedToRGBXMap::Create(
}
// static
-std::unique_ptr<FormatConverter> FormatConverter::Create(VideoPixelFormat outFormat,
- const ui::Size& visibleSize,
+std::unique_ptr<FormatConverter> FormatConverter::Create(media::VideoPixelFormat outFormat,
+ const media::Size& visibleSize,
uint32_t inputCount,
- const ui::Size& codedSize) {
- if (outFormat != VideoPixelFormat::I420 && outFormat != VideoPixelFormat::NV12) {
+ const media::Size& codedSize) {
+ if (outFormat != media::VideoPixelFormat::PIXEL_FORMAT_I420 &&
+ outFormat != media::VideoPixelFormat::PIXEL_FORMAT_NV12) {
ALOGE("Unsupported output format: %d", static_cast<int32_t>(outFormat));
return nullptr;
}
@@ -109,11 +111,12 @@ std::unique_ptr<FormatConverter> FormatConverter::Create(VideoPixelFormat outFor
return converter;
}
-c2_status_t FormatConverter::initialize(VideoPixelFormat outFormat, const ui::Size& visibleSize,
- uint32_t inputCount, const ui::Size& codedSize) {
+c2_status_t FormatConverter::initialize(media::VideoPixelFormat outFormat,
+ const media::Size& visibleSize, uint32_t inputCount,
+ const media::Size& codedSize) {
ALOGV("initialize(out_format=%s, visible_size=%dx%d, input_count=%u, coded_size=%dx%d)",
- videoPixelFormatToString(outFormat).c_str(), visibleSize.width, visibleSize.height,
- inputCount, codedSize.width, codedSize.height);
+ media::VideoPixelFormatToString(outFormat).c_str(), visibleSize.width(),
+ visibleSize.height(), inputCount, codedSize.width(), codedSize.height());
std::shared_ptr<C2BlockPool> pool;
c2_status_t status = GetCodec2BlockPool(C2BlockPool::BASIC_GRAPHIC, nullptr, &pool);
@@ -123,7 +126,7 @@ c2_status_t FormatConverter::initialize(VideoPixelFormat outFormat, const ui::Si
}
HalPixelFormat halFormat;
- if (outFormat == VideoPixelFormat::I420) {
+ if (outFormat == media::VideoPixelFormat::PIXEL_FORMAT_I420) {
// Android HAL format doesn't have I420, we use YV12 instead and swap U and V data while
// conversion to perform I420.
halFormat = HalPixelFormat::YV12;
@@ -134,7 +137,7 @@ c2_status_t FormatConverter::initialize(VideoPixelFormat outFormat, const ui::Si
uint32_t bufferCount = std::max(inputCount, kMinInputBufferCount);
for (uint32_t i = 0; i < bufferCount; i++) {
std::shared_ptr<C2GraphicBlock> block;
- status = pool->fetchGraphicBlock(codedSize.width, codedSize.height,
+ status = pool->fetchGraphicBlock(codedSize.width(), codedSize.height(),
static_cast<uint32_t>(halFormat),
{(C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE),
static_cast<uint64_t>(BufferUsage::VIDEO_ENCODER)},
@@ -150,10 +153,10 @@ c2_status_t FormatConverter::initialize(VideoPixelFormat outFormat, const ui::Si
mOutFormat = outFormat;
mVisibleSize = visibleSize;
- mTempPlaneU =
- std::unique_ptr<uint8_t[]>(new uint8_t[mVisibleSize.width * mVisibleSize.height / 4]);
- mTempPlaneV =
- std::unique_ptr<uint8_t[]>(new uint8_t[mVisibleSize.width * mVisibleSize.height / 4]);
+ mTempPlaneU = std::unique_ptr<uint8_t[]>(
+ new uint8_t[mVisibleSize.width() * mVisibleSize.height() / 4]);
+ mTempPlaneV = std::unique_ptr<uint8_t[]>(
+ new uint8_t[mVisibleSize.width() * mVisibleSize.height() / 4]);
return C2_OK;
}
@@ -198,7 +201,7 @@ C2ConstGraphicBlock FormatConverter::convertBlock(uint64_t frameIndex,
const int dstStrideV = outputLayout.planes[C2PlanarLayout::PLANE_U].rowInc; // only for I420
const int dstStrideUV = outputLayout.planes[C2PlanarLayout::PLANE_U].rowInc; // only for NV12
- VideoPixelFormat inputFormat = VideoPixelFormat::UNKNOWN;
+ media::VideoPixelFormat inputFormat = media::VideoPixelFormat::PIXEL_FORMAT_UNKNOWN;
*status = C2_OK;
if (inputLayout.type == C2PlanarLayout::TYPE_YUV) {
const uint8_t* srcY = inputView.data()[C2PlanarLayout::PLANE_Y];
@@ -208,9 +211,10 @@ C2ConstGraphicBlock FormatConverter::convertBlock(uint64_t frameIndex,
const int srcStrideU = inputLayout.planes[C2PlanarLayout::PLANE_U].rowInc;
const int srcStrideV = inputLayout.planes[C2PlanarLayout::PLANE_V].rowInc;
if (inputLayout.rootPlanes == 3) {
- inputFormat = VideoPixelFormat::YV12;
+ inputFormat = media::VideoPixelFormat::PIXEL_FORMAT_YV12;
} else if (inputLayout.rootPlanes == 2) {
- inputFormat = (srcV > srcU) ? VideoPixelFormat::NV12 : VideoPixelFormat::NV21;
+ inputFormat = (srcV > srcU) ? media::VideoPixelFormat::PIXEL_FORMAT_NV12
+ : media::VideoPixelFormat::PIXEL_FORMAT_NV21;
}
if (inputFormat == mOutFormat) {
@@ -220,73 +224,79 @@ C2ConstGraphicBlock FormatConverter::convertBlock(uint64_t frameIndex,
}
switch (convertMap(inputFormat, mOutFormat)) {
- case convertMap(VideoPixelFormat::YV12, VideoPixelFormat::I420):
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_YV12,
+ media::VideoPixelFormat::PIXEL_FORMAT_I420):
libyuv::I420Copy(srcY, srcStrideY, srcU, srcStrideU, srcV, srcStrideV, dstY, dstStrideY,
- dstU, dstStrideU, dstV, dstStrideV, mVisibleSize.width,
- mVisibleSize.height);
+ dstU, dstStrideU, dstV, dstStrideV, mVisibleSize.width(),
+ mVisibleSize.height());
break;
- case convertMap(VideoPixelFormat::YV12, VideoPixelFormat::NV12):
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_YV12,
+ media::VideoPixelFormat::PIXEL_FORMAT_NV12):
libyuv::I420ToNV12(srcY, srcStrideY, srcU, srcStrideU, srcV, srcStrideV, dstY,
- dstStrideY, dstUV, dstStrideUV, mVisibleSize.width,
- mVisibleSize.height);
+ dstStrideY, dstUV, dstStrideUV, mVisibleSize.width(),
+ mVisibleSize.height());
break;
- case convertMap(VideoPixelFormat::NV12, VideoPixelFormat::I420):
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_NV12,
+ media::VideoPixelFormat::PIXEL_FORMAT_I420):
libyuv::NV12ToI420(srcY, srcStrideY, srcU, srcStrideU, dstY, dstStrideY, dstU,
- dstStrideU, dstV, dstStrideV, mVisibleSize.width,
- mVisibleSize.height);
+ dstStrideU, dstV, dstStrideV, mVisibleSize.width(),
+ mVisibleSize.height());
break;
- case convertMap(VideoPixelFormat::NV21, VideoPixelFormat::I420):
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_NV21,
+ media::VideoPixelFormat::PIXEL_FORMAT_I420):
libyuv::NV21ToI420(srcY, srcStrideY, srcV, srcStrideV, dstY, dstStrideY, dstU,
- dstStrideU, dstV, dstStrideV, mVisibleSize.width,
- mVisibleSize.height);
+ dstStrideU, dstV, dstStrideV, mVisibleSize.width(),
+ mVisibleSize.height());
break;
- case convertMap(VideoPixelFormat::NV21, VideoPixelFormat::NV12):
- ALOGV("%s(): Converting PIXEL_FORMAT_NV21 -> PIXEL_FORMAT_NV12", __func__);
- libyuv::CopyPlane(srcY, srcStrideY, dstY, dstStrideY, mVisibleSize.width,
- mVisibleSize.height);
- copyPlaneByPixel(srcU, srcStrideU, 2, dstUV, dstStrideUV, 2, mVisibleSize.width / 2,
- mVisibleSize.height / 2);
- copyPlaneByPixel(srcV, srcStrideV, 2, dstUV + 1, dstStrideUV, 2, mVisibleSize.width / 2,
- mVisibleSize.height / 2);
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_NV21,
+ media::VideoPixelFormat::PIXEL_FORMAT_NV12):
+ libyuv::CopyPlane(srcY, srcStrideY, dstY, dstStrideY, mVisibleSize.width(),
+ mVisibleSize.height());
+ copyPlaneByPixel(srcU, srcStrideU, 2, dstUV, dstStrideUV, 2, mVisibleSize.width() / 2,
+ mVisibleSize.height() / 2);
+ copyPlaneByPixel(srcV, srcStrideV, 2, dstUV + 1, dstStrideUV, 2,
+ mVisibleSize.width() / 2, mVisibleSize.height() / 2);
break;
default:
ALOGE("Unsupported pixel format conversion from %s to %s",
- videoPixelFormatToString(inputFormat).c_str(),
- videoPixelFormatToString(mOutFormat).c_str());
+ media::VideoPixelFormatToString(inputFormat).c_str(),
+ media::VideoPixelFormatToString(mOutFormat).c_str());
*status = C2_CORRUPTED;
return inputBlock; // This is actually redundant and should not be used.
}
} else if (inputLayout.type == C2PlanarLayout::TYPE_RGB) {
// There is only RGBA_8888 specified in C2AllocationGralloc::map(), no BGRA_8888. Maybe
// BGRA_8888 is not used now?
- inputFormat = VideoPixelFormat::ABGR;
+ inputFormat = media::VideoPixelFormat::PIXEL_FORMAT_ABGR;
const uint8_t* srcRGB = (idMap) ? idMap->addr() : inputView.data()[C2PlanarLayout::PLANE_R];
const int srcStrideRGB =
(idMap) ? idMap->rowInc() : inputLayout.planes[C2PlanarLayout::PLANE_R].rowInc;
switch (convertMap(inputFormat, mOutFormat)) {
- case convertMap(VideoPixelFormat::ABGR, VideoPixelFormat::I420):
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_ABGR,
+ media::VideoPixelFormat::PIXEL_FORMAT_I420):
libyuv::ABGRToI420(srcRGB, srcStrideRGB, dstY, dstStrideY, dstU, dstStrideU, dstV,
- dstStrideV, mVisibleSize.width, mVisibleSize.height);
+ dstStrideV, mVisibleSize.width(), mVisibleSize.height());
break;
- case convertMap(VideoPixelFormat::ABGR, VideoPixelFormat::NV12): {
+ case convertMap(media::VideoPixelFormat::PIXEL_FORMAT_ABGR,
+ media::VideoPixelFormat::PIXEL_FORMAT_NV12): {
// There is no libyuv function to convert ABGR to NV12. Therefore, we first convert to
// I420 on dst-Y plane and temporary U/V plane. Then we copy U and V pixels from
// temporary planes to dst-UV interleavedly.
- const int tempStride = mVisibleSize.width / 2;
+ const int tempStride = mVisibleSize.width() / 2;
libyuv::ABGRToI420(srcRGB, srcStrideRGB, dstY, dstStrideY, mTempPlaneU.get(),
- tempStride, mTempPlaneV.get(), tempStride, mVisibleSize.width,
- mVisibleSize.height);
+ tempStride, mTempPlaneV.get(), tempStride, mVisibleSize.width(),
+ mVisibleSize.height());
libyuv::MergeUVPlane(mTempPlaneU.get(), tempStride, mTempPlaneV.get(), tempStride,
- dstUV, dstStrideUV, mVisibleSize.width / 2,
- mVisibleSize.height / 2);
+ dstUV, dstStrideUV, mVisibleSize.width() / 2,
+ mVisibleSize.height() / 2);
break;
}
default:
ALOGE("Unsupported pixel format conversion from %s to %s",
- videoPixelFormatToString(inputFormat).c_str(),
- videoPixelFormatToString(mOutFormat).c_str());
+ media::VideoPixelFormatToString(inputFormat).c_str(),
+ media::VideoPixelFormatToString(mOutFormat).c_str());
*status = C2_CORRUPTED;
return inputBlock; // This is actually redundant and should not be used.
}
@@ -297,10 +307,10 @@ C2ConstGraphicBlock FormatConverter::convertBlock(uint64_t frameIndex,
}
ALOGV("convertBlock(frame_index=%" PRIu64 ", format=%s)", frameIndex,
- videoPixelFormatToString(inputFormat).c_str());
+ media::VideoPixelFormatToString(inputFormat).c_str());
entry->mAssociatedFrameIndex = frameIndex;
mAvailableQueue.pop();
- return outputBlock->share(C2Rect(mVisibleSize.width, mVisibleSize.height), C2Fence());
+ return outputBlock->share(C2Rect(mVisibleSize.width(), mVisibleSize.height()), C2Fence());
}
c2_status_t FormatConverter::returnBlock(uint64_t frameIndex) {
diff --git a/common/Fourcc.cpp b/common/Fourcc.cpp
deleted file mode 100644
index f7d3efd..0000000
--- a/common/Fourcc.cpp
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <v4l2_codec2/common/Fourcc.h>
-
-#include <linux/videodev2.h>
-
-#include <utils/Log.h>
-
-namespace android {
-
-Fourcc::Fourcc(Fourcc::Value fourcc) : mValue(fourcc) {}
-Fourcc::~Fourcc() = default;
-Fourcc& Fourcc::operator=(const Fourcc& other) = default;
-
-// static
-std::optional<Fourcc> Fourcc::fromUint32(uint32_t fourcc) {
- switch (fourcc) {
- case AR24:
- case AB24:
- case XR24:
- case XB24:
- case RGB4:
- case YU12:
- case YV12:
- case YM12:
- case YM21:
- case YUYV:
- case NV12:
- case NV21:
- case NM12:
- case NM21:
- case YM16:
- case MT21:
- case MM21:
- return Fourcc(static_cast<Value>(fourcc));
- }
- ALOGE("Unmapped fourcc: %s", fourccToString(fourcc).c_str());
- return std::nullopt;
-}
-
-// static
-std::optional<Fourcc> Fourcc::fromVideoPixelFormat(VideoPixelFormat pixelFormat,
- bool singlePlanar) {
- if (singlePlanar) {
- switch (pixelFormat) {
- case VideoPixelFormat::ARGB:
- return Fourcc(AR24);
- case VideoPixelFormat::ABGR:
- return Fourcc(AB24);
- case VideoPixelFormat::XRGB:
- return Fourcc(XR24);
- case VideoPixelFormat::XBGR:
- return Fourcc(XB24);
- case VideoPixelFormat::BGRA:
- return Fourcc(RGB4);
- case VideoPixelFormat::I420:
- return Fourcc(YU12);
- case VideoPixelFormat::YV12:
- return Fourcc(YV12);
- case VideoPixelFormat::YUY2:
- return Fourcc(YUYV);
- case VideoPixelFormat::NV12:
- return Fourcc(NV12);
- case VideoPixelFormat::NV21:
- return Fourcc(NV21);
- case VideoPixelFormat::I422:
- case VideoPixelFormat::I420A:
- case VideoPixelFormat::I444:
- case VideoPixelFormat::RGB24:
- case VideoPixelFormat::MJPEG:
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV444P9:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::YUV422P12:
- case VideoPixelFormat::YUV444P12:
- case VideoPixelFormat::Y16:
- case VideoPixelFormat::P016LE:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- case VideoPixelFormat::UNKNOWN:
- break;
- }
- } else {
- switch (pixelFormat) {
- case VideoPixelFormat::I420:
- return Fourcc(YM12);
- case VideoPixelFormat::YV12:
- return Fourcc(YM21);
- case VideoPixelFormat::NV12:
- return Fourcc(NM12);
- case VideoPixelFormat::I422:
- return Fourcc(YM16);
- case VideoPixelFormat::NV21:
- return Fourcc(NM21);
- case VideoPixelFormat::I420A:
- case VideoPixelFormat::I444:
- case VideoPixelFormat::YUY2:
- case VideoPixelFormat::ARGB:
- case VideoPixelFormat::XRGB:
- case VideoPixelFormat::RGB24:
- case VideoPixelFormat::MJPEG:
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV444P9:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::YUV422P12:
- case VideoPixelFormat::YUV444P12:
- case VideoPixelFormat::Y16:
- case VideoPixelFormat::ABGR:
- case VideoPixelFormat::XBGR:
- case VideoPixelFormat::P016LE:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- case VideoPixelFormat::BGRA:
- case VideoPixelFormat::UNKNOWN:
- break;
- }
- }
- ALOGE("Unmapped %s for %s", videoPixelFormatToString(pixelFormat).c_str(),
- singlePlanar ? "single-planar" : "multi-planar");
- return std::nullopt;
-}
-
-VideoPixelFormat Fourcc::toVideoPixelFormat() const {
- switch (mValue) {
- case AR24:
- return VideoPixelFormat::ARGB;
- case AB24:
- return VideoPixelFormat::ABGR;
- case XR24:
- return VideoPixelFormat::XRGB;
- case XB24:
- return VideoPixelFormat::XBGR;
- case RGB4:
- return VideoPixelFormat::BGRA;
- case YU12:
- case YM12:
- return VideoPixelFormat::I420;
- case YV12:
- case YM21:
- return VideoPixelFormat::YV12;
- case YUYV:
- return VideoPixelFormat::YUY2;
- case NV12:
- case NM12:
- return VideoPixelFormat::NV12;
- case NV21:
- case NM21:
- return VideoPixelFormat::NV21;
- case YM16:
- return VideoPixelFormat::I422;
- // V4L2_PIX_FMT_MT21C is only used for MT8173 hardware video decoder output
- // and should be converted by the MT8173 image processor for the compositor
- // to render. Since it is an intermediate format for the video decoder,
- // VideoPixelFormat should not have a mapping for it. However, we need to
- // create a VideoFrameLayout for the format to process the intermediate
- // frame. Hence we map V4L2_PIX_FMT_MT21C to PIXEL_FORMAT_NV12, as their
- // layouts are the same.
- case MT21:
- // V4L2_PIX_FMT_MM21 is used for the MT8183 hardware video decoder. It is
- // similar to V4L2_PIX_FMT_MT21C but is not compressed; thus it can also
- // be mapped to PIXEL_FORMAT_NV12.
- case MM21:
- case MM21:
- return VideoPixelFormat::NV12;
- }
-
- ALOGE("Unmapped Fourcc: %s", toString().c_str());
- return VideoPixelFormat::UNKNOWN;
-}
-
-// static
-std::optional<Fourcc> Fourcc::fromV4L2PixFmt(uint32_t v4l2PixFmt) {
- // We can do that because we adopt the same internal definition of Fourcc as
- // V4L2.
- return fromUint32(v4l2PixFmt);
-}
-
-uint32_t Fourcc::toV4L2PixFmt() const {
- // Note that we can do that because we adopt the same internal definition of
- // Fourcc as V4L2.
- return static_cast<uint32_t>(mValue);
-}
-
-std::optional<Fourcc> Fourcc::toSinglePlanar() const {
- switch (mValue) {
- case AR24:
- case AB24:
- case XR24:
- case XB24:
- case RGB4:
- case YU12:
- case YV12:
- case YUYV:
- case NV12:
- case NV21:
- return Fourcc(mValue);
- case YM12:
- return Fourcc(YU12);
- case YM21:
- return Fourcc(YV12);
- case NM12:
- return Fourcc(NV12);
- case NM21:
- return Fourcc(NV21);
- case YM16:
- case MT21:
- case MM21:
- return std::nullopt;
- }
-}
-
-bool operator!=(const Fourcc& lhs, const Fourcc& rhs) {
- return !(lhs == rhs);
-}
-
-bool Fourcc::isMultiPlanar() const {
- switch (mValue) {
- case AR24:
- case AB24:
- case XR24:
- case XB24:
- case RGB4:
- case YU12:
- case YV12:
- case YUYV:
- case NV12:
- case NV21:
- return false;
- case YM12:
- case YM21:
- case NM12:
- case NM21:
- case YM16:
- case MT21:
- case MM21:
- return true;
- }
-}
-
-std::string Fourcc::toString() const {
- return fourccToString(static_cast<uint32_t>(mValue));
-}
-
-static_assert(Fourcc::AR24 == V4L2_PIX_FMT_ABGR32, "Mismatch Fourcc");
-#ifdef V4L2_PIX_FMT_RGBA32
-// V4L2_PIX_FMT_RGBA32 is defined since v5.2
-static_assert(Fourcc::AB24 == V4L2_PIX_FMT_RGBA32, "Mismatch Fourcc");
-#endif // V4L2_PIX_FMT_RGBA32
-static_assert(Fourcc::XR24 == V4L2_PIX_FMT_XBGR32, "Mismatch Fourcc");
-#ifdef V4L2_PIX_FMT_RGBX32
-// V4L2_PIX_FMT_RGBX32 is defined since v5.2
-static_assert(Fourcc::XB24 == V4L2_PIX_FMT_RGBX32, "Mismatch Fourcc");
-#endif // V4L2_PIX_FMT_RGBX32
-static_assert(Fourcc::RGB4 == V4L2_PIX_FMT_RGB32, "Mismatch Fourcc");
-static_assert(Fourcc::YU12 == V4L2_PIX_FMT_YUV420, "Mismatch Fourcc");
-static_assert(Fourcc::YV12 == V4L2_PIX_FMT_YVU420, "Mismatch Fourcc");
-static_assert(Fourcc::YM12 == V4L2_PIX_FMT_YUV420M, "Mismatch Fourcc");
-static_assert(Fourcc::YM21 == V4L2_PIX_FMT_YVU420M, "Mismatch Fourcc");
-static_assert(Fourcc::YUYV == V4L2_PIX_FMT_YUYV, "Mismatch Fourcc");
-static_assert(Fourcc::NV12 == V4L2_PIX_FMT_NV12, "Mismatch Fourcc");
-static_assert(Fourcc::NV21 == V4L2_PIX_FMT_NV21, "Mismatch Fourcc");
-static_assert(Fourcc::NM12 == V4L2_PIX_FMT_NV12M, "Mismatch Fourcc");
-static_assert(Fourcc::NM21 == V4L2_PIX_FMT_NV21M, "Mismatch Fourcc");
-static_assert(Fourcc::YM16 == V4L2_PIX_FMT_YUV422M, "Mismatch Fourcc");
-static_assert(Fourcc::MT21 == V4L2_PIX_FMT_MT21C, "Mismatch Fourcc");
-#ifdef V4L2_PIX_FMT_MM21
-// V4L2_PIX_FMT_MM21 is not yet upstreamed.
-static_assert(Fourcc::MM21 == V4L2_PIX_FMT_MM21, "Mismatch Fourcc");
-#endif // V4L2_PIX_FMT_MM21
-
-} // namespace android
diff --git a/common/NalParser.cpp b/common/NalParser.cpp
deleted file mode 100644
index 3216574..0000000
--- a/common/NalParser.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NalParser"
-
-#include <v4l2_codec2/common/NalParser.h>
-
-#include <algorithm>
-
-#include <media/stagefright/foundation/ABitReader.h>
-#include <utils/Log.h>
-
-namespace android {
-
-namespace {
-
-enum H264ProfileIDC {
- kProfileIDCAVLC444 = 44,
- kProfileIDScalableBaseline = 83,
- kProfileIDScalableHigh = 86,
- kProfileIDCHigh = 100,
- kProfileIDHigh10 = 110,
- kProfileIDSMultiviewHigh = 118,
- kProfileIDHigh422 = 122,
- kProfileIDStereoHigh = 128,
- kProfileIDHigh444Predictive = 244,
-};
-
-constexpr uint32_t kYUV444Idc = 3;
-
-// Read unsigned int encoded with exponential-golomb.
-uint32_t parseUE(ABitReader* br) {
- uint32_t numZeroes = 0;
- while (br->getBits(1) == 0) {
- ++numZeroes;
- }
- uint32_t val = br->getBits(numZeroes);
- return val + (1u << numZeroes) - 1;
-}
-
-// Read signed int encoded with exponential-golomb.
-int32_t parseSE(ABitReader* br) {
- uint32_t codeNum = parseUE(br);
- return (codeNum & 1) ? (codeNum + 1) >> 1 : -static_cast<int32_t>(codeNum >> 1);
-}
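-
-// For illustration, the se(v) mapping implemented above: codeNum 0 -> 0,
-// 1 -> 1, 2 -> -1, 3 -> 2, 4 -> -2; odd code numbers map to positive values
-// and even ones to negative values, as specified for H.264.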
-
-// Skip a H.264 sequence scaling list in the specified bitstream.
-void skipScalingList(ABitReader* br, size_t scalingListSize) {
- size_t nextScale = 8;
- size_t lastScale = 8;
- for (size_t j = 0; j < scalingListSize; ++j) {
- if (nextScale != 0) {
- int32_t deltaScale = parseSE(br); // delta_sl
- if (deltaScale < -128) {
- ALOGW("delta scale (%d) is below range, capping to -128", deltaScale);
- deltaScale = -128;
- } else if (deltaScale > 127) {
- ALOGW("delta scale (%d) is above range, capping to 127", deltaScale);
- deltaScale = 127;
- }
- nextScale = (lastScale + (deltaScale + 256)) % 256;
- }
- lastScale = (nextScale == 0) ? lastScale : nextScale;
- }
-}
-
-// Skip the H.264 sequence scaling matrix in the specified bitstream.
-void skipScalingMatrix(ABitReader* br, size_t numScalingLists) {
- for (size_t i = 0; i < numScalingLists; ++i) {
- if (br->getBits(1)) { // seq_scaling_list_present_flag
- if (i < 6) {
- skipScalingList(br, 16);
- } else {
- skipScalingList(br, 64);
- }
- }
- }
-}
-
-} // namespace
-
-NalParser::NalParser(const uint8_t* data, size_t length)
- : mCurrNalDataPos(data), mDataEnd(data + length) {
- mNextNalStartCodePos = findNextStartCodePos();
-}
-
-bool NalParser::locateNextNal() {
- if (mNextNalStartCodePos == mDataEnd) return false;
- mCurrNalDataPos = mNextNalStartCodePos + kNalStartCodeLength; // skip start code.
- mNextNalStartCodePos = findNextStartCodePos();
- return true;
-}
-
-bool NalParser::locateSPS() {
- while (locateNextNal()) {
- if (length() == 0) continue;
- if (type() != kSPSType) continue;
- return true;
- }
-
- return false;
-}
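-
-// Illustrative usage sketch, assuming `data` and `size` describe a raw
-// Annex-B H.264 stream:
-//   NalParser parser(data, size);
-//   if (parser.locateSPS()) {
-//       ColorAspects aspects{};
-//       parser.findCodedColorAspects(&aspects);
-//   }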
-
-const uint8_t* NalParser::data() const {
- return mCurrNalDataPos;
-}
-
-size_t NalParser::length() const {
- if (mNextNalStartCodePos == mDataEnd) return mDataEnd - mCurrNalDataPos;
- size_t length = mNextNalStartCodePos - mCurrNalDataPos;
- // The start code could be 3 or 4 bytes, i.e., 0x000001 or 0x00000001.
- return *(mNextNalStartCodePos - 1) == 0x00 ? length - 1 : length;
-}
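-
-// Example: for the byte sequence 00 00 00 01 (a four-byte start code),
-// findNextStartCodePos() matches the three-byte pattern 00 00 01 one byte in,
-// so the zero byte preceding it is trimmed from the previous NAL unit's
-// length above.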
-
-uint8_t NalParser::type() const {
- // First byte is forbidden_zero_bit (1) + nal_ref_idc (2) + nal_unit_type (5)
- constexpr uint8_t kNALTypeMask = 0x1f;
- return *mCurrNalDataPos & kNALTypeMask;
-}
-
-const uint8_t* NalParser::findNextStartCodePos() const {
- return std::search(mCurrNalDataPos, mDataEnd, kNalStartCode,
- kNalStartCode + kNalStartCodeLength);
-}
-
-bool NalParser::findCodedColorAspects(ColorAspects* colorAspects) {
- ALOG_ASSERT(colorAspects);
- ALOG_ASSERT(type() == kSPSType);
-
- // Unfortunately we can't directly jump to the Video Usability Information (VUI) parameters that
- // contain the color aspects. We need to parse the entire SPS header up until the values we
- // need.
-
- // Skip first byte containing type.
- ABitReader br(mCurrNalDataPos + 1, length() - 1);
-
- uint32_t profileIDC = br.getBits(8); // profile_idc
- br.skipBits(16); // constraint flags + reserved bits + level_idc
- parseUE(&br); // seq_parameter_set_id
-
- if (profileIDC == kProfileIDCHigh || profileIDC == kProfileIDHigh10 ||
- profileIDC == kProfileIDHigh422 || profileIDC == kProfileIDHigh444Predictive ||
- profileIDC == kProfileIDCAVLC444 || profileIDC == kProfileIDScalableBaseline ||
- profileIDC == kProfileIDScalableHigh || profileIDC == kProfileIDSMultiviewHigh ||
- profileIDC == kProfileIDStereoHigh) {
- uint32_t chromaFormatIDC = parseUE(&br);
- if (chromaFormatIDC == kYUV444Idc) { // chroma_format_idc
- br.skipBits(1); // separate_colour_plane_flag
- }
- parseUE(&br); // bit_depth_luma_minus8
- parseUE(&br); // bit_depth_chroma_minus8
- br.skipBits(1); // lossless_qpprime_y_zero_flag
-
- if (br.getBits(1)) { // seq_scaling_matrix_present_flag
- const size_t numScalingLists = (chromaFormatIDC != kYUV444Idc) ? 8 : 12;
- skipScalingMatrix(&br, numScalingLists);
- }
- }
-
- parseUE(&br); // log2_max_frame_num_minus4
- uint32_t pictureOrderCountType = parseUE(&br); // pic_order_cnt_type
- if (pictureOrderCountType == 0) {
- parseUE(&br); // log2_max_pic_order_cnt_lsb_minus4
- } else if (pictureOrderCountType == 1) {
- br.skipBits(1); // delta_pic_order_always_zero_flag
- parseSE(&br); // offset_for_non_ref_pic
- parseSE(&br); // offset_for_top_to_bottom_field
- uint32_t numReferenceFrames = parseUE(&br); // num_ref_frames_in_pic_order_cnt_cycle
- for (uint32_t i = 0; i < numReferenceFrames; ++i) {
- parseUE(&br); // offset_for_ref_frame
- }
- }
-
- parseUE(&br); // num_ref_frames
- br.skipBits(1); // gaps_in_frame_num_value_allowed_flag
- parseUE(&br); // pic_width_in_mbs_minus1
- parseUE(&br); // pic_height_in_map_units_minus1
- if (!br.getBits(1)) { // frame_mbs_only_flag
- br.skipBits(1); // mb_adaptive_frame_field_flag
- }
- br.skipBits(1); // direct_8x8_inference_flag
-
- if (br.getBits(1)) { // frame_cropping_flag
- parseUE(&br); // frame_cropping_rect_left_offset
- parseUE(&br); // frame_cropping_rect_right_offset
- parseUE(&br); // frame_cropping_rect_top_offset
- parseUE(&br); // frame_cropping_rect_bottom_offset
- }
-
- if (br.getBits(1)) { // vui_parameters_present_flag
- if (br.getBits(1)) { // VUI aspect_ratio_info_present_flag
- if (br.getBits(8) == 255) { // VUI aspect_ratio_idc == extended sample aspect ratio
- br.skipBits(32); // VUI sar_width + sar_height
- }
- }
-
- if (br.getBits(1)) { // VUI overscan_info_present_flag
- br.skipBits(1); // VUI overscan_appropriate_flag
- }
- if (br.getBits(1)) { // VUI video_signal_type_present_flag
- br.skipBits(3); // VUI video_format
- colorAspects->fullRange = br.getBits(1); // VUI video_full_range_flag
- if (br.getBits(1)) { // VUI color_description_present_flag
- colorAspects->primaries = br.getBits(8); // VUI colour_primaries
- colorAspects->transfer = br.getBits(8); // VUI transfer_characteristics
- colorAspects->coeffs = br.getBits(8); // VUI matrix_coefficients
- return !br.overRead();
- }
- }
- }
-
- return false;
-}
-
-} // namespace android
diff --git a/common/V4L2ComponentCommon.cpp b/common/V4L2ComponentCommon.cpp
index 518b489..86448a8 100644
--- a/common/V4L2ComponentCommon.cpp
+++ b/common/V4L2ComponentCommon.cpp
@@ -12,8 +12,6 @@
namespace android {
const std::string V4L2ComponentName::kH264Encoder = "c2.v4l2.avc.encoder";
-const std::string V4L2ComponentName::kVP8Encoder = "c2.v4l2.vp8.encoder";
-const std::string V4L2ComponentName::kVP9Encoder = "c2.v4l2.vp9.encoder";
const std::string V4L2ComponentName::kH264Decoder = "c2.v4l2.avc.decoder";
const std::string V4L2ComponentName::kVP8Decoder = "c2.v4l2.vp8.decoder";
@@ -24,16 +22,16 @@ const std::string V4L2ComponentName::kVP9SecureDecoder = "c2.v4l2.vp9.decoder.se
// static
bool V4L2ComponentName::isValid(const char* name) {
- return name == kH264Encoder || name == kVP8Encoder || name == kVP9Encoder ||
- name == kH264Decoder || name == kVP8Decoder || name == kVP9Decoder ||
- name == kH264SecureDecoder || name == kVP8SecureDecoder || name == kVP9SecureDecoder;
+ return name == kH264Encoder || name == kH264Decoder || name == kVP8Decoder ||
+ name == kVP9Decoder || name == kH264SecureDecoder || name == kVP8SecureDecoder ||
+ name == kVP9SecureDecoder;
}
// static
bool V4L2ComponentName::isEncoder(const char* name) {
ALOG_ASSERT(isValid(name));
- return name == kH264Encoder || name == kVP8Encoder || name == kVP9Encoder;
+ return name == kH264Encoder;
}
} // namespace android
diff --git a/common/V4L2Device.cpp b/common/V4L2Device.cpp
deleted file mode 100644
index a31d82b..0000000
--- a/common/V4L2Device.cpp
+++ /dev/null
@@ -1,2010 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 2f13d62f0c0d
-// Note: Added some missing defines that are only defined in newer kernel
-// versions (e.g. V4L2_PIX_FMT_VP8_FRAME)
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2Device"
-
-#include <v4l2_codec2/common/V4L2Device.h>
-
-#include <fcntl.h>
-#include <inttypes.h>
-#include <linux/media.h>
-#include <linux/videodev2.h>
-#include <poll.h>
-#include <string.h>
-#include <sys/eventfd.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-
-#include <algorithm>
-#include <mutex>
-#include <set>
-#include <sstream>
-
-#include <base/bind.h>
-#include <base/numerics/safe_conversions.h>
-#include <base/posix/eintr_wrapper.h>
-#include <base/strings/stringprintf.h>
-#include <base/thread_annotations.h>
-#include <utils/Log.h>
-
-#include <v4l2_codec2/common/Fourcc.h>
-#include <v4l2_codec2/common/VideoPixelFormat.h>
-
-// VP8 parsed frames
-#ifndef V4L2_PIX_FMT_VP8_FRAME
-#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F')
-#endif
-
-// VP9 parsed frames
-#ifndef V4L2_PIX_FMT_VP9_FRAME
-#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F')
-#endif
-
-// H264 parsed slices
-#ifndef V4L2_PIX_FMT_H264_SLICE
-#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4')
-#endif
-
-namespace android {
-
-struct v4l2_format buildV4L2Format(const enum v4l2_buf_type type, uint32_t fourcc,
- const ui::Size& size, size_t buffer_size, uint32_t stride) {
- struct v4l2_format format;
- memset(&format, 0, sizeof(format));
- format.type = type;
- format.fmt.pix_mp.pixelformat = fourcc;
- format.fmt.pix_mp.width = size.width;
- format.fmt.pix_mp.height = size.height;
- format.fmt.pix_mp.num_planes = V4L2Device::getNumPlanesOfV4L2PixFmt(fourcc);
- format.fmt.pix_mp.plane_fmt[0].sizeimage = buffer_size;
-
- // When the image format is planar the bytesperline value applies to the first plane and is
- // divided by the same factor as the width field for the other planes.
- format.fmt.pix_mp.plane_fmt[0].bytesperline = stride;
-
- return format;
-}
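-
-// Illustrative example: for a 1280x720 V4L2_PIX_FMT_YUV420M buffer, passing
-// stride = 1280 sets bytesperline to 1280 for the Y plane, and the driver
-// derives 640 for the Cb and Cr planes, halved by the same factor as their
-// width.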
-
-V4L2ExtCtrl::V4L2ExtCtrl(uint32_t id) {
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = id;
-}
-
-V4L2ExtCtrl::V4L2ExtCtrl(uint32_t id, int32_t val) : V4L2ExtCtrl(id) {
- ctrl.value = val;
-}
-
-// Class used to store the state of a buffer that should persist between reference creations. This
-// includes:
-// * Result of initial VIDIOC_QUERYBUF ioctl,
-// * Plane mappings.
-//
-// Also provides helper functions.
-class V4L2Buffer {
-public:
- static std::unique_ptr<V4L2Buffer> create(scoped_refptr<V4L2Device> device,
- enum v4l2_buf_type type, enum v4l2_memory memory,
- const struct v4l2_format& format, size_t bufferId);
- ~V4L2Buffer();
-
- V4L2Buffer(const V4L2Buffer&) = delete;
- V4L2Buffer& operator=(const V4L2Buffer&) = delete;
-
- void* getPlaneMapping(const size_t plane);
- size_t getMemoryUsage() const;
- const struct v4l2_buffer& v4l2_buffer() const { return mV4l2Buffer; }
-
-private:
- V4L2Buffer(scoped_refptr<V4L2Device> device, enum v4l2_buf_type type, enum v4l2_memory memory,
- const struct v4l2_format& format, size_t bufferId);
- bool query();
-
- scoped_refptr<V4L2Device> mDevice;
- std::vector<void*> mPlaneMappings;
-
- // V4L2 data as queried by QUERYBUF.
- struct v4l2_buffer mV4l2Buffer;
- // WARNING: do not change this to a vector or something smaller than VIDEO_MAX_PLANES, otherwise
- // the Tegra libv4l2 will write data beyond the number of allocated planes, resulting in memory
- // corruption.
- struct v4l2_plane mV4l2Planes[VIDEO_MAX_PLANES];
-
- struct v4l2_format mFormat __attribute__((unused));
-};
-
-std::unique_ptr<V4L2Buffer> V4L2Buffer::create(scoped_refptr<V4L2Device> device,
- enum v4l2_buf_type type, enum v4l2_memory memory,
- const struct v4l2_format& format, size_t bufferId) {
- // Not using std::make_unique because constructor is private.
- std::unique_ptr<V4L2Buffer> buffer(new V4L2Buffer(device, type, memory, format, bufferId));
-
- if (!buffer->query()) return nullptr;
-
- return buffer;
-}
-
-V4L2Buffer::V4L2Buffer(scoped_refptr<V4L2Device> device, enum v4l2_buf_type type,
- enum v4l2_memory memory, const struct v4l2_format& format, size_t bufferId)
- : mDevice(device), mFormat(format) {
- ALOG_ASSERT(V4L2_TYPE_IS_MULTIPLANAR(type));
- ALOG_ASSERT(format.fmt.pix_mp.num_planes <= base::size(mV4l2Planes));
-
- memset(mV4l2Planes, 0, sizeof(mV4l2Planes));
- memset(&mV4l2Buffer, 0, sizeof(mV4l2Buffer));
- mV4l2Buffer.m.planes = mV4l2Planes;
- // Just in case we got more planes than we want.
- mV4l2Buffer.length =
- std::min(static_cast<size_t>(format.fmt.pix_mp.num_planes), base::size(mV4l2Planes));
- mV4l2Buffer.index = bufferId;
- mV4l2Buffer.type = type;
- mV4l2Buffer.memory = memory;
- mPlaneMappings.resize(mV4l2Buffer.length);
-}
-
-V4L2Buffer::~V4L2Buffer() {
- if (mV4l2Buffer.memory == V4L2_MEMORY_MMAP) {
- for (size_t i = 0; i < mPlaneMappings.size(); i++) {
- if (mPlaneMappings[i] != nullptr) {
- mDevice->munmap(mPlaneMappings[i], mV4l2Buffer.m.planes[i].length);
- }
- }
- }
-}
-
-bool V4L2Buffer::query() {
- int ret = mDevice->ioctl(VIDIOC_QUERYBUF, &mV4l2Buffer);
- if (ret) {
- ALOGE("VIDIOC_QUERYBUF failed");
- return false;
- }
-
- DCHECK(mPlaneMappings.size() == mV4l2Buffer.length);
-
- return true;
-}
-
-void* V4L2Buffer::getPlaneMapping(const size_t plane) {
- if (plane >= mPlaneMappings.size()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return nullptr;
- }
-
- void* p = mPlaneMappings[plane];
- if (p) {
- return p;
- }
-
- // Do this check here to avoid repeating it after a buffer has been successfully mapped (we know
- // we are of MMAP type by then).
- if (mV4l2Buffer.memory != V4L2_MEMORY_MMAP) {
- ALOGE("Cannot create mapping on non-MMAP buffer");
- return nullptr;
- }
-
- p = mDevice->mmap(NULL, mV4l2Buffer.m.planes[plane].length, PROT_READ | PROT_WRITE, MAP_SHARED,
- mV4l2Buffer.m.planes[plane].m.mem_offset);
- if (p == MAP_FAILED) {
- ALOGE("mmap() failed: ");
- return nullptr;
- }
-
- mPlaneMappings[plane] = p;
- return p;
-}
-
-size_t V4L2Buffer::getMemoryUsage() const {
- size_t usage = 0;
- for (size_t i = 0; i < mV4l2Buffer.length; i++) {
- usage += mV4l2Buffer.m.planes[i].length;
- }
- return usage;
-}
-
-// A thread-safe pool of buffer indexes, allowing buffers to be obtained and returned from different
-// threads. All the methods of this class are thread-safe. Users should keep a scoped_refptr to
-// instances of this class in order to ensure the list remains alive as long as they need it.
-class V4L2BuffersList : public base::RefCountedThreadSafe<V4L2BuffersList> {
-public:
- V4L2BuffersList() = default;
-
- V4L2BuffersList(const V4L2BuffersList&) = delete;
- V4L2BuffersList& operator=(const V4L2BuffersList&) = delete;
-
- // Return a buffer to this list. This can also be called to set the initial pool of buffers.
- // Note that it is illegal to return the same buffer twice.
- void returnBuffer(size_t bufferId);
- // Get any of the buffers in the list. There is no order guarantee whatsoever.
- std::optional<size_t> getFreeBuffer();
- // Get the buffer with specified index.
- std::optional<size_t> getFreeBuffer(size_t requestedBufferId);
- // Number of buffers currently in this list.
- size_t size() const;
-
-private:
- friend class base::RefCountedThreadSafe<V4L2BuffersList>;
- ~V4L2BuffersList() = default;
-
- mutable std::mutex mLock;
- std::set<size_t> mFreeBuffers GUARDED_BY(mLock);
-};
-
-void V4L2BuffersList::returnBuffer(size_t bufferId) {
- std::lock_guard<std::mutex> lock(mLock);
-
- auto inserted = mFreeBuffers.emplace(bufferId);
- if (!inserted.second) {
- ALOGE("Returning buffer failed");
- }
-}
-
-std::optional<size_t> V4L2BuffersList::getFreeBuffer() {
- std::lock_guard<std::mutex> lock(mLock);
-
- auto iter = mFreeBuffers.begin();
- if (iter == mFreeBuffers.end()) {
- ALOGV("No free buffer available!");
- return std::nullopt;
- }
-
- size_t bufferId = *iter;
- mFreeBuffers.erase(iter);
-
- return bufferId;
-}
-
-std::optional<size_t> V4L2BuffersList::getFreeBuffer(size_t requestedBufferId) {
- std::lock_guard<std::mutex> lock(mLock);
-
- return (mFreeBuffers.erase(requestedBufferId) > 0) ? std::make_optional(requestedBufferId)
- : std::nullopt;
-}
-
-size_t V4L2BuffersList::size() const {
- std::lock_guard<std::mutex> lock(mLock);
-
- return mFreeBuffers.size();
-}
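-
-// Illustrative usage sketch: producers return buffer indexes with
-// returnBuffer() and consumers take them with getFreeBuffer(), from any
-// thread:
-//   scoped_refptr<V4L2BuffersList> list = new V4L2BuffersList();
-//   list->returnBuffer(0);                            // seed the pool
-//   std::optional<size_t> id = list->getFreeBuffer(); // yields 0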
-
-// Module-private class that lets users query/write V4L2 buffer information. It also makes some
-// private V4L2Queue methods available to this module only.
-class V4L2BufferRefBase {
-public:
- V4L2BufferRefBase(const struct v4l2_buffer& v4l2Buffer, base::WeakPtr<V4L2Queue> queue);
- ~V4L2BufferRefBase();
-
- V4L2BufferRefBase(const V4L2BufferRefBase&) = delete;
- V4L2BufferRefBase& operator=(const V4L2BufferRefBase&) = delete;
-
- bool queueBuffer();
- void* getPlaneMapping(const size_t plane);
-
- // Checks that the number of passed FDs is adequate for the current format and buffer
- // configuration. Only useful for DMABUF buffers.
- bool checkNumFDsForFormat(const size_t numFds) const;
-
- // Data from the buffer, that users can query and/or write.
- struct v4l2_buffer mV4l2Buffer;
- // WARNING: do not change this to a vector or something smaller than VIDEO_MAX_PLANES, otherwise
- // the Tegra libv4l2 will write data beyond the number of allocated planes, resulting in memory
- // corruption.
- struct v4l2_plane mV4l2Planes[VIDEO_MAX_PLANES];
-
-private:
- size_t bufferId() const { return mV4l2Buffer.index; }
-
- friend class V4L2WritableBufferRef;
- // A weak pointer to the queue this buffer belongs to. Will remain valid as long as the
- // underlying V4L2 buffer is valid too. This can only be accessed from the sequence protected
- // by mSequenceChecker. Thread-safe methods (like ~V4L2BufferRefBase) must *never* access this.
- base::WeakPtr<V4L2Queue> mQueue;
- // Where to return this buffer if it goes out of scope without being queued.
- scoped_refptr<V4L2BuffersList> mReturnTo;
- bool queued = false;
-
- SEQUENCE_CHECKER(mSequenceChecker);
-};
-
-V4L2BufferRefBase::V4L2BufferRefBase(const struct v4l2_buffer& v4l2Buffer,
- base::WeakPtr<V4L2Queue> queue)
- : mQueue(std::move(queue)), mReturnTo(mQueue->mFreeBuffers) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(V4L2_TYPE_IS_MULTIPLANAR(v4l2Buffer.type));
- ALOG_ASSERT(v4l2Buffer.length <= base::size(mV4l2Planes));
- ALOG_ASSERT(mReturnTo);
-
- memcpy(&mV4l2Buffer, &v4l2Buffer, sizeof(mV4l2Buffer));
- memcpy(mV4l2Planes, v4l2Buffer.m.planes, sizeof(struct v4l2_plane) * v4l2Buffer.length);
- mV4l2Buffer.m.planes = mV4l2Planes;
-}
-
-V4L2BufferRefBase::~V4L2BufferRefBase() {
- // We are the last reference and are only accessing the thread-safe mReturnTo, so we are safe
- // to call from any sequence. If we have been queued, then the queue is our owner so we don't
- // need to return to the free buffers list.
- if (!queued) mReturnTo->returnBuffer(bufferId());
-}
-
-bool V4L2BufferRefBase::queueBuffer() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- if (!mQueue) return false;
-
- queued = mQueue->queueBuffer(&mV4l2Buffer);
-
- return queued;
-}
-
-void* V4L2BufferRefBase::getPlaneMapping(const size_t plane) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- if (!mQueue) return nullptr;
-
- return mQueue->mBuffers[bufferId()]->getPlaneMapping(plane);
-}
-
-bool V4L2BufferRefBase::checkNumFDsForFormat(const size_t numFds) const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- if (!mQueue) return false;
-
- // We have not called setFormat(), so assume this is OK.
- // Hopefully we will standardize setFormat() in the future.
- if (!mQueue->mCurrentFormat) return true;
-
- const size_t requiredFds = mQueue->mCurrentFormat->fmt.pix_mp.num_planes;
- // Sanity check.
- ALOG_ASSERT(mV4l2Buffer.length == requiredFds);
- if (numFds < requiredFds) {
- ALOGE("Insufficient number of FDs given for the current format. "
- "%zu provided, %zu required.",
- numFds, requiredFds);
- return false;
- }
-
- const auto* planes = mV4l2Buffer.m.planes;
- for (size_t i = mV4l2Buffer.length - 1; i >= numFds; --i) {
- // Assume that an fd is a duplicate of a previous plane's fd if offset != 0. Otherwise, if
- // offset == 0, return error as it is likely pointing to a new plane.
- if (planes[i].data_offset == 0) {
- ALOGE("Additional dmabuf fds point to a new buffer.");
- return false;
- }
- }
-
- return true;
-}
-
-V4L2WritableBufferRef::V4L2WritableBufferRef(const struct v4l2_buffer& v4l2Buffer,
- base::WeakPtr<V4L2Queue> queue)
- : mBufferData(std::make_unique<V4L2BufferRefBase>(v4l2Buffer, std::move(queue))) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-}
-
-V4L2WritableBufferRef::V4L2WritableBufferRef(V4L2WritableBufferRef&& other)
- : mBufferData(std::move(other.mBufferData)) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- DCHECK_CALLED_ON_VALID_SEQUENCE(other.mSequenceChecker);
-}
-
-V4L2WritableBufferRef::~V4L2WritableBufferRef() {
- // Only valid references should be sequence-checked
- if (mBufferData) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- }
-}
-
-V4L2WritableBufferRef& V4L2WritableBufferRef::operator=(V4L2WritableBufferRef&& other) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- DCHECK_CALLED_ON_VALID_SEQUENCE(other.mSequenceChecker);
-
- if (this == &other) return *this;
-
- mBufferData = std::move(other.mBufferData);
-
- return *this;
-}
-
-enum v4l2_memory V4L2WritableBufferRef::memory() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return static_cast<enum v4l2_memory>(mBufferData->mV4l2Buffer.memory);
-}
-
-bool V4L2WritableBufferRef::doQueue() && {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- bool queued = mBufferData->queueBuffer();
-
- // Clear our own reference.
- mBufferData.reset();
-
- return queued;
-}
-
-bool V4L2WritableBufferRef::queueMMap() && {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- // Move ourselves so our data gets freed no matter when we return
- V4L2WritableBufferRef self(std::move(*this));
-
- if (self.memory() != V4L2_MEMORY_MMAP) {
- ALOGE("Called on invalid buffer type!");
- return false;
- }
-
- return std::move(self).doQueue();
-}
-
-bool V4L2WritableBufferRef::queueUserPtr(const std::vector<void*>& ptrs) && {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- // Move ourselves so our data gets freed no matter when we return
- V4L2WritableBufferRef self(std::move(*this));
-
- if (self.memory() != V4L2_MEMORY_USERPTR) {
- ALOGE("Called on invalid buffer type!");
- return false;
- }
-
- if (ptrs.size() != self.planesCount()) {
- ALOGE("Provided %zu pointers while we require %u.", ptrs.size(),
- self.mBufferData->mV4l2Buffer.length);
- return false;
- }
-
- for (size_t i = 0; i < ptrs.size(); i++) {
- self.mBufferData->mV4l2Buffer.m.planes[i].m.userptr =
- reinterpret_cast<unsigned long>(ptrs[i]);
- }
-
- return std::move(self).doQueue();
-}
-
-bool V4L2WritableBufferRef::queueDMABuf(const std::vector<int>& fds) && {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- // Move ourselves so our data gets freed no matter when we return
- V4L2WritableBufferRef self(std::move(*this));
-
- if (self.memory() != V4L2_MEMORY_DMABUF) {
- ALOGE("Called on invalid buffer type!");
- return false;
- }
-
- if (!self.mBufferData->checkNumFDsForFormat(fds.size())) return false;
-
- size_t numPlanes = self.planesCount();
- for (size_t i = 0; i < numPlanes; i++) self.mBufferData->mV4l2Buffer.m.planes[i].m.fd = fds[i];
-
- return std::move(self).doQueue();
-}
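-
-// Illustrative call sequence, assuming a DMABUF queue, a struct timeval
-// `timestamp`, and a vector `fds` with one file descriptor per plane:
-//   std::optional<V4L2WritableBufferRef> buf = queue->getFreeBuffer();
-//   if (buf) {
-//       buf->setTimeStamp(timestamp);
-//       std::move(*buf).queueDMABuf(fds);  // consumes the reference
-//   }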
-
-size_t V4L2WritableBufferRef::planesCount() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.length;
-}
-
-size_t V4L2WritableBufferRef::getPlaneSize(const size_t plane) const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return 0;
- }
-
- return mBufferData->mV4l2Buffer.m.planes[plane].length;
-}
-
-void V4L2WritableBufferRef::setPlaneSize(const size_t plane, const size_t size) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- enum v4l2_memory mem = memory();
- if (mem == V4L2_MEMORY_MMAP) {
- ALOG_ASSERT(mBufferData->mV4l2Buffer.m.planes[plane].length == size);
- return;
- }
- ALOG_ASSERT(mem == V4L2_MEMORY_USERPTR || mem == V4L2_MEMORY_DMABUF);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return;
- }
-
- mBufferData->mV4l2Buffer.m.planes[plane].length = size;
-}
-
-void* V4L2WritableBufferRef::getPlaneMapping(const size_t plane) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->getPlaneMapping(plane);
-}
-
-void V4L2WritableBufferRef::setTimeStamp(const struct timeval& timestamp) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- mBufferData->mV4l2Buffer.timestamp = timestamp;
-}
-
-const struct timeval& V4L2WritableBufferRef::getTimeStamp() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.timestamp;
-}
-
-void V4L2WritableBufferRef::setPlaneBytesUsed(const size_t plane, const size_t bytesUsed) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return;
- }
-
- if (bytesUsed > getPlaneSize(plane)) {
- ALOGE("Set bytes used %zu larger than plane size %zu.", bytesUsed, getPlaneSize(plane));
- return;
- }
-
- mBufferData->mV4l2Buffer.m.planes[plane].bytesused = bytesUsed;
-}
-
-size_t V4L2WritableBufferRef::getPlaneBytesUsed(const size_t plane) const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return 0;
- }
-
- return mBufferData->mV4l2Buffer.m.planes[plane].bytesused;
-}
-
-void V4L2WritableBufferRef::setPlaneDataOffset(const size_t plane, const size_t dataOffset) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return;
- }
-
- mBufferData->mV4l2Buffer.m.planes[plane].data_offset = dataOffset;
-}
-
-size_t V4L2WritableBufferRef::bufferId() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.index;
-}
-
-V4L2ReadableBuffer::V4L2ReadableBuffer(const struct v4l2_buffer& v4l2Buffer,
- base::WeakPtr<V4L2Queue> queue)
- : mBufferData(std::make_unique<V4L2BufferRefBase>(v4l2Buffer, std::move(queue))) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-}
-
-V4L2ReadableBuffer::~V4L2ReadableBuffer() {
- // This method is thread-safe. Since this is the destructor, we are guaranteed to be called
- // from the last remaining reference to us. Also, we are just calling the destructor of
- // mBufferData, which is also thread-safe.
- ALOG_ASSERT(mBufferData);
-}
-
-bool V4L2ReadableBuffer::isLast() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.flags & V4L2_BUF_FLAG_LAST;
-}
-
-bool V4L2ReadableBuffer::isKeyframe() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.flags & V4L2_BUF_FLAG_KEYFRAME;
-}
-
-struct timeval V4L2ReadableBuffer::getTimeStamp() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.timestamp;
-}
-
-size_t V4L2ReadableBuffer::planesCount() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.length;
-}
-
-const void* V4L2ReadableBuffer::getPlaneMapping(const size_t plane) const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- DCHECK(mBufferData);
-
- return mBufferData->getPlaneMapping(plane);
-}
-
-size_t V4L2ReadableBuffer::getPlaneBytesUsed(const size_t plane) const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return 0;
- }
-
- return mBufferData->mV4l2Planes[plane].bytesused;
-}
-
-size_t V4L2ReadableBuffer::getPlaneDataOffset(const size_t plane) const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- if (plane >= planesCount()) {
- ALOGE("Invalid plane %zu requested.", plane);
- return 0;
- }
-
- return mBufferData->mV4l2Planes[plane].data_offset;
-}
-
-size_t V4L2ReadableBuffer::bufferId() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(mBufferData);
-
- return mBufferData->mV4l2Buffer.index;
-}
-
-// This class is used to expose the buffer reference classes' constructors to this module. This
-// is to ensure that nobody else can create buffer references.
-class V4L2BufferRefFactory {
-public:
- static V4L2WritableBufferRef CreateWritableRef(const struct v4l2_buffer& v4l2Buffer,
- base::WeakPtr<V4L2Queue> queue) {
- return V4L2WritableBufferRef(v4l2Buffer, std::move(queue));
- }
-
- static V4L2ReadableBufferRef CreateReadableRef(const struct v4l2_buffer& v4l2Buffer,
- base::WeakPtr<V4L2Queue> queue) {
- return new V4L2ReadableBuffer(v4l2Buffer, std::move(queue));
- }
-};
-
-// Helper macros that print the queue type with logs.
-#define ALOGEQ(fmt, ...) ALOGE("(%s)" fmt, V4L2Device::v4L2BufferTypeToString(mType), ##__VA_ARGS__)
-#define ALOGVQ(fmt, ...) ALOGD("(%s)" fmt, V4L2Device::v4L2BufferTypeToString(mType), ##__VA_ARGS__)
-
-V4L2Queue::V4L2Queue(scoped_refptr<V4L2Device> dev, enum v4l2_buf_type type,
- base::OnceClosure destroyCb)
- : mType(type), mDevice(dev), mDestroyCb(std::move(destroyCb)) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-}
-
-V4L2Queue::~V4L2Queue() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- if (mIsStreaming) {
- ALOGEQ("Queue is still streaming, trying to stop it...");
- streamoff();
- }
-
- ALOG_ASSERT(mQueuedBuffers.empty());
- ALOG_ASSERT(!mFreeBuffers);
-
- if (!mBuffers.empty()) {
- ALOGEQ("Buffers are still allocated, trying to deallocate them...");
- deallocateBuffers();
- }
-
- std::move(mDestroyCb).Run();
-}
-
-std::optional<struct v4l2_format> V4L2Queue::setFormat(uint32_t fourcc, const ui::Size& size,
- size_t bufferSize, uint32_t stride) {
- struct v4l2_format format = buildV4L2Format(mType, fourcc, size, bufferSize, stride);
- if (mDevice->ioctl(VIDIOC_S_FMT, &format) != 0 || format.fmt.pix_mp.pixelformat != fourcc) {
- ALOGEQ("Failed to set format (format_fourcc=0x%" PRIx32 ")", fourcc);
- return std::nullopt;
- }
-
- mCurrentFormat = format;
- return mCurrentFormat;
-}
-
-std::optional<struct v4l2_format> V4L2Queue::tryFormat(uint32_t fourcc, const ui::Size& size,
- size_t bufferSize) {
- struct v4l2_format format = buildV4L2Format(mType, fourcc, size, bufferSize, 0);
- if (mDevice->ioctl(VIDIOC_TRY_FMT, &format) != 0 || format.fmt.pix_mp.pixelformat != fourcc) {
- ALOGEQ("Tried format not supported (format_fourcc=0x%" PRIx32 ")", fourcc);
- return std::nullopt;
- }
-
- return format;
-}
-
-std::pair<std::optional<struct v4l2_format>, int> V4L2Queue::getFormat() {
- struct v4l2_format format;
- memset(&format, 0, sizeof(format));
- format.type = mType;
- if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
- ALOGEQ("Failed to get format");
- return std::make_pair(std::nullopt, errno);
- }
-
- return std::make_pair(format, 0);
-}
-
-size_t V4L2Queue::allocateBuffers(size_t count, enum v4l2_memory memory) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- ALOG_ASSERT(!mFreeBuffers);
- ALOG_ASSERT(mQueuedBuffers.size() == 0u);
-
- if (isStreaming()) {
- ALOGEQ("Cannot allocate buffers while streaming.");
- return 0;
- }
-
- if (mBuffers.size() != 0) {
- ALOGEQ("Cannot allocate new buffers while others are still allocated.");
- return 0;
- }
-
- if (count == 0) {
- ALOGEQ("Attempting to allocate 0 buffers.");
- return 0;
- }
-
- // First query the number of planes in the buffers we are about to request. This should not be
- // required, but Tegra's VIDIOC_QUERYBUF will fail on output buffers if the number of specified
- // planes does not exactly match the format.
- struct v4l2_format format = {.type = mType};
- int ret = mDevice->ioctl(VIDIOC_G_FMT, &format);
- if (ret) {
- ALOGEQ("VIDIOC_G_FMT failed");
- return 0;
- }
- mPlanesCount = format.fmt.pix_mp.num_planes;
- ALOG_ASSERT(mPlanesCount <= static_cast<size_t>(VIDEO_MAX_PLANES));
-
- struct v4l2_requestbuffers reqbufs;
- memset(&reqbufs, 0, sizeof(reqbufs));
- reqbufs.count = count;
- reqbufs.type = mType;
- reqbufs.memory = memory;
- ALOGVQ("Requesting %zu buffers.", count);
-
- ret = mDevice->ioctl(VIDIOC_REQBUFS, &reqbufs);
- if (ret) {
- ALOGEQ("VIDIOC_REQBUFS failed");
- return 0;
- }
- ALOGVQ("Queue %u: got %u buffers.", mType, reqbufs.count);
-
- mMemory = memory;
-
- mFreeBuffers = new V4L2BuffersList();
-
- // Now query all buffer information.
- for (size_t i = 0; i < reqbufs.count; i++) {
- auto buffer = V4L2Buffer::create(mDevice, mType, mMemory, format, i);
-
- if (!buffer) {
- deallocateBuffers();
-
- return 0;
- }
-
- mBuffers.emplace_back(std::move(buffer));
- mFreeBuffers->returnBuffer(i);
- }
-
- ALOG_ASSERT(mFreeBuffers);
- ALOG_ASSERT(mFreeBuffers->size() == mBuffers.size());
- ALOG_ASSERT(mQueuedBuffers.size() == 0u);
-
- return mBuffers.size();
-}
-
-bool V4L2Queue::deallocateBuffers() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- if (isStreaming()) {
- ALOGEQ("Cannot deallocate buffers while streaming.");
- return false;
- }
-
- if (mBuffers.size() == 0) return true;
-
- mWeakThisFactory.InvalidateWeakPtrs();
- mBuffers.clear();
- mFreeBuffers = nullptr;
-
- // Free all buffers.
- struct v4l2_requestbuffers reqbufs;
- memset(&reqbufs, 0, sizeof(reqbufs));
- reqbufs.count = 0;
- reqbufs.type = mType;
- reqbufs.memory = mMemory;
-
- int ret = mDevice->ioctl(VIDIOC_REQBUFS, &reqbufs);
- if (ret) {
- ALOGEQ("VIDIOC_REQBUFS failed");
- return false;
- }
-
- ALOG_ASSERT(!mFreeBuffers);
- ALOG_ASSERT(mQueuedBuffers.size() == 0u);
-
- return true;
-}
-
-size_t V4L2Queue::getMemoryUsage() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
- size_t usage = 0;
- for (const auto& buf : mBuffers) {
- usage += buf->getMemoryUsage();
- }
- return usage;
-}
-
-v4l2_memory V4L2Queue::getMemoryType() const {
- return mMemory;
-}
-
-std::optional<V4L2WritableBufferRef> V4L2Queue::getFreeBuffer() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- // No buffers allocated at the moment?
- if (!mFreeBuffers) return std::nullopt;
-
- auto bufferId = mFreeBuffers->getFreeBuffer();
- if (!bufferId.has_value()) return std::nullopt;
-
- return V4L2BufferRefFactory::CreateWritableRef(mBuffers[bufferId.value()]->v4l2_buffer(),
- mWeakThisFactory.GetWeakPtr());
-}
-
-std::optional<V4L2WritableBufferRef> V4L2Queue::getFreeBuffer(size_t requestedBufferId) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- // No buffers allocated at the moment?
- if (!mFreeBuffers) return std::nullopt;
-
- auto bufferId = mFreeBuffers->getFreeBuffer(requestedBufferId);
- if (!bufferId.has_value()) return std::nullopt;
-
- return V4L2BufferRefFactory::CreateWritableRef(mBuffers[bufferId.value()]->v4l2_buffer(),
- mWeakThisFactory.GetWeakPtr());
-}
-
-bool V4L2Queue::queueBuffer(struct v4l2_buffer* v4l2Buffer) {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- int ret = mDevice->ioctl(VIDIOC_QBUF, v4l2Buffer);
- if (ret) {
- ALOGEQ("VIDIOC_QBUF failed");
- return false;
- }
-
- auto inserted = mQueuedBuffers.emplace(v4l2Buffer->index);
- if (!inserted.second) {
- ALOGE("Queuing buffer failed");
- return false;
- }
-
- mDevice->schedulePoll();
-
- return true;
-}
-
-std::pair<bool, V4L2ReadableBufferRef> V4L2Queue::dequeueBuffer() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- // No need to dequeue if no buffers queued.
- if (queuedBuffersCount() == 0) return std::make_pair(true, nullptr);
-
- if (!isStreaming()) {
- ALOGEQ("Attempting to dequeue a buffer while not streaming.");
- return std::make_pair(true, nullptr);
- }
-
- struct v4l2_buffer v4l2Buffer;
- memset(&v4l2Buffer, 0, sizeof(v4l2Buffer));
- // WARNING: do not change this to a vector or something smaller than VIDEO_MAX_PLANES, otherwise
- // the Tegra libv4l2 will write data beyond the number of allocated planes, resulting in memory
- // corruption.
- struct v4l2_plane planes[VIDEO_MAX_PLANES];
- memset(planes, 0, sizeof(planes));
- v4l2Buffer.type = mType;
- v4l2Buffer.memory = mMemory;
- v4l2Buffer.m.planes = planes;
- v4l2Buffer.length = mPlanesCount;
- int ret = mDevice->ioctl(VIDIOC_DQBUF, &v4l2Buffer);
- if (ret) {
- // TODO(acourbot): we should not have to check for EPIPE as codec clients should not call
- // this method after the last buffer is dequeued.
- switch (errno) {
- case EAGAIN:
- case EPIPE:
- // This is not an error so we'll need to continue polling but won't provide a buffer.
- mDevice->schedulePoll();
- return std::make_pair(true, nullptr);
- default:
- ALOGEQ("VIDIOC_DQBUF failed");
- return std::make_pair(false, nullptr);
- }
- }
-
- auto it = mQueuedBuffers.find(v4l2Buffer.index);
- ALOG_ASSERT(it != mQueuedBuffers.end());
- mQueuedBuffers.erase(*it);
-
- if (queuedBuffersCount() > 0) mDevice->schedulePoll();
-
- ALOG_ASSERT(mFreeBuffers);
- return std::make_pair(true, V4L2BufferRefFactory::CreateReadableRef(
- v4l2Buffer, mWeakThisFactory.GetWeakPtr()));
-}
-
-bool V4L2Queue::isStreaming() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- return mIsStreaming;
-}
-
-bool V4L2Queue::streamon() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- if (mIsStreaming) return true;
-
- int arg = static_cast<int>(mType);
- int ret = mDevice->ioctl(VIDIOC_STREAMON, &arg);
- if (ret) {
- ALOGEQ("VIDIOC_STREAMON failed");
- return false;
- }
-
- mIsStreaming = true;
-
- return true;
-}
-
-bool V4L2Queue::streamoff() {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- // We do not check the value of isStreaming(), because we may have queued buffers to the queue
- // and wish to get them back - in such a case, we may need to do a VIDIOC_STREAMOFF on a
- // stopped queue.
-
- int arg = static_cast<int>(mType);
- int ret = mDevice->ioctl(VIDIOC_STREAMOFF, &arg);
- if (ret) {
- ALOGEQ("VIDIOC_STREAMOFF failed");
- return false;
- }
-
- for (const auto& bufferId : mQueuedBuffers) {
- ALOG_ASSERT(mFreeBuffers);
- mFreeBuffers->returnBuffer(bufferId);
- }
-
- mQueuedBuffers.clear();
-
- mIsStreaming = false;
-
- return true;
-}
-
-size_t V4L2Queue::allocatedBuffersCount() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- return mBuffers.size();
-}
-
-size_t V4L2Queue::freeBuffersCount() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- return mFreeBuffers ? mFreeBuffers->size() : 0;
-}
-
-size_t V4L2Queue::queuedBuffersCount() const {
- ALOG_ASSERT(mSequenceChecker.CalledOnValidSequence());
-
- return mQueuedBuffers.size();
-}
-
-#undef ALOGEQ
-#undef ALOGVQ
-
-// This class is used to expose V4L2Queue's constructor to this module. This is to ensure that
-// nobody else can create instances of it.
-class V4L2QueueFactory {
-public:
- static scoped_refptr<V4L2Queue> createQueue(scoped_refptr<V4L2Device> dev,
- enum v4l2_buf_type type,
- base::OnceClosure destroyCb) {
- return new V4L2Queue(std::move(dev), type, std::move(destroyCb));
- }
-};
-
-V4L2Device::V4L2Device() {
- DETACH_FROM_SEQUENCE(mClientSequenceChecker);
-}
-
-V4L2Device::~V4L2Device() {
- closeDevice();
-}
-
-scoped_refptr<V4L2Queue> V4L2Device::getQueue(enum v4l2_buf_type type) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- switch (type) {
- // Supported queue types.
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- break;
- default:
- ALOGE("Unsupported V4L2 queue type: %u", type);
- return nullptr;
- }
-
- // TODO(acourbot): we should instead query the device for available queues, and allocate them
- // accordingly. This will do for now though.
- auto it = mQueues.find(type);
- if (it != mQueues.end()) return scoped_refptr<V4L2Queue>(it->second);
-
- scoped_refptr<V4L2Queue> queue = V4L2QueueFactory::createQueue(
- this, type, base::BindOnce(&V4L2Device::onQueueDestroyed, this, type));
-
- mQueues[type] = queue.get();
- return queue;
-}
-
-void V4L2Device::onQueueDestroyed(v4l2_buf_type bufType) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- auto it = mQueues.find(bufType);
- ALOG_ASSERT(it != mQueues.end());
- mQueues.erase(it);
-}
-
-// static
-scoped_refptr<V4L2Device> V4L2Device::create() {
- ALOGV("%s()", __func__);
- return scoped_refptr<V4L2Device>(new V4L2Device());
-}
-
-bool V4L2Device::open(Type type, uint32_t v4l2PixFmt) {
- ALOGV("%s()", __func__);
-
- std::string path = getDevicePathFor(type, v4l2PixFmt);
-
- if (path.empty()) {
- ALOGE("No devices supporting %s for type: %u", fourccToString(v4l2PixFmt).c_str(),
- static_cast<uint32_t>(type));
- return false;
- }
-
- if (!openDevicePath(path, type)) {
- ALOGE("Failed opening %s", path.c_str());
- return false;
- }
-
- mDevicePollInterruptFd.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
- if (!mDevicePollInterruptFd.is_valid()) {
- ALOGE("Failed creating a poll interrupt fd");
- return false;
- }
-
- return true;
-}
-
-int V4L2Device::ioctl(int request, void* arg) {
- ALOG_ASSERT(mDeviceFd.is_valid());
- return HANDLE_EINTR(::ioctl(mDeviceFd.get(), request, arg));
-}
-
-bool V4L2Device::poll(bool pollDevice, bool* eventPending) {
- struct pollfd pollfds[2];
- nfds_t nfds;
- int pollfd = -1;
-
- pollfds[0].fd = mDevicePollInterruptFd.get();
- pollfds[0].events = POLLIN | POLLERR;
- nfds = 1;
-
- if (pollDevice) {
- ALOGV("adding device fd to poll() set");
- pollfds[nfds].fd = mDeviceFd.get();
- pollfds[nfds].events = POLLIN | POLLOUT | POLLERR | POLLPRI;
- pollfd = nfds;
- nfds++;
- }
-
- if (HANDLE_EINTR(::poll(pollfds, nfds, -1)) == -1) {
- ALOGE("poll() failed");
- return false;
- }
- *eventPending = (pollfd != -1 && pollfds[pollfd].revents & POLLPRI);
- return true;
-}
-
-void* V4L2Device::mmap(void* addr, unsigned int len, int prot, int flags, unsigned int offset) {
- DCHECK(mDeviceFd.is_valid());
- return ::mmap(addr, len, prot, flags, mDeviceFd.get(), offset);
-}
-
-void V4L2Device::munmap(void* addr, unsigned int len) {
- ::munmap(addr, len);
-}
-
-bool V4L2Device::setDevicePollInterrupt() {
- ALOGV("%s()", __func__);
-
- const uint64_t buf = 1;
- if (HANDLE_EINTR(write(mDevicePollInterruptFd.get(), &buf, sizeof(buf))) == -1) {
- ALOGE("write() failed");
- return false;
- }
- return true;
-}
-
-bool V4L2Device::clearDevicePollInterrupt() {
- ALOGV("%s()", __func__);
-
- uint64_t buf;
- if (HANDLE_EINTR(read(mDevicePollInterruptFd.get(), &buf, sizeof(buf))) == -1) {
- if (errno == EAGAIN) {
- // No interrupt flag set, and we're reading nonblocking. Not an error.
- return true;
- } else {
- ALOGE("read() failed");
- return false;
- }
- }
- return true;
-}
-
-std::vector<base::ScopedFD> V4L2Device::getDmabufsForV4L2Buffer(int index, size_t numPlanes,
- enum v4l2_buf_type bufType) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(V4L2_TYPE_IS_MULTIPLANAR(bufType));
-
- std::vector<base::ScopedFD> dmabufFds;
- for (size_t i = 0; i < numPlanes; ++i) {
- struct v4l2_exportbuffer expbuf;
- memset(&expbuf, 0, sizeof(expbuf));
- expbuf.type = bufType;
- expbuf.index = index;
- expbuf.plane = i;
- expbuf.flags = O_CLOEXEC;
- if (ioctl(VIDIOC_EXPBUF, &expbuf) != 0) {
- dmabufFds.clear();
- break;
- }
-
- dmabufFds.push_back(base::ScopedFD(expbuf.fd));
- }
-
- return dmabufFds;
-}
-
-std::vector<uint32_t> V4L2Device::preferredInputFormat(Type type) {
- if (type == Type::kEncoder) return {V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_NV12};
-
- return {};
-}
-
-// static
-uint32_t V4L2Device::C2ProfileToV4L2PixFmt(C2Config::profile_t profile, bool sliceBased) {
- if (profile >= C2Config::PROFILE_AVC_BASELINE &&
- profile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH) {
- if (sliceBased) {
- return V4L2_PIX_FMT_H264_SLICE;
- } else {
- return V4L2_PIX_FMT_H264;
- }
- } else if (profile >= C2Config::PROFILE_VP8_0 && profile <= C2Config::PROFILE_VP8_3) {
- if (sliceBased) {
- return V4L2_PIX_FMT_VP8_FRAME;
- } else {
- return V4L2_PIX_FMT_VP8;
- }
- } else if (profile >= C2Config::PROFILE_VP9_0 && profile <= C2Config::PROFILE_VP9_3) {
- if (sliceBased) {
- return V4L2_PIX_FMT_VP9_FRAME;
- } else {
- return V4L2_PIX_FMT_VP9;
- }
- } else {
- ALOGE("Unknown profile: %s", profileToString(profile));
- return 0;
- }
-}
-
-// static
-C2Config::profile_t V4L2Device::v4L2ProfileToC2Profile(VideoCodec codec, uint32_t profile) {
- switch (codec) {
- case VideoCodec::H264:
- switch (profile) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
- return C2Config::PROFILE_AVC_BASELINE;
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- return C2Config::PROFILE_AVC_MAIN;
- case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
- return C2Config::PROFILE_AVC_EXTENDED;
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- return C2Config::PROFILE_AVC_HIGH;
- }
- break;
- case VideoCodec::VP8:
- switch (profile) {
- case V4L2_MPEG_VIDEO_VP8_PROFILE_0:
- return C2Config::PROFILE_VP8_0;
- case V4L2_MPEG_VIDEO_VP8_PROFILE_1:
- return C2Config::PROFILE_VP8_1;
- case V4L2_MPEG_VIDEO_VP8_PROFILE_2:
- return C2Config::PROFILE_VP8_2;
- case V4L2_MPEG_VIDEO_VP8_PROFILE_3:
- return C2Config::PROFILE_VP8_3;
- }
- break;
- case VideoCodec::VP9:
- switch (profile) {
- case V4L2_MPEG_VIDEO_VP9_PROFILE_0:
- return C2Config::PROFILE_VP9_0;
- case V4L2_MPEG_VIDEO_VP9_PROFILE_1:
- return C2Config::PROFILE_VP9_1;
- case V4L2_MPEG_VIDEO_VP9_PROFILE_2:
- return C2Config::PROFILE_VP9_2;
- case V4L2_MPEG_VIDEO_VP9_PROFILE_3:
- return C2Config::PROFILE_VP9_3;
- }
- break;
- default:
- ALOGE("Unknown codec: %u", codec);
- }
- ALOGE("Unknown profile: %u", profile);
- return C2Config::PROFILE_UNUSED;
-}
-
-std::vector<C2Config::profile_t> V4L2Device::v4L2PixFmtToC2Profiles(uint32_t pixFmt,
- bool /*isEncoder*/) {
- auto getSupportedProfiles = [this](VideoCodec codec,
- std::vector<C2Config::profile_t>* profiles) {
- uint32_t queryId = 0;
- switch (codec) {
- case VideoCodec::H264:
- queryId = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
- break;
- case VideoCodec::VP8:
- queryId = V4L2_CID_MPEG_VIDEO_VP8_PROFILE;
- break;
- case VideoCodec::VP9:
- queryId = V4L2_CID_MPEG_VIDEO_VP9_PROFILE;
- break;
- default:
- return false;
- }
-
- v4l2_queryctrl queryCtrl = {};
- queryCtrl.id = queryId;
- if (ioctl(VIDIOC_QUERYCTRL, &queryCtrl) != 0) {
- return false;
- }
- v4l2_querymenu queryMenu = {};
- queryMenu.id = queryCtrl.id;
- for (queryMenu.index = queryCtrl.minimum;
- static_cast<int>(queryMenu.index) <= queryCtrl.maximum; queryMenu.index++) {
- if (ioctl(VIDIOC_QUERYMENU, &queryMenu) == 0) {
- const C2Config::profile_t profile =
- V4L2Device::v4L2ProfileToC2Profile(codec, queryMenu.index);
- if (profile != C2Config::PROFILE_UNUSED) profiles->push_back(profile);
- }
- }
- return true;
- };
-
- std::vector<C2Config::profile_t> profiles;
- switch (pixFmt) {
- case V4L2_PIX_FMT_H264:
- case V4L2_PIX_FMT_H264_SLICE:
- if (!getSupportedProfiles(VideoCodec::H264, &profiles)) {
- ALOGW("Driver doesn't support QUERY H264 profiles, "
- "use default values, Base, Main, High");
- profiles = {
- C2Config::PROFILE_AVC_BASELINE,
- C2Config::PROFILE_AVC_MAIN,
- C2Config::PROFILE_AVC_HIGH,
- };
- }
- break;
- case V4L2_PIX_FMT_VP8:
- case V4L2_PIX_FMT_VP8_FRAME:
- if (!getSupportedProfiles(VideoCodec::VP8, &profiles)) {
- ALOGW("Driver doesn't support QUERY VP8 profiles, use default values, Profile0");
- profiles = {C2Config::PROFILE_VP8_0};
- }
- break;
- case V4L2_PIX_FMT_VP9:
- case V4L2_PIX_FMT_VP9_FRAME:
- if (!getSupportedProfiles(VideoCodec::VP9, &profiles)) {
- ALOGW("Driver doesn't support QUERY VP9 profiles, use default values, Profile0");
- profiles = {C2Config::PROFILE_VP9_0};
- }
- break;
- default:
- ALOGE("Unhandled pixelformat %s", fourccToString(pixFmt).c_str());
- return {};
- }
-
- // Erase duplicated profiles.
- std::sort(profiles.begin(), profiles.end());
- profiles.erase(std::unique(profiles.begin(), profiles.end()), profiles.end());
- return profiles;
-}
-
-// static
-int32_t V4L2Device::c2ProfileToV4L2H264Profile(C2Config::profile_t profile) {
- switch (profile) {
- case C2Config::PROFILE_AVC_BASELINE:
- return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
- case C2Config::PROFILE_AVC_MAIN:
- return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
- case C2Config::PROFILE_AVC_EXTENDED:
- return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
- case C2Config::PROFILE_AVC_HIGH:
- return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
- case C2Config::PROFILE_AVC_HIGH_10:
- return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10;
- case C2Config::PROFILE_AVC_HIGH_422:
- return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422;
- case C2Config::PROFILE_AVC_HIGH_444_PREDICTIVE:
- return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE;
- case C2Config::PROFILE_AVC_SCALABLE_BASELINE:
- return V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE;
- case C2Config::PROFILE_AVC_SCALABLE_HIGH:
- return V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH;
- case C2Config::PROFILE_AVC_STEREO_HIGH:
- return V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH;
- case C2Config::PROFILE_AVC_MULTIVIEW_HIGH:
- return V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH;
- default:
- ALOGE("Add more cases as needed");
- return -1;
- }
-}
-
-// static
-int32_t V4L2Device::h264LevelIdcToV4L2H264Level(uint8_t levelIdc) {
- switch (levelIdc) {
- case 10:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
- case 9:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
- case 11:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
- case 12:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
- case 13:
- return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
- case 20:
- return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
- case 21:
- return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
- case 22:
- return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
- case 30:
- return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
- case 31:
- return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
- case 32:
- return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
- case 40:
- return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
- case 41:
- return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
- case 42:
- return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
- case 50:
- return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
- case 51:
- return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
- default:
- ALOGE("Unrecognized levelIdc: %u", static_cast<uint32_t>(levelIdc));
- return -1;
- }
-}
-
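-// Note (informative): level_idc carries the H.264 level number times ten, e.g. Level 4.1 is
-// signaled as 41. Level 1b is the special case: the spec signals it as level_idc 11 with
-// constraint_set3_flag set for Baseline/Main/Extended, and as level_idc 9 for other profiles,
-// hence the dedicated mapping of 9 to V4L2_MPEG_VIDEO_H264_LEVEL_1B above.
-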
-// static
-ui::Size V4L2Device::allocatedSizeFromV4L2Format(const struct v4l2_format& format) {
- ui::Size codedSize;
- ui::Size visibleSize;
- VideoPixelFormat frameFormat = VideoPixelFormat::UNKNOWN;
- size_t bytesPerLine = 0;
- // Total bytes in the frame.
- size_t sizeimage = 0;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
- ALOG_ASSERT(format.fmt.pix_mp.num_planes > 0);
- bytesPerLine = base::checked_cast<int>(format.fmt.pix_mp.plane_fmt[0].bytesperline);
- for (size_t i = 0; i < format.fmt.pix_mp.num_planes; ++i) {
- sizeimage += base::checked_cast<int>(format.fmt.pix_mp.plane_fmt[i].sizeimage);
- }
- visibleSize.set(base::checked_cast<int>(format.fmt.pix_mp.width),
- base::checked_cast<int>(format.fmt.pix_mp.height));
- const uint32_t pixFmt = format.fmt.pix_mp.pixelformat;
- const auto frameFourcc = Fourcc::fromV4L2PixFmt(pixFmt);
- if (!frameFourcc) {
- ALOGE("Unsupported format %s", fourccToString(pixFmt).c_str());
- return codedSize;
- }
- frameFormat = frameFourcc->toVideoPixelFormat();
- } else {
- bytesPerLine = base::checked_cast<int>(format.fmt.pix.bytesperline);
- sizeimage = base::checked_cast<int>(format.fmt.pix.sizeimage);
- visibleSize.set(base::checked_cast<int>(format.fmt.pix.width),
- base::checked_cast<int>(format.fmt.pix.height));
- const uint32_t fourcc = format.fmt.pix.pixelformat;
- const auto frameFourcc = Fourcc::fromV4L2PixFmt(fourcc);
- if (!frameFourcc) {
- ALOGE("Unsupported format %s", fourccToString(fourcc).c_str());
- return codedSize;
- }
-        frameFormat = frameFourcc->toVideoPixelFormat();
- }
-
-    // V4L2 does not provide per-plane bytesperline (bpl) when different components share one
-    // physical plane buffer. In this case, it only provides bpl for the first component in the
-    // plane, so we can't depend on it for calculating height, because bpl may vary within one
-    // physical plane buffer. For example, YUV420 packs 3 components into one physical plane, with
-    // Y at 8 bits per pixel and Cb/Cr at 4 bits per pixel each, but bytesperline for physical
-    // plane 0 only reflects the 8 bits per pixel of Y. So we need to get the total frame bpp from
-    // elsewhere to calculate the coded height (see the worked example after this function).
-
-    // Only the bits per pixel of the first component is needed to calculate the coded width
-    // from bytesperline.
- int planeHorizBitsPerPixel = planeHorizontalBitsPerPixel(frameFormat, 0);
-
- // Adding up bpp for each component will give us total bpp for all components.
- int totalBpp = 0;
- for (size_t i = 0; i < numPlanes(frameFormat); ++i)
- totalBpp += planeBitsPerPixel(frameFormat, i);
-
- if (sizeimage == 0 || bytesPerLine == 0 || planeHorizBitsPerPixel == 0 || totalBpp == 0 ||
- (bytesPerLine * 8) % planeHorizBitsPerPixel != 0) {
- ALOGE("Invalid format provided");
- return codedSize;
- }
-
- // Coded width can be calculated by taking the first component's bytesperline, which in V4L2
- // always applies to the first component in physical plane buffer.
- int codedWidth = bytesPerLine * 8 / planeHorizBitsPerPixel;
- // Sizeimage is codedWidth * codedHeight * totalBpp.
- int codedHeight = sizeimage * 8 / codedWidth / totalBpp;
-
- codedSize.set(codedWidth, codedHeight);
- ALOGV("codedSize=%s", toString(codedSize).c_str());
-
- // Sanity checks. Calculated coded size has to contain given visible size and fulfill buffer
- // byte size requirements.
- ALOG_ASSERT(Rect(codedSize).Contains(Rect(visibleSize)));
- ALOG_ASSERT(sizeimage <= allocationSize(frameFormat, codedSize));
-
- return codedSize;
-}
-
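-// Worked example (illustrative, with assumed driver values): for single-planar NV12 with
-// bytesperline = 1344 and sizeimage = 1344 * 768 * 3 / 2 = 1548288 around a 1280x720 visible
-// size, planeHorizontalBitsPerPixel(NV12, 0) = 8 and the total bpp over both planes is
-// 8 + 4 = 12, so:
-//   codedWidth  = 1344 * 8 / 8            = 1344
-//   codedHeight = 1548288 * 8 / 1344 / 12 = 768
-// The resulting 1344x768 coded size contains the visible rectangle, as the sanity checks require.
-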
-// static
-const char* V4L2Device::v4L2MemoryToString(const v4l2_memory memory) {
- switch (memory) {
- case V4L2_MEMORY_MMAP:
- return "V4L2_MEMORY_MMAP";
- case V4L2_MEMORY_USERPTR:
- return "V4L2_MEMORY_USERPTR";
- case V4L2_MEMORY_DMABUF:
- return "V4L2_MEMORY_DMABUF";
- case V4L2_MEMORY_OVERLAY:
- return "V4L2_MEMORY_OVERLAY";
- default:
- return "UNKNOWN";
- }
-}
-
-// static
-const char* V4L2Device::v4L2BufferTypeToString(const enum v4l2_buf_type bufType) {
- switch (bufType) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return "OUTPUT";
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return "CAPTURE";
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return "OUTPUT_MPLANE";
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- return "CAPTURE_MPLANE";
- default:
- return "UNKNOWN";
- }
-}
-
-// static
-std::string V4L2Device::v4L2FormatToString(const struct v4l2_format& format) {
- std::ostringstream s;
- s << "v4l2_format type: " << format.type;
- if (format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE || format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- // single-planar
- const struct v4l2_pix_format& pix = format.fmt.pix;
- s << ", width_height: " << toString(ui::Size(pix.width, pix.height))
- << ", pixelformat: " << fourccToString(pix.pixelformat) << ", field: " << pix.field
- << ", bytesperline: " << pix.bytesperline << ", sizeimage: " << pix.sizeimage;
- } else if (V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
- const struct v4l2_pix_format_mplane& pixMp = format.fmt.pix_mp;
-        // Since num_planes is a uint8_t, ostringstream would print it as a char instead of an
-        // integer, which is not what we want. Casting pix_mp.num_planes to unsigned int solves
-        // the issue.
- s << ", width_height: " << toString(ui::Size(pixMp.width, pixMp.height))
- << ", pixelformat: " << fourccToString(pixMp.pixelformat) << ", field: " << pixMp.field
- << ", num_planes: " << static_cast<unsigned int>(pixMp.num_planes);
- for (size_t i = 0; i < pixMp.num_planes; ++i) {
- const struct v4l2_plane_pix_format& plane_fmt = pixMp.plane_fmt[i];
- s << ", plane_fmt[" << i << "].sizeimage: " << plane_fmt.sizeimage << ", plane_fmt["
- << i << "].bytesperline: " << plane_fmt.bytesperline;
- }
- } else {
- s << " unsupported yet.";
- }
- return s.str();
-}
-
-// static
-std::string V4L2Device::v4L2BufferToString(const struct v4l2_buffer& buffer) {
- std::ostringstream s;
- s << "v4l2_buffer type: " << buffer.type << ", memory: " << buffer.memory
- << ", index: " << buffer.index << " bytesused: " << buffer.bytesused
- << ", length: " << buffer.length;
- if (buffer.type == V4L2_BUF_TYPE_VIDEO_CAPTURE || buffer.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- // single-planar
- if (buffer.memory == V4L2_MEMORY_MMAP) {
- s << ", m.offset: " << buffer.m.offset;
- } else if (buffer.memory == V4L2_MEMORY_USERPTR) {
- s << ", m.userptr: " << buffer.m.userptr;
- } else if (buffer.memory == V4L2_MEMORY_DMABUF) {
- s << ", m.fd: " << buffer.m.fd;
-        }
- } else if (V4L2_TYPE_IS_MULTIPLANAR(buffer.type)) {
- for (size_t i = 0; i < buffer.length; ++i) {
- const struct v4l2_plane& plane = buffer.m.planes[i];
- s << ", m.planes[" << i << "](bytesused: " << plane.bytesused
- << ", length: " << plane.length << ", data_offset: " << plane.data_offset;
- if (buffer.memory == V4L2_MEMORY_MMAP) {
- s << ", m.mem_offset: " << plane.m.mem_offset;
- } else if (buffer.memory == V4L2_MEMORY_USERPTR) {
- s << ", m.userptr: " << plane.m.userptr;
- } else if (buffer.memory == V4L2_MEMORY_DMABUF) {
- s << ", m.fd: " << plane.m.fd;
- }
- s << ")";
- }
- } else {
- s << " unsupported yet.";
- }
- return s.str();
-}
-
-// static
-std::optional<VideoFrameLayout> V4L2Device::v4L2FormatToVideoFrameLayout(
- const struct v4l2_format& format) {
- if (!V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
- ALOGE("v4l2_buf_type is not multiplanar: 0x%" PRIx32, format.type);
- return std::nullopt;
- }
- const v4l2_pix_format_mplane& pixMp = format.fmt.pix_mp;
- const uint32_t& pixFmt = pixMp.pixelformat;
- const auto videoFourcc = Fourcc::fromV4L2PixFmt(pixFmt);
- if (!videoFourcc) {
- ALOGE("Failed to convert pixel format to VideoPixelFormat: %s",
- fourccToString(pixFmt).c_str());
- return std::nullopt;
- }
- const VideoPixelFormat videoFormat = videoFourcc->toVideoPixelFormat();
- const size_t numBuffers = pixMp.num_planes;
- const size_t numColorPlanes = numPlanes(videoFormat);
- if (numColorPlanes == 0) {
- ALOGE("Unsupported video format for NumPlanes(): %s",
- videoPixelFormatToString(videoFormat).c_str());
- return std::nullopt;
- }
- if (numBuffers > numColorPlanes) {
- ALOGE("pix_mp.num_planes: %zu should not be larger than NumPlanes(%s): %zu", numBuffers,
- videoPixelFormatToString(videoFormat).c_str(), numColorPlanes);
- return std::nullopt;
- }
- // Reserve capacity in advance to prevent unnecessary vector reallocation.
- std::vector<VideoFramePlane> planes;
- planes.reserve(numColorPlanes);
- for (size_t i = 0; i < numBuffers; ++i) {
- const v4l2_plane_pix_format& planeFormat = pixMp.plane_fmt[i];
- planes.push_back(VideoFramePlane{planeFormat.bytesperline, 0u, planeFormat.sizeimage});
- }
-    // When there are more color planes than buffers, fill in the stride, offset and size of the
-    // color planes that do not map to a buffer. Right now only some pixel formats are supported:
-    // NV12, YUV420, YVU420.
- if (numColorPlanes > numBuffers) {
- const uint32_t yStride = planes[0].mStride;
-        // Note that |yStride| comes from the v4l2 bytesperline field, whose type is uint32_t, so
-        // it is safe to cast to size_t.
- const size_t yStrideAbs = static_cast<size_t>(yStride);
- switch (pixFmt) {
- case V4L2_PIX_FMT_NV12:
- // The stride of UV is the same as Y in NV12. The height is half of Y plane.
- planes.push_back(VideoFramePlane{yStride, yStrideAbs * pixMp.height,
- yStrideAbs * pixMp.height / 2});
- ALOG_ASSERT(2u == planes.size());
- break;
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420: {
-            // The spec claims that two Cx rows (including padding) are exactly as long as one Y
-            // row (including padding), so the stride of the Y plane must be an even number (a
-            // worked example follows this function).
- if (yStride % 2 != 0 || pixMp.height % 2 != 0) {
- ALOGE("Plane-Y stride and height should be even; stride: %u, height: %u", yStride,
- pixMp.height);
- return std::nullopt;
- }
- const uint32_t halfStride = yStride / 2;
- const size_t plane0Area = yStrideAbs * pixMp.height;
- const size_t plane1Area = plane0Area / 4;
- planes.push_back(VideoFramePlane{halfStride, plane0Area, plane1Area});
- planes.push_back(VideoFramePlane{halfStride, plane0Area + plane1Area, plane1Area});
- ALOG_ASSERT(3u == planes.size());
- break;
- }
- default:
- ALOGE("Cannot derive stride for each plane for pixel format %s",
- fourccToString(pixFmt).c_str());
- return std::nullopt;
- }
- }
-
- return VideoFrameLayout{videoFormat, ui::Size(pixMp.width, pixMp.height), std::move(planes),
- (numBuffers > 1)};
-}
-
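-// Worked example (illustrative): for a single-buffer V4L2_PIX_FMT_YUV420 format of 640x480 with
-// a Y stride of 640 and sizeimage of 460800, the derivation above yields:
-//   planes[0] = {stride 640, offset 0,      size 460800}  // Y, taken from plane_fmt[0].
-//   planes[1] = {stride 320, offset 307200, size 76800}   // Derived Cb plane.
-//   planes[2] = {stride 320, offset 384000, size 76800}   // Derived Cr plane.
-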
-// static
-size_t V4L2Device::getNumPlanesOfV4L2PixFmt(uint32_t pixFmt) {
- std::optional<Fourcc> fourcc = Fourcc::fromV4L2PixFmt(pixFmt);
- if (fourcc && fourcc->isMultiPlanar()) {
- return numPlanes(fourcc->toVideoPixelFormat());
- }
- return 1u;
-}
-
-void V4L2Device::getSupportedResolution(uint32_t pixelFormat, ui::Size* minResolution,
- ui::Size* maxResolution) {
- maxResolution->set(0, 0);
- minResolution->set(0, 0);
- v4l2_frmsizeenum frameSize;
- memset(&frameSize, 0, sizeof(frameSize));
- frameSize.pixel_format = pixelFormat;
- for (; ioctl(VIDIOC_ENUM_FRAMESIZES, &frameSize) == 0; ++frameSize.index) {
- if (frameSize.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
- if (frameSize.discrete.width >= base::checked_cast<uint32_t>(maxResolution->width) &&
- frameSize.discrete.height >= base::checked_cast<uint32_t>(maxResolution->height)) {
- maxResolution->set(frameSize.discrete.width, frameSize.discrete.height);
- }
- if (isEmpty(*minResolution) ||
- (frameSize.discrete.width <= base::checked_cast<uint32_t>(minResolution->width) &&
- frameSize.discrete.height <=
- base::checked_cast<uint32_t>(minResolution->height))) {
- minResolution->set(frameSize.discrete.width, frameSize.discrete.height);
- }
- } else if (frameSize.type == V4L2_FRMSIZE_TYPE_STEPWISE ||
- frameSize.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
- maxResolution->set(frameSize.stepwise.max_width, frameSize.stepwise.max_height);
- minResolution->set(frameSize.stepwise.min_width, frameSize.stepwise.min_height);
- break;
- }
- }
- if (isEmpty(*maxResolution)) {
- maxResolution->set(1920, 1088);
- ALOGE("GetSupportedResolution failed to get maximum resolution for fourcc %s, "
- "fall back to %s",
- fourccToString(pixelFormat).c_str(), toString(*maxResolution).c_str());
- }
- if (isEmpty(*minResolution)) {
- minResolution->set(16, 16);
- ALOGE("GetSupportedResolution failed to get minimum resolution for fourcc %s, "
- "fall back to %s",
- fourccToString(pixelFormat).c_str(), toString(*minResolution).c_str());
- }
-}
-
-std::vector<uint32_t> V4L2Device::enumerateSupportedPixelformats(v4l2_buf_type bufType) {
- std::vector<uint32_t> pixelFormats;
-
- v4l2_fmtdesc fmtDesc;
- memset(&fmtDesc, 0, sizeof(fmtDesc));
- fmtDesc.type = bufType;
-
- for (; ioctl(VIDIOC_ENUM_FMT, &fmtDesc) == 0; ++fmtDesc.index) {
- ALOGV("Found %s (0x%" PRIx32 ")", fmtDesc.description, fmtDesc.pixelformat);
- pixelFormats.push_back(fmtDesc.pixelformat);
- }
-
- return pixelFormats;
-}
-
-V4L2Device::SupportedDecodeProfiles V4L2Device::getSupportedDecodeProfiles(
- const size_t numFormats, const uint32_t pixelFormats[]) {
- SupportedDecodeProfiles supportedProfiles;
-
- Type type = Type::kDecoder;
- const auto& devices = getDevicesForType(type);
- for (const auto& device : devices) {
- if (!openDevicePath(device.first, type)) {
- ALOGV("Failed opening %s", device.first.c_str());
- continue;
- }
-
- const auto& profiles = enumerateSupportedDecodeProfiles(numFormats, pixelFormats);
- supportedProfiles.insert(supportedProfiles.end(), profiles.begin(), profiles.end());
- closeDevice();
- }
-
- return supportedProfiles;
-}
-
-V4L2Device::SupportedEncodeProfiles V4L2Device::getSupportedEncodeProfiles() {
- SupportedEncodeProfiles supportedProfiles;
-
- Type type = Type::kEncoder;
- const auto& devices = getDevicesForType(type);
- for (const auto& device : devices) {
- if (!openDevicePath(device.first, type)) {
- ALOGV("Failed opening %s", device.first.c_str());
- continue;
- }
-
- const auto& profiles = enumerateSupportedEncodeProfiles();
- supportedProfiles.insert(supportedProfiles.end(), profiles.begin(), profiles.end());
- closeDevice();
- }
-
- return supportedProfiles;
-}
-
-V4L2Device::SupportedDecodeProfiles V4L2Device::enumerateSupportedDecodeProfiles(
- const size_t numFormats, const uint32_t pixelFormats[]) {
- SupportedDecodeProfiles profiles;
-
- const auto& supportedPixelformats =
- enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
- for (uint32_t pixelFormat : supportedPixelformats) {
- if (std::find(pixelFormats, pixelFormats + numFormats, pixelFormat) ==
- pixelFormats + numFormats)
- continue;
-
- SupportedDecodeProfile profile;
- getSupportedResolution(pixelFormat, &profile.min_resolution, &profile.max_resolution);
-
- const auto videoCodecProfiles = v4L2PixFmtToC2Profiles(pixelFormat, false);
-
- for (const auto& videoCodecProfile : videoCodecProfiles) {
- profile.profile = videoCodecProfile;
- profiles.push_back(profile);
-
- ALOGV("Found decoder profile %s, resolutions: %s %s", profileToString(profile.profile),
- toString(profile.min_resolution).c_str(),
- toString(profile.max_resolution).c_str());
- }
- }
-
- return profiles;
-}
-
-V4L2Device::SupportedEncodeProfiles V4L2Device::enumerateSupportedEncodeProfiles() {
- SupportedEncodeProfiles profiles;
-
- const auto& supportedPixelformats =
- enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-
- for (const auto& pixelformat : supportedPixelformats) {
- SupportedEncodeProfile profile;
- profile.max_framerate_numerator = 30;
- profile.max_framerate_denominator = 1;
- ui::Size minResolution;
- getSupportedResolution(pixelformat, &minResolution, &profile.max_resolution);
-
- const auto videoCodecProfiles = v4L2PixFmtToC2Profiles(pixelformat, true);
-
- for (const auto& videoCodecProfile : videoCodecProfiles) {
- profile.profile = videoCodecProfile;
- profiles.push_back(profile);
-
- ALOGV("Found encoder profile %s, max resolution: %s", profileToString(profile.profile),
- toString(profile.max_resolution).c_str());
- }
- }
-
- return profiles;
-}
-
-bool V4L2Device::startPolling(android::V4L2DevicePoller::EventCallback eventCallback,
- base::RepeatingClosure errorCallback) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- if (!mDevicePoller) {
- mDevicePoller = std::make_unique<android::V4L2DevicePoller>(this, "V4L2DeviceThreadPoller");
- }
-
- bool ret = mDevicePoller->startPolling(std::move(eventCallback), std::move(errorCallback));
-
- if (!ret) mDevicePoller = nullptr;
-
- return ret;
-}
-
-bool V4L2Device::stopPolling() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- return !mDevicePoller || mDevicePoller->stopPolling();
-}
-
-void V4L2Device::schedulePoll() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- if (!mDevicePoller || !mDevicePoller->isPolling()) return;
-
- mDevicePoller->schedulePoll();
-}
-
-bool V4L2Device::isCtrlExposed(uint32_t ctrlId) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- struct v4l2_queryctrl queryCtrl;
- memset(&queryCtrl, 0, sizeof(queryCtrl));
- queryCtrl.id = ctrlId;
-
- return ioctl(VIDIOC_QUERYCTRL, &queryCtrl) == 0;
-}
-
-bool V4L2Device::setExtCtrls(uint32_t ctrlClass, std::vector<V4L2ExtCtrl> ctrls) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- if (ctrls.empty()) return true;
-
- struct v4l2_ext_controls extCtrls;
- memset(&extCtrls, 0, sizeof(extCtrls));
- extCtrls.ctrl_class = ctrlClass;
- extCtrls.count = ctrls.size();
- extCtrls.controls = &ctrls[0].ctrl;
- return ioctl(VIDIOC_S_EXT_CTRLS, &extCtrls) == 0;
-}
-
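-// Usage sketch (illustrative): batching two encoder controls through the wrapper above.
-//
-// std::vector<V4L2ExtCtrl> ctrls;
-// ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
-// ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
-// if (!device->setExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(ctrls))) {
-//     ALOGE("Failed to set H264 profile/level");
-// }
-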
-bool V4L2Device::isCommandSupported(uint32_t commandId) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- struct v4l2_encoder_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = commandId;
-
- return ioctl(VIDIOC_TRY_ENCODER_CMD, &cmd) == 0;
-}
-
-bool V4L2Device::hasCapabilities(uint32_t capabilities) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(mClientSequenceChecker);
-
- struct v4l2_capability caps;
- memset(&caps, 0, sizeof(caps));
- if (ioctl(VIDIOC_QUERYCAP, &caps) != 0) {
- ALOGE("Failed to query capabilities");
- return false;
- }
-
- return (caps.capabilities & capabilities) == capabilities;
-}
-
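-// Usage sketch (illustrative): a memory-to-memory codec device is typically validated with
-//   hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)
-// before streaming is started.
-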
-bool V4L2Device::openDevicePath(const std::string& path, Type /*type*/) {
- ALOG_ASSERT(!mDeviceFd.is_valid());
-
- mDeviceFd.reset(HANDLE_EINTR(::open(path.c_str(), O_RDWR | O_NONBLOCK | O_CLOEXEC)));
- if (!mDeviceFd.is_valid()) return false;
-
- return true;
-}
-
-void V4L2Device::closeDevice() {
- ALOGV("%s()", __func__);
-
- mDeviceFd.reset();
-}
-
-void V4L2Device::enumerateDevicesForType(Type type) {
- // video input/output devices are registered as /dev/videoX in V4L2.
- static const std::string kVideoDevicePattern = "/dev/video";
-
- std::string devicePattern;
- v4l2_buf_type bufType;
- switch (type) {
- case Type::kDecoder:
- devicePattern = kVideoDevicePattern;
- bufType = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- break;
- case Type::kEncoder:
- devicePattern = kVideoDevicePattern;
- bufType = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- break;
- default:
- ALOGE("Only decoder and encoder types are supported!!");
- return;
- }
-
- std::vector<std::string> candidatePaths;
-
- // TODO(posciak): Remove this legacy unnumbered device once all platforms are updated to use
- // numbered devices.
- candidatePaths.push_back(devicePattern);
-
- // We are sandboxed, so we can't query directory contents to check which devices are actually
- // available. Try to open the first 10; if not present, we will just fail to open immediately.
- for (int i = 0; i < 10; ++i) {
- candidatePaths.push_back(base::StringPrintf("%s%d", devicePattern.c_str(), i));
- }
-
- Devices devices;
- for (const auto& path : candidatePaths) {
- if (!openDevicePath(path, type)) {
- continue;
- }
-
- const auto& supportedPixelformats = enumerateSupportedPixelformats(bufType);
- if (!supportedPixelformats.empty()) {
- ALOGV("Found device: %s", path.c_str());
- devices.push_back(std::make_pair(path, supportedPixelformats));
- }
-
- closeDevice();
- }
-
- ALOG_ASSERT(mDevicesByType.count(type) == 0u);
- mDevicesByType[type] = devices;
-}
-
-const V4L2Device::Devices& V4L2Device::getDevicesForType(Type type) {
- if (mDevicesByType.count(type) == 0) enumerateDevicesForType(type);
-
- ALOG_ASSERT(mDevicesByType.count(type) != 0u);
- return mDevicesByType[type];
-}
-
-std::string V4L2Device::getDevicePathFor(Type type, uint32_t pixFmt) {
- const Devices& devices = getDevicesForType(type);
-
- for (const auto& device : devices) {
- if (std::find(device.second.begin(), device.second.end(), pixFmt) != device.second.end())
- return device.first;
- }
-
- return std::string();
-}
-
-} // namespace android
diff --git a/common/V4L2DevicePoller.cpp b/common/V4L2DevicePoller.cpp
deleted file mode 100644
index 5f2d0a5..0000000
--- a/common/V4L2DevicePoller.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 22d34680c8ac
-
-#include <v4l2_codec2/common/V4L2DevicePoller.h>
-
-#include <string>
-
-#include <base/bind.h>
-#include <base/threading/sequenced_task_runner_handle.h>
-#include <base/threading/thread_checker.h>
-#include <log/log.h>
-
-#include <v4l2_codec2/common/V4L2Device.h>
-
-namespace android {
-
-V4L2DevicePoller::V4L2DevicePoller(V4L2Device* const device, const std::string& threadName)
- : mDevice(device),
- mPollThread(std::move(threadName)),
- mTriggerPoll(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED),
- mStopPolling(false) {}
-
-V4L2DevicePoller::~V4L2DevicePoller() {
- ALOG_ASSERT(mClientTaskTunner->RunsTasksInCurrentSequence());
-
- stopPolling();
-}
-
-bool V4L2DevicePoller::startPolling(EventCallback eventCallback,
- base::RepeatingClosure errorCallback) {
- if (isPolling()) return true;
-
- ALOGV("Starting polling");
-
- mClientTaskTunner = base::SequencedTaskRunnerHandle::Get();
- mErrorCallback = errorCallback;
-
- if (!mPollThread.Start()) {
- ALOGE("Failed to start device poll thread");
- return false;
- }
-
- mEventCallback = std::move(eventCallback);
-
- mStopPolling.store(false);
- mPollThread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&V4L2DevicePoller::devicePollTask, base::Unretained(this)));
-
- ALOGV("Polling thread started");
-
- schedulePoll();
-
- return true;
-}
-
-bool V4L2DevicePoller::stopPolling() {
- ALOG_ASSERT(mClientTaskTunner->RunsTasksInCurrentSequence());
-
- if (!isPolling()) return true;
-
- ALOGV("Stopping polling");
-
- mStopPolling.store(true);
-
- mTriggerPoll.Signal();
-
- if (!mDevice->setDevicePollInterrupt()) {
- ALOGE("Failed to interrupt device poll.");
- return false;
- }
-
- ALOGV("Stop device poll thread");
- mPollThread.Stop();
-
- if (!mDevice->clearDevicePollInterrupt()) {
- ALOGE("Failed to clear interrupting device poll.");
- return false;
- }
-
- ALOGV("Polling thread stopped");
-
- return true;
-}
-
-bool V4L2DevicePoller::isPolling() const {
- ALOG_ASSERT(mClientTaskTunner->RunsTasksInCurrentSequence());
-
- return mPollThread.IsRunning();
-}
-
-void V4L2DevicePoller::schedulePoll() {
- ALOG_ASSERT(mClientTaskTunner->RunsTasksInCurrentSequence());
-
-    // A call to devicePollTask() will be posted when we actually start polling.
- if (!isPolling()) return;
-
- ALOGV("Scheduling poll");
-
- mTriggerPoll.Signal();
-}
-
-void V4L2DevicePoller::devicePollTask() {
- ALOG_ASSERT(mClientTaskTunner->RunsTasksInCurrentSequence());
-
- while (true) {
- ALOGV("Waiting for poll to be scheduled.");
- mTriggerPoll.Wait();
-
- if (mStopPolling) {
- ALOGV("Poll stopped, exiting.");
- break;
- }
-
- bool event_pending = false;
- ALOGV("Polling device.");
- if (!mDevice->poll(true, &event_pending)) {
- ALOGE("An error occurred while polling, calling error callback");
- mClientTaskTunner->PostTask(FROM_HERE, mErrorCallback);
- return;
- }
-
- ALOGV("Poll returned, calling event callback.");
- mClientTaskTunner->PostTask(FROM_HERE, base::Bind(mEventCallback, event_pending));
- }
-}
-
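-// Usage sketch (illustrative; Decoder, onPollEvent() and onPollError() are hypothetical): the
-// poller is normally driven through V4L2Device rather than used directly:
-//
-// device->startPolling(base::Bind(&Decoder::onPollEvent, weakThis),
-//                      base::Bind(&Decoder::onPollError, weakThis));
-// device->schedulePoll();   // Re-armed after each event while work is pending.
-// ...
-// device->stopPolling();    // Must be called on the same sequence as startPolling().
-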
-} // namespace android
diff --git a/common/VideoPixelFormat.cpp b/common/VideoPixelFormat.cpp
deleted file mode 100644
index f175c26..0000000
--- a/common/VideoPixelFormat.cpp
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 3b7ce92816e2
-// Note: only necessary functions are ported from video_types.cc
-
-#include <v4l2_codec2/common/VideoPixelFormat.h>
-
-#include <sstream>
-
-#include <base/bits.h>
-#include <utils/Log.h>
-
-namespace android {
-
-namespace {
-
-enum {
- kMaxPlanes = 4,
- kYPlane = 0,
- kARGBPlane = kYPlane,
- kUPlane = 1,
- kUVPlane = kUPlane,
- kVPlane = 2,
- kAPlane = 3,
-};
-}
-
-std::string videoPixelFormatToString(VideoPixelFormat format) {
- switch (format) {
- case VideoPixelFormat::I420:
- return "I420";
- case VideoPixelFormat::YV12:
- return "YV12";
- case VideoPixelFormat::I422:
- return "I422";
- case VideoPixelFormat::I420A:
- return "I420A";
- case VideoPixelFormat::I444:
- return "I444";
- case VideoPixelFormat::NV12:
- return "NV12";
- case VideoPixelFormat::NV21:
- return "NV21";
- case VideoPixelFormat::YUY2:
- return "YUY2";
- case VideoPixelFormat::ARGB:
- return "ARGB";
- case VideoPixelFormat::XRGB:
- return "XRGB";
- case VideoPixelFormat::RGB24:
- return "RGB24";
- case VideoPixelFormat::MJPEG:
- return "MJPEG";
- case VideoPixelFormat::YUV420P9:
- return "YUV420P9";
- case VideoPixelFormat::YUV420P10:
- return "YUV420P10";
- case VideoPixelFormat::YUV422P9:
- return "YUV422P9";
- case VideoPixelFormat::YUV422P10:
- return "YUV422P10";
- case VideoPixelFormat::YUV444P9:
- return "YUV444P9";
- case VideoPixelFormat::YUV444P10:
- return "YUV444P10";
- case VideoPixelFormat::YUV420P12:
- return "YUV420P12";
- case VideoPixelFormat::YUV422P12:
- return "YUV422P12";
- case VideoPixelFormat::YUV444P12:
- return "YUV444P12";
- case VideoPixelFormat::Y16:
- return "Y16";
- case VideoPixelFormat::ABGR:
- return "ABGR";
- case VideoPixelFormat::XBGR:
- return "XBGR";
- case VideoPixelFormat::P016LE:
- return "P016LE";
- case VideoPixelFormat::XR30:
- return "XR30";
- case VideoPixelFormat::XB30:
- return "XB30";
- case VideoPixelFormat::BGRA:
- return "BGRA";
- case VideoPixelFormat::UNKNOWN:
- return "UNKNOWN";
- }
-}
-
-std::string fourccToString(uint32_t fourcc) {
- std::string result = "0000";
- for (size_t i = 0; i < 4; ++i, fourcc >>= 8) {
- const char c = static_cast<char>(fourcc & 0xFF);
-        if (c <= 0x1f || c >= 0x7f) {
-            // Not printable: return the remaining value as hex (needs a named stream).
-            std::stringstream hexFourcc;
-            hexFourcc << "0x" << std::hex << fourcc;
-            return hexFourcc.str();
-        }
- result[i] = c;
- }
- return result;
-}
-
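-// Worked example (illustrative): V4L2_PIX_FMT_NV12 is 0x3231564e; its bytes from least to most
-// significant are 'N' (0x4e), 'V' (0x56), '1' (0x31) and '2' (0x32), so fourccToString returns
-// "NV12". A value with a non-printable byte falls back to the hexadecimal form instead.
-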
-size_t bitDepth(VideoPixelFormat format) {
- switch (format) {
- case VideoPixelFormat::I420:
- case VideoPixelFormat::YV12:
- case VideoPixelFormat::I422:
- case VideoPixelFormat::I420A:
- case VideoPixelFormat::I444:
- case VideoPixelFormat::NV12:
- case VideoPixelFormat::NV21:
- case VideoPixelFormat::YUY2:
- case VideoPixelFormat::ARGB:
- case VideoPixelFormat::XRGB:
- case VideoPixelFormat::RGB24:
- case VideoPixelFormat::MJPEG:
- case VideoPixelFormat::ABGR:
- case VideoPixelFormat::XBGR:
- case VideoPixelFormat::BGRA:
- return 8;
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV444P9:
- return 9;
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- return 10;
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::YUV422P12:
- case VideoPixelFormat::YUV444P12:
- return 12;
- case VideoPixelFormat::Y16:
- case VideoPixelFormat::P016LE:
- return 16;
- case VideoPixelFormat::UNKNOWN:
- ALOGE("Invalid pixel format");
- return 0;
- }
-}
-
-// Returns true if the frame of pixel |format| is required to be allocated with an overall size
-// aligned to a multiple of two.
-static bool RequiresEvenSizeAllocation(VideoPixelFormat format) {
- switch (format) {
- case VideoPixelFormat::ARGB:
- case VideoPixelFormat::XRGB:
- case VideoPixelFormat::RGB24:
- case VideoPixelFormat::Y16:
- case VideoPixelFormat::ABGR:
- case VideoPixelFormat::XBGR:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- case VideoPixelFormat::BGRA:
- return false;
- case VideoPixelFormat::NV12:
- case VideoPixelFormat::NV21:
- case VideoPixelFormat::I420:
- case VideoPixelFormat::MJPEG:
- case VideoPixelFormat::YUY2:
- case VideoPixelFormat::YV12:
- case VideoPixelFormat::I422:
- case VideoPixelFormat::I444:
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV444P9:
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::YUV422P12:
- case VideoPixelFormat::YUV444P12:
- case VideoPixelFormat::I420A:
- case VideoPixelFormat::P016LE:
- return true;
- case VideoPixelFormat::UNKNOWN:
- ALOGE("Invalid pixel format");
- return false;
- }
-}
-
-size_t numPlanes(VideoPixelFormat format) {
- switch (format) {
- case VideoPixelFormat::YUY2:
- case VideoPixelFormat::ARGB:
- case VideoPixelFormat::BGRA:
- case VideoPixelFormat::XRGB:
- case VideoPixelFormat::RGB24:
- case VideoPixelFormat::MJPEG:
- case VideoPixelFormat::Y16:
- case VideoPixelFormat::ABGR:
- case VideoPixelFormat::XBGR:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- return 1;
- case VideoPixelFormat::NV12:
- case VideoPixelFormat::NV21:
- case VideoPixelFormat::P016LE:
- return 2;
- case VideoPixelFormat::I420:
- case VideoPixelFormat::YV12:
- case VideoPixelFormat::I422:
- case VideoPixelFormat::I444:
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV444P9:
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::YUV422P12:
- case VideoPixelFormat::YUV444P12:
- return 3;
- case VideoPixelFormat::I420A:
- return 4;
- case VideoPixelFormat::UNKNOWN:
- // Note: VideoPixelFormat::UNKNOWN is used for end-of-stream frame.
- return 0;
- }
-}
-
-size_t allocationSize(VideoPixelFormat format, const android::ui::Size& coded_size) {
- size_t total = 0;
- for (size_t i = 0; i < numPlanes(format); ++i) {
- android::ui::Size plane_size = planeSize(format, i, coded_size);
- total += (plane_size.width * plane_size.height);
- }
-
- return total;
-}
-
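-// Worked example (illustrative): for NV12 at a 640x480 coded size, planeSize(NV12, 0) is 640x480
-// (1-byte Y elements) and planeSize(NV12, 1) is 640x240 (2x2-subsampled, 2-byte CbCr elements),
-// so allocationSize(NV12, 640x480) = 307200 + 153600 = 460800 bytes.
-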
-android::ui::Size planeSize(VideoPixelFormat format, size_t plane,
- const android::ui::Size& coded_size) {
-    ALOG_ASSERT(isValidPlane(format, plane));
-
- int width = coded_size.width;
- int height = coded_size.height;
- if (RequiresEvenSizeAllocation(format)) {
- // Align to multiple-of-two size overall. This ensures that non-subsampled
- // planes can be addressed by pixel with the same scaling as the subsampled
- // planes.
- width = base::bits::Align(width, 2);
- height = base::bits::Align(height, 2);
- }
-
- const android::ui::Size subsample = SampleSize(format, plane);
- ALOG_ASSERT(width % subsample.width == 0);
- ALOG_ASSERT(height % subsample.height == 0);
- return android::ui::Size(bytesPerElement(format, plane) * width / subsample.width,
- height / subsample.height);
-}
-
-int planeHorizontalBitsPerPixel(VideoPixelFormat format, size_t plane) {
-    ALOG_ASSERT(isValidPlane(format, plane));
- const int bitsPerElement = 8 * bytesPerElement(format, plane);
- const int horizPixelsPerElement = SampleSize(format, plane).width;
- ALOG_ASSERT(bitsPerElement % horizPixelsPerElement == 0);
- return bitsPerElement / horizPixelsPerElement;
-}
-
-int planeBitsPerPixel(VideoPixelFormat format, size_t plane) {
-    ALOG_ASSERT(isValidPlane(format, plane));
- return planeHorizontalBitsPerPixel(format, plane) / SampleSize(format, plane).height;
-}
-
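-// Worked example (illustrative): for NV12, plane 0 has 1-byte elements and no subsampling, so
-// both functions return 8. Plane 1 packs one 2-byte CbCr element per 2x2 pixels:
-// planeHorizontalBitsPerPixel() = 16 / 2 = 8 and planeBitsPerPixel() = 8 / 2 = 4, matching
-// NV12's total of 8 + 4 = 12 bits per pixel.
-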
-int bytesPerElement(VideoPixelFormat format, size_t plane) {
- ALOG_ASSERT(isValidPlane(format, plane));
- switch (format) {
- case VideoPixelFormat::ARGB:
- case VideoPixelFormat::BGRA:
- case VideoPixelFormat::XRGB:
- case VideoPixelFormat::ABGR:
- case VideoPixelFormat::XBGR:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- return 4;
- case VideoPixelFormat::RGB24:
- return 3;
- case VideoPixelFormat::Y16:
- case VideoPixelFormat::YUY2:
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV444P9:
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::YUV422P12:
- case VideoPixelFormat::YUV444P12:
- case VideoPixelFormat::P016LE:
- return 2;
- case VideoPixelFormat::NV12:
- case VideoPixelFormat::NV21: {
- static const int bytes_per_element[] = {1, 2};
- ALOG_ASSERT(plane < base::size(bytes_per_element));
- return bytes_per_element[plane];
- }
- case VideoPixelFormat::YV12:
- case VideoPixelFormat::I420:
- case VideoPixelFormat::I422:
- case VideoPixelFormat::I420A:
- case VideoPixelFormat::I444:
- return 1;
- case VideoPixelFormat::MJPEG:
- return 0;
- case VideoPixelFormat::UNKNOWN:
- ALOGE("Invalid pixel format");
- return 0;
- }
-}
-
-bool isValidPlane(VideoPixelFormat format, size_t plane) {
- ALOG_ASSERT(numPlanes(format) <= static_cast<size_t>(kMaxPlanes));
- return plane < numPlanes(format);
-}
-
-android::ui::Size SampleSize(VideoPixelFormat format, size_t plane) {
- ALOG_ASSERT(isValidPlane(format, plane));
-
- switch (plane) {
- case kYPlane: // and kARGBPlane:
- case kAPlane:
- return android::ui::Size(1, 1);
-
- case kUPlane: // and kUVPlane:
- case kVPlane:
- switch (format) {
- case VideoPixelFormat::I444:
- case VideoPixelFormat::YUV444P9:
- case VideoPixelFormat::YUV444P10:
- case VideoPixelFormat::YUV444P12:
- case VideoPixelFormat::Y16:
- return android::ui::Size(1, 1);
-
- case VideoPixelFormat::I422:
- case VideoPixelFormat::YUV422P9:
- case VideoPixelFormat::YUV422P10:
- case VideoPixelFormat::YUV422P12:
- return android::ui::Size(2, 1);
-
- case VideoPixelFormat::YV12:
- case VideoPixelFormat::I420:
- case VideoPixelFormat::I420A:
- case VideoPixelFormat::NV12:
- case VideoPixelFormat::NV21:
- case VideoPixelFormat::YUV420P9:
- case VideoPixelFormat::YUV420P10:
- case VideoPixelFormat::YUV420P12:
- case VideoPixelFormat::P016LE:
- return android::ui::Size(2, 2);
-
- case VideoPixelFormat::UNKNOWN:
- case VideoPixelFormat::YUY2:
- case VideoPixelFormat::ARGB:
- case VideoPixelFormat::XRGB:
- case VideoPixelFormat::RGB24:
- case VideoPixelFormat::MJPEG:
- case VideoPixelFormat::ABGR:
- case VideoPixelFormat::XBGR:
- case VideoPixelFormat::XR30:
- case VideoPixelFormat::XB30:
- case VideoPixelFormat::BGRA:
- ALOGE("Invalid pixel format");
- }
- }
-
- return android::ui::Size();
-}
-
-} // namespace android
diff --git a/common/VideoTypes.cpp b/common/VideoTypes.cpp
index c123ad1..1ecceca 100644
--- a/common/VideoTypes.cpp
+++ b/common/VideoTypes.cpp
@@ -22,65 +22,6 @@ const char* VideoCodecToString(VideoCodec codec) {
}
}
-const char* profileToString(C2Config::profile_t profile) {
- switch (profile) {
- case C2Config::PROFILE_UNUSED:
- return "unused";
- case C2Config::PROFILE_AVC_BASELINE:
- return "h264 baseline";
- case C2Config::PROFILE_AVC_MAIN:
- return "h264 main";
- case C2Config::PROFILE_AVC_EXTENDED:
- return "h264 extended";
- case C2Config::PROFILE_AVC_HIGH:
- return "h264 high";
- case C2Config::PROFILE_AVC_HIGH_10:
- return "h264 high 10";
- case C2Config::PROFILE_AVC_HIGH_422:
- return "h264 high 4:2:2";
- case C2Config::PROFILE_AVC_HIGH_444_PREDICTIVE:
- return "h264 high 4:4:4 predictive";
- case C2Config::PROFILE_AVC_SCALABLE_BASELINE:
- return "h264 scalable baseline";
- case C2Config::PROFILE_AVC_SCALABLE_HIGH:
- return "h264 scalable high";
- case C2Config::PROFILE_AVC_STEREO_HIGH:
- return "h264 stereo high";
- case C2Config::PROFILE_AVC_MULTIVIEW_HIGH:
- return "h264 multiview high";
- case C2Config::PROFILE_HEVC_MAIN:
- return "hevc main";
- case C2Config::PROFILE_HEVC_MAIN_10:
- return "hevc main 10";
- case C2Config::PROFILE_HEVC_MAIN_STILL:
- return "hevc main still-picture";
- case C2Config::PROFILE_VP8_0:
- return "vp8 profile0";
- case C2Config::PROFILE_VP8_1:
- return "vp8 profile1";
- case C2Config::PROFILE_VP8_2:
- return "vp8 profile2";
- case C2Config::PROFILE_VP8_3:
- return "vp8 profile3";
- case C2Config::PROFILE_VP9_0:
- return "vp9 profile0";
- case C2Config::PROFILE_VP9_1:
- return "vp9 profile1";
- case C2Config::PROFILE_VP9_2:
- return "vp9 profile2";
- case C2Config::PROFILE_VP9_3:
- return "vp9 profile3";
- case C2Config::PROFILE_AV1_0:
- return "av1 profile 0";
- case C2Config::PROFILE_AV1_1:
- return "av1 profile 1";
- case C2Config::PROFILE_AV1_2:
- return "av1 profile 2";
- default:
- return "unknown";
- }
-}
-
const char* HalPixelFormatToString(HalPixelFormat format) {
switch (format) {
case HalPixelFormat::UNKNOWN:
diff --git a/common/include/v4l2_codec2/common/Common.h b/common/include/v4l2_codec2/common/Common.h
index 0775af1..650b7a7 100644
--- a/common/include/v4l2_codec2/common/Common.h
+++ b/common/include/v4l2_codec2/common/Common.h
@@ -7,47 +7,14 @@
#include <inttypes.h>
-#include <optional>
-#include <string>
-#include <vector>
-
-#include <ui/Rect.h>
-#include <ui/Size.h>
-
-#include <v4l2_codec2/common/VideoPixelFormat.h>
-
namespace android {
-// The stride, offset and size of a video frame plane.
+// The offset and stride of a video frame plane.
struct VideoFramePlane {
- uint32_t mStride = 0;
- size_t mOffset = 0;
- size_t mSize = 0;
-};
-
-// A video frame's layout, containing pixel format, size and layout of individual planes.
-struct VideoFrameLayout {
- VideoPixelFormat mFormat = VideoPixelFormat::UNKNOWN;
- android::ui::Size mCodedSize;
- std::vector<VideoFramePlane> mPlanes;
- bool mMultiPlanar = false;
+ uint32_t mOffset;
+ uint32_t mStride;
};
-// Check whether |rect1| completely contains |rect2|.
-bool contains(const Rect& rect1, const Rect& rect2);
-
-// Convert the specified |rect| to a string.
-std::string toString(const Rect& rect);
-
-// Get the area encapsulated by the |size|. Returns nullopt if multiplication causes overflow.
-std::optional<int> getArea(const ui::Size& size);
-
-// Check whether the specified |size| is empty
-bool isEmpty(const ui::Size& size);
-
-// Convert the specified |size| to a string.
-std::string toString(const ui::Size& size);
-
} // namespace android
#endif // ANDROID_V4L2_CODEC2_COMMON_COMMON_H
diff --git a/common/include/v4l2_codec2/common/EncodeHelpers.h b/common/include/v4l2_codec2/common/EncodeHelpers.h
index bfbdd05..d152ba8 100644
--- a/common/include/v4l2_codec2/common/EncodeHelpers.h
+++ b/common/include/v4l2_codec2/common/EncodeHelpers.h
@@ -6,10 +6,12 @@
#define ANDROID_V4L2_CODEC2_COMMON_HELPERS_H
#include <C2Config.h>
+#include <C2ParamDef.h>
#include <system/graphics.h>
-#include <ui/Size.h>
-#include <v4l2_codec2/common/VideoPixelFormat.h>
+#include <size.h>
+#include <video_codecs.h>
+#include <video_pixel_format.h>
namespace android {
@@ -27,17 +29,20 @@ struct VideoEncoderAcceleratorConfig {
DMABUF = 1,
};
- VideoPixelFormat mInputFormat;
- ui::Size mInputVisibleSize;
- C2Config::profile_t mOutputProfile;
+ media::VideoPixelFormat mInputFormat;
+ media::Size mInputVisibleSize;
+ media::VideoCodecProfile mOutputProfile;
uint32_t mInitialBitrate;
uint32_t mInitialFramerate;
uint8_t mH264OutputLevel;
VideoFrameStorageType mStorageType;
};
-// Convert the specified C2Config level to a V4L2 level.
-uint8_t c2LevelToV4L2Level(C2Config::level_t level);
+// Convert the specified C2Config profile to a media::VideoCodecProfile.
+media::VideoCodecProfile c2ProfileToVideoCodecProfile(C2Config::profile_t profile);
+
+// Convert the specified C2Config level to an integer value.
+uint8_t c2LevelToLevelIDC(C2Config::level_t level);
// Get the specified graphics block in YCbCr format.
android_ycbcr getGraphicBlockInfo(const C2ConstGraphicBlock& block);
@@ -48,6 +53,38 @@ android_ycbcr getGraphicBlockInfo(const C2ConstGraphicBlock& block);
void extractCSDInfo(std::unique_ptr<C2StreamInitDataInfo::output>* const csd, const uint8_t* data,
size_t length);
+// Helper class to parse H264 NAL units from data.
+class NalParser {
+public:
+ NalParser(const uint8_t* data, size_t length);
+
+ // Locates the next NAL after |mNextNalStartCodePos|. If there is one, updates |mCurrNalDataPos|
+ // to the first byte of the NAL data (start code is not included), and |mNextNalStartCodePos| to
+ // the position of the next start code, and returns true.
+ // If there is no more NAL, returns false.
+ //
+ // Note: This method must be called prior to data() and length().
+ bool locateNextNal();
+
+ // Gets current NAL data (start code is not included).
+ const uint8_t* data() const;
+
+ // Gets the byte length of current NAL data (start code is not included).
+ size_t length() const;
+
+private:
+ const uint8_t* findNextStartCodePos() const;
+
+ // The byte pattern for the start of a H264 NAL unit.
+ const uint8_t kNalStartCode[3] = {0x00, 0x00, 0x01};
+ // The length in bytes of the NAL-unit start pattern.
+ const size_t kNalStartCodeLength = 3;
+
+ const uint8_t* mCurrNalDataPos;
+ const uint8_t* mDataEnd;
+ const uint8_t* mNextNalStartCodePos;
+};
+
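+// Usage sketch (illustrative; processNalUnit() is a hypothetical consumer):
+//
+// NalParser parser(data, size);
+// while (parser.locateNextNal()) {
+//     if (parser.length() == 0) continue;
+//     processNalUnit(parser.data(), parser.length());  // One NAL unit, start code stripped.
+// }
+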
} // namespace android
#endif // ANDROID_V4L2_CODEC2_COMMON_HELPERS_H
diff --git a/common/include/v4l2_codec2/common/FormatConverter.h b/common/include/v4l2_codec2/common/FormatConverter.h
index bc3f85a..f00f115 100644
--- a/common/include/v4l2_codec2/common/FormatConverter.h
+++ b/common/include/v4l2_codec2/common/FormatConverter.h
@@ -10,10 +10,9 @@
#include <vector>
#include <C2Buffer.h>
-#include <ui/Size.h>
+#include <size.h>
#include <utils/StrongPointer.h>
-
-#include <v4l2_codec2/common/VideoPixelFormat.h>
+#include <video_pixel_format.h>
namespace android {
@@ -51,9 +50,10 @@ public:
    // Create a FormatConverter instance and initialize it; nullptr will be returned on
    // initialization error.
- static std::unique_ptr<FormatConverter> Create(VideoPixelFormat outFormat,
- const ui::Size& visibleSize, uint32_t inputCount,
- const ui::Size& codedSize);
+ static std::unique_ptr<FormatConverter> Create(media::VideoPixelFormat outFormat,
+ const media::Size& visibleSize,
+ uint32_t inputCount,
+ const media::Size& codedSize);
// Convert the input block into the alternative block with required pixel format and return it,
// or return the original block if zero-copy is applied.
@@ -93,8 +93,8 @@ private:
    // Initialize format converter. It will pre-allocate a set of graphic blocks with |codedSize| and
// |outFormat|. This function should be called prior to other functions.
- c2_status_t initialize(VideoPixelFormat outFormat, const ui::Size& visibleSize,
- uint32_t inputCount, const ui::Size& codedSize);
+ c2_status_t initialize(media::VideoPixelFormat outFormat, const media::Size& visibleSize,
+ uint32_t inputCount, const media::Size& codedSize);
// The array of block entries.
std::vector<std::unique_ptr<BlockEntry>> mGraphicBlocks;
@@ -106,8 +106,8 @@ private:
std::unique_ptr<uint8_t[]> mTempPlaneU;
std::unique_ptr<uint8_t[]> mTempPlaneV;
- VideoPixelFormat mOutFormat = VideoPixelFormat::UNKNOWN;
- ui::Size mVisibleSize;
+ media::VideoPixelFormat mOutFormat = media::VideoPixelFormat::PIXEL_FORMAT_UNKNOWN;
+ media::Size mVisibleSize;
};
} // namespace android
diff --git a/common/include/v4l2_codec2/common/Fourcc.h b/common/include/v4l2_codec2/common/Fourcc.h
deleted file mode 100644
index a0f5fc4..0000000
--- a/common/include/v4l2_codec2/common/Fourcc.h
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 27c98933749f
-
-#ifndef ANDROID_V4L2_CODEC2_COMMON_FOURCC_H
-#define ANDROID_V4L2_CODEC2_COMMON_FOURCC_H
-
-#include <stdint.h>
-#include <optional>
-#include <string>
-
-#include <v4l2_codec2/common/VideoPixelFormat.h>
-
-namespace android {
-
-// Composes a Fourcc value.
-constexpr uint32_t composeFourcc(char a, char b, char c, char d) {
- return static_cast<uint32_t>(a) | (static_cast<uint32_t>(b) << 8) |
- (static_cast<uint32_t>(c) << 16) | (static_cast<uint32_t>(d) << 24);
-}
-
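-// For example, composeFourcc('N', 'V', '1', '2') evaluates to 0x3231564e, matching the kernel's
-// v4l2_fourcc('N', 'V', '1', '2') macro.
-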
-// Fourcc enum holder and converters.
-// Usage:
-// Fourcc f1(Fourcc::AR24);
-// EXPECT_EQ("AR24", f1.ToString());
-// Fourcc f2 = Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_ARGB);
-// EXPECT_EQ(f2, f1);
-class Fourcc {
-public:
- enum Value : uint32_t {
- // RGB formats.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-rgb.html
- // Maps to PIXEL_FORMAT_ARGB, V4L2_PIX_FMT_ABGR32, VA_FOURCC_BGRA.
- // 32bpp BGRA (byte-order), 1 plane.
- AR24 = composeFourcc('A', 'R', '2', '4'),
-
- // Maps to PIXEL_FORMAT_ABGR, V4L2_PIX_FMT_RGBA32, VA_FOURCC_RGBA.
- // 32bpp RGBA (byte-order), 1 plane
- AB24 = composeFourcc('A', 'B', '2', '4'),
-
- // Maps to PIXEL_FORMAT_XRGB, V4L2_PIX_FMT_XBGR32, VA_FOURCC_BGRX.
- // 32bpp BGRX (byte-order), 1 plane.
- XR24 = composeFourcc('X', 'R', '2', '4'),
-
- // Maps to PIXEL_FORMAT_XBGR, V4L2_PIX_FMT_RGBX32, VA_FOURCC_RGBX.
- // 32bpp RGBX (byte-order), 1 plane.
- XB24 = composeFourcc('X', 'B', '2', '4'),
-
- // Maps to PIXEL_FORMAT_BGRA, V4L2_PIX_FMT_RGB32, VA_FOURCC_ARGB.
- // 32bpp ARGB (byte-order), 1 plane.
- // Note that V4L2_PIX_FMT_RGB32("RGB4") is deprecated and replaced by
- // V4L2_PIX_FMT_ARGB32("BA24"), however, some board relies on the fourcc mapping so we keep
- // it as-is.
- RGB4 = composeFourcc('R', 'G', 'B', '4'),
-
- // YUV420 single-planar formats.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuv420.html
- // Maps to PIXEL_FORMAT_I420, V4L2_PIX_FMT_YUV420, VA_FOURCC_I420.
- // 12bpp YUV planar 1x1 Y, 2x2 UV samples.
- YU12 = composeFourcc('Y', 'U', '1', '2'),
- // Maps to PIXEL_FORMAT_YV12, V4L2_PIX_FMT_YVU420, VA_FOURCC_YV12.
- // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
- YV12 = composeFourcc('Y', 'V', '1', '2'),
-
- // YUV420 multi-planar format.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuv420m.htm
- // Maps to PIXEL_FORMAT_I420, V4L2_PIX_FMT_YUV420M.
- YM12 = composeFourcc('Y', 'M', '1', '2'),
- // Maps to PIXEL_FORMAT_YV12, V4L2_PIX_FMT_YVU420M.
- YM21 = composeFourcc('Y', 'M', '2', '1'),
-
- // YUYV format.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuyv.html
- // Maps to PIXEL_FORMAT_YUY2, V4L2_PIX_FMT_YUYV, VA_FOURCC_YUY2.
- // 16bpp YUV planar (YUV 4:2:2), YUYV (byte-order), 1 plane.
- YUYV = composeFourcc('Y', 'U', 'Y', 'V'),
-
- // NV12 single-planar format.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-nv12.html
- // Maps to PIXEL_FORMAT_NV12, V4L2_PIX_FMT_NV12, VA_FOURCC_NV12.
- // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
- NV12 = composeFourcc('N', 'V', '1', '2'),
- // Maps to PIXEL_FORMAT_NV21, V4L2_PIX_FMT_NV21, VA_FOURCC_NV21.
- // 12bpp with Y plane followed by a 2x2 interleaved VU plane.
- NV21 = composeFourcc('N', 'V', '2', '1'),
-
- // NV12 multi-planar format.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-nv12m.html
- // Maps to PIXEL_FORMAT_NV12, V4L2_PIX_FMT_NV12M,
- NM12 = composeFourcc('N', 'M', '1', '2'),
- // Maps to PIXEL_FORMAT_NV21, V4L2_PIX_FMT_NV21M.
- NM21 = composeFourcc('N', 'M', '2', '1'),
-
- // YUV422 multi-planar format.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-yuv422m.html
- // Maps to PIXEL_FORMAT_I422, V4L2_PIX_FMT_YUV422M
- // 16bpp YUV planar 1x1 Y, 2x1 UV samples.
- YM16 = composeFourcc('Y', 'M', '1', '6'),
-
- // V4L2 proprietary format.
- // https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/pixfmt-reserved.html
- // Maps to V4L2_PIX_FMT_MT21C.
- // It is used for MT8173 hardware video decoder output and should be converted by MT8173 image
- // processor for compositor to render.
- MT21 = composeFourcc('M', 'T', '2', '1'),
- // Maps to V4L2_PIX_FMT_MM21.
- // It is used for MT8183 hardware video decoder.
- MM21 = composeFourcc('M', 'M', '2', '1'),
- };
-
- explicit Fourcc(Fourcc::Value fourcc);
- Fourcc& operator=(const Fourcc& fourcc);
- ~Fourcc();
-
- bool operator==(const Fourcc& rhs) const { return mValue == rhs.mValue; }
-
- // Factory methods:
-
- // Builds a Fourcc from a given fourcc code. This will return a valid Fourcc if the argument is
- // part of the |Value| enum, or nullopt otherwise.
- static std::optional<Fourcc> fromUint32(uint32_t fourcc);
-
- // Converts a VideoPixelFormat to Fourcc. Returns nullopt for invalid input. Note that a
- // VideoPixelFormat may have two Fourcc counterparts. Caller has to specify if it is for
- // single-planar or multi-planar format.
- static std::optional<Fourcc> fromVideoPixelFormat(VideoPixelFormat pixelFormat,
- bool singlePlanar = true);
- // Converts a V4L2PixFmt to Fourcc. Returns nullopt for invalid input.
- static std::optional<Fourcc> fromV4L2PixFmt(uint32_t v4l2PixFmt);
-
- // Value getters:
- // Returns the VideoPixelFormat counterpart of the value. Returns PIXEL_FORMAT_UNKNOWN if no
- // mapping is found.
- VideoPixelFormat toVideoPixelFormat() const;
- // Returns the V4L2PixFmt counterpart of the value. Returns 0 if no mapping is found.
- uint32_t toV4L2PixFmt() const;
-
- // Returns the single-planar Fourcc of the value. If value is a single-planar, returns the same
- // Fourcc. Returns nullopt if no mapping is found.
- std::optional<Fourcc> toSinglePlanar() const;
-
-    // Returns whether |mValue| is a multi-planar format.
- bool isMultiPlanar() const;
-
- // Outputs human readable fourcc string, e.g. "NV12".
- std::string toString() const;
-
-private:
- Value mValue;
-};
-
-bool operator!=(const Fourcc& lhs, const Fourcc& rhs);
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMMON_FOURCC_H
diff --git a/common/include/v4l2_codec2/common/NalParser.h b/common/include/v4l2_codec2/common/NalParser.h
deleted file mode 100644
index 69f56c3..0000000
--- a/common/include/v4l2_codec2/common/NalParser.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMMON_NALPARSER_H
-#define ANDROID_V4L2_CODEC2_COMMON_NALPARSER_H
-
-#include <stdint.h>
-
-namespace android {
-
-// Helper class to parse H264 NAL units from data.
-class NalParser {
-public:
- // Type of a SPS NAL unit.
- static constexpr uint8_t kSPSType = 7;
- // Type of a PPS NAL unit.
- static constexpr uint8_t kPPSType = 8;
-
- // Parameters related to a video's color aspects.
- struct ColorAspects {
- int32_t primaries;
- int32_t transfer;
- int32_t coeffs;
- bool fullRange;
- };
-
- NalParser(const uint8_t* data, size_t length);
-
- // Locates the next NAL after |mNextNalStartCodePos|. If there is one, updates |mCurrNalDataPos|
- // to the first byte of the NAL data (start code is not included), and |mNextNalStartCodePos| to
- // the position of the next start code, and returns true.
- // If there is no more NAL, returns false.
- //
- // Note: This method must be called prior to data() and length().
- bool locateNextNal();
-
- // Locate the sequence parameter set (SPS).
- bool locateSPS();
-
- // Gets current NAL data (start code is not included).
- const uint8_t* data() const;
-
- // Gets the byte length of current NAL data (start code is not included).
- size_t length() const;
-
- // Get the type of the current NAL unit.
- uint8_t type() const;
-
- // Find the H.264 video's color aspects in the current SPS NAL.
- bool findCodedColorAspects(ColorAspects* colorAspects);
-
-private:
- const uint8_t* findNextStartCodePos() const;
-
- // The byte pattern for the start of a H264 NAL unit.
- const uint8_t kNalStartCode[3] = {0x00, 0x00, 0x01};
- // The length in bytes of the NAL-unit start pattern.
- const size_t kNalStartCodeLength = 3;
-
- const uint8_t* mCurrNalDataPos;
- const uint8_t* mDataEnd;
- const uint8_t* mNextNalStartCodePos;
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMMON_NALPARSER_H
diff --git a/common/include/v4l2_codec2/common/V4L2ComponentCommon.h b/common/include/v4l2_codec2/common/V4L2ComponentCommon.h
index b8cf031..6e3efa8 100644
--- a/common/include/v4l2_codec2/common/V4L2ComponentCommon.h
+++ b/common/include/v4l2_codec2/common/V4L2ComponentCommon.h
@@ -12,8 +12,6 @@ namespace android {
// Defines the names of all supported components.
struct V4L2ComponentName {
static const std::string kH264Encoder;
- static const std::string kVP8Encoder;
- static const std::string kVP9Encoder;
static const std::string kH264Decoder;
static const std::string kVP8Decoder;
diff --git a/common/include/v4l2_codec2/common/V4L2Device.h b/common/include/v4l2_codec2/common/V4L2Device.h
deleted file mode 100644
index b4c909c..0000000
--- a/common/include/v4l2_codec2/common/V4L2Device.h
+++ /dev/null
@@ -1,518 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// This file defines the V4L2Device which is used by the V4L2Decoder and V4L2Encoder classes to
-// delegate/pass the device specific handling of any of the functionalities.
-// Note: ported from Chromium commit head: 2f13d62f0c0d, but some parts have been removed.
-
-#ifndef ANDROID_V4L2_CODEC2_COMMON_V4L2_DEVICE_H
-#define ANDROID_V4L2_CODEC2_COMMON_V4L2_DEVICE_H
-
-#include <linux/videodev2.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <optional>
-#include <vector>
-
-#include <C2Config.h>
-#include <base/containers/flat_map.h>
-#include <base/files/scoped_file.h>
-#include <base/memory/ref_counted.h>
-
-#include <ui/Size.h>
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/common/V4L2DevicePoller.h>
-#include <v4l2_codec2/common/VideoTypes.h>
-
-namespace android {
-
-class V4L2Queue;
-class V4L2BufferRefBase;
-class V4L2BuffersList;
-class V4L2DecodeSurface;
-
-// Wrapper for the 'v4l2_ext_control' structure.
-struct V4L2ExtCtrl {
- V4L2ExtCtrl(uint32_t id);
- V4L2ExtCtrl(uint32_t id, int32_t val);
- struct v4l2_ext_control ctrl;
-};
-
-// A unique reference to a buffer for clients to prepare and submit.
-//
-// Clients can prepare a buffer for queuing using the methods of this class, and then either queue
-// it using the Queue() method corresponding to the memory type of the buffer, or drop the reference
-// to make the buffer available again.
-class V4L2WritableBufferRef {
-public:
- V4L2WritableBufferRef(V4L2WritableBufferRef&& other);
- V4L2WritableBufferRef() = delete;
- V4L2WritableBufferRef& operator=(V4L2WritableBufferRef&& other);
-
- // Return the memory type of the buffer. Useful to e.g. decide which Queue() method to use.
- enum v4l2_memory memory() const;
-
- // Queue a MMAP buffer. If successful, true is returned and the reference to the buffer is
- // dropped so this reference becomes invalid. In case of error, false is returned and the buffer
- // is returned to the free list.
- bool queueMMap() &&;
- // Queue a USERPTR buffer, assigning |ptrs| as pointer for each plane. The size of |ptrs| must
- // be equal to the number of planes of this buffer. If successful, true is returned and the
- // reference to the buffer is dropped so this reference becomes invalid. In case of error, false
- // is returned and the buffer is returned to the free list.
- bool queueUserPtr(const std::vector<void*>& ptrs) &&;
-    // Queue a DMABUF buffer, assigning |fds| as file descriptors for each plane. The number of
-    // |fds| may be greater than the number of planes of this buffer, which happens when the v4l2
-    // pixel format is single-planar; in that case only the fd of the first plane is used.
- // If successful, true is returned and the reference to the buffer is dropped so this reference
- // becomes invalid. In case of error, false is returned and the buffer is returned to the free
- // list.
- bool queueDMABuf(const std::vector<int>& fds) &&;
-
- // Returns the number of planes in this buffer.
- size_t planesCount() const;
- // Returns the size of the requested |plane|, in bytes.
- size_t getPlaneSize(const size_t plane) const;
- // Set the size of the requested |plane|, in bytes. It is only valid for USERPTR and DMABUF
- // buffers. When using an MMAP buffer, this method triggers an assert and is a no-op for release
- // builds.
- void setPlaneSize(const size_t plane, const size_t size);
- // This method can only be used with MMAP buffers. It will return a pointer to the data of the
- // |plane|th plane. In case of error (invalid plane index or mapping failed), a nullptr is
- // returned.
- void* getPlaneMapping(const size_t plane);
- // Set the timestamp field for this buffer.
- void setTimeStamp(const struct timeval& timestamp);
- // Return the previously-set timestamp field for this buffer.
- const struct timeval& getTimeStamp() const;
- // Set the number of bytes used for |plane|.
- void setPlaneBytesUsed(const size_t plane, const size_t bytesUsed);
- // Returns the previously-set number of bytes used for |plane|.
- size_t getPlaneBytesUsed(const size_t plane) const;
- // Set the data offset for |plane|, in bytes.
- void setPlaneDataOffset(const size_t plane, const size_t dataOffset);
-
- // Return the V4L2 buffer ID of the underlying buffer.
- size_t bufferId() const;
-
- ~V4L2WritableBufferRef();
-
-private:
- friend class V4L2BufferRefFactory;
-
- // Do the actual queue operation once the v4l2_buffer structure is properly filled.
- bool doQueue() &&;
-
- V4L2WritableBufferRef(const struct v4l2_buffer& v4l2Buffer, base::WeakPtr<V4L2Queue> queue);
-
- V4L2WritableBufferRef(const V4L2WritableBufferRef&) = delete;
- V4L2WritableBufferRef& operator=(const V4L2WritableBufferRef&) = delete;
-
- std::unique_ptr<V4L2BufferRefBase> mBufferData;
-
- SEQUENCE_CHECKER(mSequenceChecker);
-};
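A minimal usage sketch of this class, assuming a |queue| of the V4L2Queue type declared further below, plus hypothetical |timestamp|, |payloadSize| and |dmabufFd| values; error handling is elided:

    std::optional<V4L2WritableBufferRef> buf = queue->getFreeBuffer();
    if (buf) {
        buf->setTimeStamp(timestamp);            // caller-provided struct timeval
        buf->setPlaneBytesUsed(0, payloadSize);  // bytes written to plane 0
        // The queue*() methods are rvalue-qualified: queueing consumes the reference.
        if (!std::move(*buf).queueDMABuf({dmabufFd})) {
            // On failure the buffer silently returns to the free list.
        }
    }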
-
-// A reference to a read-only, dequeued buffer.
-//
-// Clients use this class to query the buffer state and content, and are guaranteed that the buffer
-// will not be reused until all references are destroyed.
-// All methods of this class must be called from the same sequence, but instances of
-// V4L2ReadableBuffer objects can be destroyed from any sequence. They can even outlive the V4L2
-// buffers they originate from. This flexibility is required because V4L2ReadableBufferRefs can be
-// embedded into VideoFrames, which are then passed to other threads and not necessarily destroyed
-// before the V4L2Queue buffers are freed.
-class V4L2ReadableBuffer : public base::RefCountedThreadSafe<V4L2ReadableBuffer> {
-public:
- // Returns whether the V4L2_BUF_FLAG_LAST flag is set for this buffer.
- bool isLast() const;
- // Returns whether the V4L2_BUF_FLAG_KEYFRAME flag is set for this buffer.
- bool isKeyframe() const;
- // Return the timestamp set by the driver on this buffer.
- struct timeval getTimeStamp() const;
- // Returns the number of planes in this buffer.
- size_t planesCount() const;
- // Returns the number of bytes used for |plane|.
- size_t getPlaneBytesUsed(size_t plane) const;
- // Returns the data offset for |plane|.
- size_t getPlaneDataOffset(size_t plane) const;
- // This method can only be used with MMAP buffers. It will return a pointer to the data of the
- // |plane|th plane. In case of error (invalid plane index or mapping failed), a nullptr is
- // returned.
- const void* getPlaneMapping(const size_t plane) const;
-
- // Return the V4L2 buffer ID of the underlying buffer.
- size_t bufferId() const;
-
-private:
- friend class V4L2BufferRefFactory;
- friend class base::RefCountedThreadSafe<V4L2ReadableBuffer>;
-
- ~V4L2ReadableBuffer();
-
- V4L2ReadableBuffer(const struct v4l2_buffer& v4l2Buffer, base::WeakPtr<V4L2Queue> queue);
-
- V4L2ReadableBuffer(const V4L2ReadableBuffer&) = delete;
- V4L2ReadableBuffer& operator=(const V4L2ReadableBuffer&) = delete;
-
- std::unique_ptr<V4L2BufferRefBase> mBufferData;
-
- SEQUENCE_CHECKER(mSequenceChecker);
-};
-
-// Shortcut for naming consistency.
-using V4L2ReadableBufferRef = scoped_refptr<V4L2ReadableBuffer>;
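The dequeue side, as a matching sketch; |queue| is again a hypothetical V4L2Queue, whose dequeueBuffer() is declared below:

    auto [success, buffer] = queue->dequeueBuffer();
    if (!success) {
        // ioctl error while dequeuing.
    } else if (buffer) {
        if (buffer->isLast()) { /* V4L2_BUF_FLAG_LAST: end of a drain */ }
        const void* data = buffer->getPlaneMapping(0);    // MMAP buffers only
        const size_t bytes = buffer->getPlaneBytesUsed(0);
        // The driver will not reuse this buffer until |buffer| and all copies drop.
    }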
-
-class V4L2Device;
-class V4L2Buffer;
-
-// Interface representing a specific queue of a |V4L2Device|. It provides free and queued buffer
-// management that is commonly required by clients.
-//
-// Buffers managed by this class undergo the following cycle:
-// 1) Allocated buffers are put into a free buffers pool, indicating that they are used neither by
-// the client nor the hardware.
-// 2) The client obtains a unique, writable reference to one of the free buffers in order to set
-// its content and other parameters.
-// 3) The client then queues the buffer obtained in 2), which invalidates its reference. The buffer
-// is now prepared to be processed by the hardware.
-// 4) Once the hardware is done with the buffer, it is ready to be dequeued by the client. The
-// client obtains a read-only, counted reference to the buffer and can read its content and
-// metadata, as well as make other references to it. The buffer will not be reused until all
-// the references are dropped. Once this happens, the buffer goes back to the free list
-// described in 1). A usage sketch of this cycle follows the class declaration below.
-class V4L2Queue : public base::RefCountedThreadSafe<V4L2Queue> {
-public:
- // Set |fourcc| as the current format on this queue. |size| corresponds to the desired buffer
- // dimensions (i.e. the width and height members of v4l2_pix_format_mplane); if not applicable,
- // pass Size().
- // |bufferSize| is the desired size in bytes of the buffer for single-planar formats (i.e.
- // sizeimage of the first plane). It can be set to 0 if not relevant for the desired format.
- // |stride| is the desired stride in bytes of the buffer (i.e. bytesperline). It can be set to 0
- // if not relevant or to let the driver decide. If the format could be set, then the
- // |v4l2_format| reflecting the actual format is returned. It is guaranteed to feature the
- // specified |fourcc|, but any other parameter (including |size| and |bufferSize|) may have been
- // adjusted by the driver, so the caller must check their values.
- std::optional<struct v4l2_format> setFormat(uint32_t fourcc, const ui::Size& size,
- size_t bufferSize,
- uint32_t stride = 0) WARN_UNUSED_RESULT;
-
- // Identical to |setFormat|, but does not actually apply the format, and can be called anytime.
- // Returns an adjusted V4L2 format if |fourcc| is supported by the queue, or |nullopt| if
- // |fourcc| is not supported or an ioctl error happened.
- std::optional<struct v4l2_format> tryFormat(uint32_t fourcc, const ui::Size& size,
- size_t bufferSize) WARN_UNUSED_RESULT;
-
- // Returns the currently set format on the queue. The result is returned as a std::pair where
- // the first member is the format, or std::nullopt if the format could not be obtained due to an
- // ioctl error. The second member is only used in case of an error and contains the |errno| set
- // by the failing ioctl. If the first member is not std::nullopt, the second member will always
- // be zero.
- //
- // If the second member is 0, then the first member is guaranteed to have a valid value. Clients
- // that are not interested in the precise error can simply check that the first member is valid
- // and move on.
- //
- // This pair is used because not all failures to get the format are necessarily errors, so we
- // need a way to let the caller decide whether a failure is an error or not.
- std::pair<std::optional<struct v4l2_format>, int> getFormat();
-
- // Allocate |count| buffers for the current format of this queue, with a specific |memory|
- // allocation, and returns the number of buffers allocated or zero if an error occurred, or if
- // references to any previously allocated buffers are still held by any clients.
- //
- // The number of allocated buffers may be larger than the number requested, so callers must
- // always check the return value.
- //
- // Calling this method while buffers are still allocated results in an error.
- size_t allocateBuffers(size_t count, enum v4l2_memory memory) WARN_UNUSED_RESULT;
-
- // Deallocate all buffers previously allocated by |allocateBuffers|. Any references to buffers
- // previously allocated held by the client must be released, or this call will fail.
- bool deallocateBuffers();
-
- // Returns the memory usage of v4l2 buffers owned by this V4L2Queue which are mapped in user
- // space memory.
- size_t getMemoryUsage() const;
-
- // Returns |mMemory|, the memory type of the buffers most recently allocated by this V4L2Queue.
- v4l2_memory getMemoryType() const;
-
- // Return a reference to a free buffer for the caller to prepare and submit, or nullopt if no
- // buffer is currently free.
- //
- // If the caller discards the returned reference, the underlying buffer is made available to
- // clients again.
- std::optional<V4L2WritableBufferRef> getFreeBuffer();
- std::optional<V4L2WritableBufferRef> getFreeBuffer(size_t requestedBufferId);
-
- // Attempt to dequeue a buffer, and return a reference to it if one was available.
- //
- // The first element of the returned pair will be false if an error occurred, in which case the
- // second element will be nullptr. If no error occurred, then the first element will be true and
- // the second element will contain a reference to the dequeued buffer if one was available, or
- // nullptr otherwise. Dequeued buffers will not be reused by the driver until all references to
- // them are dropped.
- std::pair<bool, V4L2ReadableBufferRef> dequeueBuffer();
-
- // Returns true if this queue is currently streaming.
- bool isStreaming() const;
- // If not currently streaming, starts streaming. Returns true if we started streaming, or were
- // already streaming, or false if we were not streaming and an error occurred when attempting to
- // start the stream. On failure, any previously-queued buffers will be dequeued without
- // processing and made available to the client, while any buffers held by the client will remain
- // unchanged and their ownership will remain with the client.
- bool streamon();
- // If currently streaming, stops streaming. Also make all queued buffers available to the client
- // again regardless of the streaming state. If an error occurred while attempting to stop
- // streaming, then false is returned and queued buffers are left untouched since the V4L2 queue
- // may still be using them.
- bool streamoff();
-
- // Returns the number of buffers currently allocated for this queue.
- size_t allocatedBuffersCount() const;
- // Returns the number of currently free buffers on this queue.
- size_t freeBuffersCount() const;
- // Returns the number of buffers currently queued on this queue.
- size_t queuedBuffersCount() const;
-
-private:
- ~V4L2Queue();
-
- V4L2Queue(const V4L2Queue&) = delete;
- V4L2Queue& operator=(const V4L2Queue&) = delete;
-
- // Called when clients request a buffer to be queued.
- bool queueBuffer(struct v4l2_buffer* v4l2Buffer);
-
- const enum v4l2_buf_type mType;
- enum v4l2_memory mMemory = V4L2_MEMORY_MMAP;
- bool mIsStreaming = false;
- size_t mPlanesCount = 0;
- // Current format as set by SetFormat.
- std::optional<struct v4l2_format> mCurrentFormat;
-
- std::vector<std::unique_ptr<V4L2Buffer>> mBuffers;
-
- // Buffers that are available for clients to get and submit. Buffers in this list are not
- // referenced by anyone other than ourselves.
- scoped_refptr<V4L2BuffersList> mFreeBuffers;
- // Buffers that have been queued by the client, and not dequeued yet.
- std::set<size_t> mQueuedBuffers;
-
- scoped_refptr<V4L2Device> mDevice;
- // Callback to call in this queue's destructor.
- base::OnceClosure mDestroyCb;
-
- V4L2Queue(scoped_refptr<V4L2Device> dev, enum v4l2_buf_type type, base::OnceClosure destroyCb);
- friend class V4L2QueueFactory;
- friend class V4L2BufferRefBase;
- friend class base::RefCountedThreadSafe<V4L2Queue>;
-
- SEQUENCE_CHECKER(mSequenceChecker);
-
- base::WeakPtrFactory<V4L2Queue> mWeakThisFactory{this};
-};
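An end-to-end sketch of the four-step cycle described above, for a bitstream (OUTPUT_MPLANE) queue; |device| and kInputBufferSize are assumptions, not names from this header:

    scoped_refptr<V4L2Queue> queue = device->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    if (!queue->setFormat(V4L2_PIX_FMT_H264, ui::Size(1920, 1080), kInputBufferSize))
        return false;                                  // driver rejected the fourcc
    if (queue->allocateBuffers(16, V4L2_MEMORY_MMAP) == 0) return false;
    if (!queue->streamon()) return false;
    // ... getFreeBuffer()/queue*() and dequeueBuffer() as sketched earlier ...
    queue->streamoff();
    queue->deallocateBuffers();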
-
-class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> {
-public:
- // Specification of an encoding profile supported by an encoder.
- struct SupportedEncodeProfile {
- C2Config::profile_t profile = C2Config::PROFILE_UNUSED;
- ui::Size min_resolution;
- ui::Size max_resolution;
- uint32_t max_framerate_numerator = 0;
- uint32_t max_framerate_denominator = 0;
- };
- using SupportedEncodeProfiles = std::vector<SupportedEncodeProfile>;
-
- // Specification of a decoding profile supported by a decoder.
- // |max_resolution| and |min_resolution| are inclusive.
- struct SupportedDecodeProfile {
- C2Config::profile_t profile = C2Config::PROFILE_UNUSED;
- ui::Size max_resolution;
- ui::Size min_resolution;
- bool encrypted_only = false;
- };
- using SupportedDecodeProfiles = std::vector<SupportedDecodeProfile>;
-
- // Utility format conversion functions
- // If there is no corresponding single- or multi-planar format, returns 0.
- static uint32_t C2ProfileToV4L2PixFmt(C2Config::profile_t profile, bool sliceBased);
- static C2Config::profile_t v4L2ProfileToC2Profile(VideoCodec codec, uint32_t profile);
- std::vector<C2Config::profile_t> v4L2PixFmtToC2Profiles(uint32_t pixFmt, bool isEncoder);
- // Calculates the largest plane's allocation size requested by a V4L2 device.
- static ui::Size allocatedSizeFromV4L2Format(const struct v4l2_format& format);
-
- // Convert required H264 profile and level to V4L2 enums.
- static int32_t c2ProfileToV4L2H264Profile(C2Config::profile_t profile);
- static int32_t h264LevelIdcToV4L2H264Level(uint8_t levelIdc);
-
- // Converts v4l2_memory to a string.
- static const char* v4L2MemoryToString(const v4l2_memory memory);
-
- // Returns the printable name of a v4l2_buf_type.
- static const char* v4L2BufferTypeToString(const enum v4l2_buf_type bufType);
-
- // Composes a human-readable string from a v4l2_format.
- static std::string v4L2FormatToString(const struct v4l2_format& format);
-
- // Composes a human-readable string from a v4l2_buffer.
- static std::string v4L2BufferToString(const struct v4l2_buffer& buffer);
-
- // Composes a VideoFrameLayout based on a v4l2_format. If an error occurs, returns std::nullopt.
- static std::optional<VideoFrameLayout> v4L2FormatToVideoFrameLayout(
- const struct v4l2_format& format);
-
- // Returns the number of planes of |pixFmt|.
- static size_t getNumPlanesOfV4L2PixFmt(uint32_t pixFmt);
-
- enum class Type { kDecoder, kEncoder };
-
- // Create and initialize an appropriate V4L2Device instance for the current platform, or return
- // nullptr if not available.
- static scoped_refptr<V4L2Device> create();
-
- // Open a V4L2 device of |type| for use with |v4l2PixFmt|. Return true on success. The device
- // will be closed in the destructor.
- bool open(Type type, uint32_t v4l2PixFmt);
-
- // Returns the V4L2Queue corresponding to the requested |type|, or nullptr if the requested
- // queue type is not supported.
- scoped_refptr<V4L2Queue> getQueue(enum v4l2_buf_type type);
-
- // Parameters and return value are the same as for the standard ioctl() system call.
- int ioctl(int request, void* arg);
-
- // This method sleeps until either:
- // - setDevicePollInterrupt() is called (on another thread),
- // - |pollDevice| is true and there is new data to be read from the device, or an event from
- //   the device has arrived; in the latter case |*eventPending| will be set to true.
- // Returns false on error, true otherwise. This method should be called from a separate thread.
- bool poll(bool pollDevice, bool* eventPending);
-
- // These methods are used to interrupt the thread sleeping on poll() and force it to return
- // regardless of device state, which is usually when the client is no longer interested in what
- // happens with the device (on cleanup, client state change, etc.). When
- // setDevicePollInterrupt() is called, poll() will return immediately, and any subsequent calls
- // to it will also do so until clearDevicePollInterrupt() is called.
- bool setDevicePollInterrupt();
- bool clearDevicePollInterrupt();
-
- // Wrappers for standard mmap/munmap system calls.
- void* mmap(void* addr, unsigned int len, int prot, int flags, unsigned int offset);
- void munmap(void* addr, unsigned int len);
-
- // Return a vector of dmabuf file descriptors, exported for V4L2 buffer with |index|, assuming
- // the buffer contains |numPlanes| V4L2 planes and is of |bufType|. Return an empty vector on
- // failure. The caller is responsible for closing the file descriptors after use.
- std::vector<base::ScopedFD> getDmabufsForV4L2Buffer(int index, size_t numPlanes,
- enum v4l2_buf_type bufType);
-
- // Returns the preferred V4L2 input formats for |type| or empty if none.
- std::vector<uint32_t> preferredInputFormat(Type type);
-
- // NOTE: The methods below that query capabilities have the side effect of closing the
- // previously-opened device, if any, and should not be called after open().
-
- // Get minimum and maximum resolution for fourcc |pixelFormat| and store to |minResolution| and
- // |maxResolution|.
- void getSupportedResolution(uint32_t pixelFormat, ui::Size* minResolution,
- ui::Size* maxResolution);
-
- std::vector<uint32_t> enumerateSupportedPixelformats(v4l2_buf_type bufType);
-
- // Return supported profiles for decoder, including only profiles for given fourcc
- // |pixelFormats|.
- SupportedDecodeProfiles getSupportedDecodeProfiles(const size_t numFormats,
- const uint32_t pixelFormats[]);
-
- // Return supported profiles for encoder.
- SupportedEncodeProfiles getSupportedEncodeProfiles();
-
- // Start polling on this V4L2Device. |eventCallback| will be posted to the caller's sequence if
- // a buffer is ready to be dequeued and/or a V4L2 event has been posted. |errorCallback| will be
- // posted to the client's sequence if a polling error has occurred.
- bool startPolling(android::V4L2DevicePoller::EventCallback eventCallback,
- base::RepeatingClosure errorCallback);
- // Stop polling this V4L2Device if polling was active. No new events will be posted after this
- // method has returned.
- bool stopPolling();
- // Schedule a polling event if polling is enabled. This method is intended to be called from
- // V4L2Queue, clients should not need to call it directly.
- void schedulePoll();
-
- // Check whether the V4L2 control with specified |ctrlId| is supported.
- bool isCtrlExposed(uint32_t ctrlId);
- // Set the specified list of |ctrls| for the specified |ctrlClass|, returns whether the
- // operation succeeded.
- bool setExtCtrls(uint32_t ctrlClass, std::vector<V4L2ExtCtrl> ctrls);
-
- // Check whether the V4L2 command with specified |commandId| is supported.
- bool isCommandSupported(uint32_t commandId);
- // Check whether the V4L2 device has the specified |capabilities|.
- bool hasCapabilities(uint32_t capabilities);
-
-private:
- // Vector of video device node paths and the corresponding pixel formats supported by each
- // device node.
- using Devices = std::vector<std::pair<std::string, std::vector<uint32_t>>>;
-
- friend class base::RefCountedThreadSafe<V4L2Device>;
- V4L2Device();
- ~V4L2Device();
-
- V4L2Device(const V4L2Device&) = delete;
- V4L2Device& operator=(const V4L2Device&) = delete;
-
- SupportedDecodeProfiles enumerateSupportedDecodeProfiles(const size_t numFormats,
- const uint32_t pixelFormats[]);
-
- SupportedEncodeProfiles enumerateSupportedEncodeProfiles();
-
- // Open device node for |path| as a device of |type|.
- bool openDevicePath(const std::string& path, Type type);
-
- // Close the currently open device.
- void closeDevice();
-
- // Enumerate all V4L2 devices on the system for |type| and store the results under
- // mDevicesByType[type].
- void enumerateDevicesForType(V4L2Device::Type type);
-
- // Return device information for all devices of |type| available in the system. Enumerates and
- // queries devices on first run and caches the results for subsequent calls.
- const Devices& getDevicesForType(V4L2Device::Type type);
-
- // Return device node path for device of |type| supporting |pixFmt|, or an empty string if the
- // given combination is not supported by the system.
- std::string getDevicePathFor(V4L2Device::Type type, uint32_t pixFmt);
-
- // Callback that is called upon a queue's destruction, to cleanup its pointer in mQueues.
- void onQueueDestroyed(v4l2_buf_type buf_type);
-
- // Stores information for all devices available on the system for each device Type.
- std::map<V4L2Device::Type, Devices> mDevicesByType;
-
- // The actual device fd.
- base::ScopedFD mDeviceFd;
-
- // eventfd fd to signal device poll thread when its poll() should be interrupted.
- base::ScopedFD mDevicePollInterruptFd;
-
- // Associates a v4l2_buf_type to its queue.
- base::flat_map<enum v4l2_buf_type, V4L2Queue*> mQueues;
-
- // Used once startPolling() has been called to signal the client that an event happened or a
- // buffer is ready to be dequeued.
- std::unique_ptr<android::V4L2DevicePoller> mDevicePoller;
-
- SEQUENCE_CHECKER(mClientSequenceChecker);
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMMON_V4L2_DEVICE_H
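A bring-up sketch tying the above together; everything uses only the declarations in this header, with error handling elided and the VP9 fourcc chosen arbitrarily:

    scoped_refptr<V4L2Device> device = V4L2Device::create();
    if (!device || !device->open(V4L2Device::Type::kDecoder, V4L2_PIX_FMT_VP9)) return false;
    // Memory-to-memory codecs expose an OUTPUT (bitstream) and a CAPTURE (frame) queue.
    scoped_refptr<V4L2Queue> input = device->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    scoped_refptr<V4L2Queue> output = device->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (!input || !output) return false;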
diff --git a/common/include/v4l2_codec2/common/V4L2DevicePoller.h b/common/include/v4l2_codec2/common/V4L2DevicePoller.h
deleted file mode 100644
index ad256be..0000000
--- a/common/include/v4l2_codec2/common/V4L2DevicePoller.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: f65c38dcdac2
-
-#ifndef ANDROID_V4L2_CODEC2_COMMON_V4L2_DEVICE_POLLER_H
-#define ANDROID_V4L2_CODEC2_COMMON_V4L2_DEVICE_POLLER_H
-
-#include <atomic>
-
-#include <base/callback_forward.h>
-#include <base/sequence_checker.h>
-#include <base/sequenced_task_runner.h>
-#include <base/synchronization/waitable_event.h>
-#include <base/threading/thread.h>
-
-namespace android {
-
-class V4L2Device;
-
-// Allows a client to poll() on a given V4L2Device and be signaled when a buffer is ready to be
-// dequeued or a V4L2 event has been received. Polling is done on a dedicated thread, and
-// notifications are delivered in the form of a callback to the listener's sequence.
-//
-// All the methods of this class (with the exception of the constructor) must be called from the
-// same sequence.
-//
-// Note that the service callback may also be called when no particular event occurred, due to
-// the way poll() works. It is the responsibility of the caller to call schedulePoll() again if
-// there may still be pending events.
-class V4L2DevicePoller {
-public:
- // Callback invoked when a buffer may be ready to dequeue and/or a V4L2 event may have been
- // polled. |event| is set if a V4L2 event has been detected.
- using EventCallback = base::RepeatingCallback<void(bool event)>;
-
- // Create a poller for |device|, using a thread named |threadName|. Notifications won't start
- // until |startPolling()| is called.
- V4L2DevicePoller(V4L2Device* const device, const std::string& threadName);
- ~V4L2DevicePoller();
-
- // Starts polling. |mEventCallback| will be posted on the caller's sequence every time an event
- // occurs. The client is then responsible for consuming all pending events in that callback. If
- // new events may still happen after the callback has run, the client must call |schedulePoll()|
- // again in order to be notified for them.
- //
- // If an error occurs during polling, |mErrorCallback| will be posted on the caller's sequence.
- bool startPolling(EventCallback eventCallback, base::RepeatingClosure errorCallback);
- // Stop polling and stop the thread. The poller won't post any new event to the caller's
- // sequence after this method has returned.
- bool stopPolling();
- // Returns true if currently polling, false otherwise.
- bool isPolling() const;
- // Attempts polling the V4L2 device. This method should be called whenever doing something that
- // may trigger an event of interest (buffer dequeue or V4L2 event), for instance queueing a
- // buffer. In the absence of a pending event, poll() will return immediately and the service
- // callback will be posted to the caller's sequence. The client is then responsible for calling
- // this method again when it is interested in receiving events.
- void schedulePoll();
-
-private:
- // Perform a poll() on |mDevice| and post either |mEventCallback| or |mErrorCallback| on the
- // client's sequence when poll() returns.
- void devicePollTask();
-
- // V4L2 device we are polling.
- V4L2Device* const mDevice;
- // Thread on which polling is done.
- base::Thread mPollThread;
- // Callback to post to the client's sequence when an event occurs.
- EventCallback mEventCallback;
- // Closure to post to the client's sequence when an error occurs.
- base::RepeatingClosure mErrorCallback;
- // Client sequence's task runner, where closures are posted.
- scoped_refptr<base::SequencedTaskRunner> mClientTaskRunner;
-
- // Since poll() returns immediately if no buffers have been queued, we cannot rely on it to
- // pause the polling thread until an event occurs. Instead, the polling thread waits on this
- // WaitableEvent (signaled by |schedulePoll|) before calling poll(), so we only call poll() when
- // we are actually waiting for an event.
- base::WaitableEvent mTriggerPoll;
- // Set to true when we wish to stop polling, instructing the poller thread to break its loop.
- std::atomic_bool mStopPolling;
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMMON_V4L2_DEVICE_POLLER_H
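The re-arming contract described above, as a sketch; |device| is a hypothetical V4L2Device that forwards to this poller via startPolling():

    bool ok = device->startPolling(
            ::base::BindRepeating([](bool event) {
                // Dequeue everything available here. If more events may still be
                // pending, the client must call schedulePoll() again to re-arm.
            }),
            ::base::BindRepeating([] {
                // Polling error: stop streaming and tear down.
            }));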
diff --git a/common/include/v4l2_codec2/common/VideoPixelFormat.h b/common/include/v4l2_codec2/common/VideoPixelFormat.h
deleted file mode 100644
index 2cfe910..0000000
--- a/common/include/v4l2_codec2/common/VideoPixelFormat.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 3b7ce92816e2
-// Note: only necessary functions are ported from video_types.h
-
-#ifndef ANDROID_V4L2_CODEC2_COMMON_VIDEO_PIXEL_FORMAT_H
-#define ANDROID_V4L2_CODEC2_COMMON_VIDEO_PIXEL_FORMAT_H
-
-#include <string>
-
-#include "ui/Size.h"
-
-namespace android {
-
-// Pixel formats roughly based on FOURCC labels, see:
-// http://www.fourcc.org/rgb.php and http://www.fourcc.org/yuv.php
-enum class VideoPixelFormat {
- I420, // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
- YV12, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
- I422, // 16bpp YUV planar 1x1 Y, 2x1 UV samples.
- I420A, // 20bpp YUVA planar 1x1 Y, 2x2 UV, 1x1 A samples.
- I444, // 24bpp YUV planar, no subsampling.
- NV12, // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
- NV21, // 12bpp with Y plane followed by a 2x2 interleaved VU plane.
- YUY2, // 16bpp interleaved 1x1 Y, 2x1 U, 1x1 Y, 2x1 V samples.
- ARGB, // 32bpp BGRA (byte-order), 1 plane.
- XRGB, // 24bpp BGRX (byte-order), 1 plane.
- RGB24, // 24bpp BGR (byte-order), 1 plane.
- MJPEG, // MJPEG compressed.
- Y16, // single 16bpp plane.
- ABGR, // 32bpp RGBA (byte-order), 1 plane.
- XBGR, // 24bpp RGBX (byte-order), 1 plane.
- P016LE, // 24bpp NV12, 16 bits per channel
- XR30, // 32bpp BGRX, 10 bits per channel, 2 bits ignored, 1 plane
- XB30, // 32bpp RGBX, 10 bits per channel, 2 bits ignored, 1 plane
- BGRA, // 32bpp ARGB (byte-order), 1 plane.
- // The P* in the formats below designates the number of bits per pixel component. I.e. P9 is
- // 9-bits per pixel component, P10 is 10-bits per pixel component, etc.
- YUV420P9,
- YUV420P10,
- YUV422P9,
- YUV422P10,
- YUV444P9,
- YUV444P10,
- YUV420P12,
- YUV422P12,
- YUV444P12,
- UNKNOWN, // Unknown or unspecified format value.
-};
-
-// Returns the name of a VideoPixelFormat as a string.
-std::string videoPixelFormatToString(VideoPixelFormat format);
-
-// Returns a human-readable fourcc string. If any of the four characters is non-printable, it
-// outputs "0x<32-bit integer in hex>", e.g. fourccToString(0x66616b00) returns "0x66616b00".
-
-// Returns the number of significant bits per channel.
-size_t bitDepth(VideoPixelFormat format);
-
-// Returns the number of planes for the |format|.
-size_t numPlanes(VideoPixelFormat format);
-
-// Returns required allocation size for a (tightly packed) frame of the given coded size and format.
-size_t allocationSize(VideoPixelFormat format, const android::ui::Size& coded_size);
-
-// Returns the Size of a plane (width in bytes, height in rows) for the given coded size and format.
-android::ui::Size planeSize(VideoPixelFormat format, size_t plane,
- const android::ui::Size& coded_size);
-
-// Returns horizontal bits per pixel for given |plane| and |format|.
-int planeHorizontalBitsPerPixel(VideoPixelFormat format, size_t plane);
-
-// Returns bits per pixel for given |plane| and |format|.
-int planeBitsPerPixel(VideoPixelFormat format, size_t plane);
-
-// Returns the number of bytes per element for given |plane| and |format|.
-int bytesPerElement(VideoPixelFormat format, size_t plane);
-
-// Returns true if |plane| is a valid plane index for the given |format|.
-bool isValidPlane(VideoPixelFormat format, size_t plane);
-
-// Returns the pixel size of each subsample for a given |plane| and |format|.
-// E.g. 2x2 for the U-plane in I420.
-android::ui::Size SampleSize(VideoPixelFormat format, size_t plane);
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMMON_VIDEO_PIXEL_FORMAT_H
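A worked example of the expected helper semantics under the standard 4:2:0 definitions; the numbers are plain arithmetic, not values taken from this header:

    #include <cassert>
    // I420 carries a full-resolution Y plane plus two half-resolution chroma
    // planes, i.e. 12 bits per pixel.
    const android::ui::Size coded(320, 240);
    assert(android::numPlanes(android::VideoPixelFormat::I420) == 3);
    // Tightly packed: 320*240 (Y) + 2 * 160*120 (U, V) = 320*240*3/2 = 115200 bytes.
    assert(android::allocationSize(android::VideoPixelFormat::I420, coded) == 115200);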
diff --git a/common/include/v4l2_codec2/common/VideoTypes.h b/common/include/v4l2_codec2/common/VideoTypes.h
index 076f096..a5130d2 100644
--- a/common/include/v4l2_codec2/common/VideoTypes.h
+++ b/common/include/v4l2_codec2/common/VideoTypes.h
@@ -8,7 +8,6 @@
#include <optional>
#include <string>
-#include <C2Config.h>
#include <android/hardware/graphics/common/1.0/types.h>
namespace android {
@@ -19,12 +18,7 @@ enum class VideoCodec {
VP8,
VP9,
};
-
-constexpr std::initializer_list<VideoCodec> kAllCodecs = {VideoCodec::H264, VideoCodec::VP8,
- VideoCodec::VP9};
-
const char* VideoCodecToString(VideoCodec codec);
-const char* profileToString(C2Config::profile_t profile);
// Enumeration of supported pixel format. The value should be the same as
// ::android::hardware::graphics::common::V1_0::PixelFormat.
diff --git a/components/Android.bp b/components/Android.bp
index 16c7d20..8273412 100644
--- a/components/Android.bp
+++ b/components/Android.bp
@@ -1,41 +1,26 @@
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "external_v4l2_codec2_license"
- // to get the below license kinds:
- // SPDX-license-identifier-BSD
- default_applicable_licenses: ["external_v4l2_codec2_license"],
-}
-
cc_library {
name: "libv4l2_codec2_components",
vendor: true,
defaults: [
- "libcodec2-hidl-defaults",
+ "libcodec2-impl-defaults",
],
srcs: [
"VideoFrame.cpp",
"VideoFramePool.cpp",
- "V4L2ComponentFactory.cpp",
- "V4L2ComponentStore.cpp",
"V4L2Decoder.cpp",
+ "V4L2ComponentFactory.cpp",
"V4L2DecodeComponent.cpp",
"V4L2DecodeInterface.cpp",
- "V4L2Encoder.cpp",
"V4L2EncodeComponent.cpp",
"V4L2EncodeInterface.cpp",
"VideoDecoder.cpp",
- "VideoEncoder.cpp",
],
export_include_dirs: [
"include",
],
- header_libs: [
- "libcodec2_internal",
- ],
shared_libs: [
"android.hardware.graphics.common@1.0",
"libc2plugin_store",
@@ -46,9 +31,11 @@ cc_library {
"libsfplugin_ccodec_utils",
"libstagefright_bufferqueue_helper",
"libstagefright_foundation",
+ "libv4l2_codec2_store",
"libui",
],
static_libs: [
+ "libv4l2_codec2_accel",
"libv4l2_codec2_common",
],
diff --git a/components/V4L2ComponentFactory.cpp b/components/V4L2ComponentFactory.cpp
index a3f8837..a535c34 100644
--- a/components/V4L2ComponentFactory.cpp
+++ b/components/V4L2ComponentFactory.cpp
@@ -5,57 +5,51 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "V4L2ComponentFactory"
-#include <v4l2_codec2/components/V4L2ComponentFactory.h>
+#include <string>
-#include <codec2/hidl/1.0/InputBufferManager.h>
+#include <C2ComponentFactory.h>
+#include <SimpleC2Interface.h>
#include <log/log.h>
+#include <util/C2InterfaceHelper.h>
#include <v4l2_codec2/common/V4L2ComponentCommon.h>
#include <v4l2_codec2/components/V4L2DecodeComponent.h>
#include <v4l2_codec2/components/V4L2DecodeInterface.h>
#include <v4l2_codec2/components/V4L2EncodeComponent.h>
#include <v4l2_codec2/components/V4L2EncodeInterface.h>
+#include <v4l2_codec2/store/V4L2ComponentStore.h>
namespace android {
-// static
-std::unique_ptr<V4L2ComponentFactory> V4L2ComponentFactory::create(
- const std::string& componentName, std::shared_ptr<C2ReflectorHelper> reflector) {
- ALOGV("%s(%s)", __func__, componentName.c_str());
-
- if (!android::V4L2ComponentName::isValid(componentName.c_str())) {
- ALOGE("Invalid component name: %s", componentName.c_str());
- return nullptr;
- }
- if (reflector == nullptr) {
- ALOGE("reflector is null");
- return nullptr;
+class V4L2ComponentFactory : public C2ComponentFactory {
+public:
+ V4L2ComponentFactory(const char* componentName, bool isEncoder);
+ ~V4L2ComponentFactory() override;
+
+ // Implementation of C2ComponentFactory.
+ c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ ComponentDeleter deleter) override;
+ c2_status_t createInterface(c2_node_id_t id,
+ std::shared_ptr<C2ComponentInterface>* const interface,
+ InterfaceDeleter deleter) override;
+
+private:
+ const std::string mComponentName;
+ const bool mIsEncoder;
+ std::shared_ptr<C2ReflectorHelper> mReflector;
+};
+
+V4L2ComponentFactory::V4L2ComponentFactory(const char* componentName, bool isEncoder)
+ : mComponentName(componentName), mIsEncoder(isEncoder) {
+ auto componentStore = V4L2ComponentStore::Create();
+ if (componentStore == nullptr) {
+ ALOGE("Could not create V4L2ComponentStore.");
+ return;
}
-
- bool isEncoder = android::V4L2ComponentName::isEncoder(componentName.c_str());
- return std::make_unique<V4L2ComponentFactory>(componentName, isEncoder, std::move(reflector));
+ mReflector = std::static_pointer_cast<C2ReflectorHelper>(componentStore->getParamReflector());
}
-V4L2ComponentFactory::V4L2ComponentFactory(const std::string& componentName, bool isEncoder,
- std::shared_ptr<C2ReflectorHelper> reflector)
- : mComponentName(componentName), mIsEncoder(isEncoder), mReflector(std::move(reflector)) {
- using namespace ::android::hardware::media::c2::V1_0;
- // To minimize IPC, we generally want the codec2 framework to release and
- // recycle input buffers when the corresponding work item is done. However,
- // sometimes it is necessary to provide more input to unblock a decoder.
- //
- // Optimally we would configure this on a per-context basis. However, the
- // InputBufferManager is a process-wide singleton, so we need to configure it
- // pessimistically. Basing the interval on frame timing can be suboptimal if
- // the decoded output isn't being displayed, but that's not a primary use case
- // and few videos will actually rely on this behavior.
- constexpr nsecs_t kMinFrameIntervalNs = 1000000000ull / 60;
- uint32_t delayCount = 0;
- for (auto c : kAllCodecs) {
- delayCount = std::max(delayCount, V4L2DecodeInterface::getOutputDelay(c));
- }
- utils::InputBufferManager::setNotificationInterval(delayCount * kMinFrameIntervalNs / 2);
-}
+V4L2ComponentFactory::~V4L2ComponentFactory() = default;
c2_status_t V4L2ComponentFactory::createComponent(c2_node_id_t id,
std::shared_ptr<C2Component>* const component,
@@ -73,7 +67,7 @@ c2_status_t V4L2ComponentFactory::createComponent(c2_node_id_t id,
} else {
*component = V4L2DecodeComponent::create(mComponentName, id, mReflector, deleter);
}
- return *component ? C2_OK : C2_NO_MEMORY;
+ return *component ? C2_OK : C2_BAD_VALUE;
}
c2_status_t V4L2ComponentFactory::createInterface(
@@ -104,3 +98,20 @@ c2_status_t V4L2ComponentFactory::createInterface(
}
} // namespace android
+
+extern "C" ::C2ComponentFactory* CreateCodec2Factory(const char* componentName) {
+ ALOGV("%s(%s)", __func__, componentName);
+
+ if (!android::V4L2ComponentName::isValid(componentName)) {
+ ALOGE("Invalid component name: %s", componentName);
+ return nullptr;
+ }
+
+ bool isEncoder = android::V4L2ComponentName::isEncoder(componentName);
+ return new android::V4L2ComponentFactory(componentName, isEncoder);
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+ ALOGV("%s()", __func__);
+ delete factory;
+}
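These C entry points follow the usual Codec2 component-loading convention; a hedged loader-side sketch, with the library and component names illustrative only:

    void* lib = dlopen("libv4l2_codec2_components.so", RTLD_NOW);
    auto create = reinterpret_cast<::C2ComponentFactory* (*)(const char*)>(
            dlsym(lib, "CreateCodec2Factory"));
    auto destroy = reinterpret_cast<void (*)(::C2ComponentFactory*)>(
            dlsym(lib, "DestroyCodec2Factory"));
    ::C2ComponentFactory* factory = create("c2.v4l2.avc.decoder");  // name illustrative
    // ... factory->createComponent() / factory->createInterface() ...
    destroy(factory);
    dlclose(lib);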
diff --git a/components/V4L2DecodeComponent.cpp b/components/V4L2DecodeComponent.cpp
index 400c765..1ea9a7b 100644
--- a/components/V4L2DecodeComponent.cpp
+++ b/components/V4L2DecodeComponent.cpp
@@ -20,12 +20,10 @@
#include <base/bind.h>
#include <base/callback_helpers.h>
#include <base/time/time.h>
-#include <cutils/properties.h>
#include <log/log.h>
#include <media/stagefright/foundation/ColorUtils.h>
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/common/NalParser.h>
+#include <h264_parser.h>
#include <v4l2_codec2/common/VideoTypes.h>
#include <v4l2_codec2/components/BitstreamBuffer.h>
#include <v4l2_codec2/components/V4L2Decoder.h>
@@ -34,6 +32,8 @@
namespace android {
namespace {
+// TODO(b/151128291): figure out why we cannot open the V4L2Device within 0.5 seconds.
+const ::base::TimeDelta kBlockingMethodTimeout = ::base::TimeDelta::FromMilliseconds(5000);
// Mask against 30 bits to avoid (undefined) wraparound on signed integer.
int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
@@ -43,23 +43,44 @@ int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
bool parseCodedColorAspects(const C2ConstLinearBlock& input,
C2StreamColorAspectsInfo::input* codedAspects) {
C2ReadView view = input.map().get();
- NalParser parser(view.data(), view.capacity());
+ const uint8_t* data = view.data();
+ const uint32_t size = view.capacity();
+
+ std::unique_ptr<media::H264Parser> h264Parser = std::make_unique<media::H264Parser>();
+ h264Parser->SetStream(data, static_cast<off_t>(size));
+ media::H264NALU nalu;
+ media::H264Parser::Result parRes = h264Parser->AdvanceToNextNALU(&nalu);
+ if (parRes != media::H264Parser::kEOStream && parRes != media::H264Parser::kOk) {
+ ALOGE("H264 AdvanceToNextNALU error: %d", static_cast<int>(parRes));
+ return false;
+ }
+ if (nalu.nal_unit_type != media::H264NALU::kSPS) {
+ ALOGV("NALU is not SPS");
+ return false;
+ }
- if (!parser.locateSPS()) {
- ALOGV("Couldn't find SPS");
+ int spsId;
+ parRes = h264Parser->ParseSPS(&spsId);
+ if (parRes != media::H264Parser::kEOStream && parRes != media::H264Parser::kOk) {
+ ALOGE("H264 ParseSPS error: %d", static_cast<int>(parRes));
return false;
}
- NalParser::ColorAspects aspects;
- if (!parser.findCodedColorAspects(&aspects)) {
- ALOGV("Couldn't find color description in SPS");
+ // Parse ISO color aspects from H264 SPS bitstream.
+ const media::H264SPS* sps = h264Parser->GetSPS(spsId);
+ if (!sps->colour_description_present_flag) {
+ ALOGV("No Color Description in SPS");
return false;
}
+ int32_t primaries = sps->colour_primaries;
+ int32_t transfer = sps->transfer_characteristics;
+ int32_t coeffs = sps->matrix_coefficients;
+ bool fullRange = sps->video_full_range_flag;
// Convert ISO color aspects to ColorUtils::ColorAspects.
ColorAspects colorAspects;
- ColorUtils::convertIsoColorAspectsToCodecAspects(
- aspects.primaries, aspects.transfer, aspects.coeffs, aspects.fullRange, colorAspects);
+ ColorUtils::convertIsoColorAspectsToCodecAspects(primaries, transfer, coeffs, fullRange,
+ colorAspects);
ALOGV("Parsed ColorAspects from bitstream: (R:%d, P:%d, M:%d, T:%d)", colorAspects.mRange,
colorAspects.mPrimaries, colorAspects.mMatrixCoeffs, colorAspects.mTransfer);
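Note that this rewrite inspects only the first NALU; a hedged variant that scans the stream until an SPS is found, reusing |h264Parser| and |nalu| from above, would be:

    media::H264Parser::Result res;
    while ((res = h264Parser->AdvanceToNextNALU(&nalu)) == media::H264Parser::kOk) {
        if (nalu.nal_unit_type == media::H264NALU::kSPS) break;  // found an SPS
    }
    if (res != media::H264Parser::kOk) return false;  // no SPS in this buffer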
@@ -117,23 +138,9 @@ bool isNoShowFrameWork(const C2Work& work, const C2WorkOrdinalStruct& currOrdina
} // namespace
// static
-std::atomic<int32_t> V4L2DecodeComponent::sConcurrentInstances = 0;
-
-// static
std::shared_ptr<C2Component> V4L2DecodeComponent::create(
const std::string& name, c2_node_id_t id, const std::shared_ptr<C2ReflectorHelper>& helper,
C2ComponentFactory::ComponentDeleter deleter) {
- static const int32_t kMaxConcurrentInstances =
- property_get_int32("debug.v4l2_codec2.decode.concurrent-instances", -1);
- static std::mutex mutex;
-
- std::lock_guard<std::mutex> lock(mutex);
-
- if (kMaxConcurrentInstances >= 0 && sConcurrentInstances.load() >= kMaxConcurrentInstances) {
- ALOGW("Reject to Initialize() due to too many instances: %d", sConcurrentInstances.load());
- return nullptr;
- }
-
auto intfImpl = std::make_shared<V4L2DecodeInterface>(name, helper);
if (intfImpl->status() != C2_OK) {
ALOGE("Failed to initialize V4L2DecodeInterface.");
@@ -151,19 +158,28 @@ V4L2DecodeComponent::V4L2DecodeComponent(const std::string& name, c2_node_id_t i
mIntf(std::make_shared<SimpleInterface<V4L2DecodeInterface>>(name.c_str(), id, mIntfImpl)) {
ALOGV("%s(%s)", __func__, name.c_str());
- sConcurrentInstances.fetch_add(1, std::memory_order_relaxed);
mIsSecure = name.find(".secure") != std::string::npos;
}
V4L2DecodeComponent::~V4L2DecodeComponent() {
ALOGV("%s()", __func__);
- release();
-
- sConcurrentInstances.fetch_sub(1, std::memory_order_relaxed);
+ if (mDecoderThread.IsRunning()) {
+ mDecoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::destroyTask, mWeakThis));
+ mDecoderThread.Stop();
+ }
ALOGV("%s() done", __func__);
}
+void V4L2DecodeComponent::destroyTask() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+ mWeakThisFactory.InvalidateWeakPtrs();
+ mDecoder = nullptr;
+}
+
c2_status_t V4L2DecodeComponent::start() {
ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mStartStopLock);
@@ -180,25 +196,27 @@ c2_status_t V4L2DecodeComponent::start() {
}
mDecoderTaskRunner = mDecoderThread.task_runner();
mWeakThis = mWeakThisFactory.GetWeakPtr();
- mStdWeakThis = weak_from_this();
c2_status_t status = C2_CORRUPTED;
- ::base::WaitableEvent done;
- mDecoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::startTask, mWeakThis,
- ::base::Unretained(&status), ::base::Unretained(&done)));
- done.Wait();
+ mStartStopDone.Reset();
+ mDecoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&V4L2DecodeComponent::startTask, mWeakThis,
+ ::base::Unretained(&status)));
+ if (!mStartStopDone.TimedWait(kBlockingMethodTimeout)) {
+ ALOGE("startTask() timeout...");
+ return C2_TIMED_OUT;
+ }
if (status == C2_OK) mComponentState.store(ComponentState::RUNNING);
return status;
}
-void V4L2DecodeComponent::startTask(c2_status_t* status, ::base::WaitableEvent* done) {
+void V4L2DecodeComponent::startTask(c2_status_t* status) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
::base::ScopedClosureRunner done_caller(
- ::base::BindOnce(&::base::WaitableEvent::Signal, ::base::Unretained(done)));
+ ::base::BindOnce(&::base::WaitableEvent::Signal, ::base::Unretained(&mStartStopDone)));
*status = C2_CORRUPTED;
const auto codec = mIntfImpl->getVideoCodec();
@@ -207,16 +225,12 @@ void V4L2DecodeComponent::startTask(c2_status_t* status, ::base::WaitableEvent*
return;
}
const size_t inputBufferSize = mIntfImpl->getInputBufferSize();
- // ::base::Unretained(this) is safe here because |mDecoder| is always destroyed before
- // |mDecoderThread| is stopped, so |*this| is always valid during |mDecoder|'s lifetime.
- mDecoder = V4L2Decoder::Create(*codec, inputBufferSize,
- ::base::BindRepeating(&V4L2DecodeComponent::getVideoFramePool,
- ::base::Unretained(this)),
- ::base::BindRepeating(&V4L2DecodeComponent::onOutputFrameReady,
- ::base::Unretained(this)),
- ::base::BindRepeating(&V4L2DecodeComponent::reportError,
- ::base::Unretained(this), C2_CORRUPTED),
- mDecoderTaskRunner);
+ mDecoder = V4L2Decoder::Create(
+ *codec, inputBufferSize,
+ ::base::BindRepeating(&V4L2DecodeComponent::getVideoFramePool, mWeakThis),
+ ::base::BindRepeating(&V4L2DecodeComponent::onOutputFrameReady, mWeakThis),
+ ::base::BindRepeating(&V4L2DecodeComponent::reportError, mWeakThis, C2_CORRUPTED),
+ mDecoderTaskRunner);
if (!mDecoder) {
ALOGE("Failed to create V4L2Decoder for %s", VideoCodecToString(*codec));
return;
@@ -231,40 +245,36 @@ void V4L2DecodeComponent::startTask(c2_status_t* status, ::base::WaitableEvent*
*status = C2_OK;
}
-std::unique_ptr<VideoFramePool> V4L2DecodeComponent::getVideoFramePool(const ui::Size& size,
- HalPixelFormat pixelFormat,
- size_t numBuffers) {
+void V4L2DecodeComponent::getVideoFramePool(std::unique_ptr<VideoFramePool>* pool,
+ const media::Size& size, HalPixelFormat pixelFormat,
+ size_t numBuffers) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
- auto sharedThis = mStdWeakThis.lock();
- if (sharedThis == nullptr) {
- ALOGE("%s(): V4L2DecodeComponent instance is destroyed.", __func__);
- return nullptr;
- }
-
// (b/157113946): Prevent a malicious dynamic resolution change from exhausting system memory.
constexpr int kMaximumSupportedArea = 4096 * 4096;
- if (getArea(size).value_or(INT_MAX) > kMaximumSupportedArea) {
- ALOGE("The output size (%dx%d) is larger than supported size (4096x4096)", size.width,
- size.height);
+ if (size.width() * size.height() > kMaximumSupportedArea) {
+ ALOGE("The output size (%dx%d) is larger than supported size (4096x4096)", size.width(),
+ size.height());
reportError(C2_BAD_VALUE);
- return nullptr;
+ *pool = nullptr;
+ return;
}
// Get block pool ID configured from the client.
auto poolId = mIntfImpl->getBlockPoolId();
ALOGI("Using C2BlockPool ID = %" PRIu64 " for allocating output buffers", poolId);
std::shared_ptr<C2BlockPool> blockPool;
- auto status = GetCodec2BlockPool(poolId, std::move(sharedThis), &blockPool);
+ auto status = GetCodec2BlockPool(poolId, shared_from_this(), &blockPool);
if (status != C2_OK) {
ALOGE("Graphic block allocator is invalid: %d", status);
reportError(status);
- return nullptr;
+ *pool = nullptr;
+ return;
}
- return VideoFramePool::Create(std::move(blockPool), numBuffers, size, pixelFormat, mIsSecure,
- mDecoderTaskRunner);
+ *pool = VideoFramePool::Create(std::move(blockPool), numBuffers, size, pixelFormat, mIsSecure,
+ mDecoderTaskRunner);
}
c2_status_t V4L2DecodeComponent::stop() {
@@ -277,13 +287,19 @@ c2_status_t V4L2DecodeComponent::stop() {
return C2_BAD_STATE;
}
- if (mDecoderThread.IsRunning()) {
- mDecoderTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2DecodeComponent::stopTask, mWeakThis));
- mDecoderThread.Stop();
- mDecoderTaskRunner = nullptr;
+ // Return immediately if the component is already stopped.
+ if (!mDecoderThread.IsRunning()) return C2_OK;
+
+ mStartStopDone.Reset();
+ mDecoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&V4L2DecodeComponent::stopTask, mWeakThis));
+ if (!mStartStopDone.TimedWait(kBlockingMethodTimeout)) {
+ ALOGE("stopTask() timeout...");
+ return C2_TIMED_OUT;
}
+ mDecoderThread.Stop();
+ mDecoderTaskRunner = nullptr;
mComponentState.store(ComponentState::STOPPED);
return C2_OK;
}
@@ -294,38 +310,10 @@ void V4L2DecodeComponent::stopTask() {
reportAbandonedWorks();
mIsDraining = false;
-
- releaseTask();
-}
-
-c2_status_t V4L2DecodeComponent::reset() {
- ALOGV("%s()", __func__);
-
- return stop();
-}
-
-c2_status_t V4L2DecodeComponent::release() {
- ALOGV("%s()", __func__);
- std::lock_guard<std::mutex> lock(mStartStopLock);
-
- if (mDecoderThread.IsRunning()) {
- mDecoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::releaseTask, mWeakThis));
- mDecoderThread.Stop();
- mDecoderTaskRunner = nullptr;
- }
-
- mComponentState.store(ComponentState::RELEASED);
- return C2_OK;
-}
-
-void V4L2DecodeComponent::releaseTask() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
-
- mWeakThisFactory.InvalidateWeakPtrs();
- mStdWeakThis.reset();
mDecoder = nullptr;
+ mWeakThisFactory.InvalidateWeakPtrs();
+
+ mStartStopDone.Signal();
}
c2_status_t V4L2DecodeComponent::setListener_vb(
@@ -430,21 +418,16 @@ void V4L2DecodeComponent::pumpPendingWorks() {
}
while (!mPendingWorks.empty() && !mIsDraining) {
- std::unique_ptr<C2Work> pendingWork(std::move(mPendingWorks.front()));
+ std::unique_ptr<C2Work> work(std::move(mPendingWorks.front()));
mPendingWorks.pop();
- const int32_t bitstreamId = frameIndexToBitstreamId(pendingWork->input.ordinal.frameIndex);
- const bool isCSDWork = pendingWork->input.flags & C2FrameData::FLAG_CODEC_CONFIG;
- const bool isEmptyWork = pendingWork->input.buffers.front() == nullptr;
- const bool isEOSWork = pendingWork->input.flags & C2FrameData::FLAG_END_OF_STREAM;
- const C2Work* work = pendingWork.get();
+ const int32_t bitstreamId = frameIndexToBitstreamId(work->input.ordinal.frameIndex);
+ const bool isCSDWork = work->input.flags & C2FrameData::FLAG_CODEC_CONFIG;
+ const bool isEmptyWork = work->input.buffers.front() == nullptr;
ALOGV("Process C2Work bitstreamId=%d isCSDWork=%d, isEmptyWork=%d", bitstreamId, isCSDWork,
isEmptyWork);
- auto res = mWorksAtDecoder.insert(std::make_pair(bitstreamId, std::move(pendingWork)));
- ALOGW_IF(!res.second, "We already inserted bitstreamId %d to decoder?", bitstreamId);
-
- if (!isEmptyWork) {
+ if (work->input.buffers.front() != nullptr) {
// If input.buffers is not empty, the buffer should have meaningful content inside.
C2ConstLinearBlock linearBlock =
work->input.buffers.front()->data().linearBlocks().front();
@@ -481,11 +464,14 @@ void V4L2DecodeComponent::pumpPendingWorks() {
mWeakThis, bitstreamId));
}
- if (isEOSWork) {
+ if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
mDecoder->drain(::base::BindOnce(&V4L2DecodeComponent::onDrainDone, mWeakThis));
mIsDraining = true;
}
+ auto res = mWorksAtDecoder.insert(std::make_pair(bitstreamId, std::move(work)));
+ ALOGW_IF(!res.second, "We already inserted bitstreamId %d to decoder?", bitstreamId);
+
// Directly report the empty CSD work as finished.
if (isCSDWork && isEmptyWork) reportWorkIfFinished(bitstreamId);
}
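The bitstreamId used throughout this loop comes from the 30-bit masking noted near the top of this file; the implementation is presumably along these lines (a sketch, assuming c2_cntr64_t's peeku() accessor):

    int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
        // Keep the value positive in an int32_t to avoid (undefined) signed wraparound.
        return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
    }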
@@ -496,18 +482,8 @@ void V4L2DecodeComponent::onDecodeDone(int32_t bitstreamId, VideoDecoder::Decode
VideoDecoder::DecodeStatusToString(status));
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
- auto it = mWorksAtDecoder.find(bitstreamId);
- ALOG_ASSERT(it != mWorksAtDecoder.end());
- C2Work* work = it->second.get();
-
switch (status) {
case VideoDecoder::DecodeStatus::kAborted:
- work->input.buffers.front().reset();
- work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(
- work->worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME);
- mOutputBitstreamIds.push(bitstreamId);
-
- pumpReportWork();
return;
case VideoDecoder::DecodeStatus::kError:
@@ -515,6 +491,10 @@ void V4L2DecodeComponent::onDecodeDone(int32_t bitstreamId, VideoDecoder::Decode
return;
case VideoDecoder::DecodeStatus::kOk:
+ auto it = mWorksAtDecoder.find(bitstreamId);
+ ALOG_ASSERT(it != mWorksAtDecoder.end());
+ C2Work* work = it->second.get();
+
// Release the input buffer.
work->input.buffers.front().reset();
@@ -542,6 +522,9 @@ void V4L2DecodeComponent::onOutputFrameReady(std::unique_ptr<VideoFrame> frame)
C2Work* work = it->second.get();
C2ConstGraphicBlock constBlock = std::move(frame)->getGraphicBlock();
+ // TODO(b/160307705): Consider to remove the dependency of C2VdaBqBlockPool.
+ MarkBlockPoolDataAsShared(constBlock);
+
std::shared_ptr<C2Buffer> buffer = C2Buffer::CreateGraphicBuffer(std::move(constBlock));
if (mPendingColorAspectsChange &&
work->input.ordinal.frameIndex.peeku() >= mPendingColorAspectsChangeFrameIndex) {
@@ -614,10 +597,7 @@ bool V4L2DecodeComponent::reportWorkIfFinished(int32_t bitstreamId) {
}
auto it = mWorksAtDecoder.find(bitstreamId);
- if (it == mWorksAtDecoder.end()) {
- ALOGI("work(bitstreamId = %d) is dropped, skip.", bitstreamId);
- return true;
- }
+ ALOG_ASSERT(it != mWorksAtDecoder.end());
if (!isWorkDone(*(it->second))) {
ALOGV("work(bitstreamId = %d) is not done yet.", bitstreamId);
@@ -641,33 +621,25 @@ bool V4L2DecodeComponent::reportEOSWork() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
- const auto it =
- std::find_if(mWorksAtDecoder.begin(), mWorksAtDecoder.end(), [](const auto& kv) {
- return kv.second->input.flags & C2FrameData::FLAG_END_OF_STREAM;
- });
- if (it == mWorksAtDecoder.end()) {
- ALOGE("Failed to find EOS work.");
+ // At this point, all work items prior to the EOS work should be done and returned to the
+ // listener.
+ if (mWorksAtDecoder.size() != 1u) {
+ ALOGE("There should be no remaining work in mWorksAtDecoder except the EOS work.");
+ for (const auto& kv : mWorksAtDecoder) {
+ ALOGE("bitstreamId(%d) => Work index=%llu, timestamp=%llu", kv.first,
+ kv.second->input.ordinal.frameIndex.peekull(),
+ kv.second->input.ordinal.timestamp.peekull());
+ }
return false;
}
- std::unique_ptr<C2Work> eosWork(std::move(it->second));
- mWorksAtDecoder.erase(it);
+ std::unique_ptr<C2Work> eosWork(std::move(mWorksAtDecoder.begin()->second));
+ mWorksAtDecoder.clear();
eosWork->result = C2_OK;
eosWork->workletsProcessed = static_cast<uint32_t>(eosWork->worklets.size());
eosWork->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
if (!eosWork->input.buffers.empty()) eosWork->input.buffers.front().reset();
- if (!mWorksAtDecoder.empty()) {
- ALOGW("There are remaining works except EOS work. abandon them.");
- for (const auto& kv : mWorksAtDecoder) {
- ALOGW("bitstreamId(%d) => Work index=%llu, timestamp=%llu", kv.first,
- kv.second->input.ordinal.frameIndex.peekull(),
- kv.second->input.ordinal.timestamp.peekull());
- }
- reportAbandonedWorks();
- }
-
return reportWork(std::move(eosWork));
}
@@ -675,12 +647,6 @@ bool V4L2DecodeComponent::reportWork(std::unique_ptr<C2Work> work) {
ALOGV("%s(work=%llu)", __func__, work->input.ordinal.frameIndex.peekull());
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
- auto sharedThis = mStdWeakThis.lock();
- if (sharedThis == nullptr) {
- ALOGE("%s(): V4L2DecodeComponent instance is destroyed.", __func__);
- return false;
- }
-
if (!mListener) {
ALOGE("mListener is nullptr, setListener_vb() not called?");
return false;
@@ -688,7 +654,7 @@ bool V4L2DecodeComponent::reportWork(std::unique_ptr<C2Work> work) {
std::list<std::unique_ptr<C2Work>> finishedWorks;
finishedWorks.emplace_back(std::move(work));
- mListener->onWorkDone_nb(std::move(sharedThis), std::move(finishedWorks));
+ mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
return true;
}
@@ -725,12 +691,6 @@ void V4L2DecodeComponent::reportAbandonedWorks() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
- auto sharedThis = mStdWeakThis.lock();
- if (sharedThis == nullptr) {
- ALOGE("%s(): V4L2DecodeComponent instance is destroyed.", __func__);
- return;
- }
-
std::list<std::unique_ptr<C2Work>> abandonedWorks;
while (!mPendingWorks.empty()) {
abandonedWorks.emplace_back(std::move(mPendingWorks.front()));
@@ -754,7 +714,7 @@ void V4L2DecodeComponent::reportAbandonedWorks() {
ALOGE("mListener is nullptr, setListener_vb() not called?");
return;
}
- mListener->onWorkDone_nb(std::move(sharedThis), std::move(abandonedWorks));
+ mListener->onWorkDone_nb(shared_from_this(), std::move(abandonedWorks));
}
}
@@ -828,12 +788,6 @@ void V4L2DecodeComponent::reportError(c2_status_t error) {
ALOGE("%s(error=%u)", __func__, static_cast<uint32_t>(error));
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
- auto sharedThis = mStdWeakThis.lock();
- if (sharedThis == nullptr) {
- ALOGE("%s(): V4L2DecodeComponent instance is destroyed.", __func__);
- return;
- }
-
if (mComponentState.load() == ComponentState::ERROR) return;
mComponentState.store(ComponentState::ERROR);
@@ -841,7 +795,21 @@ void V4L2DecodeComponent::reportError(c2_status_t error) {
ALOGE("mListener is nullptr, setListener_vb() not called?");
return;
}
- mListener->onError_nb(std::move(sharedThis), static_cast<uint32_t>(error));
+ mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
+}
+
+c2_status_t V4L2DecodeComponent::reset() {
+ ALOGV("%s()", __func__);
+
+ return stop();
+}
+
+c2_status_t V4L2DecodeComponent::release() {
+ ALOGV("%s()", __func__);
+
+ c2_status_t ret = reset();
+ mComponentState.store(ComponentState::RELEASED);
+ return ret;
}
c2_status_t V4L2DecodeComponent::announce_nb(const std::vector<C2WorkOutline>& /* items */) {
diff --git a/components/V4L2DecodeInterface.cpp b/components/V4L2DecodeInterface.cpp
index 4bc4121..a09fcc4 100644
--- a/components/V4L2DecodeInterface.cpp
+++ b/components/V4L2DecodeInterface.cpp
@@ -14,8 +14,8 @@
#include <media/stagefright/foundation/MediaDefs.h>
#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/common/V4L2Device.h>
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
+#include <v4l2_device.h>
namespace android {
namespace {
@@ -49,6 +49,22 @@ size_t calculateInputBufferSize(size_t area) {
if (area > k1080pArea) return kInputBufferSizeFor4K;
return kInputBufferSizeFor1080p;
}
+
+uint32_t getOutputDelay(VideoCodec codec) {
+ switch (codec) {
+ case VideoCodec::H264:
+ // Due to frame reordering, an H264 decoder might need multiple additional input frames to be
+ // queued before being able to output the associated decoded buffers. We need to tell the
+ // codec2 framework that it should not stop queuing new work items until the maximum
+ // frame-reordering depth is reached, to avoid stalling the decoder.
+ return 16;
+ case VideoCodec::VP8:
+ return 0;
+ case VideoCodec::VP9:
+ return 0;
+ }
+}
+
} // namespace
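The H264 value of 16 matches the worst-case DPB reordering depth. A hedged sketch of how such a delay is typically surfaced through the standard Codec2 output-delay tuning; the parameter wiring here is illustrative, not copied from this diff:

    addParameter(DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
                         .withConstValue(new C2PortActualDelayTuning::output(
                                 getOutputDelay(VideoCodec::H264)))  // == 16
                         .build());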
// static
@@ -122,10 +138,6 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
return;
}
- addParameter(DefineParam(mKind, C2_PARAMKEY_COMPONENT_KIND)
- .withConstValue(new C2ComponentKindSetting(C2Component::KIND_DECODER))
- .build());
-
std::string inputMime;
switch (*mVideoCodec) {
case VideoCodec::H264:
@@ -234,7 +246,7 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
bool secureMode = name.find(".secure") != std::string::npos;
const C2Allocator::id_t inputAllocators[] = {secureMode ? V4L2AllocatorId::SECURE_LINEAR
- : C2AllocatorStore::DEFAULT_LINEAR};
+ : C2PlatformAllocatorStore::BLOB};
const C2Allocator::id_t outputAllocators[] = {V4L2AllocatorId::V4L2_BUFFERPOOL};
const C2Allocator::id_t surfaceAllocator =
@@ -322,7 +334,7 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
}
size_t V4L2DecodeInterface::getInputBufferSize() const {
- return calculateInputBufferSize(mSize->width * mSize->height);
+ return calculateInputBufferSize(getMaxSize().GetArea());
}
c2_status_t V4L2DecodeInterface::queryColorAspects(
@@ -338,19 +350,4 @@ c2_status_t V4L2DecodeInterface::queryColorAspects(
return status;
}
-uint32_t V4L2DecodeInterface::getOutputDelay(VideoCodec codec) {
- switch (codec) {
- case VideoCodec::H264:
- // Due to frame reordering an H264 decoder might need multiple additional input frames to be
- // queued before being able to output the associated decoded buffers. We need to tell the
- // codec2 framework that it should not stop queuing new work items until the maximum number
- // of frame reordering is reached, to avoid stalling the decoder.
- return 16;
- case VideoCodec::VP8:
- return 0;
- case VideoCodec::VP9:
- return 0;
- }
-}
-
} // namespace android
diff --git a/components/V4L2Decoder.cpp b/components/V4L2Decoder.cpp
index d694837..d52bd6c 100644
--- a/components/V4L2Decoder.cpp
+++ b/components/V4L2Decoder.cpp
@@ -16,9 +16,6 @@
#include <base/memory/ptr_util.h>
#include <log/log.h>
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/common/Fourcc.h>
-
namespace android {
namespace {
@@ -26,13 +23,6 @@ constexpr size_t kNumInputBuffers = 16;
// Extra buffers for transmitting in the whole video pipeline.
constexpr size_t kNumExtraOutputBuffers = 4;
-// Currently we only support flexible pixel 420 format YCBCR_420_888 in Android.
-// Here is the list of flexible 420 format.
-constexpr std::initializer_list<uint32_t> kSupportedOutputFourccs = {
- Fourcc::YU12, Fourcc::YV12, Fourcc::YM12, Fourcc::YM21,
- Fourcc::NV12, Fourcc::NV21, Fourcc::NM12, Fourcc::NM21,
-};
-
uint32_t VideoCodecToV4L2PixFmt(VideoCodec codec) {
switch (codec) {
case VideoCodec::H264:
@@ -74,17 +64,17 @@ V4L2Decoder::~V4L2Decoder() {
// Streamoff input and output queue.
if (mOutputQueue) {
- mOutputQueue->streamoff();
- mOutputQueue->deallocateBuffers();
+ mOutputQueue->Streamoff();
+ mOutputQueue->DeallocateBuffers();
mOutputQueue = nullptr;
}
if (mInputQueue) {
- mInputQueue->streamoff();
- mInputQueue->deallocateBuffers();
+ mInputQueue->Streamoff();
+ mInputQueue->DeallocateBuffers();
mInputQueue = nullptr;
}
if (mDevice) {
- mDevice->stopPolling();
+ mDevice->StopPolling();
mDevice = nullptr;
}
}
@@ -104,15 +94,15 @@ bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize, G
return false;
}
- mDevice = V4L2Device::create();
+ mDevice = media::V4L2Device::Create();
const uint32_t inputPixelFormat = VideoCodecToV4L2PixFmt(codec);
- if (!mDevice->open(V4L2Device::Type::kDecoder, inputPixelFormat)) {
+ if (!mDevice->Open(media::V4L2Device::Type::kDecoder, inputPixelFormat)) {
ALOGE("Failed to open device for %s", VideoCodecToString(codec));
return false;
}
- if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
+ if (!mDevice->HasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
return false;
}
@@ -120,7 +110,7 @@ bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize, G
struct v4l2_decoder_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = V4L2_DEC_CMD_STOP;
- if (mDevice->ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
+ if (mDevice->Ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
return false;
}
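HasCapabilities() above wraps VIDIOC_QUERYCAP; roughly the following, as a raw-ioctl sketch (helper name assumed, not the library code):

    #include <cstring>
    #include <linux/videodev2.h>
    #include <sys/ioctl.h>

    bool hasCaps(int fd, uint32_t wanted) {
        struct v4l2_capability caps;
        memset(&caps, 0, sizeof(caps));
        if (ioctl(fd, VIDIOC_QUERYCAP, &caps) != 0) return false;  // query device capabilities
        return (caps.capabilities & wanted) == wanted;             // all requested bits present?
    }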
@@ -129,14 +119,14 @@ bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize, G
struct v4l2_event_subscription sub;
memset(&sub, 0, sizeof(sub));
sub.type = V4L2_EVENT_SOURCE_CHANGE;
- if (mDevice->ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
+ if (mDevice->Ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
return false;
}
// Create Input/Output V4L2Queue, and setup input queue.
- mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ mInputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ mOutputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (!mInputQueue || !mOutputQueue) {
ALOGE("Failed to create V4L2 queue.");
return false;
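A note on the V4L2 memory-to-memory naming used above, which is easy to misread: the OUTPUT queue carries data into the device (the compressed bitstream) and the CAPTURE queue carries decoded frames out. Sketch (device stands in for the media::V4L2Device instance):

    auto toDevice   = device->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);   // bitstream in
    auto fromDevice = device->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);  // frames out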
@@ -146,7 +136,7 @@ bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize, G
return false;
}
- if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
+ if (!mDevice->StartPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
ALOGE("Failed to start polling V4L2 device.");
return false;
@@ -163,25 +153,25 @@ bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t
// Check if the format is supported.
std::vector<uint32_t> formats =
- mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ mDevice->EnumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
ALOGE("Input codec s not supported by device.");
return false;
}
// Setup the input format.
- auto format = mInputQueue->setFormat(inputPixelFormat, ui::Size(), inputBufferSize, 0);
+ auto format = mInputQueue->SetFormat(inputPixelFormat, media::Size(), inputBufferSize);
if (!format) {
ALOGE("Failed to call IOCTL to set input format.");
return false;
}
ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);
- if (mInputQueue->allocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
+ if (mInputQueue->AllocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
ALOGE("Failed to allocate input buffer.");
return false;
}
- if (!mInputQueue->streamon()) {
+ if (!mInputQueue->Streamon()) {
ALOGE("Failed to streamon input queue.");
return false;
}
@@ -213,7 +203,7 @@ void V4L2Decoder::drain(DecodeCB drainCb) {
switch (mState) {
case State::Idle:
- ALOGV("Nothing need to drain, ignore.");
+ ALOGD("Nothing need to drain, ignore.");
mTaskRunner->PostTask(
FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
return;
@@ -252,8 +242,8 @@ void V4L2Decoder::pumpDecodeRequest() {
// yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
// change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
// to the decoder.
- if (mInputQueue->queuedBuffersCount() > 0) {
- ALOGV("Wait for all input buffers dequeued.");
+ if (mInputQueue->QueuedBuffersCount() > 0) {
+ ALOGD("Wait for all input buffers dequeued.");
return;
}
@@ -271,7 +261,7 @@ void V4L2Decoder::pumpDecodeRequest() {
}
// Pause if no free input buffer. We resume decoding after dequeueing input buffers.
- auto inputBuffer = mInputQueue->getFreeBuffer();
+ auto inputBuffer = mInputQueue->GetFreeBuffer();
if (!inputBuffer) {
ALOGV("There is no free input buffer.");
return;
@@ -282,8 +272,8 @@ void V4L2Decoder::pumpDecodeRequest() {
const int32_t bitstreamId = request.buffer->id;
ALOGV("QBUF to input queue, bitstreadId=%d", bitstreamId);
- inputBuffer->setTimeStamp({.tv_sec = bitstreamId});
- size_t planeSize = inputBuffer->getPlaneSize(0);
+ inputBuffer->SetTimeStamp({.tv_sec = bitstreamId});
+ size_t planeSize = inputBuffer->GetPlaneSize(0);
if (request.buffer->size > planeSize) {
ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
request.buffer->size);
@@ -293,11 +283,11 @@ void V4L2Decoder::pumpDecodeRequest() {
ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
request.buffer->offset);
- inputBuffer->setPlaneDataOffset(0, request.buffer->offset);
- inputBuffer->setPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
+ inputBuffer->SetPlaneDataOffset(0, request.buffer->offset);
+ inputBuffer->SetPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
std::vector<int> fds;
fds.push_back(std::move(request.buffer->dmabuf_fd));
- if (!std::move(*inputBuffer).queueDMABuf(fds)) {
+ if (!std::move(*inputBuffer).QueueDMABuf(fds)) {
ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
onError();
return;
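pumpDecodeRequest() above stashes the bitstream id in the buffer's V4L2 timestamp so serviceDeviceTask() can match the dequeued buffer back to its decode callback. The round trip, as a sketch with assumed helper names:

    void stashId(media::V4L2WritableBufferRef& buf, int32_t bitstreamId) {
        buf.SetTimeStamp({.tv_sec = bitstreamId});  // id rides along in timeval.tv_sec
    }

    int32_t recoverId(const media::V4L2ReadableBufferRef& buf) {
        return static_cast<int32_t>(buf->GetTimeStamp().tv_sec);  // read back after DQBUF
    }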
@@ -312,7 +302,7 @@ void V4L2Decoder::flush() {
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
if (mState == State::Idle) {
- ALOGV("Nothing need to flush, ignore.");
+ ALOGD("Nothing need to flush, ignore.");
return;
}
if (mState == State::Error) {
@@ -330,23 +320,16 @@ void V4L2Decoder::flush() {
}
// Streamoff both V4L2 queues to drop input and output buffers.
- mDevice->stopPolling();
- mOutputQueue->streamoff();
+ mDevice->StopPolling();
+ mOutputQueue->Streamoff();
mFrameAtDevice.clear();
- mInputQueue->streamoff();
+ mInputQueue->Streamoff();
// Streamon both V4L2 queues.
- mInputQueue->streamon();
- mOutputQueue->streamon();
-
- // If there is no free buffer at mOutputQueue, tryFetchVideoFrame() should be triggerred after
- // a buffer is DQBUF from output queue. Now all the buffers are dropped at mOutputQueue, we
- // have to trigger tryFetchVideoFrame() here.
- if (mVideoFramePool) {
- tryFetchVideoFrame();
- }
+ mInputQueue->Streamon();
+ mOutputQueue->Streamon();
- if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
+ if (!mDevice->StartPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
ALOGE("Failed to start polling V4L2 device.");
onError();
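flush() above leans on the V4L2 contract that VIDIOC_STREAMOFF aborts and returns every buffer queued on that queue, so stopping both queues drops all in-flight work and STREAMON re-arms them. The same sequence as a raw-ioctl sketch (assuming a multi-planar m2m device on fd):

    int capture = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    int output  = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    ioctl(fd, VIDIOC_STREAMOFF, &capture);  // driver returns all queued decoded-frame buffers
    ioctl(fd, VIDIOC_STREAMOFF, &output);   // driver returns all queued bitstream buffers
    ioctl(fd, VIDIOC_STREAMON, &output);    // restart accepting bitstream input
    ioctl(fd, VIDIOC_STREAMON, &capture);   // restart producing decoded frames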
@@ -358,22 +341,22 @@ void V4L2Decoder::flush() {
void V4L2Decoder::serviceDeviceTask(bool event) {
ALOGV("%s(event=%d) state=%s InputQueue(%s):%zu+%zu/%zu, OutputQueue(%s):%zu+%zu/%zu", __func__,
- event, StateToString(mState), (mInputQueue->isStreaming() ? "streamon" : "streamoff"),
- mInputQueue->freeBuffersCount(), mInputQueue->queuedBuffersCount(),
- mInputQueue->allocatedBuffersCount(),
- (mOutputQueue->isStreaming() ? "streamon" : "streamoff"),
- mOutputQueue->freeBuffersCount(), mOutputQueue->queuedBuffersCount(),
- mOutputQueue->allocatedBuffersCount());
+ event, StateToString(mState), (mInputQueue->IsStreaming() ? "streamon" : "streamoff"),
+ mInputQueue->FreeBuffersCount(), mInputQueue->QueuedBuffersCount(),
+ mInputQueue->AllocatedBuffersCount(),
+ (mOutputQueue->IsStreaming() ? "streamon" : "streamoff"),
+ mOutputQueue->FreeBuffersCount(), mOutputQueue->QueuedBuffersCount(),
+ mOutputQueue->AllocatedBuffersCount());
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
if (mState == State::Error) return;
// Dequeue output and input queue.
bool inputDequeued = false;
- while (mInputQueue->queuedBuffersCount() > 0) {
+ while (mInputQueue->QueuedBuffersCount() > 0) {
bool success;
- V4L2ReadableBufferRef dequeuedBuffer;
- std::tie(success, dequeuedBuffer) = mInputQueue->dequeueBuffer();
+ media::V4L2ReadableBufferRef dequeuedBuffer;
+ std::tie(success, dequeuedBuffer) = mInputQueue->DequeueBuffer();
if (!success) {
ALOGE("Failed to dequeue buffer from input queue.");
onError();
@@ -384,7 +367,7 @@ void V4L2Decoder::serviceDeviceTask(bool event) {
inputDequeued = true;
// Run the corresponding decode callback.
- int32_t id = dequeuedBuffer->getTimeStamp().tv_sec;
+ int32_t id = dequeuedBuffer->GetTimeStamp().tv_sec;
ALOGV("DQBUF from input queue, bitstreamId=%d", id);
auto it = mPendingDecodeCbs.find(id);
if (it == mPendingDecodeCbs.end()) {
@@ -396,10 +379,10 @@ void V4L2Decoder::serviceDeviceTask(bool event) {
}
bool outputDequeued = false;
- while (mOutputQueue->queuedBuffersCount() > 0) {
+ while (mOutputQueue->QueuedBuffersCount() > 0) {
bool success;
- V4L2ReadableBufferRef dequeuedBuffer;
- std::tie(success, dequeuedBuffer) = mOutputQueue->dequeueBuffer();
+ media::V4L2ReadableBufferRef dequeuedBuffer;
+ std::tie(success, dequeuedBuffer) = mOutputQueue->DequeueBuffer();
if (!success) {
ALOGE("Failed to dequeue buffer from output queue.");
onError();
@@ -409,12 +392,12 @@ void V4L2Decoder::serviceDeviceTask(bool event) {
outputDequeued = true;
- const size_t bufferId = dequeuedBuffer->bufferId();
- const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->getTimeStamp().tv_sec);
- const size_t bytesUsed = dequeuedBuffer->getPlaneBytesUsed(0);
- const bool isLast = dequeuedBuffer->isLast();
- ALOGV("DQBUF from output queue, bufferId=%zu, bitstreamId=%d, bytesused=%zu, isLast=%d",
- bufferId, bitstreamId, bytesUsed, isLast);
+ const size_t bufferId = dequeuedBuffer->BufferId();
+ const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->GetTimeStamp().tv_sec);
+ const size_t bytesUsed = dequeuedBuffer->GetPlaneBytesUsed(0);
+ const bool isLast = dequeuedBuffer->IsLast();
+ ALOGV("DQBUF from output queue, bufferId=%zu, corresponding bitstreamId=%d, bytesused=%zu",
+ bufferId, bitstreamId, bytesUsed);
// Get the corresponding VideoFrame of the dequeued buffer.
auto it = mFrameAtDevice.find(bufferId);
@@ -433,10 +416,10 @@ void V4L2Decoder::serviceDeviceTask(bool event) {
// then the driver will fail to notify EOS. So we recycle the buffer immediately.
ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
dequeuedBuffer.reset();
- auto outputBuffer = mOutputQueue->getFreeBuffer(bufferId);
+ auto outputBuffer = mOutputQueue->GetFreeBuffer(bufferId);
ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);
- if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
+ if (!std::move(*outputBuffer).QueueDMABuf(frame->getFDs())) {
ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
onError();
return;
@@ -478,7 +461,7 @@ bool V4L2Decoder::dequeueResolutionChangeEvent() {
struct v4l2_event ev;
memset(&ev, 0, sizeof(ev));
- while (mDevice->ioctl(VIDIOC_DQEVENT, &ev) == 0) {
+ while (mDevice->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
return true;
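The loop above drains VIDIOC_DQEVENT because several events can be pending; only a source change carrying the resolution flag matters here. The condition, isolated as a sketch:

    bool isResolutionChange(const struct v4l2_event& ev) {
        return ev.type == V4L2_EVENT_SOURCE_CHANGE &&
               (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION);
    }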
@@ -491,52 +474,40 @@ bool V4L2Decoder::changeResolution() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- const std::optional<struct v4l2_format> format = getFormatInfo();
+ std::optional<struct v4l2_format> format = getFormatInfo();
std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
if (!format || !numOutputBuffers) {
return false;
}
- const ui::Size codedSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
- if (!setupOutputFormat(codedSize)) {
- return false;
- }
-
- const std::optional<struct v4l2_format> adjustedFormat = getFormatInfo();
- if (!adjustedFormat) {
- return false;
- }
- mCodedSize.set(adjustedFormat->fmt.pix_mp.width, adjustedFormat->fmt.pix_mp.height);
+ mCodedSize.SetSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
mVisibleRect = getVisibleRect(mCodedSize);
ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
- toString(mCodedSize).c_str(), toString(mVisibleRect).c_str());
- if (isEmpty(mCodedSize)) {
+ mCodedSize.ToString().c_str(), mVisibleRect.ToString().c_str());
+ if (mCodedSize.IsEmpty()) {
ALOGE("Failed to get resolution from V4L2 driver.");
return false;
}
- mOutputQueue->streamoff();
- mOutputQueue->deallocateBuffers();
+ mOutputQueue->Streamoff();
+ mOutputQueue->DeallocateBuffers();
mFrameAtDevice.clear();
mBlockIdToV4L2Id.clear();
- if (mOutputQueue->allocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF) == 0) {
+ if (mOutputQueue->AllocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF) == 0) {
ALOGE("Failed to allocate output buffer.");
return false;
}
- if (!mOutputQueue->streamon()) {
+ if (!mOutputQueue->Streamon()) {
ALOGE("Failed to streamon output queue.");
return false;
}
- // Release the previous VideoFramePool before getting a new one to guarantee only one pool
- // exists at the same time.
- mVideoFramePool.reset();
- // Always use flexible pixel 420 format YCBCR_420_888 in Android.
- mVideoFramePool = mGetPoolCb.Run(mCodedSize, HalPixelFormat::YCBCR_420_888, *numOutputBuffers);
+ // Always use flexible pixel 420 format YCBCR_420_888 in Android.
+ mGetPoolCb.Run(&mVideoFramePool, mCodedSize, HalPixelFormat::YCBCR_420_888, *numOutputBuffers);
if (!mVideoFramePool) {
- ALOGE("Failed to get block pool with size: %s", toString(mCodedSize).c_str());
+ ALOGE("Failed to get block pool with size: %s", mCodedSize.ToString().c_str());
return false;
}
@@ -544,36 +515,13 @@ bool V4L2Decoder::changeResolution() {
return true;
}
-bool V4L2Decoder::setupOutputFormat(const ui::Size& size) {
- for (const uint32_t& pixfmt :
- mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
- if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
- kSupportedOutputFourccs.end()) {
- ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
- continue;
- }
-
- if (mOutputQueue->setFormat(pixfmt, size, 0) != std::nullopt) {
- return true;
- }
- }
-
- ALOGE("Failed to find supported pixel format");
- return false;
-}
-
void V4L2Decoder::tryFetchVideoFrame() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mVideoFramePool, "mVideoFramePool is null, haven't gotten the instance yet?");
- if (!mVideoFramePool) {
- ALOGE("mVideoFramePool is null, failed to get the instance after resolution change?");
- onError();
- return;
- }
-
- if (mOutputQueue->freeBuffersCount() == 0) {
- ALOGV("No free V4L2 output buffers, ignore.");
+ if (mOutputQueue->FreeBuffersCount() == 0) {
+ ALOGD("No free V4L2 output buffers, ignore.");
return;
}
@@ -599,18 +547,18 @@ void V4L2Decoder::onVideoFrameReady(
uint32_t blockId;
std::tie(frame, blockId) = std::move(*frameWithBlockId);
- std::optional<V4L2WritableBufferRef> outputBuffer;
+ ::base::Optional<media::V4L2WritableBufferRef> outputBuffer;
// Find the V4L2 buffer that is associated with this block.
auto iter = mBlockIdToV4L2Id.find(blockId);
if (iter != mBlockIdToV4L2Id.end()) {
// If we have met this block in the past, reuse the same V4L2 buffer.
- outputBuffer = mOutputQueue->getFreeBuffer(iter->second);
- } else if (mBlockIdToV4L2Id.size() < mOutputQueue->allocatedBuffersCount()) {
+ outputBuffer = mOutputQueue->GetFreeBuffer(iter->second);
+ } else if (mBlockIdToV4L2Id.size() < mOutputQueue->AllocatedBuffersCount()) {
// If this is the first time we see this block, give it the next
// available V4L2 buffer.
const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
- outputBuffer = mOutputQueue->getFreeBuffer(v4l2BufferId);
+ outputBuffer = mOutputQueue->GetFreeBuffer(v4l2BufferId);
} else {
// If this happens, this is a bug in VideoFramePool. It should never
// provide more blocks than we have V4L2 buffers.
@@ -618,15 +566,15 @@ void V4L2Decoder::onVideoFrameReady(
}
if (!outputBuffer) {
- ALOGE("V4L2 buffer not available. blockId=%u", blockId);
+ ALOGE("V4L2 buffer not available.");
onError();
return;
}
- uint32_t v4l2Id = outputBuffer->bufferId();
+ uint32_t v4l2Id = outputBuffer->BufferId();
ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);
- if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
+ if (!std::move(*outputBuffer).QueueDMABuf(frame->getFDs())) {
ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
v4l2Id);
onError();
@@ -649,7 +597,7 @@ std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
struct v4l2_control ctrl;
memset(&ctrl, 0, sizeof(ctrl));
ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
- if (mDevice->ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
+ if (mDevice->Ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
ALOGE("ioctl() failed: VIDIOC_G_CTRL");
return std::nullopt;
}
@@ -665,7 +613,7 @@ std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
struct v4l2_format format;
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
+ if (mDevice->Ioctl(VIDIOC_G_FMT, &format) != 0) {
ALOGE("ioctl() failed: VIDIOC_G_FMT");
return std::nullopt;
}
@@ -673,7 +621,7 @@ std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
return format;
}
-Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
+media::Rect V4L2Decoder::getVisibleRect(const media::Size& codedSize) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
@@ -683,7 +631,7 @@ Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
selection_arg.target = V4L2_SEL_TGT_COMPOSE;
- if (mDevice->ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
+ if (mDevice->Ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
ALOGV("VIDIOC_G_SELECTION is supported");
visible_rect = &selection_arg.r;
} else {
@@ -692,24 +640,24 @@ Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
memset(&crop_arg, 0, sizeof(crop_arg));
crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- if (mDevice->ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
+ if (mDevice->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
ALOGW("ioctl() VIDIOC_G_CROP failed");
- return Rect(codedSize.width, codedSize.height);
+ return media::Rect(codedSize);
}
visible_rect = &crop_arg.c;
}
- Rect rect(visible_rect->left, visible_rect->top, visible_rect->left + visible_rect->width,
- visible_rect->top + visible_rect->height);
- ALOGV("visible rectangle is %s", toString(rect).c_str());
- if (!contains(Rect(codedSize.width, codedSize.height), rect)) {
- ALOGW("visible rectangle %s is not inside coded size %s", toString(rect).c_str(),
- toString(codedSize).c_str());
- return Rect(codedSize.width, codedSize.height);
+ media::Rect rect(visible_rect->left, visible_rect->top, visible_rect->width,
+ visible_rect->height);
+ ALOGD("visible rectangle is %s", rect.ToString().c_str());
+ if (!media::Rect(codedSize).Contains(rect)) {
+ ALOGW("visible rectangle %s is not inside coded size %s", rect.ToString().c_str(),
+ codedSize.ToString().c_str());
+ return media::Rect(codedSize);
}
- if (rect.isEmpty()) {
+ if (rect.IsEmpty()) {
ALOGW("visible size is empty");
- return Rect(codedSize.width, codedSize.height);
+ return media::Rect(codedSize);
}
return rect;
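One pitfall in the rewrite above: the two Rect types take different constructor arguments, so a mechanical port would silently build the wrong rectangle. Both lines below describe the same region (sketch):

    Rect        fromCorners(left, top, left + width, top + height);  // ui::Rect-style: corners
    media::Rect fromOrigin(left, top, width, height);                // media::Rect: origin + size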
@@ -722,7 +670,7 @@ bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
struct v4l2_decoder_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
- if (mDevice->ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
+ if (mDevice->Ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
return false;
}
diff --git a/components/V4L2EncodeComponent.cpp b/components/V4L2EncodeComponent.cpp
index b4bbc0e..ab2230e 100644
--- a/components/V4L2EncodeComponent.cpp
+++ b/components/V4L2EncodeComponent.cpp
@@ -18,19 +18,17 @@
#include <android/hardware/graphics/common/1.0/types.h>
#include <base/bind.h>
#include <base/bind_helpers.h>
-#include <cutils/properties.h>
#include <log/log.h>
#include <media/stagefright/MediaDefs.h>
#include <ui/GraphicBuffer.h>
-#include <ui/Size.h>
+#include <fourcc.h>
+#include <h264_parser.h>
+#include <rect.h>
#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/common/EncodeHelpers.h>
-#include <v4l2_codec2/common/FormatConverter.h>
-#include <v4l2_codec2/common/VideoPixelFormat.h>
-#include <v4l2_codec2/components/BitstreamBuffer.h>
-#include <v4l2_codec2/components/V4L2EncodeInterface.h>
-#include <v4l2_codec2/components/V4L2Encoder.h>
+#include <v4l2_device.h>
+#include <video_pixel_format.h>
using android::hardware::graphics::common::V1_0::BufferUsage;
@@ -38,12 +36,12 @@ namespace android {
namespace {
-const VideoPixelFormat kInputPixelFormat = VideoPixelFormat::NV12;
+const media::VideoPixelFormat kInputPixelFormat = media::VideoPixelFormat::PIXEL_FORMAT_NV12;
// Get the video frame layout from the specified |inputBlock|.
// TODO(dstaessens): Clean up code extracting layout from a C2GraphicBlock.
std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGraphicBlock& block,
- VideoPixelFormat* format) {
+ media::VideoPixelFormat* format) {
ALOGV("%s()", __func__);
// Get the C2PlanarLayout from the graphics block. The C2GraphicView returned by block.map()
@@ -97,17 +95,17 @@ std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGra
}
if (!crcb && !semiplanar) {
- *format = VideoPixelFormat::I420;
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_I420;
} else if (!crcb && semiplanar) {
- *format = VideoPixelFormat::NV12;
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_NV12;
} else if (crcb && !semiplanar) {
// HACK: pretend YV12 is I420 now since VEA only accepts I420. (YV12 will be used
// for input byte-buffer mode).
// TODO(dstaessens): Is this hack still necessary now we're not using the VEA directly?
- //format = VideoPixelFormat::YV12;
- *format = VideoPixelFormat::I420;
+ //format = media::VideoPixelFormat::PIXEL_FORMAT_YV12;
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_I420;
} else {
- *format = VideoPixelFormat::NV21;
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_NV21;
}
break;
}
@@ -115,7 +113,7 @@ std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGra
offsets[C2PlanarLayout::PLANE_R] = layout.planes[C2PlanarLayout::PLANE_R].offset;
strides[C2PlanarLayout::PLANE_R] =
static_cast<uint32_t>(layout.planes[C2PlanarLayout::PLANE_R].rowInc);
- *format = VideoPixelFormat::ARGB;
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_ARGB;
break;
}
default:
@@ -125,89 +123,56 @@ std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGra
std::vector<VideoFramePlane> planes;
for (uint32_t i = 0; i < layout.rootPlanes; ++i) {
- // The mSize field is not used in our case, so we can safely set it to zero.
- planes.push_back({strides[i], offsets[i], 0});
+ planes.push_back({offsets[i], strides[i]});
}
return planes;
}
-// Get the video frame stride for the specified |format| and |size|.
-std::optional<uint32_t> getVideoFrameStride(VideoPixelFormat format, ui::Size size) {
- // Fetch a graphic block from the pool to determine the stride.
- std::shared_ptr<C2BlockPool> pool;
- c2_status_t status = GetCodec2BlockPool(C2BlockPool::BASIC_GRAPHIC, nullptr, &pool);
- if (status != C2_OK) {
- ALOGE("Failed to get basic graphic block pool (err=%d)", status);
- return std::nullopt;
- }
-
- // Android HAL format doesn't have I420, we use YV12 instead and swap the U and V planes when
- // converting to NV12. YCBCR_420_888 will allocate NV12 by minigbm.
- HalPixelFormat halFormat = (format == VideoPixelFormat::I420) ? HalPixelFormat::YV12
- : HalPixelFormat::YCBCR_420_888;
-
- std::shared_ptr<C2GraphicBlock> block;
- status = pool->fetchGraphicBlock(size.width, size.height, static_cast<uint32_t>(halFormat),
- C2MemoryUsage(C2MemoryUsage::CPU_READ), &block);
- if (status != C2_OK) {
- ALOGE("Failed to fetch graphic block (err=%d)", status);
- return std::nullopt;
- }
+// The maximum size for an output buffer, chosen empirically for 1080p video.
+constexpr size_t kMaxBitstreamBufferSizeInBytes = 2 * 1024 * 1024; // 2MB
+// The frame size for 1080p (FHD) video in pixels.
+constexpr int k1080PSizeInPixels = 1920 * 1080;
+// The frame size for 1440p (QHD) video in pixels.
+constexpr int k1440PSizeInPixels = 2560 * 1440;
+
+// Use four times kMaxBitstreamBufferSizeInBytes when the input frame size is larger than 1440p,
+// and twice the size when larger than 1080p. These values were chosen empirically for some 4k
+// encoding use cases and the Android CTS VideoEncoderTest (crbug.com/927284).
+size_t GetMaxOutputBufferSize(const media::Size& size) {
+ if (size.GetArea() > k1440PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 4;
+ if (size.GetArea() > k1080PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 2;
+ return kMaxBitstreamBufferSizeInBytes;
+}
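For concreteness, the sizing rule above works out as follows, assuming media::Size(w, h).GetArea() returns w * h:

    GetMaxOutputBufferSize(media::Size(1920, 1080));  // 2 MiB: at, not above, the 1080p threshold
    GetMaxOutputBufferSize(media::Size(2560, 1440));  // 4 MiB: above 1080p, at the 1440p threshold
    GetMaxOutputBufferSize(media::Size(3840, 2160));  // 8 MiB: above 1440p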
- const C2ConstGraphicBlock constBlock = block->share(C2Rect(size.width, size.height), C2Fence());
- VideoPixelFormat pixelFormat;
- std::optional<std::vector<VideoFramePlane>> planes =
- getVideoFrameLayout(constBlock, &pixelFormat);
- if (!planes || planes.value().empty()) {
- ALOGE("Failed to get video frame layout from block");
- return std::nullopt;
- }
+// These are rather subjectively tuned.
+constexpr size_t kInputBufferCount = 2;
+constexpr size_t kOutputBufferCount = 2;
- return planes.value()[0].mStride;
-}
+// Define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR control code if not present in header files.
+#ifndef V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR
+#define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR (V4L2_CID_MPEG_BASE + 388)
+#endif
-// Create an input frame from the specified graphic block.
-std::unique_ptr<V4L2Encoder::InputFrame> CreateInputFrame(const C2ConstGraphicBlock& block,
- uint64_t index, int64_t timestamp) {
- VideoPixelFormat format;
- std::optional<std::vector<VideoFramePlane>> planes = getVideoFrameLayout(block, &format);
- if (!planes) {
- ALOGE("Failed to get input block's layout");
- return nullptr;
- }
+} // namespace
+// static
+std::unique_ptr<V4L2EncodeComponent::InputFrame> V4L2EncodeComponent::InputFrame::Create(
+ const C2ConstGraphicBlock& block) {
std::vector<int> fds;
const C2Handle* const handle = block.handle();
for (int i = 0; i < handle->numFds; i++) {
fds.emplace_back(handle->data[i]);
}
- return std::make_unique<V4L2Encoder::InputFrame>(std::move(fds), std::move(planes.value()),
- format, index, timestamp);
+ return std::unique_ptr<InputFrame>(new InputFrame(std::move(fds)));
}
-} // namespace
-
-// static
-std::atomic<int32_t> V4L2EncodeComponent::sConcurrentInstances = 0;
-
// static
std::shared_ptr<C2Component> V4L2EncodeComponent::create(
C2String name, c2_node_id_t id, std::shared_ptr<C2ReflectorHelper> helper,
C2ComponentFactory::ComponentDeleter deleter) {
ALOGV("%s(%s)", __func__, name.c_str());
- static const int32_t kMaxConcurrentInstances =
- property_get_int32("debug.v4l2_codec2.encode.concurrent-instances", -1);
-
- static std::mutex mutex;
- std::lock_guard<std::mutex> lock(mutex);
- if (kMaxConcurrentInstances >= 0 && sConcurrentInstances.load() >= kMaxConcurrentInstances) {
- ALOGW("Cannot create additional encoder, maximum number of instances reached: %d",
- kMaxConcurrentInstances);
- return nullptr;
- }
-
auto interface = std::make_shared<V4L2EncodeInterface>(name, std::move(helper));
if (interface->status() != C2_OK) {
ALOGE("Component interface initialization failed (error code %d)", interface->status());
@@ -225,8 +190,6 @@ V4L2EncodeComponent::V4L2EncodeComponent(C2String name, c2_node_id_t id,
mInterface(std::move(interface)),
mComponentState(ComponentState::LOADED) {
ALOGV("%s(%s)", __func__, name.c_str());
-
- sConcurrentInstances.fetch_add(1, std::memory_order_relaxed);
}
V4L2EncodeComponent::~V4L2EncodeComponent() {
@@ -242,8 +205,6 @@ V4L2EncodeComponent::~V4L2EncodeComponent() {
&mWeakThisFactory));
mEncoderThread.Stop();
}
-
- sConcurrentInstances.fetch_sub(1, std::memory_order_relaxed);
ALOGV("%s(): done", __func__);
}
@@ -430,6 +391,7 @@ std::shared_ptr<C2ComponentInterface> V4L2EncodeComponent::intf() {
void V4L2EncodeComponent::startTask(bool* success, ::base::WaitableEvent* done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
*success = initializeEncoder();
done->Signal();
@@ -439,148 +401,128 @@ void V4L2EncodeComponent::stopTask(::base::WaitableEvent* done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- // Flushing the encoder will abort all pending work.
+ // Flushing the encoder will abort all pending work and stop polling and streaming on the V4L2
+ // device queues.
flush();
- mInputFormatConverter.reset();
-
- mEncoder.reset();
- mOutputBlockPool.reset();
+ // Deallocate all V4L2 device input and output buffers.
+ destroyInputBuffers();
+ destroyOutputBuffers();
// Invalidate all weak pointers so no more functions will be executed on the encoder thread.
mWeakThisFactory.InvalidateWeakPtrs();
+ setEncoderState(EncoderState::UNINITIALIZED);
done->Signal();
}
void V4L2EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mEncoder);
-
- // Currently only a single worklet per work item is supported. An input buffer should always be
- // supplied unless this is a drain or CSD request.
- ALOG_ASSERT(work->input.buffers.size() <= 1u && work->worklets.size() == 1u);
-
- // Set the default values for the output worklet.
- work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
- work->worklets.front()->output.buffers.clear();
- work->worklets.front()->output.ordinal = work->input.ordinal;
+ ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
- uint64_t index = work->input.ordinal.frameIndex.peeku();
- int64_t timestamp = static_cast<int64_t>(work->input.ordinal.timestamp.peeku());
- bool endOfStream = work->input.flags & C2FrameData::FLAG_END_OF_STREAM;
- ALOGV("Queuing next encode (index: %" PRIu64 ", timestamp: %" PRId64 ", EOS: %d)", index,
- timestamp, endOfStream);
-
- // The codec 2.0 framework might queue an empty CSD request, but this is currently not
- // supported. We will return the CSD with the first encoded buffer work.
- if (work->input.buffers.empty() && !endOfStream) {
- ALOGV("Discarding empty CSD request");
- reportWork(std::move(work));
+ // If we're in the error state we can immediately return, freeing all buffers in the work item.
+ if (mEncoderState == EncoderState::ERROR) {
return;
}
- // By the time we get an input buffer, the output block pool should be configured.
- if (!mOutputBlockPool && !getBlockPool()) {
- reportError(C2_CORRUPTED);
- return;
- }
-
- // If conversion is required but no free buffers are available we queue the work item.
- if (mInputFormatConverter && !mInputFormatConverter->isReady()) {
- ALOGV("Input format convertor ran out of buffers");
- mInputConverterQueue.push(std::move(work));
- return;
- }
+ ALOGV("Queued work item (index: %llu, timestamp: %llu, EOS: %d)",
+ work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull(),
+ work->input.flags & C2FrameData::FLAG_END_OF_STREAM);
- // If we have data to encode send it to the encoder. If conversion is required we will first
- // convert the data to the requested pixel format.
- if (!work->input.buffers.empty()) {
- C2ConstGraphicBlock inputBlock =
- work->input.buffers.front()->data().graphicBlocks().front();
- if (mInputFormatConverter) {
- ALOGV("Converting input block (index: %" PRIu64 ")", index);
- c2_status_t status = C2_CORRUPTED;
- inputBlock = mInputFormatConverter->convertBlock(index, inputBlock, &status);
- if (status != C2_OK) {
- ALOGE("Failed to convert input block (index: %" PRIu64 ")", index);
- reportError(status);
- return;
- }
- }
- if (!encode(inputBlock, index, timestamp)) {
- return;
- }
- }
+ mInputWorkQueue.push(std::move(work));
- mWorkQueue.push_back(std::move(work));
- if (endOfStream) {
- mEncoder->drain();
+ // If we were waiting for work, start encoding again.
+ if (mEncoderState == EncoderState::WAITING_FOR_INPUT) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE,
+ ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
}
}
+// TODO(dstaessens): Investigate improving drain logic after draining the virtio device is fixed.
void V4L2EncodeComponent::drainTask(drain_mode_t /*drainMode*/) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- // We can only start draining if all work has been queued in the encoder, so we mark the last
- // item waiting for conversion as EOS if required.
- if (!mInputConverterQueue.empty()) {
- C2Work* work = mInputConverterQueue.back().get();
- work->input.flags = static_cast<C2FrameData::flags_t>(work->input.flags |
- C2FrameData::FLAG_END_OF_STREAM);
+ // We can only start draining if all the work in our input queue has been queued on the V4L2
+ // device input queue, so we mark the last item in the input queue as EOS.
+ if (!mInputWorkQueue.empty()) {
+ ALOGV("Marking last item in input work queue as EOS");
+ mInputWorkQueue.back()->input.flags = static_cast<C2FrameData::flags_t>(
+ mInputWorkQueue.back()->input.flags | C2FrameData::FLAG_END_OF_STREAM);
return;
}
- // Mark the last item in the output work queue as EOS, so we will only report it as finished
- // after draining has completed.
- if (!mWorkQueue.empty()) {
+ // If the input queue is empty and there is only a single empty EOS work item in the output
+ // queue we can immediately consider flushing done.
+ if ((mOutputWorkQueue.size() == 1) && mOutputWorkQueue.back()->input.buffers.empty()) {
+ ALOG_ASSERT(mOutputWorkQueue.back()->input.flags & C2FrameData::FLAG_END_OF_STREAM);
+ setEncoderState(EncoderState::DRAINING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::onDrainDone, mWeakThis, true));
+ return;
+ }
+
+ // If the input queue is empty all work that needs to be drained has already been queued in the
+ // V4L2 device, so we can immediately request a drain.
+ if (!mOutputWorkQueue.empty()) {
+ // Mark the last item in the output work queue as EOS, so we will only report it as
+ // finished after draining has completed.
ALOGV("Starting drain and marking last item in output work queue as EOS");
- C2Work* work = mWorkQueue.back().get();
- work->input.flags = static_cast<C2FrameData::flags_t>(work->input.flags |
- C2FrameData::FLAG_END_OF_STREAM);
- mEncoder->drain();
+ mOutputWorkQueue.back()->input.flags = C2FrameData::FLAG_END_OF_STREAM;
+ drain();
}
}
-void V4L2EncodeComponent::onDrainDone(bool success) {
+void V4L2EncodeComponent::onDrainDone(bool done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mWorkQueue.empty());
+ ALOG_ASSERT(mEncoderState == EncoderState::DRAINING || mEncoderState == EncoderState::ERROR);
- if (!success) {
+ if (mEncoderState == EncoderState::ERROR) {
+ return;
+ }
+
+ if (!done) {
ALOGE("draining the encoder failed");
reportError(C2_CORRUPTED);
return;
}
- // Find the first work item marked as EOS. This might not be the first item in the queue, as
- // previous buffers in the queue might still be waiting for their associated input buffers.
- auto it = std::find_if(
- mWorkQueue.cbegin(), mWorkQueue.cend(), [](const std::unique_ptr<C2Work>& work) {
- return ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
- !(work->worklets.back()->output.flags & C2FrameData::FLAG_END_OF_STREAM));
- });
- if (it == mWorkQueue.end()) {
- ALOGW("No EOS work item found in queue");
+ // The last work item in the output work queue should be an EOS request.
+ if (mOutputWorkQueue.empty() ||
+ !(mOutputWorkQueue.back()->input.flags & C2FrameData::FLAG_END_OF_STREAM)) {
+ ALOGE("The last item in the output work queue should be marked EOS");
+ reportError(C2_CORRUPTED);
return;
}
- // Mark the item in the output work queue as EOS done.
- C2Work* eosWork = it->get();
+ // Mark the last item in the output work queue as EOS done.
+ C2Work* eosWork = mOutputWorkQueue.back().get();
eosWork->worklets.back()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
// Draining is done which means all buffers on the device output queue have been returned, but
// not all buffers on the device input queue might have been returned yet.
- if ((eosWork != mWorkQueue.front().get()) || !isWorkDone(*eosWork)) {
+ if ((mOutputWorkQueue.size() > 1) || !isWorkDone(*eosWork)) {
ALOGV("Draining done, waiting for input buffers to be returned");
return;
}
ALOGV("Draining done");
- reportWork(std::move(mWorkQueue.front()));
- mWorkQueue.pop_front();
+ reportWork(std::move(mOutputWorkQueue.front()));
+ mOutputWorkQueue.pop_front();
+
+ // Draining the encoder is now done, we can start encoding again.
+ if (!mInputWorkQueue.empty()) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE,
+ ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+ } else {
+ setEncoderState(EncoderState::WAITING_FOR_INPUT);
+ }
}
void V4L2EncodeComponent::flushTask(::base::WaitableEvent* done,
@@ -590,11 +532,11 @@ void V4L2EncodeComponent::flushTask(::base::WaitableEvent* done,
// Move all work that can immediately be aborted to flushedWork, and notify the caller.
if (flushedWork) {
- while (!mInputConverterQueue.empty()) {
- std::unique_ptr<C2Work> work = std::move(mInputConverterQueue.front());
+ while (!mInputWorkQueue.empty()) {
+ std::unique_ptr<C2Work> work = std::move(mInputWorkQueue.front());
work->input.buffers.clear();
flushedWork->push_back(std::move(work));
- mInputConverterQueue.pop();
+ mInputWorkQueue.pop();
}
}
done->Signal();
@@ -614,54 +556,290 @@ void V4L2EncodeComponent::setListenerTask(const std::shared_ptr<Listener>& liste
bool V4L2EncodeComponent::initializeEncoder() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mInputFormatConverter);
- ALOG_ASSERT(!mEncoder);
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+ mVisibleSize = mInterface->getInputVisibleSize();
+ mKeyFramePeriod = mInterface->getKeyFramePeriod();
+ mKeyFrameCounter = 0;
mCSDSubmitted = false;
- // Get the requested profile and level.
- C2Config::profile_t outputProfile = mInterface->getOutputProfile();
+ // Open the V4L2 device for encoding to the requested output format.
+ // TODO(dstaessens): Do we need to close the device first if already opened?
+ // TODO(dstaessens): Avoid conversion to VideoCodecProfile and use C2Config::profile_t directly.
+ media::VideoCodecProfile outputProfile =
+ c2ProfileToVideoCodecProfile(mInterface->getOutputProfile());
+ uint32_t outputPixelFormat =
+ media::V4L2Device::VideoCodecProfileToV4L2PixFmt(outputProfile, false);
+ if (!outputPixelFormat) {
+ ALOGE("Invalid output profile %s", media::GetProfileName(outputProfile).c_str());
+ return false;
+ }
- std::optional<uint8_t> h264Level;
- if (outputProfile >= C2Config::PROFILE_AVC_BASELINE &&
- outputProfile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH) {
- h264Level = c2LevelToV4L2Level(mInterface->getOutputLevel());
+ mDevice = media::V4L2Device::Create();
+ if (!mDevice) {
+ ALOGE("Failed to create V4L2 device");
+ return false;
}
- // Get the stride used by the C2 framework, as this might be different from the stride used by
- // the V4L2 encoder.
- std::optional<uint32_t> stride =
- getVideoFrameStride(kInputPixelFormat, mInterface->getInputVisibleSize());
- if (!stride) {
- ALOGE("Failed to get video frame stride");
- reportError(C2_CORRUPTED);
+ if (!mDevice->Open(media::V4L2Device::Type::kEncoder, outputPixelFormat)) {
+ ALOGE("Failed to open device for profile %s (%s)",
+ media::GetProfileName(outputProfile).c_str(),
+ media::FourccToString(outputPixelFormat).c_str());
+ return false;
+ }
+
+ // Make sure the device has all required capabilities (multi-planar Memory-To-Memory and
+ // streaming I/O), and whether flushing is supported.
+ if (!mDevice->HasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
+ ALOGE("Device doesn't have the required capabilities");
+ return false;
+ }
+ if (!mDevice->IsCommandSupported(V4L2_ENC_CMD_STOP)) {
+ ALOGE("Device does not support flushing (V4L2_ENC_CMD_STOP)");
+ return false;
+ }
+
+ // Get input/output queues so we can send encode requests to the device and get back results.
+ mInputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ mOutputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!mInputQueue || !mOutputQueue) {
+ ALOGE("Failed to get V4L2 device queues");
+ return false;
+ }
+
+ // First try to configure the specified output format, as changing the output format can affect
+ // the configured input format.
+ if (!configureOutputFormat(outputProfile)) return false;
+
+ // Configure the input format. If the device doesn't support the specified format we'll use one
+ // of the device's preferred formats in combination with an input format convertor.
+ if (!configureInputFormat(kInputPixelFormat)) return false;
+
+ // Create input and output buffers.
+ // TODO(dstaessens): Avoid allocating output buffers, encode directly into blockpool buffers.
+ if (!createInputBuffers() || !createOutputBuffers()) return false;
+
+ // Configure the device, setting all required controls.
+ uint8_t level = c2LevelToLevelIDC(mInterface->getOutputLevel());
+ if (!configureDevice(outputProfile, level)) return false;
+
+ // We're ready to start encoding now.
+ setEncoderState(EncoderState::WAITING_FOR_INPUT);
+
+ // As initialization is asynchronous, work might already have been queued.
+ if (!mInputWorkQueue.empty()) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::Bind(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+ }
+ return true;
+}
+
+bool V4L2EncodeComponent::configureInputFormat(media::VideoPixelFormat inputFormat) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+ ALOG_ASSERT(!mInputQueue->IsStreaming());
+ ALOG_ASSERT(!mVisibleSize.IsEmpty());
+ ALOG_ASSERT(!mInputFormatConverter);
+
+ // First try to use the requested pixel format directly.
+ ::base::Optional<struct v4l2_format> format;
+ auto fourcc = media::Fourcc::FromVideoPixelFormat(inputFormat, false);
+ if (fourcc) {
+ format = mInputQueue->SetFormat(fourcc->ToV4L2PixFmt(), mVisibleSize, 0);
+ }
+
+ // If the device doesn't support the requested input format we'll try the device's preferred
+ // input pixel formats and use a format convertor. We need to try all formats as some formats
+ // might not be supported for the configured output format.
+ if (!format) {
+ std::vector<uint32_t> preferredFormats =
+ mDevice->PreferredInputFormat(media::V4L2Device::Type::kEncoder);
+ for (uint32_t i = 0; !format && i < preferredFormats.size(); ++i) {
+ format = mInputQueue->SetFormat(preferredFormats[i], mVisibleSize, 0);
+ }
+ }
+
+ if (!format) {
+ ALOGE("Failed to set input format to %s",
+ media::VideoPixelFormatToString(inputFormat).c_str());
return false;
}
- mEncoder = V4L2Encoder::create(
- outputProfile, h264Level, mInterface->getInputVisibleSize(), *stride,
- mInterface->getKeyFramePeriod(),
- ::base::BindRepeating(&V4L2EncodeComponent::fetchOutputBlock, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::onInputBufferDone, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::onOutputBufferDone, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::onDrainDone, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::reportError, mWeakThis, C2_CORRUPTED),
- mEncoderTaskRunner);
- if (!mEncoder) {
- ALOGE("Failed to create V4L2Encoder (profile: %s)", profileToString(outputProfile));
+ // Check whether the negotiated input format is valid. The coded size might be adjusted to match
+ // encoder minimums, maximums and alignment requirements of the currently selected formats.
+ auto layout = media::V4L2Device::V4L2FormatToVideoFrameLayout(*format);
+ if (!layout) {
+ ALOGE("Invalid input layout");
return false;
}
+ mInputLayout = layout.value();
+ if (!media::Rect(mInputLayout->coded_size()).Contains(media::Rect(mVisibleSize))) {
+ ALOGE("Input size %s exceeds encoder capability, encoder can handle %s",
+ mVisibleSize.ToString().c_str(), mInputLayout->coded_size().ToString().c_str());
+ return false;
+ }
+
+ // Calculate the input coded size from the format.
+ // TODO(dstaessens): How is this different from mInputLayout->coded_size()?
+ mInputCodedSize = media::V4L2Device::AllocatedSizeFromV4L2Format(*format);
+
// Add an input format convertor if the device doesn't support the requested input format.
+ // Note: The number of input buffers in the convertor should match the number of buffers on
+ // the device input queue, to simplify logic.
+ // TODO(dstaessens): Currently an input format convertor is always required. Mapping an input
+ // buffer always seems to fail unless we copy it into a new buffer first. As a temporary
+ // workaround the line below is commented, but this should be undone once the issue is fixed.
+ //if (mInputLayout->format() != inputFormat) {
ALOGV("Creating input format convertor (%s)",
- videoPixelFormatToString(mEncoder->inputFormat()).c_str());
+ media::VideoPixelFormatToString(mInputLayout->format()).c_str());
mInputFormatConverter =
- FormatConverter::Create(mEncoder->inputFormat(), mEncoder->visibleSize(),
- V4L2Encoder::kInputBufferCount, mEncoder->codedSize());
+ FormatConverter::Create(inputFormat, mVisibleSize, kInputBufferCount, mInputCodedSize);
if (!mInputFormatConverter) {
ALOGE("Failed to created input format convertor");
return false;
}
+ //}
+
+ // The coded input size might be different from the visible size due to alignment requirements,
+ // so we need to specify the visible rectangle. Note that this rectangle might still be adjusted
+ // due to hardware limitations.
+ // TODO(dstaessens): Overwrite mVisibleSize with the adapted visible size here?
+ media::Rect visibleRectangle(mVisibleSize.width(), mVisibleSize.height());
+
+ struct v4l2_rect rect;
+ rect.left = visibleRectangle.x();
+ rect.top = visibleRectangle.y();
+ rect.width = visibleRectangle.width();
+ rect.height = visibleRectangle.height();
+
+ // Try to adjust the visible rectangle using the VIDIOC_S_SELECTION command. If this is not
+ // supported we'll try to use the VIDIOC_S_CROP command instead. The visible rectangle might be
+ // adjusted to conform to hardware limitations (e.g. round to closest horizontal and vertical
+ // offsets, width and height).
+ struct v4l2_selection selection_arg;
+ memset(&selection_arg, 0, sizeof(selection_arg));
+ selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ selection_arg.target = V4L2_SEL_TGT_CROP;
+ selection_arg.r = rect;
+ if (mDevice->Ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
+ visibleRectangle = media::Rect(selection_arg.r.left, selection_arg.r.top,
+ selection_arg.r.width, selection_arg.r.height);
+ } else {
+ struct v4l2_crop crop;
+ memset(&crop, 0, sizeof(v4l2_crop));
+ crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ crop.c = rect;
+ if (mDevice->Ioctl(VIDIOC_S_CROP, &crop) != 0 ||
+ mDevice->Ioctl(VIDIOC_G_CROP, &crop) != 0) {
+ ALOGE("Failed to crop to specified visible rectangle");
+ return false;
+ }
+ visibleRectangle = media::Rect(crop.c.left, crop.c.top, crop.c.width, crop.c.height);
+ }
+
+ ALOGV("Input format set to %s (size: %s, adjusted size: %dx%d, coded size: %s)",
+ media::VideoPixelFormatToString(mInputLayout->format()).c_str(),
+ mVisibleSize.ToString().c_str(), visibleRectangle.width(), visibleRectangle.height(),
+ mInputCodedSize.ToString().c_str());
+
+ mVisibleSize.SetSize(visibleRectangle.width(), visibleRectangle.height());
+ return true;
+}
+
+bool V4L2EncodeComponent::configureOutputFormat(media::VideoCodecProfile outputProfile) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+ ALOG_ASSERT(!mOutputQueue->IsStreaming());
+ ALOG_ASSERT(!mVisibleSize.IsEmpty());
+
+ auto format = mOutputQueue->SetFormat(
+ media::V4L2Device::VideoCodecProfileToV4L2PixFmt(outputProfile, false), mVisibleSize,
+ GetMaxOutputBufferSize(mVisibleSize));
+ if (!format) {
+ ALOGE("Failed to set output format to %s", media::GetProfileName(outputProfile).c_str());
+ return false;
+ }
+
+ // The device might adjust the requested output buffer size to match hardware requirements.
+ mOutputBufferSize = ::base::checked_cast<size_t>(format->fmt.pix_mp.plane_fmt[0].sizeimage);
+
+ ALOGV("Output format set to %s (buffer size: %u)", media::GetProfileName(outputProfile).c_str(),
+ mOutputBufferSize);
+ return true;
+}
+
+bool V4L2EncodeComponent::configureDevice(media::VideoCodecProfile outputProfile,
+ std::optional<const uint8_t> outputH264Level) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ // Enable frame-level bitrate control. This is the only mandatory general control.
+ if (!mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 1)})) {
+ ALOGW("Failed enabling bitrate control");
+ // TODO(b/161508368): V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE is not supported yet, so
+ // assume the operation was successful for now.
+ }
+
+ // Additional optional controls:
+ // - Enable macroblock-level bitrate control.
+ // - Set GOP length to 0 to disable periodic key frames.
+ mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 1),
+ media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0)});
+
+ // All controls below are H.264-specific, so we can return here if the profile is not H.264.
+ if (outputProfile < media::H264PROFILE_MIN || outputProfile > media::H264PROFILE_MAX) {
+ return true;
+ }
+
+ // When encoding H.264 we want to prepend SPS and PPS to each IDR for resilience. Some
+ // devices support this through the V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR control.
+ // TODO(b/161495502): V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR is not supported yet,
+ // so just log a warning if the operation was unsuccessful for now.
+ if (mDevice->IsCtrlExposed(V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR)) {
+ if (!mDevice->SetExtCtrls(
+ V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR, 1)})) {
+ ALOGE("Failed to configure device to prepend SPS and PPS to each IDR");
+ return false;
+ }
+ ALOGV("Device supports prepending SPS and PPS to each IDR");
+ } else {
+ ALOGW("Device doesn't support prepending SPS and PPS to IDR");
+ }
+
+ std::vector<media::V4L2ExtCtrl> h264Ctrls;
+
+ // No B-frames, for lowest decoding latency.
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_B_FRAMES, 0);
+ // Quantization parameter maximum value (for variable bitrate control).
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 51);
+
+ // Set H.264 profile.
+ int32_t profile = media::V4L2Device::VideoCodecProfileToV4L2H264Profile(outputProfile);
+ if (profile < 0) {
+ ALOGE("Trying to set invalid H.264 profile");
+ return false;
+ }
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_PROFILE, profile);
+
+ // Set H.264 output level. Use Level 4.0 as fallback default.
+ // TODO(dstaessens): Investigate code added by hiroh@ recently to select level in Chrome VEA.
+ uint8_t h264Level = outputH264Level.value_or(media::H264SPS::kLevelIDC4p0);
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ media::V4L2Device::H264LevelIdcToV4L2H264Level(h264Level));
+
+ // Ask not to put SPS and PPS into separate bitstream buffers.
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+
+ // Ignore return value as these controls are optional.
+ mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(h264Ctrls));
return true;
}
@@ -686,22 +864,30 @@ bool V4L2EncodeComponent::updateEncodingParameters() {
if (mBitrate != bitrate) {
ALOG_ASSERT(bitrate > 0u);
ALOGV("Setting bitrate to %u", bitrate);
- if (!mEncoder->setBitrate(bitrate)) {
- reportError(C2_CORRUPTED);
- return false;
+ if (!mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, bitrate)})) {
+ // TODO(b/161495749): V4L2_CID_MPEG_VIDEO_BITRATE is not supported yet, so assume
+ // the operation was successful for now.
+ ALOGW("Requesting bitrate change failed");
}
mBitrate = bitrate;
}
// Ask device to change framerate if it's different from the currently configured framerate.
+ // TODO(dstaessens): Move IOCTL to device and use helper function.
uint32_t framerate = static_cast<uint32_t>(std::round(framerateInfo.value));
if (mFramerate != framerate) {
ALOG_ASSERT(framerate > 0u);
ALOGV("Setting framerate to %u", framerate);
- if (!mEncoder->setFramerate(framerate)) {
- ALOGE("Requesting framerate change failed");
- reportError(C2_CORRUPTED);
- return false;
+ struct v4l2_streamparm parms;
+ memset(&parms, 0, sizeof(v4l2_streamparm));
+ parms.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ parms.parm.output.timeperframe.numerator = 1;
+ parms.parm.output.timeperframe.denominator = framerate;
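+        // E.g. a framerate of 30 fps is expressed as timeperframe = 1/30.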
+ if (mDevice->Ioctl(VIDIOC_S_PARM, &parms) != 0) {
+            // TODO(b/161499573): VIDIOC_S_PARM is not supported yet, so assume the operation
+            // was successful for now.
+ ALOGW("Requesting framerate change failed");
}
mFramerate = framerate;
}
@@ -716,7 +902,7 @@ bool V4L2EncodeComponent::updateEncodingParameters() {
return false;
}
if (requestKeyFrame.value == C2_TRUE) {
- mEncoder->requestKeyframe();
+ mKeyFrameCounter = 0;
requestKeyFrame.value = C2_FALSE;
std::vector<std::unique_ptr<C2SettingResult>> failures;
status = mInterface->config({&requestKeyFrame}, C2_MAY_BLOCK, &failures);
@@ -727,22 +913,132 @@ bool V4L2EncodeComponent::updateEncodingParameters() {
}
}
+ // Request the next frame to be a key frame each time the counter reaches 0.
+ if (mKeyFrameCounter == 0) {
+ if (!mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME)})) {
+            // TODO(b/161498590): V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME is not supported yet, so
+            // assume the operation was successful for now.
+ ALOGW("Failed requesting key frame");
+ }
+ }
+
return true;
}
-bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp) {
+void V4L2EncodeComponent::scheduleNextEncodeTask() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mEncoder);
+ ALOG_ASSERT(mEncoderState == EncoderState::ENCODING || mEncoderState == EncoderState::ERROR);
- ALOGV("Encoding input block (index: %" PRIu64 ", timestamp: %" PRId64 ", size: %dx%d)", index,
- timestamp, block.width(), block.height());
+ // If we're in the error state we can immediately return.
+ if (mEncoderState == EncoderState::ERROR) {
+ return;
+ }
+
+ // Get the next work item. Currently only a single worklet per work item is supported. An input
+ // buffer should always be supplied unless this is a drain or CSD request.
+ ALOG_ASSERT(!mInputWorkQueue.empty());
+ C2Work* work = mInputWorkQueue.front().get();
+ ALOG_ASSERT(work->input.buffers.size() <= 1u && work->worklets.size() == 1u);
+
+ // Set the default values for the output worklet.
+ work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+
+ uint64_t index = work->input.ordinal.frameIndex.peeku();
+ int64_t timestamp = static_cast<int64_t>(work->input.ordinal.timestamp.peeku());
+ bool endOfStream = work->input.flags & C2FrameData::FLAG_END_OF_STREAM;
+ ALOGV("Scheduling next encode (index: %" PRIu64 ", timestamp: %" PRId64 ", EOS: %d)", index,
+ timestamp, endOfStream);
+
+ if (!work->input.buffers.empty()) {
+        // Check if the device has free input buffers available. If not, we'll switch to the
+        // WAITING_FOR_INPUT_BUFFERS state and resume encoding once we're notified buffers are
+        // available in the onInputBufferDone() task. Note: the input buffers are not copied into
+        // the device's input buffers; rather, a memory pointer is imported. We still have to
+        // throttle the number of buffers enqueued on the device simultaneously, however.
+ if (mInputQueue->FreeBuffersCount() == 0) {
+ ALOGV("Waiting for device to return input buffers");
+ setEncoderState(EncoderState::WAITING_FOR_INPUT_BUFFERS);
+ return;
+ }
+
+ C2ConstGraphicBlock inputBlock =
+ work->input.buffers.front()->data().graphicBlocks().front();
+
+ // If encoding fails, we'll wait for an event (e.g. input buffers available) to start
+ // encoding again.
+ if (!encode(inputBlock, index, timestamp)) {
+ return;
+ }
+ }
+
+ // The codec 2.0 framework might queue an empty CSD request, but this is currently not
+ // supported. We will return the CSD with the first encoded buffer work.
+ // TODO(dstaessens): Avoid doing this, store CSD request work at start of output queue.
+ if (work->input.buffers.empty() && !endOfStream) {
+ ALOGV("Discarding empty CSD request");
+ reportWork(std::move(mInputWorkQueue.front()));
+ } else {
+ mOutputWorkQueue.push_back(std::move(mInputWorkQueue.front()));
+ }
+ mInputWorkQueue.pop();
+
+ // Drain the encoder if required.
+ if (endOfStream) {
+ drainTask(C2Component::DRAIN_COMPONENT_WITH_EOS);
+ }
+
+ if (mEncoderState == EncoderState::DRAINING) {
+ return;
+ } else if (mInputWorkQueue.empty()) {
+ setEncoderState(EncoderState::WAITING_FOR_INPUT);
+ return;
+ }
+
+ // Queue the next work item to be encoded.
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+}
+
+bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
// Update dynamic encoding parameters (bitrate, framerate, key frame) if requested.
if (!updateEncodingParameters()) return false;
- // Create an input frame from the graphic block.
- std::unique_ptr<V4L2Encoder::InputFrame> frame = CreateInputFrame(block, index, timestamp);
+ mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
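+    // E.g. with mKeyFramePeriod = 30 the counter cycles through 0..29, so
+    // updateEncodingParameters() above requests a key frame for the first frame and for every
+    // 30th frame after that.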
+
+    // If required, convert the data to the V4L2 device's configured input pixel format. We
+    // allocate the same number of buffers on the device input queue and the format convertor,
+    // so we should never run out of conversion buffers if there are free buffers in the input
+    // queue.
+ if (mInputFormatConverter) {
+ if (!mInputFormatConverter->isReady()) {
+ ALOGE("Input format convertor ran out of buffers");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ ALOGV("Converting input block (index: %" PRIu64 ")", index);
+ c2_status_t status = C2_CORRUPTED;
+ block = mInputFormatConverter->convertBlock(index, block, &status);
+ if (status != C2_OK) {
+ ALOGE("Failed to convert input block (index: %" PRIu64 ")", index);
+ reportError(status);
+ return false;
+ }
+ }
+
+ ALOGV("Encoding input block (index: %" PRIu64 ", timestamp: %" PRId64 ", size: %dx%d)", index,
+ timestamp, block.width(), block.height());
+
+ // Create a video frame from the graphic block.
+ std::unique_ptr<InputFrame> frame = InputFrame::Create(block);
if (!frame) {
ALOGE("Failed to create video frame from input block (index: %" PRIu64
", timestamp: %" PRId64 ")",
@@ -751,71 +1047,141 @@ bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int6
return false;
}
- if (!mEncoder->encode(std::move(frame))) {
+ // Get the video frame layout and pixel format from the graphic block.
+ // TODO(dstaessens) Integrate getVideoFrameLayout() into InputFrame::Create()
+ media::VideoPixelFormat format;
+ std::optional<std::vector<VideoFramePlane>> planes = getVideoFrameLayout(block, &format);
+ if (!planes) {
+ ALOGE("Failed to get input block's layout");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ if (!enqueueInputBuffer(std::move(frame), format, *planes, index, timestamp)) {
+ ALOGE("Failed to enqueue video frame (index: %" PRIu64 ", timestamp: %" PRId64 ")", index,
+ timestamp);
+ reportError(C2_CORRUPTED);
return false;
}
+ // Start streaming on the input and output queue if required.
+ if (!mInputQueue->IsStreaming()) {
+ ALOG_ASSERT(!mOutputQueue->IsStreaming());
+ if (!mOutputQueue->Streamon() || !mInputQueue->Streamon()) {
+ ALOGE("Failed to start streaming on input and output queue");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+ // Start polling on the V4L2 device.
+ startDevicePoll();
+ }
+
+ // Queue all buffers on the output queue. These buffers will be used to store the encoded
+ // bitstreams.
+ while (mOutputQueue->FreeBuffersCount() > 0) {
+ if (!enqueueOutputBuffer()) return false;
+ }
+
return true;
}
+void V4L2EncodeComponent::drain() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ if (mEncoderState == EncoderState::DRAINING || mEncoderState == EncoderState::ERROR) {
+ return;
+ }
+
+ ALOG_ASSERT(mInputQueue->IsStreaming() && mOutputQueue->IsStreaming());
+ ALOG_ASSERT(!mOutputWorkQueue.empty());
+
+ // TODO(dstaessens): Move IOCTL to device class.
+ struct v4l2_encoder_cmd cmd;
+ memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
+ cmd.cmd = V4L2_ENC_CMD_STOP;
+ if (mDevice->Ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
+ ALOGE("Failed to stop encoder");
+ onDrainDone(false);
+ return;
+ }
+ ALOGV("%s(): Sent STOP command to encoder", __func__);
+
+ setEncoderState(EncoderState::DRAINING);
+}
+
void V4L2EncodeComponent::flush() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- mEncoder->flush();
+ // Stop the device poll thread.
+ stopDevicePoll();
+
+ // Stop streaming on the V4L2 device, which stops all currently queued work and releases all
+ // buffers currently in use by the device.
+    // TODO(b/160540027): Calling streamoff currently triggers a bug.
+ for (auto& queue : {mInputQueue, mOutputQueue}) {
+ if (queue && queue->IsStreaming() && !queue->Streamoff()) {
+ ALOGE("Failed to stop streaming on the device queue");
+ reportError(C2_CORRUPTED);
+ }
+ }
+
+ // Return all buffers to the input format convertor and clear all references to graphic blocks
+ // in the input queue. We don't need to clear the output map as those buffers will still be
+ // used.
+ for (auto& it : mInputBuffersMap) {
+ if (mInputFormatConverter && it.second) {
+ mInputFormatConverter->returnBlock(it.first);
+ }
+ it.second = nullptr;
+ }
// Report all queued work items as aborted.
std::list<std::unique_ptr<C2Work>> abortedWorkItems;
- while (!mInputConverterQueue.empty()) {
- std::unique_ptr<C2Work> work = std::move(mInputConverterQueue.front());
+ while (!mInputWorkQueue.empty()) {
+ std::unique_ptr<C2Work> work = std::move(mInputWorkQueue.front());
work->result = C2_NOT_FOUND;
work->input.buffers.clear();
abortedWorkItems.push_back(std::move(work));
- mInputConverterQueue.pop();
+ mInputWorkQueue.pop();
}
- while (!mWorkQueue.empty()) {
- std::unique_ptr<C2Work> work = std::move(mWorkQueue.front());
- // Return buffer to the input format convertor if required.
- if (mInputFormatConverter && work->input.buffers.empty()) {
- mInputFormatConverter->returnBlock(work->input.ordinal.frameIndex.peeku());
- }
+ while (!mOutputWorkQueue.empty()) {
+ std::unique_ptr<C2Work> work = std::move(mOutputWorkQueue.front());
work->result = C2_NOT_FOUND;
work->input.buffers.clear();
abortedWorkItems.push_back(std::move(work));
- mWorkQueue.pop_front();
+ mOutputWorkQueue.pop_front();
}
- if (!abortedWorkItems.empty()) {
+ if (!abortedWorkItems.empty())
mListener->onWorkDone_nb(shared_from_this(), std::move(abortedWorkItems));
- }
+
+ // Streaming and polling on the V4L2 device input and output queues will be resumed once new
+ // encode work is queued.
}
-void V4L2EncodeComponent::fetchOutputBlock(uint32_t size,
- std::unique_ptr<BitstreamBuffer>* buffer) {
- ALOGV("Fetching linear block (size: %u)", size);
- std::shared_ptr<C2LinearBlock> block;
+std::shared_ptr<C2LinearBlock> V4L2EncodeComponent::fetchOutputBlock() {
+ // TODO(dstaessens): fetchLinearBlock() might be blocking.
+ ALOGV("Fetching linear block (size: %u)", mOutputBufferSize);
+ std::shared_ptr<C2LinearBlock> outputBlock;
c2_status_t status = mOutputBlockPool->fetchLinearBlock(
- size,
+ mOutputBufferSize,
C2MemoryUsage(C2MemoryUsage::CPU_READ |
static_cast<uint64_t>(BufferUsage::VIDEO_ENCODER)),
- &block);
+ &outputBlock);
if (status != C2_OK) {
ALOGE("Failed to fetch linear block (error: %d)", status);
reportError(status);
+ return nullptr;
}
- // Store a reference to the block to keep the fds alive.
- int fd = block->handle()->data[0];
- ALOG_ASSERT(!mOutputBuffersMap[fd]);
- mOutputBuffersMap[fd] = std::move(block);
-
- // TODO(dstaessens) Store the C2LinearBlock directly into the BitstreamBuffer.
- *buffer = std::make_unique<BitstreamBuffer>(fd, fd, 0, size);
+ return outputBlock;
}
void V4L2EncodeComponent::onInputBufferDone(uint64_t index) {
ALOGV("%s(): Input buffer done (index: %" PRIu64 ")", __func__, index);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mEncoder);
+ ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
// There are no guarantees the input buffers are returned in order, so we need to find the work
// item which this buffer belongs to.
@@ -826,45 +1192,53 @@ void V4L2EncodeComponent::onInputBufferDone(uint64_t index) {
return;
}
- // We're done using the input block, release reference to return the block to the client.
+    // We're done using the input block, so release the reference to return the block to the
+    // client. If using an input format convertor, we also need to return the block to the
+    // convertor.
LOG_ASSERT(!work->input.buffers.empty());
work->input.buffers.front().reset();
-
- // Return the block to the convertor if required. If we have buffers awaiting conversion, we can
- // now attempt to convert and encode them again.
if (mInputFormatConverter) {
c2_status_t status = mInputFormatConverter->returnBlock(index);
if (status != C2_OK) {
reportError(status);
return;
}
- while (!mInputConverterQueue.empty() && mInputFormatConverter->isReady()) {
- std::unique_ptr<C2Work> work = std::move(mInputConverterQueue.front());
- mInputConverterQueue.pop();
- queueTask(std::move(work));
- }
}
    // Return all completed work items. The work item might have been waiting for its input buffer
// to be returned, in which case we can report it as completed now. As input buffers are not
// necessarily returned in order we might be able to return multiple ready work items now.
- while (!mWorkQueue.empty() && isWorkDone(*mWorkQueue.front())) {
- reportWork(std::move(mWorkQueue.front()));
- mWorkQueue.pop_front();
+ while (!mOutputWorkQueue.empty() && isWorkDone(*mOutputWorkQueue.front())) {
+ reportWork(std::move(mOutputWorkQueue.front()));
+ mOutputWorkQueue.pop_front();
+ }
+
+ // We might have been waiting for input buffers to be returned after draining finished.
+ if (mEncoderState == EncoderState::DRAINING && mOutputWorkQueue.empty()) {
+ ALOGV("Draining done");
+ mEncoderState = EncoderState::WAITING_FOR_INPUT_BUFFERS;
+ }
+
+ // If we previously used up all input queue buffers we can start encoding again now.
+ if ((mEncoderState == EncoderState::WAITING_FOR_INPUT_BUFFERS) && !mInputWorkQueue.empty()) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE,
+ ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
}
}
-void V4L2EncodeComponent::onOutputBufferDone(size_t dataSize, int64_t timestamp, bool keyFrame,
- std::unique_ptr<BitstreamBuffer> buffer) {
- ALOGV("%s(): output buffer done (timestamp: %" PRId64 ", size: %zu, keyframe: %d)", __func__,
- timestamp, dataSize, keyFrame);
+void V4L2EncodeComponent::onOutputBufferDone(uint32_t payloadSize, bool keyFrame, int64_t timestamp,
+ std::shared_ptr<C2LinearBlock> outputBlock) {
+ ALOGV("%s(): output buffer done (timestamp: %" PRId64 ", size: %u, key frame: %d)", __func__,
+ timestamp, payloadSize, keyFrame);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- std::shared_ptr<C2LinearBlock> outputBlock = std::move(mOutputBuffersMap[buffer->id]);
- mOutputBuffersMap.erase(buffer->id);
- ALOG_ASSERT(outputBlock);
+ if (mEncoderState == EncoderState::ERROR) {
+ return;
+ }
- C2ConstLinearBlock constBlock = outputBlock->share(outputBlock->offset(), dataSize, C2Fence());
+ C2ConstLinearBlock constBlock =
+ outputBlock->share(outputBlock->offset(), payloadSize, C2Fence());
    // If no CSD (codec-specific data, e.g. SPS for H.264) has been submitted yet, we expect this
// output block to contain CSD. We only submit the CSD once, even if it's attached to each key
@@ -881,8 +1255,8 @@ void V4L2EncodeComponent::onOutputBufferDone(size_t dataSize, int64_t timestamp,
}
// Attach the CSD to the first item in our output work queue.
- LOG_ASSERT(!mWorkQueue.empty());
- C2Work* work = mWorkQueue.front().get();
+ LOG_ASSERT(!mOutputWorkQueue.empty());
+ C2Work* work = mOutputWorkQueue.front().get();
work->worklets.front()->output.configUpdate.push_back(std::move(csd));
mCSDSubmitted = true;
}
@@ -892,31 +1266,26 @@ void V4L2EncodeComponent::onOutputBufferDone(size_t dataSize, int64_t timestamp,
if (!work) {
// It's possible we got an empty CSD request with timestamp 0, which we currently just
// discard.
+ // TODO(dstaessens): Investigate handling empty CSD requests.
if (timestamp != 0) {
reportError(C2_CORRUPTED);
}
return;
}
- std::shared_ptr<C2Buffer> linearBuffer = C2Buffer::CreateLinearBuffer(std::move(constBlock));
- if (!linearBuffer) {
- ALOGE("Failed to create linear buffer from block");
- reportError(C2_CORRUPTED);
- return;
- }
-
+ std::shared_ptr<C2Buffer> buffer = C2Buffer::CreateLinearBuffer(std::move(constBlock));
if (keyFrame) {
- linearBuffer->setInfo(
+ buffer->setInfo(
std::make_shared<C2StreamPictureTypeMaskInfo::output>(0u, C2Config::SYNC_FRAME));
}
- work->worklets.front()->output.buffers.emplace_back(std::move(linearBuffer));
+ work->worklets.front()->output.buffers.emplace_back(buffer);
// We can report the work item as completed if its associated input buffer has also been
// released. As output buffers are not necessarily returned in order we might be able to return
// multiple ready work items now.
- while (!mWorkQueue.empty() && isWorkDone(*mWorkQueue.front())) {
- reportWork(std::move(mWorkQueue.front()));
- mWorkQueue.pop_front();
+ while (!mOutputWorkQueue.empty() && isWorkDone(*mOutputWorkQueue.front())) {
+ reportWork(std::move(mOutputWorkQueue.front()));
+ mOutputWorkQueue.pop_front();
}
}
@@ -924,11 +1293,11 @@ C2Work* V4L2EncodeComponent::getWorkByIndex(uint64_t index) {
ALOGV("%s(): getting work item (index: %" PRIu64 ")", __func__, index);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- auto it = std::find_if(mWorkQueue.begin(), mWorkQueue.end(),
+ auto it = std::find_if(mOutputWorkQueue.begin(), mOutputWorkQueue.end(),
[index](const std::unique_ptr<C2Work>& w) {
return w->input.ordinal.frameIndex.peeku() == index;
});
- if (it == mWorkQueue.end()) {
+ if (it == mOutputWorkQueue.end()) {
ALOGE("Failed to find work (index: %" PRIu64 ")", index);
return nullptr;
}
@@ -943,12 +1312,13 @@ C2Work* V4L2EncodeComponent::getWorkByTimestamp(int64_t timestamp) {
    // Find the work with the specified timestamp by looping over the output work queue. This
    // should be very fast, as the queue will never be longer than a few items. Ignore empty work
// items that are marked as EOS, as their timestamp might clash with other work items.
- auto it = std::find_if(
- mWorkQueue.begin(), mWorkQueue.end(), [timestamp](const std::unique_ptr<C2Work>& w) {
- return !(w->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
- w->input.ordinal.timestamp.peeku() == static_cast<uint64_t>(timestamp);
- });
- if (it == mWorkQueue.end()) {
+ auto it = std::find_if(mOutputWorkQueue.begin(), mOutputWorkQueue.end(),
+ [timestamp](const std::unique_ptr<C2Work>& w) {
+ return !(w->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
+ w->input.ordinal.timestamp.peeku() ==
+ static_cast<uint64_t>(timestamp);
+ });
+ if (it == mOutputWorkQueue.end()) {
ALOGE("Failed to find work (timestamp: %" PRIu64 ")", timestamp);
return nullptr;
}
@@ -996,27 +1366,321 @@ void V4L2EncodeComponent::reportWork(std::unique_ptr<C2Work> work) {
mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorkList));
}
-bool V4L2EncodeComponent::getBlockPool() {
- C2BlockPool::local_id_t poolId = mInterface->getBlockPoolId();
- if (poolId == C2BlockPool::BASIC_LINEAR) {
- ALOGW("Using unoptimized linear block pool");
+bool V4L2EncodeComponent::startDevicePoll() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ if (!mDevice->StartPolling(
+ ::base::BindRepeating(&V4L2EncodeComponent::serviceDeviceTask, mWeakThis),
+ ::base::BindRepeating(&V4L2EncodeComponent::onPollError, mWeakThis))) {
+ ALOGE("Device poll thread failed to start");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ ALOGV("Device poll started");
+ return true;
+}
+
+bool V4L2EncodeComponent::stopDevicePoll() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ if (!mDevice->StopPolling()) {
+ ALOGE("Failed to stop polling on the device");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ ALOGV("Device poll stopped");
+ return true;
+}
+
+void V4L2EncodeComponent::onPollError() {
+ ALOGV("%s()", __func__);
+ reportError(C2_CORRUPTED);
+}
+
+void V4L2EncodeComponent::serviceDeviceTask(bool /*event*/) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+
+ if (mEncoderState == EncoderState::ERROR) {
+ return;
+ }
+
+ // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free list.
+ while (mInputQueue->QueuedBuffersCount() > 0) {
+ if (!dequeueInputBuffer()) break;
+ }
+
+ // Dequeue completed output (VIDEO_CAPTURE) buffers, and recycle to the free list.
+ while (mOutputQueue->QueuedBuffersCount() > 0) {
+ if (!dequeueOutputBuffer()) break;
+ }
+
+ ALOGV("%s() - done", __func__);
+}
+
+bool V4L2EncodeComponent::enqueueInputBuffer(std::unique_ptr<InputFrame> frame,
+ media::VideoPixelFormat format,
+ const std::vector<VideoFramePlane>& planes,
+ int64_t index, int64_t timestamp) {
+ ALOGV("%s(): queuing input buffer (index: %" PRId64 ")", __func__, index);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mInputQueue->FreeBuffersCount() > 0);
+ ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+ ALOG_ASSERT(mInputLayout->format() == format);
+ ALOG_ASSERT(mInputLayout->planes().size() == planes.size());
+
+ auto buffer = mInputQueue->GetFreeBuffer();
+ if (!buffer) {
+ ALOGE("Failed to get free buffer from device input queue");
+ return false;
+ }
+
+ // Mark the buffer with the frame's timestamp so we can identify the associated output buffers.
+ buffer->SetTimeStamp(
+ {.tv_sec = static_cast<time_t>(timestamp / ::base::Time::kMicrosecondsPerSecond),
+ .tv_usec = static_cast<time_t>(timestamp % ::base::Time::kMicrosecondsPerSecond)});
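+    // E.g. a timestamp of 1500000 us is stored as {.tv_sec = 1, .tv_usec = 500000};
+    // dequeueInputBuffer() and dequeueOutputBuffer() recombine both fields into microseconds.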
+ size_t bufferId = buffer->BufferId();
+
+ for (size_t i = 0; i < planes.size(); ++i) {
+        // A single-buffer input format may have multiple color planes, so bytesUsed of the
+        // single buffer should be the sum of all color planes' sizes.
+ size_t bytesUsed = 0;
+ if (planes.size() == 1) {
+ bytesUsed = media::VideoFrame::AllocationSize(format, mInputLayout->coded_size());
+ } else {
+ bytesUsed = ::base::checked_cast<size_t>(
+ media::VideoFrame::PlaneSize(format, i, mInputLayout->coded_size()).GetArea());
+ }
+
+ // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
+ // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
+ // right interface, including any necessary validation and potential alignment.
+ buffer->SetPlaneDataOffset(i, planes[i].mOffset);
+ bytesUsed += planes[i].mOffset;
+        // Workaround: filling the length should not be needed; this is a bug in the videobuf2
+        // library.
+ buffer->SetPlaneSize(i, mInputLayout->planes()[i].size + planes[i].mOffset);
+ buffer->SetPlaneBytesUsed(i, bytesUsed);
+ }
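+    // Worked example (editor's illustration): for a single-buffer NV12 frame with a coded size
+    // of 1920x1080, AllocationSize() covers both color planes: 1920 * 1080 bytes for Y plus
+    // 1920 * 540 bytes for the interleaved UV plane, so bytesUsed = 3110400 plus the data offset.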
+
+ std::move(*buffer).QueueDMABuf(frame->getFDs());
+
+ ALOGV("Queued buffer in input queue (index: %" PRId64 ", timestamp: %" PRId64
+ ", bufferId: %zu)",
+ index, timestamp, bufferId);
+
+ mInputBuffersMap[bufferId] = {index, std::move(frame)};
+
+ return true;
+}
+
+bool V4L2EncodeComponent::enqueueOutputBuffer() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mOutputQueue->FreeBuffersCount() > 0);
+
+ auto buffer = mOutputQueue->GetFreeBuffer();
+ if (!buffer) {
+ ALOGE("Failed to get free buffer from device output queue");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ std::shared_ptr<C2LinearBlock> outputBlock = fetchOutputBlock();
+ if (!outputBlock) {
+ ALOGE("Failed to fetch output block");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ size_t bufferId = buffer->BufferId();
+
+ std::vector<int> fds;
+ fds.push_back(outputBlock->handle()->data[0]);
+ if (!std::move(*buffer).QueueDMABuf(fds)) {
+ ALOGE("Failed to queue output buffer using QueueDMABuf");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ ALOG_ASSERT(!mOutputBuffersMap[bufferId]);
+ mOutputBuffersMap[bufferId] = std::move(outputBlock);
+ ALOGV("%s(): Queued buffer in output queue (bufferId: %zu)", __func__, bufferId);
+ return true;
+}
+
+bool V4L2EncodeComponent::dequeueInputBuffer() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+ ALOG_ASSERT(mInputQueue->QueuedBuffersCount() > 0);
+
+ std::pair<bool, media::V4L2ReadableBufferRef> result = mInputQueue->DequeueBuffer();
+ if (!result.first) {
+ ALOGE("Failed to dequeue buffer from input queue");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+ if (!result.second) {
+ // No more buffers ready to be dequeued in input queue.
+ return false;
+ }
+
+ const media::V4L2ReadableBufferRef buffer = std::move(result.second);
+ uint64_t index = mInputBuffersMap[buffer->BufferId()].first;
+ int64_t timestamp = buffer->GetTimeStamp().tv_usec +
+ buffer->GetTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond;
+ ALOGV("Dequeued buffer from input queue (index: %" PRId64 ", timestamp: %" PRId64
+ ", bufferId: %zu)",
+ index, timestamp, buffer->BufferId());
+
+ mInputBuffersMap[buffer->BufferId()].second = nullptr;
+ onInputBufferDone(index);
+
+ return true;
+}
+
+bool V4L2EncodeComponent::dequeueOutputBuffer() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+ ALOG_ASSERT(mOutputQueue->QueuedBuffersCount() > 0);
+
+ std::pair<bool, media::V4L2ReadableBufferRef> result = mOutputQueue->DequeueBuffer();
+ if (!result.first) {
+ ALOGE("Failed to dequeue buffer from output queue");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+ if (!result.second) {
+ // No more buffers ready to be dequeued in output queue.
+ return false;
+ }
+
+ media::V4L2ReadableBufferRef buffer = std::move(result.second);
+ size_t encodedDataSize = buffer->GetPlaneBytesUsed(0) - buffer->GetPlaneDataOffset(0);
+ ::base::TimeDelta timestamp = ::base::TimeDelta::FromMicroseconds(
+ buffer->GetTimeStamp().tv_usec +
+ buffer->GetTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond);
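+    // The payload size is bytesUsed minus the plane's data offset, since the driver may place
+    // the encoded bitstream at a non-zero offset within the output buffer.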
+
+ ALOGV("Dequeued buffer from output queue (timestamp: %" PRId64
+ ", bufferId: %zu, data size: %zu, EOS: %d)",
+ timestamp.InMicroseconds(), buffer->BufferId(), encodedDataSize, buffer->IsLast());
+
+ if (!mOutputBuffersMap[buffer->BufferId()]) {
+ ALOGE("Failed to find output block associated with output buffer");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+
+ std::shared_ptr<C2LinearBlock> block = std::move(mOutputBuffersMap[buffer->BufferId()]);
+ if (encodedDataSize > 0) {
+ onOutputBufferDone(encodedDataSize, buffer->IsKeyframe(), timestamp.InMicroseconds(),
+ std::move(block));
+ }
+
+    // If the buffer is marked as last and we were draining the encoder, draining is now done.
+ if ((mEncoderState == EncoderState::DRAINING) && buffer->IsLast()) {
+ onDrainDone(true);
+
+ // Start the encoder again.
+ struct v4l2_encoder_cmd cmd;
+ memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
+ cmd.cmd = V4L2_ENC_CMD_START;
+ if (mDevice->Ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
+ ALOGE("Failed to restart encoder after flushing (V4L2_ENC_CMD_START)");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+ }
+
+ // Queue a new output buffer to replace the one we dequeued.
+ buffer = nullptr;
+ enqueueOutputBuffer();
+
+ return true;
+}
+
+bool V4L2EncodeComponent::createInputBuffers() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(!mInputQueue->IsStreaming());
+ ALOG_ASSERT(mInputBuffersMap.empty());
+
+ // No memory is allocated here, we just generate a list of buffers on the input queue, which
+ // will hold memory handles to the real buffers.
+ if (mInputQueue->AllocateBuffers(kInputBufferCount, V4L2_MEMORY_DMABUF) < kInputBufferCount) {
+ ALOGE("Failed to create V4L2 input buffers.");
+ return false;
}
+
+ mInputBuffersMap.resize(mInputQueue->AllocatedBuffersCount());
+ return true;
+}
+
+bool V4L2EncodeComponent::createOutputBuffers() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(!mOutputQueue->IsStreaming());
+ ALOG_ASSERT(mOutputBuffersMap.empty());
+
+ // Fetch the output block pool.
+ C2BlockPool::local_id_t poolId = mInterface->getBlockPoolId();
c2_status_t status = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
if (status != C2_OK || !mOutputBlockPool) {
ALOGE("Failed to get output block pool, error: %d", status);
return false;
}
+
+ // No memory is allocated here, we just generate a list of buffers on the output queue, which
+ // will hold memory handles to the real buffers.
+ if (mOutputQueue->AllocateBuffers(kOutputBufferCount, V4L2_MEMORY_DMABUF) <
+ kOutputBufferCount) {
+ ALOGE("Failed to create V4L2 output buffers.");
+ return false;
+ }
+
+ mOutputBuffersMap.resize(mOutputQueue->AllocatedBuffersCount());
return true;
}
+void V4L2EncodeComponent::destroyInputBuffers() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(!mInputQueue->IsStreaming());
+
+ if (!mInputQueue || mInputQueue->AllocatedBuffersCount() == 0) return;
+ mInputQueue->DeallocateBuffers();
+ mInputBuffersMap.clear();
+}
+
+void V4L2EncodeComponent::destroyOutputBuffers() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(!mOutputQueue->IsStreaming());
+
+ if (!mOutputQueue || mOutputQueue->AllocatedBuffersCount() == 0) return;
+ mOutputQueue->DeallocateBuffers();
+ mOutputBuffersMap.clear();
+ mOutputBlockPool.reset();
+}
+
void V4L2EncodeComponent::reportError(c2_status_t error) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- // TODO(dstaessens): Report all pending work items as finished upon failure.
- std::lock_guard<std::mutex> lock(mComponentLock);
- if (mComponentState != ComponentState::ERROR) {
+ {
+ std::lock_guard<std::mutex> lock(mComponentLock);
setComponentState(ComponentState::ERROR);
+ }
+
+ // TODO(dstaessens): Report all pending work items as finished upon failure.
+ if (mEncoderState != EncoderState::ERROR) {
+ setEncoderState(EncoderState::ERROR);
mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
}
}
@@ -1044,6 +1708,39 @@ void V4L2EncodeComponent::setComponentState(ComponentState state) {
mComponentState = state;
}
+void V4L2EncodeComponent::setEncoderState(EncoderState state) {
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ // Check whether the state change is valid.
+ switch (state) {
+ case EncoderState::UNINITIALIZED:
+ // TODO(dstaessens): Check all valid state changes.
+ break;
+ case EncoderState::WAITING_FOR_INPUT:
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED ||
+ mEncoderState == EncoderState::ENCODING ||
+ mEncoderState == EncoderState::DRAINING);
+ break;
+ case EncoderState::WAITING_FOR_INPUT_BUFFERS:
+ ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+ break;
+ case EncoderState::ENCODING:
+ ALOG_ASSERT(mEncoderState == EncoderState::WAITING_FOR_INPUT ||
+ mEncoderState == EncoderState::WAITING_FOR_INPUT_BUFFERS ||
+ mEncoderState == EncoderState::DRAINING);
+ break;
+ case EncoderState::DRAINING:
+ ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+ break;
+ case EncoderState::ERROR:
+ break;
+ }
+
+ ALOGV("Changed encoder state from %s to %s", encoderStateToString(mEncoderState),
+ encoderStateToString(state));
+ mEncoderState = state;
+}
+
const char* V4L2EncodeComponent::componentStateToString(V4L2EncodeComponent::ComponentState state) {
switch (state) {
case ComponentState::UNLOADED:
@@ -1057,4 +1754,21 @@ const char* V4L2EncodeComponent::componentStateToString(V4L2EncodeComponent::Com
}
}
+const char* V4L2EncodeComponent::encoderStateToString(V4L2EncodeComponent::EncoderState state) {
+ switch (state) {
+ case EncoderState::UNINITIALIZED:
+ return "UNINITIALIZED";
+ case EncoderState::WAITING_FOR_INPUT:
+ return "WAITING_FOR_INPUT";
+ case EncoderState::WAITING_FOR_INPUT_BUFFERS:
+ return "WAITING_FOR_INPUT_BUFFERS";
+ case EncoderState::ENCODING:
+ return "ENCODING";
+ case EncoderState::DRAINING:
+ return "Draining";
+ case EncoderState::ERROR:
+ return "ERROR";
+ }
+}
+
} // namespace android
diff --git a/components/V4L2EncodeInterface.cpp b/components/V4L2EncodeInterface.cpp
index 7f0fb39..9e6b556 100644
--- a/components/V4L2EncodeInterface.cpp
+++ b/components/V4L2EncodeInterface.cpp
@@ -8,7 +8,6 @@
#include <v4l2_codec2/components/V4L2EncodeInterface.h>
#include <inttypes.h>
-#include <algorithm>
#include <C2PlatformSupport.h>
#include <SimpleC2Interface.h>
@@ -16,9 +15,9 @@
#include <media/stagefright/MediaDefs.h>
#include <utils/Log.h>
+#include <v4l2_device.h>
#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/common/V4L2Device.h>
-#include <v4l2_codec2/common/VideoTypes.h>
+#include <video_codecs.h>
using android::hardware::graphics::common::V1_0::BufferUsage;
@@ -42,50 +41,76 @@ constexpr uint32_t kDefaultBitrate = 64000;
// TODO: increase this in the future for supporting higher level/resolution encoding.
constexpr uint32_t kMaxBitrate = 50000000;
-std::optional<VideoCodec> getCodecFromComponentName(const std::string& name) {
- if (name == V4L2ComponentName::kH264Encoder) return VideoCodec::H264;
- if (name == V4L2ComponentName::kVP8Encoder) return VideoCodec::VP8;
- if (name == V4L2ComponentName::kVP9Encoder) return VideoCodec::VP9;
+// The frame size of 1080p video.
+constexpr uint32_t kFrameSize1080P = 1920 * 1080;
+
+C2Config::profile_t videoCodecProfileToC2Profile(media::VideoCodecProfile profile) {
+ switch (profile) {
+ case media::VideoCodecProfile::H264PROFILE_BASELINE:
+ return C2Config::PROFILE_AVC_BASELINE;
+ case media::VideoCodecProfile::H264PROFILE_MAIN:
+ return C2Config::PROFILE_AVC_MAIN;
+ case media::VideoCodecProfile::H264PROFILE_EXTENDED:
+ return C2Config::PROFILE_AVC_EXTENDED;
+ case media::VideoCodecProfile::H264PROFILE_HIGH:
+ return C2Config::PROFILE_AVC_HIGH;
+ case media::VideoCodecProfile::H264PROFILE_HIGH10PROFILE:
+ return C2Config::PROFILE_AVC_HIGH_10;
+ case media::VideoCodecProfile::H264PROFILE_HIGH422PROFILE:
+ return C2Config::PROFILE_AVC_HIGH_422;
+ case media::VideoCodecProfile::H264PROFILE_HIGH444PREDICTIVEPROFILE:
+ return C2Config::PROFILE_AVC_HIGH_444_PREDICTIVE;
+ case media::VideoCodecProfile::H264PROFILE_SCALABLEBASELINE:
+ return C2Config::PROFILE_AVC_SCALABLE_BASELINE;
+ case media::VideoCodecProfile::H264PROFILE_SCALABLEHIGH:
+ return C2Config::PROFILE_AVC_SCALABLE_HIGH;
+ case media::VideoCodecProfile::H264PROFILE_STEREOHIGH:
+ return C2Config::PROFILE_AVC_STEREO_HIGH;
+ case media::VideoCodecProfile::H264PROFILE_MULTIVIEWHIGH:
+ return C2Config::PROFILE_AVC_MULTIVIEW_HIGH;
+ default:
+ ALOGE("Unrecognizable profile (value = %d)...", profile);
+ return C2Config::PROFILE_UNUSED;
+ }
+}
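+// Note (editor's): profiles without a mapping above collapse to C2Config::PROFILE_UNUSED and
+// are skipped when Initialize() compiles the list of supported profiles.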
+
+std::optional<media::VideoCodec> getCodecFromComponentName(const std::string& name) {
+ if (name == V4L2ComponentName::kH264Encoder)
+ return media::VideoCodec::kCodecH264;
ALOGE("Unknown name: %s", name.c_str());
return std::nullopt;
}
-// Check whether the specified profile is a valid profile for the specified codec.
-bool IsValidProfileForCodec(VideoCodec codec, C2Config::profile_t profile) {
- switch (codec) {
- case VideoCodec::H264:
- return ((profile >= C2Config::PROFILE_AVC_BASELINE) &&
- (profile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH));
- case VideoCodec::VP8:
- return ((profile >= C2Config::PROFILE_VP8_0) && (profile <= C2Config::PROFILE_VP8_3));
- case VideoCodec::VP9:
- return ((profile >= C2Config::PROFILE_VP9_0) && (profile <= C2Config::PROFILE_VP9_3));
- default:
- return false;
- }
-}
-
} // namespace
// static
-C2R V4L2EncodeInterface::H264ProfileLevelSetter(
- bool /*mayBlock*/, C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& videoSize,
- const C2P<C2StreamFrameRateInfo::output>& frameRate,
- const C2P<C2StreamBitrateInfo::output>& bitrate) {
+C2R V4L2EncodeInterface::ProfileLevelSetter(bool mayBlock,
+ C2P<C2StreamProfileLevelInfo::output>& info,
+ const C2P<C2StreamPictureSizeInfo::input>& videoSize,
+ const C2P<C2StreamFrameRateInfo::output>& frameRate,
+ const C2P<C2StreamBitrateInfo::output>& bitrate) {
+ (void)mayBlock;
+
static C2Config::level_t lowestConfigLevel = C2Config::LEVEL_UNUSED;
+ // Use at least PROFILE_AVC_MAIN as default for 1080p input video and up.
+ // TODO (b/114332827): Find root cause of bad quality of Baseline encoding.
+ C2Config::profile_t defaultMinProfile = C2Config::PROFILE_AVC_BASELINE;
+ if (videoSize.v.width * videoSize.v.height >= kFrameSize1080P) {
+ defaultMinProfile = C2Config::PROFILE_AVC_MAIN;
+ }
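+    // E.g. a 1920x1080 session requesting PROFILE_AVC_BASELINE is bumped to PROFILE_AVC_MAIN by
+    // the check below, provided the device supports it.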
+
// Adopt default minimal profile instead if the requested profile is not supported, or lower
// than the default minimal one.
- constexpr C2Config::profile_t minProfile = C2Config::PROFILE_AVC_BASELINE;
- if (!info.F(info.v.profile).supportsAtAll(info.v.profile) || info.v.profile < minProfile) {
- if (info.F(info.v.profile).supportsAtAll(minProfile)) {
- ALOGV("Set profile to default (%u) instead.", minProfile);
- info.set().profile = minProfile;
+ if (!info.F(info.v.profile).supportsAtAll(info.v.profile) ||
+ info.v.profile < defaultMinProfile) {
+ if (info.F(info.v.profile).supportsAtAll(defaultMinProfile)) {
+ ALOGV("Set profile to default (%u) instead.", defaultMinProfile);
+ info.set().profile = defaultMinProfile;
} else {
ALOGE("Unable to set either requested profile (%u) or default profile (%u).",
- info.v.profile, minProfile);
+ info.v.profile, defaultMinProfile);
return C2R(C2SettingResultBuilder::BadValue(info.F(info.v.profile)));
}
}
@@ -185,29 +210,6 @@ C2R V4L2EncodeInterface::H264ProfileLevelSetter(
return C2R::Ok();
}
-C2R V4L2EncodeInterface::VP9ProfileLevelSetter(
- bool /*mayBlock*/, C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& /*videoSize*/,
- const C2P<C2StreamFrameRateInfo::output>& /*frameRate*/,
- const C2P<C2StreamBitrateInfo::output>& /*bitrate*/) {
- // Adopt default minimal profile instead if the requested profile is not supported, or lower
- // than the default minimal one.
- constexpr C2Config::profile_t defaultMinProfile = C2Config::PROFILE_VP9_0;
- if (!info.F(info.v.profile).supportsAtAll(info.v.profile) ||
- info.v.profile < defaultMinProfile) {
- if (info.F(info.v.profile).supportsAtAll(defaultMinProfile)) {
- ALOGV("Set profile to default (%u) instead.", defaultMinProfile);
- info.set().profile = defaultMinProfile;
- } else {
- ALOGE("Unable to set either requested profile (%u) or default profile (%u).",
- info.v.profile, defaultMinProfile);
- return C2R(C2SettingResultBuilder::BadValue(info.F(info.v.profile)));
- }
- }
-
- return C2R::Ok();
-}
-
// static
C2R V4L2EncodeInterface::SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::input>& videoSize) {
(void)mayBlock;
@@ -231,8 +233,8 @@ C2R V4L2EncodeInterface::IntraRefreshPeriodSetter(bool mayBlock,
return C2R::Ok();
}
-V4L2EncodeInterface::V4L2EncodeInterface(const C2String& name,
- std::shared_ptr<C2ReflectorHelper> helper)
+V4L2EncodeInterface::V4L2EncodeInterface(
+ const C2String& name, std::shared_ptr<C2ReflectorHelper> helper)
: C2InterfaceHelper(std::move(helper)) {
ALOGV("%s(%s)", __func__, name.c_str());
@@ -242,57 +244,48 @@ V4L2EncodeInterface::V4L2EncodeInterface(const C2String& name,
}
void V4L2EncodeInterface::Initialize(const C2String& name) {
- scoped_refptr<V4L2Device> device = V4L2Device::create();
+ scoped_refptr<media::V4L2Device> device = media::V4L2Device::Create();
if (!device) {
ALOGE("Failed to create V4L2 device");
mInitStatus = C2_CORRUPTED;
return;
}
- auto codec = getCodecFromComponentName(name);
- if (!codec) {
- ALOGE("Invalid component name");
- mInitStatus = C2_BAD_VALUE;
- return;
- }
-
- V4L2Device::SupportedEncodeProfiles supported_profiles = device->getSupportedEncodeProfiles();
-
- // Compile the list of supported profiles.
- // Note: unsigned int is used here, since std::vector<C2Config::profile_t> cannot convert to
- // std::vector<unsigned int> required by the c2 framework below.
+    // Use unsigned int here, otherwise there will be a compile error in
+    // C2F(mProfileLevel, profile).oneOf(profiles), since std::vector<C2Config::profile_t> cannot
+    // be converted to std::vector<unsigned int>.
std::vector<unsigned int> profiles;
- ui::Size maxSize;
- for (const auto& supportedProfile : supported_profiles) {
- if (!IsValidProfileForCodec(codec.value(), supportedProfile.profile)) {
- continue; // Ignore unrecognizable or unsupported profiles.
+ media::Size maxSize;
+ for (const auto& supportedProfile : device->GetSupportedEncodeProfiles()) {
+ C2Config::profile_t profile = videoCodecProfileToC2Profile(supportedProfile.profile);
+ if (profile == C2Config::PROFILE_UNUSED) {
+            continue; // skip unrecognized profiles
}
- ALOGV("Queried c2_profile = 0x%x : max_size = %d x %d", supportedProfile.profile,
- supportedProfile.max_resolution.width, supportedProfile.max_resolution.height);
- profiles.push_back(static_cast<unsigned int>(supportedProfile.profile));
- maxSize.setWidth(std::max(maxSize.width, supportedProfile.max_resolution.width));
- maxSize.setHeight(std::max(maxSize.height, supportedProfile.max_resolution.height));
+ ALOGV("Queried c2_profile = 0x%x : max_size = %d x %d", profile,
+ supportedProfile.max_resolution.width(), supportedProfile.max_resolution.height());
+ profiles.push_back(static_cast<unsigned int>(profile));
+ maxSize.set_width(std::max(maxSize.width(), supportedProfile.max_resolution.width()));
+ maxSize.set_height(std::max(maxSize.height(), supportedProfile.max_resolution.height()));
}
if (profiles.empty()) {
- ALOGE("No supported profiles");
+ ALOGD("No supported profiles");
mInitStatus = C2_BAD_VALUE;
return;
}
+ C2Config::profile_t minProfile =
+ static_cast<C2Config::profile_t>(*std::min_element(profiles.begin(), profiles.end()));
+
// Special note: the order of addParameter matters if your setters are dependent on other
// parameters. Please make sure the dependent parameters are added prior to the
// one needs the setter dependency.
- addParameter(DefineParam(mKind, C2_PARAMKEY_COMPONENT_KIND)
- .withConstValue(new C2ComponentKindSetting(C2Component::KIND_ENCODER))
- .build());
-
addParameter(DefineParam(mInputVisibleSize, C2_PARAMKEY_PICTURE_SIZE)
.withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
- C2F(mInputVisibleSize, width).inRange(2, maxSize.width, 2),
- C2F(mInputVisibleSize, height).inRange(2, maxSize.height, 2),
+ C2F(mInputVisibleSize, width).inRange(2, maxSize.width(), 2),
+ C2F(mInputVisibleSize, height).inRange(2, maxSize.height(), 2),
})
.withSetter(SizeSetter)
.build());
@@ -311,10 +304,8 @@ void V4L2EncodeInterface::Initialize(const C2String& name) {
.build());
std::string outputMime;
- if (getCodecFromComponentName(name) == VideoCodec::H264) {
+ if (getCodecFromComponentName(name) == media::VideoCodec::kCodecH264) {
outputMime = MEDIA_MIMETYPE_VIDEO_AVC;
- C2Config::profile_t minProfile = static_cast<C2Config::profile_t>(
- *std::min_element(profiles.begin(), profiles.end()));
addParameter(
DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
.withDefault(new C2StreamProfileLevelInfo::output(0u, minProfile,
@@ -331,38 +322,10 @@ void V4L2EncodeInterface::Initialize(const C2String& name) {
C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_5,
C2Config::LEVEL_AVC_5_1})})
- .withSetter(H264ProfileLevelSetter, mInputVisibleSize, mFrameRate, mBitrate)
- .build());
- } else if (getCodecFromComponentName(name) == VideoCodec::VP8) {
- outputMime = MEDIA_MIMETYPE_VIDEO_VP8;
- // VP8 doesn't have conventional profiles, we'll use profile0 if the VP8 codec is requested.
- addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withConstValue(new C2StreamProfileLevelInfo::output(
- 0u, C2Config::PROFILE_VP8_0, C2Config::LEVEL_UNUSED))
- .build());
- } else if (getCodecFromComponentName(name) == VideoCodec::VP9) {
- outputMime = MEDIA_MIMETYPE_VIDEO_VP9;
- C2Config::profile_t minProfile = static_cast<C2Config::profile_t>(
- *std::min_element(profiles.begin(), profiles.end()));
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::output(0u, minProfile,
- C2Config::LEVEL_VP9_1))
- .withFields(
- {C2F(mProfileLevel, profile).oneOf(profiles),
- C2F(mProfileLevel, level)
- // TODO(dstaessens) query supported levels from adaptor.
- .oneOf({C2Config::LEVEL_VP9_1, C2Config::LEVEL_VP9_1_1,
- C2Config::LEVEL_VP9_2, C2Config::LEVEL_VP9_2_1,
- C2Config::LEVEL_VP9_3, C2Config::LEVEL_VP9_3_1,
- C2Config::LEVEL_VP9_4, C2Config::LEVEL_VP9_4_1,
- C2Config::LEVEL_VP9_5, C2Config::LEVEL_VP9_5_1,
- C2Config::LEVEL_VP9_5_2, C2Config::LEVEL_VP9_6,
- C2Config::LEVEL_VP9_6_1,
- C2Config::LEVEL_VP9_6_2})})
- .withSetter(VP9ProfileLevelSetter, mInputVisibleSize, mFrameRate, mBitrate)
+ .withSetter(ProfileLevelSetter, mInputVisibleSize, mFrameRate, mBitrate)
.build());
} else {
+ // TODO(johnylin): implement VP8/VP9 encoder in the future.
ALOGE("Unsupported component name: %s", name.c_str());
mInitStatus = C2_BAD_VALUE;
return;
diff --git a/components/V4L2Encoder.cpp b/components/V4L2Encoder.cpp
deleted file mode 100644
index cdd2d89..0000000
--- a/components/V4L2Encoder.cpp
+++ /dev/null
@@ -1,1016 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2Encoder"
-
-#include <v4l2_codec2/components/V4L2Encoder.h>
-
-#include <stdint.h>
-#include <optional>
-#include <vector>
-
-#include <base/bind.h>
-#include <base/files/scoped_file.h>
-#include <base/memory/ptr_util.h>
-#include <log/log.h>
-#include <ui/Rect.h>
-
-#include <v4l2_codec2/common/Fourcc.h>
-#include <v4l2_codec2/common/V4L2Device.h>
-#include <v4l2_codec2/components/BitstreamBuffer.h>
-
-namespace android {
-
-namespace {
-
-const VideoPixelFormat kInputPixelFormat = VideoPixelFormat::NV12;
-
-// The maximum size for output buffer, which is chosen empirically for a 1080p video.
-constexpr size_t kMaxBitstreamBufferSizeInBytes = 2 * 1024 * 1024; // 2MB
-// The frame size for 1080p (FHD) video in pixels.
-constexpr int k1080PSizeInPixels = 1920 * 1080;
-// The frame size for 1440p (QHD) video in pixels.
-constexpr int k1440PSizeInPixels = 2560 * 1440;
-
-// Use quadruple size of kMaxBitstreamBufferSizeInBytes when the input frame size is larger than
-// 1440p, double if larger than 1080p. This is chosen empirically for some 4k encoding use cases and
-// the Android CTS VideoEncoderTest (crbug.com/927284).
-size_t GetMaxOutputBufferSize(const ui::Size& size) {
- if (getArea(size) > k1440PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 4;
- if (getArea(size) > k1080PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 2;
- return kMaxBitstreamBufferSizeInBytes;
-}
-
-// Define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control code if not present in header files.
-#ifndef V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR
-#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
-#endif
-
-} // namespace
-
-// static
-std::unique_ptr<VideoEncoder> V4L2Encoder::create(
- C2Config::profile_t outputProfile, std::optional<uint8_t> level,
- const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
- FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB inputBufferDoneCb,
- OutputBufferDoneCB outputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb,
- scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
- ALOGV("%s()", __func__);
-
- std::unique_ptr<V4L2Encoder> encoder = ::base::WrapUnique<V4L2Encoder>(new V4L2Encoder(
- std::move(taskRunner), std::move(fetchOutputBufferCb), std::move(inputBufferDoneCb),
- std::move(outputBufferDoneCb), std::move(drainDoneCb), std::move(errorCb)));
- if (!encoder->initialize(outputProfile, level, visibleSize, stride, keyFramePeriod)) {
- return nullptr;
- }
- return encoder;
-}
-
-V4L2Encoder::V4L2Encoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
- FetchOutputBufferCB fetchOutputBufferCb,
- InputBufferDoneCB inputBufferDoneCb, OutputBufferDoneCB outputBufferDoneCb,
- DrainDoneCB drainDoneCb, ErrorCB errorCb)
- : mFetchOutputBufferCb(fetchOutputBufferCb),
- mInputBufferDoneCb(inputBufferDoneCb),
- mOutputBufferDoneCb(outputBufferDoneCb),
- mDrainDoneCb(std::move(drainDoneCb)),
- mErrorCb(std::move(errorCb)),
- mTaskRunner(std::move(taskRunner)) {
- ALOGV("%s()", __func__);
-
- mWeakThis = mWeakThisFactory.GetWeakPtr();
-}
-
-V4L2Encoder::~V4L2Encoder() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- mWeakThisFactory.InvalidateWeakPtrs();
-
- // Flushing the encoder will stop polling and streaming on the V4L2 device queues.
- flush();
-
- // Deallocate all V4L2 device input and output buffers.
- destroyInputBuffers();
- destroyOutputBuffers();
-}
-
-bool V4L2Encoder::encode(std::unique_ptr<InputFrame> frame) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
-
- // If we're in the error state we can immediately return, freeing the input buffer.
- if (mState == State::ERROR) {
- return false;
- }
-
- if (!frame) {
- ALOGW("Empty encode request scheduled");
- return false;
- }
-
- mEncodeRequests.push(EncodeRequest(std::move(frame)));
-
- // If we were waiting for encode requests, start encoding again.
- if (mState == State::WAITING_FOR_INPUT_FRAME) {
- setState(State::ENCODING);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
- }
-
- return true;
-}
-
-void V4L2Encoder::drain() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
-    // We can only start draining if all the requests in our input queue have been queued on the V4L2
- // device input queue, so we mark the last item in the input queue as EOS.
- if (!mEncodeRequests.empty()) {
- ALOGV("Marking last item (index: %" PRIu64 ") in encode request queue as EOS",
- mEncodeRequests.back().video_frame->index());
- mEncodeRequests.back().end_of_stream = true;
- return;
- }
-
- // Start a drain operation on the device. If no buffers are currently queued the device will
- // return an empty buffer with the V4L2_BUF_FLAG_LAST flag set.
- handleDrainRequest();
-}
-
-void V4L2Encoder::flush() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- handleFlushRequest();
-}
-
-bool V4L2Encoder::setBitrate(uint32_t bitrate) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, bitrate)})) {
- ALOGE("Setting bitrate to %u failed", bitrate);
- return false;
- }
- return true;
-}
-
-bool V4L2Encoder::setFramerate(uint32_t framerate) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_streamparm parms;
- memset(&parms, 0, sizeof(v4l2_streamparm));
- parms.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- parms.parm.output.timeperframe.numerator = 1;
- parms.parm.output.timeperframe.denominator = framerate;
- if (mDevice->ioctl(VIDIOC_S_PARM, &parms) != 0) {
- ALOGE("Setting framerate to %u failed", framerate);
- return false;
- }
- return true;
-}
-
-void V4L2Encoder::requestKeyframe() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- mKeyFrameCounter = 0;
-}
-
-VideoPixelFormat V4L2Encoder::inputFormat() const {
- return mInputLayout ? mInputLayout.value().mFormat : VideoPixelFormat::UNKNOWN;
-}
-
-bool V4L2Encoder::initialize(C2Config::profile_t outputProfile, std::optional<uint8_t> level,
- const ui::Size& visibleSize, uint32_t stride,
- uint32_t keyFramePeriod) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(keyFramePeriod > 0);
-
- mVisibleSize = visibleSize;
- mKeyFramePeriod = keyFramePeriod;
- mKeyFrameCounter = 0;
-
- // Open the V4L2 device for encoding to the requested output format.
- // TODO(dstaessens): Avoid conversion to VideoCodecProfile and use C2Config::profile_t directly.
- uint32_t outputPixelFormat = V4L2Device::C2ProfileToV4L2PixFmt(outputProfile, false);
- if (!outputPixelFormat) {
- ALOGE("Invalid output profile %s", profileToString(outputProfile));
- return false;
- }
-
- mDevice = V4L2Device::create();
- if (!mDevice) {
- ALOGE("Failed to create V4L2 device");
- return false;
- }
-
- if (!mDevice->open(V4L2Device::Type::kEncoder, outputPixelFormat)) {
- ALOGE("Failed to open device for profile %s (%s)", profileToString(outputProfile),
- fourccToString(outputPixelFormat).c_str());
- return false;
- }
-
-    // Make sure the device has all required capabilities (multi-planar Memory-To-Memory and
-    // streaming I/O), and check whether flushing is supported.
- if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
- ALOGE("Device doesn't have the required capabilities");
- return false;
- }
- if (!mDevice->isCommandSupported(V4L2_ENC_CMD_STOP)) {
- ALOGE("Device does not support flushing (V4L2_ENC_CMD_STOP)");
- return false;
- }
-
- // Get input/output queues so we can send encode request to the device and get back the results.
- mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (!mInputQueue || !mOutputQueue) {
- ALOGE("Failed to get V4L2 device queues");
- return false;
- }
-
- // First try to configure the specified output format, as changing the output format can affect
- // the configured input format.
- if (!configureOutputFormat(outputProfile)) return false;
-
- // Configure the input format. If the device doesn't support the specified format we'll use one
- // of the device's preferred formats in combination with an input format convertor.
- if (!configureInputFormat(kInputPixelFormat, stride)) return false;
-
- // Create input and output buffers.
- if (!createInputBuffers() || !createOutputBuffers()) return false;
-
- // Configure the device, setting all required controls.
- if (!configureDevice(outputProfile, level)) return false;
-
- // We're ready to start encoding now.
- setState(State::WAITING_FOR_INPUT_FRAME);
- return true;
-}
-
-void V4L2Encoder::handleEncodeRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::ENCODING || mState == State::ERROR);
-
- // If we're in the error state we can immediately return.
- if (mState == State::ERROR) {
- return;
- }
-
- // It's possible we flushed the encoder since this function was scheduled.
- if (mEncodeRequests.empty()) {
- return;
- }
-
- // Get the next encode request from the queue.
- EncodeRequest& encodeRequest = mEncodeRequests.front();
-
- // Check if the device has free input buffers available. If not we'll switch to the
- // WAITING_FOR_INPUT_BUFFERS state, and resume encoding once we've dequeued an input buffer.
- // Note: The input buffers are not copied into the device's input buffers, but rather a memory
- // pointer is imported. We still have to throttle the number of enqueues queued simultaneously
- // on the device however.
- if (mInputQueue->freeBuffersCount() == 0) {
- ALOGV("Waiting for device to return input buffers");
- setState(State::WAITING_FOR_V4L2_BUFFER);
- return;
- }
-
- // Request the next frame to be a key frame each time the counter reaches 0.
- if (mKeyFrameCounter == 0) {
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME)})) {
- ALOGE("Failed requesting key frame");
- onError();
- return;
- }
- }
- mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
-
- // Enqueue the input frame in the V4L2 device.
- uint64_t index = encodeRequest.video_frame->index();
- uint64_t timestamp = encodeRequest.video_frame->timestamp();
- bool end_of_stream = encodeRequest.end_of_stream;
- if (!enqueueInputBuffer(std::move(encodeRequest.video_frame))) {
- ALOGE("Failed to enqueue input frame (index: %" PRIu64 ", timestamp: %" PRId64 ")", index,
- timestamp);
- onError();
- return;
- }
- mEncodeRequests.pop();
-
- // Start streaming and polling on the input and output queues if required.
- if (!mInputQueue->isStreaming()) {
- ALOG_ASSERT(!mOutputQueue->isStreaming());
- if (!mOutputQueue->streamon() || !mInputQueue->streamon()) {
- ALOGE("Failed to start streaming on input and output queue");
- onError();
- return;
- }
- startDevicePoll();
- }
-
- // Queue buffers on the output queue. These buffers will be used to store the encoded bitstream.
- while (mOutputQueue->freeBuffersCount() > 0) {
- if (!enqueueOutputBuffer()) return;
- }
-
- // Drain the encoder if requested.
- if (end_of_stream) {
- handleDrainRequest();
- return;
- }
-
- if (mEncodeRequests.empty()) {
- setState(State::WAITING_FOR_INPUT_FRAME);
- return;
- }
-
- // Schedule the next buffer to be encoded.
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
-}
-
-void V4L2Encoder::handleFlushRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Stop the device poll thread.
- stopDevicePoll();
-
- // Stop streaming on the V4L2 device, which stops all currently queued encode operations and
- // releases all buffers currently in use by the device.
- for (auto& queue : {mInputQueue, mOutputQueue}) {
- if (queue && queue->isStreaming() && !queue->streamoff()) {
- ALOGE("Failed to stop streaming on the device queue");
- onError();
- }
- }
-
- // Clear all outstanding encode requests and references to input and output queue buffers.
- while (!mEncodeRequests.empty()) {
- mEncodeRequests.pop();
- }
- for (auto& buf : mInputBuffers) {
- buf = nullptr;
- }
- for (auto& buf : mOutputBuffers) {
- buf = nullptr;
- }
-
- // Streaming and polling on the V4L2 device input and output queues will be resumed once new
- // encode work is queued.
- if (mState != State::ERROR) {
- setState(State::WAITING_FOR_INPUT_FRAME);
- }
-}
-
-void V4L2Encoder::handleDrainRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState == State::DRAINING || mState == State::ERROR) {
- return;
- }
-
- setState(State::DRAINING);
-
- // If we're not streaming we can consider the request completed immediately.
- if (!mInputQueue->isStreaming()) {
- onDrainDone(true);
- return;
- }
-
- struct v4l2_encoder_cmd cmd;
- memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
- cmd.cmd = V4L2_ENC_CMD_STOP;
- if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
- ALOGE("Failed to stop encoder");
- onDrainDone(false);
- return;
- }
- ALOGV("%s(): Sent STOP command to encoder", __func__);
-}
-
-void V4L2Encoder::onDrainDone(bool done) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::DRAINING || mState == State::ERROR);
-
- if (mState == State::ERROR) {
- return;
- }
-
- if (!done) {
- ALOGE("draining the encoder failed");
- mDrainDoneCb.Run(false);
- onError();
- return;
- }
-
- ALOGV("Draining done");
- mDrainDoneCb.Run(true);
-
- // Draining the encoder is done; we can now start encoding again.
- if (!mEncodeRequests.empty()) {
- setState(State::ENCODING);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
- } else {
- setState(State::WAITING_FOR_INPUT_FRAME);
- }
-}
-
-bool V4L2Encoder::configureInputFormat(VideoPixelFormat inputFormat, uint32_t stride) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::UNINITIALIZED);
- ALOG_ASSERT(!mInputQueue->isStreaming());
- ALOG_ASSERT(!isEmpty(mVisibleSize));
-
- // First try to use the requested pixel format directly.
- std::optional<struct v4l2_format> format;
- auto fourcc = Fourcc::fromVideoPixelFormat(inputFormat, false);
- if (fourcc) {
- format = mInputQueue->setFormat(fourcc->toV4L2PixFmt(), mVisibleSize, 0, stride);
- }
-
- // If the device doesn't support the requested input format we'll try the device's preferred
- // input pixel formats and use a format converter. We need to try all formats, as some might
- // not be supported for the configured output format.
- if (!format) {
- std::vector<uint32_t> preferredFormats =
- mDevice->preferredInputFormat(V4L2Device::Type::kEncoder);
- for (uint32_t i = 0; !format && i < preferredFormats.size(); ++i) {
- format = mInputQueue->setFormat(preferredFormats[i], mVisibleSize, 0, stride);
- }
- }
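- // (When one of the preferred formats is negotiated here, incoming frames are first converted
- // to it by the component's input format converter; see FormatConverter.)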
-
- if (!format) {
- ALOGE("Failed to set input format to %s", videoPixelFormatToString(inputFormat).c_str());
- return false;
- }
-
- // Check whether the negotiated input format is valid. The coded size might be adjusted to match
- // encoder minimums, maximums and alignment requirements of the currently selected formats.
- auto layout = V4L2Device::v4L2FormatToVideoFrameLayout(*format);
- if (!layout) {
- ALOGE("Invalid input layout");
- return false;
- }
-
- mInputLayout = layout.value();
- if (!contains(Rect(mInputLayout->mCodedSize.width, mInputLayout->mCodedSize.height),
- Rect(mVisibleSize.width, mVisibleSize.height))) {
- ALOGE("Input size %s exceeds encoder capability, encoder can handle %s",
- toString(mVisibleSize).c_str(), toString(mInputLayout->mCodedSize).c_str());
- return false;
- }
-
- // Calculate the input coded size from the format.
- // TODO(dstaessens): How is this different from mInputLayout->coded_size()?
- mInputCodedSize = V4L2Device::allocatedSizeFromV4L2Format(*format);
-
- // Configuring the input format might cause the output buffer size to change.
- auto outputFormat = mOutputQueue->getFormat();
- if (!outputFormat.first) {
- ALOGE("Failed to get output format (errno: %i)", outputFormat.second);
- return false;
- }
- uint32_t adjustedOutputBufferSize = outputFormat.first->fmt.pix_mp.plane_fmt[0].sizeimage;
- if (mOutputBufferSize != adjustedOutputBufferSize) {
- mOutputBufferSize = adjustedOutputBufferSize;
- ALOGV("Output buffer size adjusted to: %u", mOutputBufferSize);
- }
-
- // The coded input size might be different from the visible size due to alignment requirements,
- // so we need to specify the visible rectangle. Note that this rectangle might still be adjusted
- // due to hardware limitations.
- Rect visibleRectangle(mVisibleSize.width, mVisibleSize.height);
-
- struct v4l2_rect rect;
- memset(&rect, 0, sizeof(rect));
- rect.left = visibleRectangle.left;
- rect.top = visibleRectangle.top;
- rect.width = visibleRectangle.width();
- rect.height = visibleRectangle.height();
-
- // Try to set the visible rectangle using the VIDIOC_S_SELECTION command. If this is not
- // supported we'll try the VIDIOC_S_CROP command instead. The visible rectangle might be
- // adjusted by the driver to conform to hardware limitations (e.g. rounded to the closest
- // supported horizontal and vertical offsets, width and height).
- struct v4l2_selection selection_arg;
- memset(&selection_arg, 0, sizeof(selection_arg));
- selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- selection_arg.target = V4L2_SEL_TGT_CROP;
- selection_arg.r = rect;
- if (mDevice->ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
- visibleRectangle = Rect(selection_arg.r.left, selection_arg.r.top,
- selection_arg.r.left + selection_arg.r.width,
- selection_arg.r.top + selection_arg.r.height);
- } else {
- struct v4l2_crop crop;
- memset(&crop, 0, sizeof(v4l2_crop));
- crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- crop.c = rect;
- if (mDevice->ioctl(VIDIOC_S_CROP, &crop) != 0 ||
- mDevice->ioctl(VIDIOC_G_CROP, &crop) != 0) {
- ALOGE("Failed to crop to specified visible rectangle");
- return false;
- }
- visibleRectangle = Rect(crop.c.left, crop.c.top, crop.c.left + crop.c.width,
- crop.c.top + crop.c.height);
- }
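- // Note: Rect is constructed as (left, top, right, bottom), while the v4l2_rect returned by
- // the driver stores (left, top, width, height), hence the additions above.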
-
- ALOGV("Input format set to %s (size: %s, adjusted size: %dx%d, coded size: %s)",
- videoPixelFormatToString(mInputLayout->mFormat).c_str(), toString(mVisibleSize).c_str(),
- visibleRectangle.width(), visibleRectangle.height(), toString(mInputCodedSize).c_str());
-
- mVisibleSize.set(visibleRectangle.width(), visibleRectangle.height());
- return true;
-}
-
-bool V4L2Encoder::configureOutputFormat(C2Config::profile_t outputProfile) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::UNINITIALIZED);
- ALOG_ASSERT(!mOutputQueue->isStreaming());
- ALOG_ASSERT(!isEmpty(mVisibleSize));
-
- auto format = mOutputQueue->setFormat(V4L2Device::C2ProfileToV4L2PixFmt(outputProfile, false),
- mVisibleSize, GetMaxOutputBufferSize(mVisibleSize));
- if (!format) {
- ALOGE("Failed to set output format to %s", profileToString(outputProfile));
- return false;
- }
-
- // The device might adjust the requested output buffer size to match hardware requirements.
- mOutputBufferSize = format->fmt.pix_mp.plane_fmt[0].sizeimage;
-
- ALOGV("Output format set to %s (buffer size: %u)", profileToString(outputProfile),
- mOutputBufferSize);
- return true;
-}
-
-bool V4L2Encoder::configureDevice(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Enable frame-level bitrate control. This is the only mandatory general control.
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 1)})) {
- ALOGW("Failed enabling bitrate control");
- // TODO(b/161508368): V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE is not supported yet, so assume
- // the operation was successful for now.
- }
-
- // Additional optional controls:
- // - Enable macroblock-level bitrate control.
- // - Set GOP length to 0 to disable periodic key frames.
- mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 1),
- V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0)});
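- // Disabling periodic key frames (GOP size 0) is intentional: key frames are instead requested
- // explicitly every mKeyFramePeriod frames in handleEncodeRequest() using the
- // V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME control.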
-
- // All controls below are H.264-specific, so we can return here if the profile is not H.264.
- if (outputProfile >= C2Config::PROFILE_AVC_BASELINE &&
- outputProfile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH) {
- return configureH264(outputProfile, outputH264Level);
- }
-
- return true;
-}
-
-bool V4L2Encoder::configureH264(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level) {
- // When encoding H.264 we want to prepend SPS and PPS to each IDR for resilience. Some
- // devices support this through the V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control.
- // TODO(b/161495502): V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR is not supported yet, so
- // just log a warning if the operation was unsuccessful for now.
- if (mDevice->isCtrlExposed(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR)) {
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR, 1)})) {
- ALOGE("Failed to configure device to prepend SPS and PPS to each IDR");
- return false;
- }
- ALOGV("Device supports prepending SPS and PPS to each IDR");
- } else {
- ALOGW("Device doesn't support prepending SPS and PPS to IDR");
- }
-
- std::vector<V4L2ExtCtrl> h264Ctrls;
-
- // No B-frames, for lowest decoding latency.
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_B_FRAMES, 0);
- // Quantization parameter maximum value (for variable bitrate control).
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 51);
-
- // Set H.264 profile.
- int32_t profile = V4L2Device::c2ProfileToV4L2H264Profile(outputProfile);
- if (profile < 0) {
- ALOGE("Trying to set invalid H.264 profile");
- return false;
- }
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_PROFILE, profile);
-
- // Set H.264 output level. Use Level 4.0 as fallback default.
- int32_t h264Level =
- static_cast<int32_t>(outputH264Level.value_or(V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL, h264Level);
-
- // Ask not to put SPS and PPS into separate bitstream buffers.
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_HEADER_MODE,
- V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
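- // With V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME the driver emits SPS and PPS in the
- // same bitstream buffer as the first frame, instead of in a separate buffer.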
-
- // Ignore return value as these controls are optional.
- mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(h264Ctrls));
-
- return true;
-}
-
-bool V4L2Encoder::startDevicePoll() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->startPolling(::base::BindRepeating(&V4L2Encoder::serviceDeviceTask, mWeakThis),
- ::base::BindRepeating(&V4L2Encoder::onPollError, mWeakThis))) {
- ALOGE("Device poll thread failed to start");
- onError();
- return false;
- }
-
- ALOGV("Device poll started");
- return true;
-}
-
-bool V4L2Encoder::stopDevicePoll() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->stopPolling()) {
- ALOGE("Failed to stop polling on the device");
- onError();
- return false;
- }
-
- ALOGV("Device poll stopped");
- return true;
-}
-
-void V4L2Encoder::onPollError() {
- ALOGV("%s()", __func__);
- onError();
-}
-
-void V4L2Encoder::serviceDeviceTask(bool /*event*/) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
-
- if (mState == State::ERROR) {
- return;
- }
-
- // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free list.
- while (mInputQueue->queuedBuffersCount() > 0) {
- if (!dequeueInputBuffer()) break;
- }
-
- // Dequeue completed output (VIDEO_CAPTURE) buffers, and recycle to the free list.
- while (mOutputQueue->queuedBuffersCount() > 0) {
- if (!dequeueOutputBuffer()) break;
- }
-
- ALOGV("%s() - done", __func__);
-}
-
-bool V4L2Encoder::enqueueInputBuffer(std::unique_ptr<InputFrame> frame) {
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mInputQueue->freeBuffersCount() > 0);
- ALOG_ASSERT(mState == State::ENCODING);
- ALOG_ASSERT(frame);
- ALOG_ASSERT(mInputLayout->mFormat == frame->pixelFormat());
- ALOG_ASSERT(mInputLayout->mPlanes.size() == frame->planes().size());
-
- auto format = frame->pixelFormat();
- auto planes = frame->planes();
- auto index = frame->index();
- auto timestamp = frame->timestamp();
-
- ALOGV("%s(): queuing input buffer (index: %" PRId64 ")", __func__, index);
-
- auto buffer = mInputQueue->getFreeBuffer();
- if (!buffer) {
- ALOGE("Failed to get free buffer from device input queue");
- return false;
- }
-
- // Mark the buffer with the frame's timestamp so we can identify the associated output buffers.
- buffer->setTimeStamp(
- {.tv_sec = static_cast<time_t>(timestamp / ::base::Time::kMicrosecondsPerSecond),
- .tv_usec = static_cast<time_t>(timestamp % ::base::Time::kMicrosecondsPerSecond)});
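- // E.g. a timestamp of 1500000us is stored as {.tv_sec = 1, .tv_usec = 500000} and recombined
- // into 1500000us again when the matching input and output buffers are dequeued.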
- size_t bufferId = buffer->bufferId();
-
- for (size_t i = 0; i < planes.size(); ++i) {
- // A single-buffer input format may have multiple color planes, so the bytesUsed of the
- // single buffer should be the sum of the sizes of all color planes.
- size_t bytesUsed = 0;
- if (planes.size() == 1) {
- bytesUsed = allocationSize(format, mInputLayout->mCodedSize);
- } else {
- bytesUsed = ::base::checked_cast<size_t>(
- getArea(planeSize(format, i, mInputLayout->mCodedSize)).value());
- }
-
- // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
- // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
- // right interface, including any necessary validation and potential alignment.
- buffer->setPlaneDataOffset(i, planes[i].mOffset);
- bytesUsed += planes[i].mOffset;
- // Workaround: filling the plane length should not be needed, but a bug in the videobuf2
- // library requires it.
- buffer->setPlaneSize(i, mInputLayout->mPlanes[i].mSize + planes[i].mOffset);
- buffer->setPlaneBytesUsed(i, bytesUsed);
- }
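- // Example (assuming a single-buffer NV12 frame of 640x480): bytesUsed would be 640*480 bytes
- // for the Y plane plus 640*240 bytes for the interleaved UV plane, 460800 bytes in total,
- // plus the plane's data offset within the DMA-buf.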
-
- if (!std::move(*buffer).queueDMABuf(frame->fds())) {
- ALOGE("Failed to queue input buffer using QueueDMABuf");
- onError();
- return false;
- }
-
- ALOGV("Queued buffer in input queue (index: %" PRId64 ", timestamp: %" PRId64
- ", bufferId: %zu)",
- index, timestamp, bufferId);
-
- ALOG_ASSERT(!mInputBuffers[bufferId]);
- mInputBuffers[bufferId] = std::move(frame);
-
- return true;
-}
-
-bool V4L2Encoder::enqueueOutputBuffer() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mOutputQueue->freeBuffersCount() > 0);
-
- auto buffer = mOutputQueue->getFreeBuffer();
- if (!buffer) {
- ALOGE("Failed to get free buffer from device output queue");
- onError();
- return false;
- }
-
- std::unique_ptr<BitstreamBuffer> bitstreamBuffer;
- mFetchOutputBufferCb.Run(mOutputBufferSize, &bitstreamBuffer);
- if (!bitstreamBuffer) {
- ALOGE("Failed to fetch output block");
- onError();
- return false;
- }
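- // The fetched BitstreamBuffer is provided by the client through mFetchOutputBufferCb and
- // wraps a dmabuf fd, which is imported into the V4L2 output buffer below.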
-
- size_t bufferId = buffer->bufferId();
-
- std::vector<int> fds;
- fds.push_back(bitstreamBuffer->dmabuf_fd);
- if (!std::move(*buffer).queueDMABuf(fds)) {
- ALOGE("Failed to queue output buffer using QueueDMABuf");
- onError();
- return false;
- }
-
- ALOG_ASSERT(!mOutputBuffers[bufferId]);
- mOutputBuffers[bufferId] = std::move(bitstreamBuffer);
- ALOGV("%s(): Queued buffer in output queue (bufferId: %zu)", __func__, bufferId);
- return true;
-}
-
-bool V4L2Encoder::dequeueInputBuffer() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
- ALOG_ASSERT(mInputQueue->queuedBuffersCount() > 0);
-
- if (mState == State::ERROR) {
- return false;
- }
-
- bool success;
- V4L2ReadableBufferRef buffer;
- std::tie(success, buffer) = mInputQueue->dequeueBuffer();
- if (!success) {
- ALOGE("Failed to dequeue buffer from input queue");
- onError();
- return false;
- }
- if (!buffer) {
- // No more buffers ready to be dequeued in input queue.
- return false;
- }
-
- uint64_t index = mInputBuffers[buffer->bufferId()]->index();
- int64_t timestamp = buffer->getTimeStamp().tv_usec +
- buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond;
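- // The timestamp was attached to the buffer in enqueueInputBuffer() and is only recombined
- // here for logging; the associated frame is looked up by bufferId.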
- ALOGV("Dequeued buffer from input queue (index: %" PRId64 ", timestamp: %" PRId64
- ", bufferId: %zu)",
- index, timestamp, buffer->bufferId());
-
- mInputBuffers[buffer->bufferId()] = nullptr;
-
- mInputBufferDoneCb.Run(index);
-
- // If we previously used up all input queue buffers we can start encoding again now.
- if ((mState == State::WAITING_FOR_V4L2_BUFFER) && !mEncodeRequests.empty()) {
- setState(State::ENCODING);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
- }
-
- return true;
-}
-
-bool V4L2Encoder::dequeueOutputBuffer() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
- ALOG_ASSERT(mOutputQueue->queuedBuffersCount() > 0);
-
- if (mState == State::ERROR) {
- return false;
- }
-
- bool success;
- V4L2ReadableBufferRef buffer;
- std::tie(success, buffer) = mOutputQueue->dequeueBuffer();
- if (!success) {
- ALOGE("Failed to dequeue buffer from output queue");
- onError();
- return false;
- }
- if (!buffer) {
- // No more buffers ready to be dequeued in output queue.
- return false;
- }
-
- size_t encodedDataSize = buffer->getPlaneBytesUsed(0) - buffer->getPlaneDataOffset(0);
- ::base::TimeDelta timestamp = ::base::TimeDelta::FromMicroseconds(
- buffer->getTimeStamp().tv_usec +
- buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond);
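- // This timestamp round-trips the one set on the associated input buffer in
- // enqueueInputBuffer(), linking the encoded bitstream back to the original input frame.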
-
- ALOGV("Dequeued buffer from output queue (timestamp: %" PRId64
- ", bufferId: %zu, data size: %zu, EOS: %d)",
- timestamp.InMicroseconds(), buffer->bufferId(), encodedDataSize, buffer->isLast());
-
- if (!mOutputBuffers[buffer->bufferId()]) {
- ALOGE("Failed to find output block associated with output buffer");
- onError();
- return false;
- }
-
- std::unique_ptr<BitstreamBuffer> bitstreamBuffer =
- std::move(mOutputBuffers[buffer->bufferId()]);
- if (encodedDataSize > 0) {
- mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(), buffer->isKeyframe(),
- std::move(bitstreamBuffer));
- }
-
- // If the buffer is marked as last and we were draining the encoder, the drain is now done.
- if ((mState == State::DRAINING) && buffer->isLast()) {
- onDrainDone(true);
- // Start the encoder again.
- struct v4l2_encoder_cmd cmd;
- memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
- cmd.cmd = V4L2_ENC_CMD_START;
- if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
- ALOGE("Failed to restart encoder after draining (V4L2_ENC_CMD_START)");
- onError();
- return false;
- }
- }
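- // Note: after V4L2_ENC_CMD_STOP the device produces no more output until it receives
- // V4L2_ENC_CMD_START again, which is why the encoder is restarted here.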
-
- // Queue a new output buffer to replace the one we dequeued.
- buffer = nullptr;
- enqueueOutputBuffer();
-
- return true;
-}
-
-bool V4L2Encoder::createInputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mInputQueue->isStreaming());
- ALOG_ASSERT(mInputBuffers.empty());
-
- // No memory is allocated here; we just generate a list of buffers on the input queue, which
- // will hold memory handles to the real buffers.
- if (mInputQueue->allocateBuffers(kInputBufferCount, V4L2_MEMORY_DMABUF) < kInputBufferCount) {
- ALOGE("Failed to create V4L2 input buffers.");
- return false;
- }
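- // With V4L2_MEMORY_DMABUF the queue only reserves buffer slots; the actual memory is attached
- // when a DMA-buf fd is imported at queue time (see enqueueInputBuffer()).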
-
- mInputBuffers.resize(mInputQueue->allocatedBuffersCount());
- return true;
-}
-
-bool V4L2Encoder::createOutputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mOutputQueue->isStreaming());
- ALOG_ASSERT(mOutputBuffers.empty());
-
- // No memory is allocated here; we just generate a list of buffers on the output queue, which
- // will hold memory handles to the real buffers.
- if (mOutputQueue->allocateBuffers(kOutputBufferCount, V4L2_MEMORY_DMABUF) <
- kOutputBufferCount) {
- ALOGE("Failed to create V4L2 output buffers.");
- return false;
- }
-
- mOutputBuffers.resize(mOutputQueue->allocatedBuffersCount());
- return true;
-}
-
-void V4L2Encoder::destroyInputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- // Check that the queue exists before dereferencing it in the assert.
- if (!mInputQueue || mInputQueue->allocatedBuffersCount() == 0) return;
- ALOG_ASSERT(!mInputQueue->isStreaming());
-
- mInputQueue->deallocateBuffers();
- mInputBuffers.clear();
-}
-
-void V4L2Encoder::destroyOutputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- // Check that the queue exists before dereferencing it in the assert.
- if (!mOutputQueue || mOutputQueue->allocatedBuffersCount() == 0) return;
- ALOG_ASSERT(!mOutputQueue->isStreaming());
-
- mOutputQueue->deallocateBuffers();
- mOutputBuffers.clear();
-}
-
-void V4L2Encoder::onError() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState != State::ERROR) {
- setState(State::ERROR);
- mErrorCb.Run();
- }
-}
-
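- // Valid state transitions, enforced by the asserts below:
- //   UNINITIALIZED -> WAITING_FOR_INPUT_FRAME <-> ENCODING <-> WAITING_FOR_V4L2_BUFFER,
- //   ENCODING/WAITING_FOR_INPUT_FRAME -> DRAINING -> ENCODING/WAITING_FOR_INPUT_FRAME,
- //   and any state -> ERROR.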
-void V4L2Encoder::setState(State state) {
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Check whether the state change is valid.
- switch (state) {
- case State::UNINITIALIZED:
- break;
- case State::WAITING_FOR_INPUT_FRAME:
- ALOG_ASSERT(mState != State::ERROR);
- break;
- case State::WAITING_FOR_V4L2_BUFFER:
- ALOG_ASSERT(mState == State::ENCODING);
- break;
- case State::ENCODING:
- ALOG_ASSERT(mState == State::WAITING_FOR_INPUT_FRAME ||
- mState == State::WAITING_FOR_V4L2_BUFFER || mState == State::DRAINING);
- break;
- case State::DRAINING:
- ALOG_ASSERT(mState == State::ENCODING || mState == State::WAITING_FOR_INPUT_FRAME);
- break;
- case State::ERROR:
- break;
- }
-
- ALOGV("Changed encoder state from %s to %s", stateToString(mState), stateToString(state));
- mState = state;
-}
-
-const char* V4L2Encoder::stateToString(State state) {
- switch (state) {
- case State::UNINITIALIZED:
- return "UNINITIALIZED";
- case State::WAITING_FOR_INPUT_FRAME:
- return "WAITING_FOR_INPUT_FRAME";
- case State::WAITING_FOR_V4L2_BUFFER:
- return "WAITING_FOR_V4L2_BUFFER";
- case State::ENCODING:
- return "ENCODING";
- case State::DRAINING:
- return "DRAINING";
- case State::ERROR:
- return "ERROR";
- }
-}
-
-} // namespace android
diff --git a/components/VideoEncoder.cpp b/components/VideoEncoder.cpp
deleted file mode 100644
index e3e19c2..0000000
--- a/components/VideoEncoder.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <v4l2_codec2/components/VideoEncoder.h>
-
-namespace android {
-
-VideoEncoder::InputFrame::InputFrame(std::vector<int>&& fds, std::vector<VideoFramePlane>&& planes,
- VideoPixelFormat pixelFormat, uint64_t index,
- int64_t timestamp)
- : mFds(std::move(fds)),
- mPlanes(std::move(planes)),
- mPixelFormat(pixelFormat),
- mIndex(index),
- mTimestamp(timestamp) {}
-
-} // namespace android
diff --git a/components/VideoFrame.cpp b/components/VideoFrame.cpp
index b7481ad..cb5efb7 100644
--- a/components/VideoFrame.cpp
+++ b/components/VideoFrame.cpp
@@ -34,11 +34,11 @@ const std::vector<int>& VideoFrame::getFDs() const {
return mFds;
}
-void VideoFrame::setVisibleRect(const Rect& visibleRect) {
+void VideoFrame::setVisibleRect(const media::Rect& visibleRect) {
mVisibleRect = visibleRect;
}
-const Rect& VideoFrame::getVisibleRect() const {
+const media::Rect& VideoFrame::getVisibleRect() const {
return mVisibleRect;
}
diff --git a/components/VideoFramePool.cpp b/components/VideoFramePool.cpp
index 665ff73..b6bbfab 100644
--- a/components/VideoFramePool.cpp
+++ b/components/VideoFramePool.cpp
@@ -26,15 +26,14 @@ using android::hardware::graphics::common::V1_0::BufferUsage;
namespace android {
// static
-std::optional<uint32_t> VideoFramePool::getBufferIdFromGraphicBlock(C2BlockPool& blockPool,
+std::optional<uint32_t> VideoFramePool::getBufferIdFromGraphicBlock(const C2BlockPool& blockPool,
const C2Block2D& block) {
ALOGV("%s() blockPool.getAllocatorId() = %u", __func__, blockPool.getAllocatorId());
if (blockPool.getAllocatorId() == android::V4L2AllocatorId::V4L2_BUFFERPOOL) {
return C2VdaPooledBlockPool::getBufferIdFromGraphicBlock(block);
} else if (blockPool.getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
- C2VdaBqBlockPool* bqPool = static_cast<C2VdaBqBlockPool*>(&blockPool);
- return bqPool->getBufferIdFromGraphicBlock(block);
+ return C2VdaBqBlockPool::getBufferIdFromGraphicBlock(block);
}
ALOGE("%s(): unknown allocator ID: %u", __func__, blockPool.getAllocatorId());
@@ -42,9 +41,7 @@ std::optional<uint32_t> VideoFramePool::getBufferIdFromGraphicBlock(C2BlockPool&
}
// static
-c2_status_t VideoFramePool::requestNewBufferSet(C2BlockPool& blockPool, int32_t bufferCount,
- const ui::Size& size, uint32_t format,
- C2MemoryUsage usage) {
+c2_status_t VideoFramePool::requestNewBufferSet(C2BlockPool& blockPool, int32_t bufferCount) {
ALOGV("%s() blockPool.getAllocatorId() = %u", __func__, blockPool.getAllocatorId());
if (blockPool.getAllocatorId() == android::V4L2AllocatorId::V4L2_BUFFERPOOL) {
@@ -52,7 +49,7 @@ c2_status_t VideoFramePool::requestNewBufferSet(C2BlockPool& blockPool, int32_t
return bpPool->requestNewBufferSet(bufferCount);
} else if (blockPool.getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
C2VdaBqBlockPool* bqPool = static_cast<C2VdaBqBlockPool*>(&blockPool);
- return bqPool->requestNewBufferSet(bufferCount, size.width, size.height, format, usage);
+ return bqPool->requestNewBufferSet(bufferCount);
}
ALOGE("%s(): unknown allocator ID: %u", __func__, blockPool.getAllocatorId());
@@ -72,40 +69,31 @@ bool VideoFramePool::setNotifyBlockAvailableCb(C2BlockPool& blockPool, ::base::O
// static
std::unique_ptr<VideoFramePool> VideoFramePool::Create(
- std::shared_ptr<C2BlockPool> blockPool, const size_t numBuffers, const ui::Size& size,
+ std::shared_ptr<C2BlockPool> blockPool, const size_t numBuffers, const media::Size& size,
HalPixelFormat pixelFormat, bool isSecure,
scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
ALOG_ASSERT(blockPool != nullptr);
- uint64_t usage = static_cast<uint64_t>(BufferUsage::VIDEO_DECODER);
- if (isSecure) {
- usage |= C2MemoryUsage::READ_PROTECTED;
- } else if (blockPool->getAllocatorId() == android::V4L2AllocatorId::V4L2_BUFFERPOOL) {
- // CPU access to buffers is only required in byte buffer mode.
- usage |= C2MemoryUsage::CPU_READ;
- }
- const C2MemoryUsage memoryUsage(usage);
-
- if (requestNewBufferSet(*blockPool, numBuffers, size, static_cast<uint32_t>(pixelFormat),
- memoryUsage) != C2_OK) {
+ if (requestNewBufferSet(*blockPool, numBuffers) != C2_OK) {
return nullptr;
}
std::unique_ptr<VideoFramePool> pool = ::base::WrapUnique(new VideoFramePool(
- std::move(blockPool), size, pixelFormat, memoryUsage, std::move(taskRunner)));
+ std::move(blockPool), size, pixelFormat, isSecure, std::move(taskRunner)));
if (!pool->initialize()) return nullptr;
return pool;
}
-VideoFramePool::VideoFramePool(std::shared_ptr<C2BlockPool> blockPool, const ui::Size& size,
- HalPixelFormat pixelFormat, C2MemoryUsage memoryUsage,
+VideoFramePool::VideoFramePool(std::shared_ptr<C2BlockPool> blockPool, const media::Size& size,
+ HalPixelFormat pixelFormat, bool isSecure,
scoped_refptr<::base::SequencedTaskRunner> taskRunner)
: mBlockPool(std::move(blockPool)),
mSize(size),
mPixelFormat(pixelFormat),
- mMemoryUsage(memoryUsage),
+ mMemoryUsage(isSecure ? C2MemoryUsage::READ_PROTECTED : C2MemoryUsage::CPU_READ,
+ static_cast<uint64_t>(BufferUsage::VIDEO_DECODER)),
mClientTaskRunner(std::move(taskRunner)) {
- ALOGV("%s(size=%dx%d)", __func__, size.width, size.height);
+ ALOGV("%s(size=%dx%d)", __func__, size.width(), size.height());
ALOG_ASSERT(mClientTaskRunner->RunsTasksInCurrentSequence());
DCHECK(mBlockPool);
DCHECK(mClientTaskRunner);
@@ -180,8 +168,9 @@ void VideoFramePool::getVideoFrameTask() {
static size_t sDelay = kFetchRetryDelayInit;
std::shared_ptr<C2GraphicBlock> block;
- c2_status_t err = mBlockPool->fetchGraphicBlock(
- mSize.width, mSize.height, static_cast<uint32_t>(mPixelFormat), mMemoryUsage, &block);
+ c2_status_t err = mBlockPool->fetchGraphicBlock(mSize.width(), mSize.height(),
+ static_cast<uint32_t>(mPixelFormat),
+ mMemoryUsage, &block);
if (err == C2_TIMED_OUT || err == C2_BLOCKING) {
if (setNotifyBlockAvailableCb(*mBlockPool,
::base::BindOnce(&VideoFramePool::getVideoFrameTaskThunk,
diff --git a/components/include/v4l2_codec2/components/BitstreamBuffer.h b/components/include/v4l2_codec2/components/BitstreamBuffer.h
index d61e4f9..ec8a917 100644
--- a/components/include/v4l2_codec2/components/BitstreamBuffer.h
+++ b/components/include/v4l2_codec2/components/BitstreamBuffer.h
@@ -7,6 +7,8 @@
#include <stdint.h>
+#include <base/files/scoped_file.h>
+
namespace android {
// The BitstreamBuffer class can be used to store encoded video data.
diff --git a/components/include/v4l2_codec2/components/V4L2ComponentFactory.h b/components/include/v4l2_codec2/components/V4L2ComponentFactory.h
deleted file mode 100644
index fc6abea..0000000
--- a/components/include/v4l2_codec2/components/V4L2ComponentFactory.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_FACTORY_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_FACTORY_H
-
-#include <memory>
-#include <string>
-
-#include <C2ComponentFactory.h>
-#include <util/C2InterfaceHelper.h>
-
-namespace android {
-
-class V4L2ComponentFactory : public C2ComponentFactory {
-public:
- static std::unique_ptr<V4L2ComponentFactory> create(
- const std::string& componentName, std::shared_ptr<C2ReflectorHelper> reflector);
- V4L2ComponentFactory(const std::string& componentName, bool isEncoder,
- std::shared_ptr<C2ReflectorHelper> reflector);
- ~V4L2ComponentFactory() override = default;
-
- // Implementation of C2ComponentFactory.
- c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
- ComponentDeleter deleter) override;
- c2_status_t createInterface(c2_node_id_t id,
- std::shared_ptr<C2ComponentInterface>* const interface,
- InterfaceDeleter deleter) override;
-
-private:
- const std::string mComponentName;
- const bool mIsEncoder;
- std::shared_ptr<C2ReflectorHelper> mReflector;
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_FACTORY_H
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeComponent.h b/components/include/v4l2_codec2/components/V4L2DecodeComponent.h
index 1e98118..37da866 100644
--- a/components/include/v4l2_codec2/components/V4L2DecodeComponent.h
+++ b/components/include/v4l2_codec2/components/V4L2DecodeComponent.h
@@ -5,7 +5,6 @@
#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
-#include <atomic>
#include <memory>
#include <C2Component.h>
@@ -20,6 +19,7 @@
#include <v4l2_codec2/components/V4L2DecodeInterface.h>
#include <v4l2_codec2/components/VideoDecoder.h>
#include <v4l2_codec2/components/VideoFramePool.h>
+#include <v4l2_device.h>
namespace android {
@@ -59,9 +59,9 @@ private:
static const char* ComponentStateToString(ComponentState state);
// Handle C2Component's public methods on |mDecoderTaskRunner|.
- void startTask(c2_status_t* status, ::base::WaitableEvent* done);
+ void destroyTask();
+ void startTask(c2_status_t* status);
void stopTask();
- void releaseTask();
void queueTask(std::unique_ptr<C2Work> work);
void flushTask();
void drainTask();
@@ -70,9 +70,8 @@ private:
// Try to process pending works at |mPendingWorks|. Paused when |mIsDraining| is set.
void pumpPendingWorks();
// Get the buffer pool.
- std::unique_ptr<VideoFramePool> getVideoFramePool(const ui::Size& size,
- HalPixelFormat pixelFormat,
- size_t numBuffers);
+ void getVideoFramePool(std::unique_ptr<VideoFramePool>* pool, const media::Size& size,
+ HalPixelFormat pixelFormat, size_t numBuffers);
// Detect and report works with no-show frame, only used at VP8 and VP9.
void detectNoShowFrameWorksAndReportIfFinished(const C2WorkOrdinalStruct& currOrdinal);
@@ -92,8 +91,6 @@ private:
// Report error when any error occurs.
void reportError(c2_status_t error);
- static std::atomic<int32_t> sConcurrentInstances;
-
// The pointer of component interface implementation.
std::shared_ptr<V4L2DecodeInterface> mIntfImpl;
// The pointer of component interface.
@@ -122,6 +119,7 @@ private:
// The mutex lock to synchronize start/stop/reset/release calls.
std::mutex mStartStopLock;
+ ::base::WaitableEvent mStartStopDone;
// The color aspects parameter for current decoded output buffers.
std::shared_ptr<C2StreamColorAspectsInfo::output> mCurrentColorAspects;
@@ -140,9 +138,6 @@ private:
::base::Thread mDecoderThread{"V4L2DecodeComponentDecoderThread"};
scoped_refptr<::base::SequencedTaskRunner> mDecoderTaskRunner;
- // Hold a weak_ptr of |*this| when |mDecoderThread| is running.
- std::weak_ptr<V4L2DecodeComponent> mStdWeakThis;
-
::base::WeakPtrFactory<V4L2DecodeComponent> mWeakThisFactory{this};
::base::WeakPtr<V4L2DecodeComponent> mWeakThis;
};
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h b/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
index f2ab898..b57f6c1 100644
--- a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
+++ b/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
@@ -9,9 +9,9 @@
#include <string>
#include <C2Config.h>
-#include <ui/Size.h>
#include <util/C2InterfaceHelper.h>
+#include <size.h>
#include <v4l2_codec2/common/VideoTypes.h>
namespace android {
@@ -27,8 +27,8 @@ public:
c2_status_t status() const { return mInitStatus; }
C2BlockPool::local_id_t getBlockPoolId() const { return mOutputBlockPoolIds->m.values[0]; }
std::optional<VideoCodec> getVideoCodec() const { return mVideoCodec; }
-
- static uint32_t getOutputDelay(VideoCodec codec);
+ media::Size getMaxSize() const { return mMaxSize; }
+ media::Size getMinSize() const { return mMinSize; }
size_t getInputBufferSize() const;
c2_status_t queryColorAspects(
@@ -50,8 +50,6 @@ private:
const C2P<C2StreamColorAspectsTuning::output>& def,
const C2P<C2StreamColorAspectsInfo::input>& coded);
- // The kind of the component; should be C2Component::KIND_DECODER.
- std::shared_ptr<C2ComponentKindSetting> mKind;
// The input format kind; should be C2FormatCompressed.
std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
// The memory usage flag of input buffer; should be BufferUsage::VIDEO_DECODER.
@@ -96,6 +94,8 @@ private:
c2_status_t mInitStatus;
std::optional<VideoCodec> mVideoCodec;
+ media::Size mMinSize;
+ media::Size mMaxSize;
};
} // namespace android
diff --git a/components/include/v4l2_codec2/components/V4L2Decoder.h b/components/include/v4l2_codec2/components/V4L2Decoder.h
index b65bd49..bdddc7f 100644
--- a/components/include/v4l2_codec2/components/V4L2Decoder.h
+++ b/components/include/v4l2_codec2/components/V4L2Decoder.h
@@ -13,13 +13,13 @@
#include <base/callback.h>
#include <base/memory/weak_ptr.h>
-#include <ui/Rect.h>
-#include <ui/Size.h>
-#include <v4l2_codec2/common/V4L2Device.h>
+#include <rect.h>
+#include <size.h>
#include <v4l2_codec2/common/VideoTypes.h>
#include <v4l2_codec2/components/VideoDecoder.h>
#include <v4l2_codec2/components/VideoFrame.h>
#include <v4l2_codec2/components/VideoFramePool.h>
+#include <v4l2_device.h>
namespace android {
@@ -49,6 +49,7 @@ private:
: buffer(std::move(buffer)), decodeCb(std::move(decodeCb)) {}
DecodeRequest(DecodeRequest&&) = default;
~DecodeRequest() = default;
+ DecodeRequest& operator=(DecodeRequest&&);
std::unique_ptr<BitstreamBuffer> buffer; // nullptr means Drain
DecodeCB decodeCb;
@@ -63,14 +64,13 @@ private:
void serviceDeviceTask(bool event);
bool dequeueResolutionChangeEvent();
bool changeResolution();
- bool setupOutputFormat(const ui::Size& size);
void tryFetchVideoFrame();
void onVideoFrameReady(std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId);
std::optional<size_t> getNumOutputBuffers();
std::optional<struct v4l2_format> getFormatInfo();
- Rect getVisibleRect(const ui::Size& codedSize);
+ media::Rect getVisibleRect(const media::Size& codedSize);
bool sendV4L2DecoderCmd(bool start);
void setState(State newState);
@@ -78,9 +78,9 @@ private:
std::unique_ptr<VideoFramePool> mVideoFramePool;
- scoped_refptr<V4L2Device> mDevice;
- scoped_refptr<V4L2Queue> mInputQueue;
- scoped_refptr<V4L2Queue> mOutputQueue;
+ scoped_refptr<media::V4L2Device> mDevice;
+ scoped_refptr<media::V4L2Queue> mInputQueue;
+ scoped_refptr<media::V4L2Queue> mOutputQueue;
std::queue<DecodeRequest> mDecodeRequests;
std::map<int32_t, DecodeCB> mPendingDecodeCbs;
@@ -90,8 +90,8 @@ private:
DecodeCB mDrainCb;
ErrorCB mErrorCb;
- ui::Size mCodedSize;
- Rect mVisibleRect;
+ media::Size mCodedSize;
+ media::Rect mVisibleRect;
std::map<size_t, std::unique_ptr<VideoFrame>> mFrameAtDevice;
diff --git a/components/include/v4l2_codec2/components/V4L2EncodeComponent.h b/components/include/v4l2_codec2/components/V4L2EncodeComponent.h
index 4665ffa..4a61e05 100644
--- a/components/include/v4l2_codec2/components/V4L2EncodeComponent.h
+++ b/components/include/v4l2_codec2/components/V4L2EncodeComponent.h
@@ -8,7 +8,6 @@
#include <atomic>
#include <memory>
#include <optional>
-#include <unordered_map>
#include <C2Component.h>
#include <C2ComponentFactory.h>
@@ -23,12 +22,20 @@
#include <base/threading/thread.h>
#include <util/C2InterfaceHelper.h>
+#include <size.h>
+#include <v4l2_codec2/common/FormatConverter.h>
+#include <v4l2_codec2/components/V4L2EncodeInterface.h>
+#include <video_frame_layout.h>
+
+namespace media {
+class V4L2Device;
+class V4L2ReadableBuffer;
+class V4L2Queue;
+} // namespace media
+
namespace android {
-struct BitstreamBuffer;
-class FormatConverter;
-class VideoEncoder;
-class V4L2EncodeInterface;
+struct VideoFramePlane;
class V4L2EncodeComponent : public C2Component,
public std::enable_shared_from_this<V4L2EncodeComponent> {
@@ -55,6 +62,19 @@ public:
std::shared_ptr<C2ComponentInterface> intf() override;
private:
+ class InputFrame {
+ public:
+ // Create an input frame from a C2GraphicBlock.
+ static std::unique_ptr<InputFrame> Create(const C2ConstGraphicBlock& block);
+ ~InputFrame() = default;
+
+ const std::vector<int>& getFDs() const { return mFds; }
+
+ private:
+ InputFrame(std::vector<int> fds) : mFds(std::move(fds)) {}
+ const std::vector<int> mFds;
+ };
+
// Possible component states.
enum class ComponentState {
UNLOADED, // Initial state of component.
@@ -63,6 +83,16 @@ private:
ERROR, // An error occurred.
};
+ // Possible encoder states.
+ enum class EncoderState {
+ UNINITIALIZED, // Not initialized yet or initialization failed.
+ WAITING_FOR_INPUT, // Waiting for work to be queued.
+ WAITING_FOR_INPUT_BUFFERS, // Waiting for V4L2 input queue buffers.
+ ENCODING, // Queuing input buffers.
+ DRAINING, // Flushing encoder.
+ ERROR, // Encoder encountered an error.
+ };
+
V4L2EncodeComponent(C2String name, c2_node_id_t id,
std::shared_ptr<V4L2EncodeInterface> interface);
@@ -79,7 +109,7 @@ private:
// scheduled work and mark the last item as EOS, before processing any new work.
void drainTask(drain_mode_t drainMode);
// Called on the encoder thread when a drain is completed.
- void onDrainDone(bool success);
+ void onDrainDone(bool done);
// Flush all currently scheduled work on the encoder thread. The encoder will abort all
// scheduled work items, work that can be immediately aborted will be placed in |flushedWork|.
void flushTask(::base::WaitableEvent* done,
@@ -89,6 +119,13 @@ private:
// Initialize the V4L2 device for encoding with the requested configuration.
bool initializeEncoder();
+ // Configure input format on the V4L2 device.
+ bool configureInputFormat(media::VideoPixelFormat inputFormat);
+ // Configure output format on the V4L2 device.
+ bool configureOutputFormat(media::VideoCodecProfile outputProfile);
+ // Configure required and optional controls on the V4L2 device.
+ bool configureDevice(media::VideoCodecProfile outputProfile,
+ std::optional<const uint8_t> outputH264Level);
// Update the |mBitrate| and |mFramerate| currently configured on the V4L2 device, to match the
// values requested by the codec 2.0 framework.
bool updateEncodingParameters();
@@ -97,17 +134,19 @@ private:
void scheduleNextEncodeTask();
// Encode the specified |block| with corresponding |index| and |timestamp|.
bool encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp);
+ // Drain the encoder.
+ void drain();
// Flush the encoder.
void flush();
- // Fetch a new output buffer from the output block pool with specified |size|.
- void fetchOutputBlock(uint32_t size, std::unique_ptr<BitstreamBuffer>* buffer);
+ // Fetch a new output buffer from the output block pool.
+ std::shared_ptr<C2LinearBlock> fetchOutputBlock();
// Called on the encoder thread when the encoder is done using an input buffer.
void onInputBufferDone(uint64_t index);
// Called on the encoder thread when an output buffer is ready.
- void onOutputBufferDone(size_t dataSize, int64_t timestamp, bool keyFrame,
- std::unique_ptr<BitstreamBuffer> buffer);
+ void onOutputBufferDone(uint32_t payloadSize, bool keyFrame, int64_t timestamp,
+ std::shared_ptr<C2LinearBlock> outputBlock);
// Helper function to find a work item in the output work queue by index.
C2Work* getWorkByIndex(uint64_t index);
@@ -118,22 +157,52 @@ private:
// Notify the listener the specified |work| item is finished.
void reportWork(std::unique_ptr<C2Work> work);
- // Configure the c2 block pool that will be used to create output buffers.
- bool getBlockPool();
+ // Attempt to start the V4L2 device poller.
+ bool startDevicePoll();
+ // Attempt to stop the V4L2 device poller.
+ bool stopDevicePoll();
+ // Called by the V4L2 device poller on the |mEncoderTaskRunner| whenever an error occurred.
+ void onPollError();
+ // Service I/O on the V4L2 device, called by the V4L2 device poller on the |mEncoderTaskRunner|.
+ void serviceDeviceTask(bool event);
+
+ // Enqueue an input buffer to be encoded on the device input queue. Returns whether the
+ // operation was successful.
+ bool enqueueInputBuffer(std::unique_ptr<InputFrame> frame, media::VideoPixelFormat format,
+ const std::vector<VideoFramePlane>& planes, int64_t index,
+ int64_t timestamp);
+ // Enqueue an output buffer to store the encoded bitstream on the device output queue. Returns
+ // whether the operation was successful.
+ bool enqueueOutputBuffer();
+ // Dequeue an input buffer the V4L2 device has finished encoding on the device input queue.
+ // Returns whether a buffer could be dequeued.
+ bool dequeueInputBuffer();
+ // Dequeue an output buffer containing the encoded bitstream from the device output queue. The
+ // bitstream is copied into another buffer that is sent to the client, after which the output
+ // buffer is returned to the queue. Returns whether the operation was successful.
+ bool dequeueOutputBuffer();
+
+ // Create input buffers on the V4L2 device input queue.
+ bool createInputBuffers();
+ // Create output buffers on the V4L2 device output queue.
+ bool createOutputBuffers();
+ // Destroy the input buffers on the V4L2 device input queue.
+ void destroyInputBuffers();
+ // Destroy the output buffers on the V4L2 device output queue.
+ void destroyOutputBuffers();
// Notify the client an error occurred and switch to the error state.
void reportError(c2_status_t error);
// Change the state of the component.
void setComponentState(ComponentState state);
+ // Change the state of the encoder, only called on the encoder thread.
+ void setEncoderState(EncoderState state);
// Get the specified component |state| as string.
static const char* componentStateToString(ComponentState state);
+ // Get the specified encoder |state| as string.
+ static const char* encoderStateToString(EncoderState state);
- // The underlying V4L2 encoder.
- std::unique_ptr<VideoEncoder> mEncoder;
-
- // The number of concurrent encoder instances currently created.
- static std::atomic<int32_t> sConcurrentInstances;
// The component's registered name.
const C2String mName;
// The component's id, provided by the C2 framework upon initialization.
@@ -148,30 +217,53 @@ private:
// The component's listener to be notified when events occur, only accessed on encoder thread.
std::shared_ptr<Listener> mListener;
- // The queue of encode work items waiting for free buffers in the input convertor.
- std::queue<std::unique_ptr<C2Work>> mInputConverterQueue;
+ // The V4L2 device used to interact with the driver, only accessed on encoder thread.
+ scoped_refptr<media::V4L2Device> mDevice;
+ scoped_refptr<media::V4L2Queue> mInputQueue;
+ scoped_refptr<media::V4L2Queue> mOutputQueue;
+
+ // The video stream's visible size.
+ media::Size mVisibleSize;
+ // The video stream's coded size.
+ media::Size mInputCodedSize;
+ // The input layout configured on the V4L2 device.
+ std::optional<media::VideoFrameLayout> mInputLayout;
// An input format convertor will be used if the device doesn't support the video's format.
std::unique_ptr<FormatConverter> mInputFormatConverter;
+ // Required output buffer byte size.
+ uint32_t mOutputBufferSize = 0;
// The bitrate currently configured on the v4l2 device.
uint32_t mBitrate = 0;
// The framerate currently configured on the v4l2 device.
uint32_t mFramerate = 0;
+ // How often we want to request the V4L2 device to create a key frame.
+ uint32_t mKeyFramePeriod = 0;
+ // Key frame counter, a key frame will be requested each time it reaches zero.
+ uint32_t mKeyFrameCounter = 0;
+
// Whether we extracted and submitted CSD (codec-specific data, e.g. H.264 SPS) to the framework.
bool mCSDSubmitted = false;
+ // The queue of encode work items to be processed.
+ std::queue<std::unique_ptr<C2Work>> mInputWorkQueue;
// The queue of encode work items currently being processed.
- std::deque<std::unique_ptr<C2Work>> mWorkQueue;
+ std::deque<std::unique_ptr<C2Work>> mOutputWorkQueue;
- // Map of buffer ids and associated C2LinearBlock buffers. The buffer's fds are used as id.
- std::unordered_map<int32_t, std::shared_ptr<C2LinearBlock>> mOutputBuffersMap;
+ // List of work item indices and frames associated with each buffer in the device input queue.
+ std::vector<std::pair<int64_t, std::unique_ptr<InputFrame>>> mInputBuffersMap;
+ // Map of buffer indices and output blocks associated with each buffer in the output queue. This
+ // map keeps the C2LinearBlock buffers alive so we can avoid duplicated fds.
+ std::vector<std::shared_ptr<C2LinearBlock>> mOutputBuffersMap;
// The output block pool.
std::shared_ptr<C2BlockPool> mOutputBlockPool;
// The component state, accessible from any thread as C2Component interface is not thread-safe.
std::atomic<ComponentState> mComponentState;
+ // The current state of the encoder, only accessed on the encoder thread.
+ EncoderState mEncoderState = EncoderState::UNINITIALIZED;
// The encoder thread on which all interaction with the V4L2 device is performed.
::base::Thread mEncoderThread{"V4L2EncodeComponentThread"};
diff --git a/components/include/v4l2_codec2/components/V4L2EncodeInterface.h b/components/include/v4l2_codec2/components/V4L2EncodeInterface.h
index 2efbfcc..f480d25 100644
--- a/components/include/v4l2_codec2/components/V4L2EncodeInterface.h
+++ b/components/include/v4l2_codec2/components/V4L2EncodeInterface.h
@@ -11,10 +11,11 @@
#include <C2.h>
#include <C2Buffer.h>
#include <C2Config.h>
-#include <ui/Size.h>
#include <util/C2InterfaceHelper.h>
+#include <size.h>
#include <v4l2_codec2/common/EncodeHelpers.h>
+#include <video_codecs.h>
namespace media {
class V4L2Device;
@@ -32,11 +33,10 @@ public:
// Note: these getters are not thread-safe. For dynamic parameters, component should use
// formal query API for C2ComponentInterface instead.
c2_status_t status() const { return mInitStatus; }
- const char* getOutputMediaType() const { return mOutputMediaType->m.value; }
C2Config::profile_t getOutputProfile() const { return mProfileLevel->profile; }
C2Config::level_t getOutputLevel() const { return mProfileLevel->level; }
- const ui::Size getInputVisibleSize() const {
- return ui::Size(mInputVisibleSize->width, mInputVisibleSize->height);
+ const media::Size getInputVisibleSize() const {
+ return media::Size(mInputVisibleSize->width, mInputVisibleSize->height);
}
C2BlockPool::local_id_t getBlockPoolId() const { return mOutputBlockPoolIds->m.values[0]; }
// Get sync key-frame period in frames.
@@ -46,14 +46,10 @@ protected:
void Initialize(const C2String& name);
// Configurable parameter setters.
- static C2R H264ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& videosize,
- const C2P<C2StreamFrameRateInfo::output>& frameRate,
- const C2P<C2StreamBitrateInfo::output>& bitrate);
- static C2R VP9ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& videosize,
- const C2P<C2StreamFrameRateInfo::output>& frameRate,
- const C2P<C2StreamBitrateInfo::output>& bitrate);
+ static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::output>& info,
+ const C2P<C2StreamPictureSizeInfo::input>& videosize,
+ const C2P<C2StreamFrameRateInfo::output>& frameRate,
+ const C2P<C2StreamBitrateInfo::output>& bitrate);
static C2R SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::input>& videoSize);
@@ -62,8 +58,6 @@ protected:
// Constant parameters
- // The kind of the component; should be C2Component::KIND_ENCODER.
- std::shared_ptr<C2ComponentKindSetting> mKind;
// The input format kind; should be C2FormatVideo.
std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
// The memory usage flag of input buffer; should be BufferUsage::VIDEO_ENCODER.
diff --git a/components/include/v4l2_codec2/components/V4L2Encoder.h b/components/include/v4l2_codec2/components/V4L2Encoder.h
deleted file mode 100644
index 5abee8f..0000000
--- a/components/include/v4l2_codec2/components/V4L2Encoder.h
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODER_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODER_H
-
-#include <stdint.h>
-#include <memory>
-#include <optional>
-#include <queue>
-#include <vector>
-
-#include <base/memory/weak_ptr.h>
-#include <base/sequenced_task_runner.h>
-#include <ui/Size.h>
-
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/components/VideoEncoder.h>
-
-namespace android {
-
-struct BitstreamBuffer;
-struct VideoFramePlane;
-class V4L2Device;
-class V4L2Queue;
-
-class V4L2Encoder : public VideoEncoder {
-public:
- // Number of buffers on V4L2 device queues.
- static constexpr size_t kInputBufferCount = 2;
- static constexpr size_t kOutputBufferCount = 2;
-
- static std::unique_ptr<VideoEncoder> create(
- C2Config::profile_t profile, std::optional<uint8_t> level, const ui::Size& visibleSize,
- uint32_t stride, uint32_t keyFramePeriod, FetchOutputBufferCB fetchOutputBufferCb,
- InputBufferDoneCB inputBufferDoneCb, OutputBufferDoneCB outputBufferDoneCb,
- DrainDoneCB drainDoneCb, ErrorCB errorCb,
- scoped_refptr<::base::SequencedTaskRunner> taskRunner);
- ~V4L2Encoder() override;
-
- bool encode(std::unique_ptr<InputFrame> frame) override;
- void drain() override;
- void flush() override;
-
- bool setBitrate(uint32_t bitrate) override;
- bool setFramerate(uint32_t framerate) override;
- void requestKeyframe() override;
-
- VideoPixelFormat inputFormat() const override;
- const ui::Size& visibleSize() const override { return mVisibleSize; }
- const ui::Size& codedSize() const override { return mInputCodedSize; }
-
-private:
- // Possible encoder states.
- enum class State {
- UNINITIALIZED, // Not initialized yet or initialization failed.
- WAITING_FOR_INPUT_FRAME, // Waiting for frames to be queued.
- WAITING_FOR_V4L2_BUFFER, // Waiting for V4L2 input queue buffers.
- ENCODING, // Queuing input buffers.
- DRAINING, // Draining encoder.
- ERROR, // Encoder encountered an error.
- };
-
- // Contains a single encode request.
- struct EncodeRequest {
- EncodeRequest(std::unique_ptr<InputFrame> video_frame)
- : video_frame(std::move(video_frame)) {}
- ~EncodeRequest() = default;
- EncodeRequest(EncodeRequest&&) = default;
- EncodeRequest& operator=(EncodeRequest&&) = default;
-
- std::unique_ptr<InputFrame> video_frame;
- bool end_of_stream = false;
- };
-
- V4L2Encoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
- FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB inputBufferDoneCb,
- OutputBufferDoneCB outputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb);
-
- // Initialize the V4L2 encoder for specified parameters.
- bool initialize(C2Config::profile_t outputProfile, std::optional<uint8_t> level,
- const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod);
-
- // Handle the next encode request on the queue.
- void handleEncodeRequest();
- // Handle a request to flush the encoder.
- void handleFlushRequest();
- // Handle a request to drain the encoder.
- void handleDrainRequest();
- // Called when draining the encoder has completed.
- void onDrainDone(bool done);
-
- // Configure input format on the V4L2 device.
- bool configureInputFormat(VideoPixelFormat inputFormat, uint32_t stride);
- // Configure output format on the V4L2 device.
- bool configureOutputFormat(C2Config::profile_t outputProfile);
- // Configure required and optional controls on the V4L2 device.
- bool configureDevice(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level);
- // Configure required and optional H.264 controls on the V4L2 device.
- bool configureH264(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level);
-
- // Attempt to start the V4L2 device poller.
- bool startDevicePoll();
- // Attempt to stop the V4L2 device poller.
- bool stopDevicePoll();
- // Called by the V4L2 device poller whenever an error occurred.
- void onPollError();
- // Service I/O on the V4L2 device, called by the V4L2 device poller.
- void serviceDeviceTask(bool event);
-
- // Enqueue an input buffer to be encoded on the device input queue. Returns whether the
- // operation was successful.
- bool enqueueInputBuffer(std::unique_ptr<InputFrame> frame);
- // Enqueue an output buffer to store the encoded bitstream on the device output queue. Returns
- // whether the operation was successful.
- bool enqueueOutputBuffer();
- // Dequeue an input buffer the V4L2 device has finished encoding on the device input queue.
- // Returns whether a buffer could be dequeued.
- bool dequeueInputBuffer();
- // Dequeue an output buffer containing the encoded bitstream from the device output queue.
- // Returns whether the operation was successful.
- bool dequeueOutputBuffer();
-
- // Create input buffers on the V4L2 device input queue.
- bool createInputBuffers();
- // Create output buffers on the V4L2 device output queue.
- bool createOutputBuffers();
- // Destroy the input buffers on the V4L2 device input queue.
- void destroyInputBuffers();
- // Destroy the output buffers on the V4L2 device output queue.
- void destroyOutputBuffers();
-
- // Notify the client an error occurred and switch to the error state.
- void onError();
-
- // Change the state of the encoder.
- void setState(State state);
- // Get the specified encoder |state| as string.
- static const char* stateToString(State state);
-
- // The list of currently queued encode requests.
- std::queue<EncodeRequest> mEncodeRequests;
-
- // The video stream's visible size.
- ui::Size mVisibleSize;
- // The video stream's coded size.
- ui::Size mInputCodedSize;
- // The input layout configured on the V4L2 device.
- std::optional<VideoFrameLayout> mInputLayout;
- // Required output buffer byte size.
- uint32_t mOutputBufferSize = 0;
-
- // How often we want to request the V4L2 device to create a key frame.
- uint32_t mKeyFramePeriod = 0;
- // Key frame counter; a key frame will be requested each time it reaches zero.
- uint32_t mKeyFrameCounter = 0;
-
- // The V4L2 device and associated queues used to interact with the device.
- scoped_refptr<V4L2Device> mDevice;
- scoped_refptr<V4L2Queue> mInputQueue;
- scoped_refptr<V4L2Queue> mOutputQueue;
-
- // List of frames associated with each buffer in the V4L2 device input queue.
- std::vector<std::unique_ptr<InputFrame>> mInputBuffers;
- // List of bitstream buffers associated with each buffer in the V4L2 device output queue.
- std::vector<std::unique_ptr<BitstreamBuffer>> mOutputBuffers;
-
- // Callbacks to be triggered on various events.
- FetchOutputBufferCB mFetchOutputBufferCb;
- InputBufferDoneCB mInputBufferDoneCb;
- OutputBufferDoneCB mOutputBufferDoneCb;
- DrainDoneCB mDrainDoneCb;
- ErrorCB mErrorCb;
-
- // The current state of the encoder.
- State mState = State::UNINITIALIZED;
-
- scoped_refptr<::base::SequencedTaskRunner> mTaskRunner;
-
- ::base::WeakPtr<V4L2Encoder> mWeakThis;
- ::base::WeakPtrFactory<V4L2Encoder> mWeakThisFactory{this};
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODER_H
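The deleted members |mKeyFramePeriod| and |mKeyFrameCounter| above drive periodic key frame requests. A minimal sketch of that bookkeeping, assuming (this is not the actual V4L2Encoder logic) that the counter wraps modulo the period and a key frame is requested whenever it reaches zero; the KeyFrameScheduler name is hypothetical:

#include <cstdint>

class KeyFrameScheduler {
public:
    // |keyFramePeriod| must be non-zero; it mirrors |mKeyFramePeriod| above.
    explicit KeyFrameScheduler(uint32_t keyFramePeriod) : mKeyFramePeriod(keyFramePeriod) {}

    // Call once per frame queued for encoding. Returns true if this frame
    // should be requested as a key frame, i.e. the counter reached zero.
    bool shouldRequestKeyFrame() {
        const bool request = (mKeyFrameCounter == 0);
        mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
        return request;
    }

private:
    const uint32_t mKeyFramePeriod;
    uint32_t mKeyFrameCounter = 0;
};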
diff --git a/components/include/v4l2_codec2/components/VideoDecoder.h b/components/include/v4l2_codec2/components/VideoDecoder.h
index 9a48562..c737c65 100644
--- a/components/include/v4l2_codec2/components/VideoDecoder.h
+++ b/components/include/v4l2_codec2/components/VideoDecoder.h
@@ -26,8 +26,9 @@ public:
};
static const char* DecodeStatusToString(DecodeStatus status);
- using GetPoolCB = base::RepeatingCallback<std::unique_ptr<VideoFramePool>(
- const ui::Size& size, HalPixelFormat pixelFormat, size_t numOutputBuffers)>;
+ using GetPoolCB =
+ base::RepeatingCallback<void(std::unique_ptr<VideoFramePool>*, const media::Size& size,
+ HalPixelFormat pixelFormat, size_t numOutputBuffers)>;
using DecodeCB = base::OnceCallback<void(DecodeStatus)>;
using OutputCB = base::RepeatingCallback<void(std::unique_ptr<VideoFrame>)>;
using ErrorCB = base::RepeatingCallback<void()>;
diff --git a/components/include/v4l2_codec2/components/VideoEncoder.h b/components/include/v4l2_codec2/components/VideoEncoder.h
deleted file mode 100644
index 46bcad1..0000000
--- a/components/include/v4l2_codec2/components/VideoEncoder.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_ENCODER_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_ENCODER_H
-
-#include <stdint.h>
-#include <memory>
-#include <vector>
-
-#include <base/callback.h>
-#include <ui/Size.h>
-
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/common/VideoPixelFormat.h>
-#include <v4l2_codec2/common/VideoTypes.h>
-
-namespace android {
-
-struct BitstreamBuffer;
-
-class VideoEncoder {
-public:
- // The InputFrame class can be used to store raw video frames.
- // Note: The InputFrame does not take ownership of the data. The file descriptor is not
- // duplicated and the caller is responsible for keeping the data alive until the buffer
- // is returned by an InputBufferDoneCB() call.
- class InputFrame {
- public:
- InputFrame(std::vector<int>&& fds, std::vector<VideoFramePlane>&& planes,
- VideoPixelFormat pixelFormat, uint64_t index, int64_t timestamp);
- ~InputFrame() = default;
-
- const std::vector<int>& fds() const { return mFds; }
- const std::vector<VideoFramePlane>& planes() const { return mPlanes; }
- VideoPixelFormat pixelFormat() const { return mPixelFormat; }
- uint64_t index() const { return mIndex; }
- int64_t timestamp() const { return mTimestamp; }
-
- private:
- const std::vector<int> mFds;
- std::vector<VideoFramePlane> mPlanes;
- VideoPixelFormat mPixelFormat;
- uint64_t mIndex = 0;
- int64_t mTimestamp = 0;
- };
-
- using FetchOutputBufferCB =
- base::RepeatingCallback<void(uint32_t, std::unique_ptr<BitstreamBuffer>* buffer)>;
- // TODO(dstaessens): Change callbacks to OnceCallback provided when requesting encode/drain.
- using InputBufferDoneCB = base::RepeatingCallback<void(uint64_t)>;
- using OutputBufferDoneCB = base::RepeatingCallback<void(
- size_t, int64_t, bool, std::unique_ptr<BitstreamBuffer> buffer)>;
- using DrainDoneCB = base::RepeatingCallback<void(bool)>;
- using ErrorCB = base::RepeatingCallback<void()>;
-
- virtual ~VideoEncoder() = default;
-
- // Encode the frame, |InputBufferDoneCB| and |OutputBufferDoneCB| will be called when done.
- virtual bool encode(std::unique_ptr<InputFrame> buffer) = 0;
- // Drain the encoder, |mDrainDoneCb| will be called when done.
- virtual void drain() = 0;
- // Flush the encoder, pending drain operations will be aborted.
- virtual void flush() = 0;
-
- // Set the bitrate to the specified value; this will affect all frames not yet processed.
- virtual bool setBitrate(uint32_t bitrate) = 0;
- // Set the framerate to the specified value; this will affect all frames not yet processed.
- virtual bool setFramerate(uint32_t framerate) = 0;
- // Request the next encoded frame to be a key frame; this will affect the next frame not yet processed.
- virtual void requestKeyframe() = 0;
-
- virtual VideoPixelFormat inputFormat() const = 0;
- virtual const ui::Size& visibleSize() const = 0;
- virtual const ui::Size& codedSize() const = 0;
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_ENCODER_H
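The InputFrame comment above spells out an ownership contract: the frame does not own its file descriptors, so the caller must keep the backing data alive until InputBufferDoneCB reports the frame's |index|. A hedged caller-side sketch of that contract; BackingBuffer and InFlightFrames are hypothetical helpers, not part of the deleted API:

#include <cstdint>
#include <map>
#include <memory>
#include <vector>

struct BackingBuffer {
    std::vector<int> fds;  // Descriptors owned, and eventually closed, by the caller.
};

class InFlightFrames {
public:
    // Record the backing buffer just before calling encoder->encode(frame).
    void onEncode(uint64_t index, std::shared_ptr<BackingBuffer> buffer) {
        mInFlight.emplace(index, std::move(buffer));
    }

    // Bind as the encoder's InputBufferDoneCB: erasing the entry drops the
    // caller's reference, so the buffer may now be recycled or closed.
    void onInputBufferDone(uint64_t index) { mInFlight.erase(index); }

private:
    std::map<uint64_t, std::shared_ptr<BackingBuffer>> mInFlight;
};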
diff --git a/components/include/v4l2_codec2/components/VideoFrame.h b/components/include/v4l2_codec2/components/VideoFrame.h
index b5d7b99..395a52b 100644
--- a/components/include/v4l2_codec2/components/VideoFrame.h
+++ b/components/include/v4l2_codec2/components/VideoFrame.h
@@ -10,11 +10,11 @@
#include <C2Buffer.h>
-#include <ui/Rect.h>
+#include <rect.h>
namespace android {
-// Wrap C2GraphicBlock and provide essential information from C2GraphicBlock.
+// Wrap C2GraphicBlock and provide essential information from C2GraphicBlock.
class VideoFrame {
public:
// Create the instance from C2GraphicBlock. return nullptr if any error occurs.
@@ -25,8 +25,8 @@ public:
const std::vector<int>& getFDs() const;
// Getter and setter of the visible rectangle.
- void setVisibleRect(const Rect& visibleRect);
- const Rect& getVisibleRect() const;
+ void setVisibleRect(const media::Rect& visibleRect);
+ const media::Rect& getVisibleRect() const;
// Getter and setter of the bitstream ID of the corresponding input bitstream.
void setBitstreamId(int32_t bitstreamId);
@@ -40,7 +40,7 @@ private:
std::shared_ptr<C2GraphicBlock> mGraphicBlock;
std::vector<int> mFds;
- Rect mVisibleRect;
+ media::Rect mVisibleRect;
int32_t mBitstreamId = -1;
};
diff --git a/components/include/v4l2_codec2/components/VideoFramePool.h b/components/include/v4l2_codec2/components/VideoFramePool.h
index 2978eed..71bfe27 100644
--- a/components/include/v4l2_codec2/components/VideoFramePool.h
+++ b/components/include/v4l2_codec2/components/VideoFramePool.h
@@ -15,8 +15,8 @@
#include <base/memory/weak_ptr.h>
#include <base/sequenced_task_runner.h>
#include <base/threading/thread.h>
-#include <ui/Size.h>
+#include <size.h>
#include <v4l2_codec2/common/VideoTypes.h>
#include <v4l2_codec2/components/VideoFrame.h>
@@ -31,8 +31,8 @@ public:
using GetVideoFrameCB = ::base::OnceCallback<void(std::optional<FrameWithBlockId>)>;
static std::unique_ptr<VideoFramePool> Create(
- std::shared_ptr<C2BlockPool> blockPool, const size_t numBuffers, const ui::Size& size,
- HalPixelFormat pixelFormat, bool isSecure,
+ std::shared_ptr<C2BlockPool> blockPool, const size_t numBuffers,
+ const media::Size& size, HalPixelFormat pixelFormat, bool isSecure,
scoped_refptr<::base::SequencedTaskRunner> taskRunner);
~VideoFramePool();
@@ -48,8 +48,8 @@ private:
// |pixelFormat| is the pixel format of the required graphic blocks.
// |isSecure| indicates whether the video stream is encrypted.
// All public methods and the callbacks should be run on |taskRunner|.
- VideoFramePool(std::shared_ptr<C2BlockPool> blockPool, const ui::Size& size,
- HalPixelFormat pixelFormat, C2MemoryUsage memoryUsage,
+ VideoFramePool(std::shared_ptr<C2BlockPool> blockPool, const media::Size& size,
+ HalPixelFormat pixelFormat, bool isSecure,
scoped_refptr<::base::SequencedTaskRunner> taskRunner);
bool initialize();
void destroyTask();
@@ -59,21 +59,21 @@ private:
void getVideoFrameTask();
void onVideoFrameReady(std::optional<FrameWithBlockId> frameWithBlockId);
+ // Extracts buffer ID from graphic block.
+ // |block| is the graphic block allocated by |blockPool|.
+ static std::optional<uint32_t> getBufferIdFromGraphicBlock(const C2BlockPool& blockPool,
+ const C2Block2D& block);
+
// Ask |blockPool| to allocate the specified number of buffers.
// |bufferCount| is the number of requested buffers.
- static c2_status_t requestNewBufferSet(C2BlockPool& blockPool, int32_t bufferCount,
- const ui::Size& size, uint32_t format,
- C2MemoryUsage usage);
-
- static std::optional<uint32_t> getBufferIdFromGraphicBlock(C2BlockPool& blockPool,
- const C2Block2D& block);
+ static c2_status_t requestNewBufferSet(C2BlockPool& blockPool, int32_t bufferCount);
// Ask |blockPool| to notify when a block is available via |cb|.
// Return true if |blockPool| supports notifying buffer available.
static bool setNotifyBlockAvailableCb(C2BlockPool& blockPool, ::base::OnceClosure cb);
std::shared_ptr<C2BlockPool> mBlockPool;
- const ui::Size mSize;
+ const media::Size mSize;
const HalPixelFormat mPixelFormat;
const C2MemoryUsage mMemoryUsage;
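GetVideoFrameCB above delivers a std::optional<FrameWithBlockId> asynchronously on the pool's task runner, with std::nullopt signalling failure. A self-contained sketch of that calling convention; std::function stands in for base::OnceCallback, and FakeFrame/FakePool are hypothetical synchronous stand-ins, not the real classes:

#include <cstdint>
#include <cstdio>
#include <functional>
#include <memory>
#include <optional>
#include <utility>

struct FakeFrame {};  // Stand-in for android::VideoFrame.
using FrameWithBlockId = std::pair<std::unique_ptr<FakeFrame>, uint32_t>;
using GetVideoFrameCB = std::function<void(std::optional<FrameWithBlockId>)>;

class FakePool {
public:
    // A real pool allocates from a C2BlockPool on its task runner and may
    // invoke the callback with std::nullopt; here we succeed synchronously.
    void getVideoFrame(GetVideoFrameCB cb) {
        cb(std::make_pair(std::make_unique<FakeFrame>(), /*blockId=*/0u));
    }
};

int main() {
    FakePool pool;
    pool.getVideoFrame([](std::optional<FrameWithBlockId> frame) {
        if (!frame) {
            std::printf("no frame available\n");
            return;
        }
        std::printf("got frame, blockId=%u\n", frame->second);
    });
    return 0;
}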
diff --git a/plugin_store/Android.bp b/plugin_store/Android.bp
index e358378..73dccaf 100644
--- a/plugin_store/Android.bp
+++ b/plugin_store/Android.bp
@@ -1,12 +1,3 @@
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "external_v4l2_codec2_license"
- // to get the below license kinds:
- // SPDX-license-identifier-BSD
- default_applicable_licenses: ["external_v4l2_codec2_license"],
-}
-
cc_library_shared {
name: "libc2plugin_store",
vendor: true,
@@ -18,8 +9,6 @@ cc_library_shared {
srcs: [
"C2VdaBqBlockPool.cpp",
"C2VdaPooledBlockPool.cpp",
- "DrmGrallocHelpers.cpp",
- "H2BGraphicBufferProducer.cpp",
"V4L2PluginStore.cpp",
"VendorAllocatorLoader.cpp",
],
@@ -28,13 +17,12 @@ cc_library_shared {
],
header_libs: [
- "libcodec2_internal", // needed for including C2BqBufferPriv.h.
+ "libcodec2_internal",
],
shared_libs: [
"android.hardware.graphics.bufferqueue@2.0",
"libchrome",
"libcutils",
- "libdrm",
"libhardware",
"libhidlbase",
"libnativewindow",
diff --git a/plugin_store/C2VdaBqBlockPool.cpp b/plugin_store/C2VdaBqBlockPool.cpp
index 855f389..9abc698 100644
--- a/plugin_store/C2VdaBqBlockPool.cpp
+++ b/plugin_store/C2VdaBqBlockPool.cpp
@@ -8,446 +8,432 @@
#include <v4l2_codec2/plugin_store/C2VdaBqBlockPool.h>
#include <errno.h>
-#include <string.h>
#include <chrono>
#include <mutex>
-#include <set>
-#include <sstream>
#include <thread>
#include <C2AllocatorGralloc.h>
#include <C2BlockInternal.h>
-#include <C2SurfaceSyncObj.h>
+#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
#include <android/hardware/graphics/bufferqueue/2.0/IProducerListener.h>
#include <base/callback.h>
#include <log/log.h>
+#include <system/window.h>
+#include <types.h>
#include <ui/BufferQueueDefs.h>
-#include <v4l2_codec2/plugin_store/DrmGrallocHelpers.h>
-#include <v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h>
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
namespace android {
namespace {
-// The wait time for the acquire fence in milliseconds. A normal display runs at 60Hz,
-// whose period is 16ms. We choose 2x the period as the timeout.
-constexpr int kFenceWaitTimeMs = 32;
-
-// The default maximum dequeued buffer count of IGBP. Currently we don't use
-// this value to restrict the count of allocated buffers, so we choose a
-// sufficiently large value here.
-constexpr int kMaxDequeuedBufferCount = 32u;
+// The wait time for acquire fence in milliseconds.
+constexpr int kFenceWaitTimeMs = 10;
+// The timeout limit of acquiring lock of timed_mutex in milliseconds.
+constexpr std::chrono::milliseconds kTimedMutexTimeoutMs = std::chrono::milliseconds(500);
} // namespace
using namespace std::chrono_literals;
-// We use the value of DRM handle as the unique ID of the graphic buffers.
-using unique_id_t = uint32_t;
-// Type for IGBP slot index.
-using slot_t = int32_t;
-
+using ::android::C2AndroidMemoryUsage;
+using ::android::Fence;
+using ::android::GraphicBuffer;
+using ::android::sp;
+using ::android::status_t;
using ::android::BufferQueueDefs::BUFFER_NEEDS_REALLOCATION;
using ::android::BufferQueueDefs::NUM_BUFFER_SLOTS;
+using ::android::BufferQueueDefs::RELEASE_ALL_BUFFERS;
+using ::android::hardware::hidl_handle;
using ::android::hardware::Return;
+
+using HBuffer = ::android::hardware::graphics::common::V1_2::HardwareBuffer;
+using HStatus = ::android::hardware::graphics::bufferqueue::V2_0::Status;
+using HGraphicBufferProducer =
+ ::android::hardware::graphics::bufferqueue::V2_0::IGraphicBufferProducer;
using HProducerListener = ::android::hardware::graphics::bufferqueue::V2_0::IProducerListener;
+using HConnectionType = hardware::graphics::bufferqueue::V2_0::ConnectionType;
+using HQueueBufferOutput =
+ ::android::hardware::graphics::bufferqueue::V2_0::IGraphicBufferProducer::QueueBufferOutput;
-static c2_status_t asC2Error(status_t err) {
+using ::android::hardware::graphics::bufferqueue::V2_0::utils::b2h;
+using ::android::hardware::graphics::bufferqueue::V2_0::utils::h2b;
+using ::android::hardware::graphics::bufferqueue::V2_0::utils::HFenceWrapper;
+
+static c2_status_t asC2Error(int32_t err) {
switch (err) {
- case OK:
+ case android::NO_ERROR:
return C2_OK;
- case NO_INIT:
+ case android::NO_INIT:
return C2_NO_INIT;
- case BAD_VALUE:
+ case android::BAD_VALUE:
return C2_BAD_VALUE;
- case TIMED_OUT:
+ case android::TIMED_OUT:
return C2_TIMED_OUT;
- case WOULD_BLOCK:
+ case android::WOULD_BLOCK:
return C2_BLOCKING;
- case NO_MEMORY:
+ case android::NO_MEMORY:
return C2_NO_MEMORY;
}
return C2_CORRUPTED;
}
-// Convert GraphicBuffer to C2GraphicAllocation and wrap producer id and slot index.
-std::shared_ptr<C2GraphicAllocation> ConvertGraphicBuffer2C2Allocation(
- sp<GraphicBuffer> graphicBuffer, const uint64_t igbpId, const slot_t slot,
- C2Allocator* const allocator) {
- ALOGV("%s(idbpId=0x%" PRIx64 ", slot=%d)", __func__, igbpId, slot);
-
- C2Handle* c2Handle = WrapNativeCodec2GrallocHandle(
- graphicBuffer->handle, graphicBuffer->width, graphicBuffer->height,
- graphicBuffer->format, graphicBuffer->usage, graphicBuffer->stride,
- graphicBuffer->getGenerationNumber(), igbpId, slot);
- if (!c2Handle) {
- ALOGE("WrapNativeCodec2GrallocHandle() failed");
- return nullptr;
- }
-
- std::shared_ptr<C2GraphicAllocation> allocation;
- const auto err = allocator->priorGraphicAllocation(c2Handle, &allocation);
- if (err != C2_OK) {
- ALOGE("C2Allocator::priorGraphicAllocation() failed: %d", err);
- native_handle_close(c2Handle);
- native_handle_delete(c2Handle);
- return nullptr;
- }
-
- return allocation;
-}
-
-// This class is used to notify the listener when a certain event happens.
-class EventNotifier : public virtual android::RefBase {
+class H2BGraphicBufferProducer {
public:
- class Listener {
- public:
- virtual ~Listener() = default;
-
- // Called by EventNotifier when a certain event happens.
- virtual void onEventNotified() = 0;
- };
-
- explicit EventNotifier(std::weak_ptr<Listener> listener) : mListener(std::move(listener)) {}
- virtual ~EventNotifier() = default;
-
-protected:
- void notify() {
- ALOGV("%s()", __func__);
- std::shared_ptr<Listener> listener = mListener.lock();
- if (listener) {
- listener->onEventNotified();
+ explicit H2BGraphicBufferProducer(sp<HGraphicBufferProducer> base) : mBase(base) {}
+ ~H2BGraphicBufferProducer() = default;
+
+ status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) {
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ Return<void> transResult = mBase->requestBuffer(
+ slot, [&converted, &status, buf](HStatus hStatus, HBuffer const& hBuffer,
+ uint32_t generationNumber) {
+ converted = h2b(hStatus, &status) && h2b(hBuffer, buf);
+ if (*buf) {
+ (*buf)->setGenerationNumber(generationNumber);
+ }
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
- }
-
- std::weak_ptr<Listener> mListener;
-};
-
-// Notifies the listener when the connected IGBP releases buffers.
-class BufferReleasedNotifier : public EventNotifier, public HProducerListener {
-public:
- using EventNotifier::EventNotifier;
- ~BufferReleasedNotifier() override = default;
-
- // HProducerListener implementation
- Return<void> onBuffersReleased(uint32_t count) override {
- ALOGV("%s(%u)", __func__, count);
- if (count > 0) {
- notify();
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
}
- return {};
- }
-};
-
-// IGBP expects its user (e.g. C2VdaBqBlockPool) to keep the mapping from dequeued slot index to
-// graphic buffers. Also, C2VdaBqBlockPool guarantees to fetch a fixed set of N buffers with
-// buffer identifiers. So this class stores the mapping from slot index to buffers and the mapping from
-// buffer unique ID to buffers.
-// This class also implements functionalities for buffer migration when surface switching. Buffers
-// are owned by either component (i.e. local buffers) or CCodec framework (i.e. remote buffers).
-// When switching surface, the ccodec framework migrates remote buffers to the new surfaces. Then
-// C2VdaBqBlockPool migrates local buffers. However, some buffers might be lost during migration.
-// We assume that there are enough buffers migrated to the new surface to continue the playback.
-// After |NUM_BUFFER_SLOTS| buffers have been dequeued from the new surface, all buffers should
-// have been dequeued at least once. Then we treat the missing buffers as lost, and attach these
-// buffers to the new surface.
-class TrackedGraphicBuffers {
-public:
- using value_type = std::tuple<slot_t, unique_id_t, std::shared_ptr<C2GraphicAllocation>>;
-
- TrackedGraphicBuffers() = default;
- ~TrackedGraphicBuffers() = default;
-
- void reset() {
- mSlotId2GraphicBuffer.clear();
- mSlotId2PoolData.clear();
- mAllocationsRegistered.clear();
- mAllocationsToBeMigrated.clear();
- mMigrateLostBufferCounter = 0;
- mGenerationToBeMigrated = 0;
- }
-
- void registerUniqueId(unique_id_t uniqueId, std::shared_ptr<C2GraphicAllocation> allocation) {
- ALOGV("%s(uniqueId=%u)", __func__, uniqueId);
- ALOG_ASSERT(allocation != nullptr);
-
- mAllocationsRegistered[uniqueId] = std::move(allocation);
- }
-
- std::shared_ptr<C2GraphicAllocation> getRegisteredAllocation(unique_id_t uniqueId) {
- const auto iter = mAllocationsRegistered.find(uniqueId);
- ALOG_ASSERT(iter != mAllocationsRegistered.end());
-
- return iter->second;
- }
-
- bool hasUniqueId(unique_id_t uniqueId) const {
- return mAllocationsRegistered.find(uniqueId) != mAllocationsRegistered.end() ||
- mAllocationsToBeMigrated.find(uniqueId) != mAllocationsToBeMigrated.end();
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
}
- void updateSlotBuffer(slot_t slotId, unique_id_t uniqueId, sp<GraphicBuffer> slotBuffer) {
- ALOGV("%s(slotId=%d)", __func__, slotId);
- ALOG_ASSERT(slotBuffer != nullptr);
+ status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult =
+ mBase->setMaxDequeuedBufferCount(static_cast<int32_t>(maxDequeuedBuffers));
- mSlotId2GraphicBuffer[slotId] = std::make_pair(uniqueId, std::move(slotBuffer));
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t dequeueBuffer(uint32_t width, uint32_t height, uint32_t pixelFormat,
+ C2AndroidMemoryUsage androidUsage, int* slot, sp<Fence>* fence) {
+ using Input = HGraphicBufferProducer::DequeueBufferInput;
+ using Output = HGraphicBufferProducer::DequeueBufferOutput;
+ Input input{width, height, pixelFormat, androidUsage.asGrallocUsage()};
+
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ Return<void> transResult = mBase->dequeueBuffer(
+ input, [&converted, &status, &slot, &fence](HStatus hStatus, int32_t hSlot,
+ Output const& hOutput) {
+ converted = h2b(hStatus, &status);
+ if (!converted || status != android::NO_ERROR) {
+ return;
+ }
+
+ *slot = hSlot;
+ if (hOutput.bufferNeedsReallocation) {
+ status = BUFFER_NEEDS_REALLOCATION;
+ }
+ converted = h2b(hOutput.fence, fence);
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION &&
+ status != android::TIMED_OUT) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
}
- std::pair<unique_id_t, sp<GraphicBuffer>> getSlotBuffer(slot_t slotId) const {
- const auto iter = mSlotId2GraphicBuffer.find(slotId);
- ALOG_ASSERT(iter != mSlotId2GraphicBuffer.end());
-
- return iter->second;
- }
+ status_t detachBuffer(int slot) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult = mBase->detachBuffer(static_cast<int32_t>(slot));
- bool hasSlotId(slot_t slotId) const {
- return mSlotId2GraphicBuffer.find(slotId) != mSlotId2GraphicBuffer.end();
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
}
- void updatePoolData(slot_t slotId, std::weak_ptr<C2BufferQueueBlockPoolData> poolData) {
- ALOGV("%s(slotId=%d)", __func__, slotId);
- ALOG_ASSERT(hasSlotId(slotId));
+ status_t attachBuffer(const sp<GraphicBuffer>& buffer, int* outSlot) {
+ HBuffer hBuffer;
+ uint32_t hGenerationNumber;
+ if (!b2h(buffer, &hBuffer, &hGenerationNumber)) {
+ ALOGE("%s: invalid input buffer.", __func__);
+ return BAD_VALUE;
+ }
- mSlotId2PoolData[slotId] = std::move(poolData);
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ Return<void> transResult = mBase->attachBuffer(
+ hBuffer, hGenerationNumber,
+ [&converted, &status, outSlot](HStatus hStatus, int32_t hSlot,
+ bool releaseAllBuffers) {
+ converted = h2b(hStatus, &status);
+ *outSlot = static_cast<int>(hSlot);
+ if (converted && releaseAllBuffers && status == android::NO_ERROR) {
+ status = RELEASE_ALL_BUFFERS;
+ }
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
}
- bool migrateLocalBuffers(H2BGraphicBufferProducer* const producer, uint64_t producerId,
- uint32_t generation, uint64_t usage) {
- ALOGV("%s(producerId=%" PRIx64 ", generation=%u, usage=%" PRIx64 ")", __func__, producerId,
- generation, usage);
-
- mGenerationToBeMigrated = generation;
- mUsageToBeMigrated = usage;
-
- // Move all buffers to mAllocationsToBeMigrated.
- for (auto& pair : mAllocationsRegistered) {
- if (!mAllocationsToBeMigrated.insert(pair).second) {
- ALOGE("%s() duplicated uniqueId=%u", __func__, pair.first);
- return false;
- }
+ status_t cancelBuffer(int slot, const sp<Fence>& fence) {
+ HFenceWrapper hFenceWrapper;
+ if (!b2h(fence, &hFenceWrapper)) {
+ ALOGE("%s(): corrupted input fence.", __func__);
+ return UNKNOWN_ERROR;
}
- mAllocationsRegistered.clear();
-
- ALOGV("%s(producerId=%" PRIx64 ", generation=%u, usage=%" PRIx64 ") before %s", __func__,
- producerId, generation, usage, debugString().c_str());
-
- // Migrate local buffers.
- std::map<slot_t, std::pair<unique_id_t, sp<GraphicBuffer>>> newSlotId2GraphicBuffer;
- std::map<slot_t, std::weak_ptr<C2BufferQueueBlockPoolData>> newSlotId2PoolData;
- for (const auto& pair : mSlotId2PoolData) {
- auto oldSlot = pair.first;
- auto poolData = pair.second.lock();
- if (!poolData) {
- continue;
- }
-
- unique_id_t uniqueId;
- sp<GraphicBuffer> slotBuffer;
- std::shared_ptr<C2SurfaceSyncMemory> syncMem;
- std::tie(uniqueId, slotBuffer) = getSlotBuffer(oldSlot);
- slot_t newSlot = poolData->migrate(producer->getBase(), mGenerationToBeMigrated,
- mUsageToBeMigrated, producerId, slotBuffer,
- slotBuffer->getGenerationNumber(),
- syncMem);
- if (newSlot < 0) {
- ALOGW("%s() Failed to migrate local buffer: uniqueId=%u, oldSlot=%d", __func__,
- uniqueId, oldSlot);
- continue;
- }
- ALOGV("%s() migrated buffer: uniqueId=%u, oldSlot=%d, newSlot=%d", __func__, uniqueId,
- oldSlot, newSlot);
- newSlotId2GraphicBuffer[newSlot] = std::make_pair(uniqueId, std::move(slotBuffer));
- newSlotId2PoolData[newSlot] = std::move(poolData);
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult =
+ mBase->cancelBuffer(static_cast<int32_t>(slot), hFenceWrapper.getHandle());
- if (!moveBufferToRegistered(uniqueId)) {
- ALOGE("%s() failed to move buffer to registered, uniqueId=%u", __func__, uniqueId);
- return false;
- }
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
- mSlotId2GraphicBuffer = std::move(newSlotId2GraphicBuffer);
- mSlotId2PoolData = std::move(newSlotId2PoolData);
-
- // Choose a big enough number to ensure all buffers are dequeued at least once.
- mMigrateLostBufferCounter = NUM_BUFFER_SLOTS;
- ALOGD("%s() migrated %zu local buffers", __func__, mAllocationsRegistered.size());
- return true;
- }
-
- bool needMigrateLostBuffers() const {
- return mMigrateLostBufferCounter == 0 && !mAllocationsToBeMigrated.empty();
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
}
- status_t migrateLostBuffer(C2Allocator* const allocator,
- H2BGraphicBufferProducer* const producer, const uint64_t producerId,
- slot_t* newSlot) {
- ALOGV("%s() %s", __func__, debugString().c_str());
+ int query(int what, int* value) {
+ int result = 0;
+ Return<void> transResult =
+ mBase->query(static_cast<int32_t>(what), [&result, value](int32_t r, int32_t v) {
+ result = static_cast<int>(r);
+ *value = static_cast<int>(v);
+ });
- if (!needMigrateLostBuffers()) {
- return NO_INIT;
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
+ return result;
+ }
- auto iter = mAllocationsToBeMigrated.begin();
- const unique_id_t uniqueId = iter->first;
- const C2Handle* c2Handle = iter->second->handle();
+ status_t allowAllocation(bool allow) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult = mBase->allowAllocation(allow);
- // Convert C2GraphicAllocation to GraphicBuffer, and update generation and usage.
- uint32_t width, height, format, stride, igbpSlot, generation;
- uint64_t usage, igbpId;
- _UnwrapNativeCodec2GrallocMetadata(c2Handle, &width, &height, &format, &usage, &stride,
- &generation, &igbpId, &igbpSlot);
- native_handle_t* grallocHandle = UnwrapNativeCodec2GrallocHandle(c2Handle);
- sp<GraphicBuffer> graphicBuffer =
- new GraphicBuffer(grallocHandle, GraphicBuffer::CLONE_HANDLE, width, height, format,
- 1, mUsageToBeMigrated, stride);
- native_handle_delete(grallocHandle);
- if (graphicBuffer->initCheck() != android::NO_ERROR) {
- ALOGE("Failed to create GraphicBuffer: %d", graphicBuffer->initCheck());
- return false;
- }
- graphicBuffer->setGenerationNumber(mGenerationToBeMigrated);
-
- // Attach GraphicBuffer to producer.
- const auto attachStatus = producer->attachBuffer(graphicBuffer, newSlot);
- if (attachStatus == TIMED_OUT || attachStatus == INVALID_OPERATION) {
- ALOGV("%s(): No free slot yet.", __func__);
- return TIMED_OUT;
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
- if (attachStatus != OK) {
- ALOGE("%s(): Failed to attach buffer to new producer: %d", __func__, attachStatus);
- return attachStatus;
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
}
- ALOGD("%s(), migrated lost buffer uniqueId=%u to slot=%d", __func__, uniqueId, *newSlot);
- updateSlotBuffer(*newSlot, uniqueId, graphicBuffer);
-
- // Wrap the new GraphicBuffer to C2GraphicAllocation and register it.
- std::shared_ptr<C2GraphicAllocation> allocation =
- ConvertGraphicBuffer2C2Allocation(graphicBuffer, producerId, *newSlot, allocator);
- if (!allocation) {
- return UNKNOWN_ERROR;
+ if (status != android::NO_ERROR) {
+ ALOGW("%s() failed: %d", __func__, status);
}
- registerUniqueId(uniqueId, std::move(allocation));
-
- // Note: C2ArcProtectedGraphicAllocator releases the protected buffers if all the
- // corresponding C2GraphicAllocations are released. To prevent the protected buffer from being
- // released and then allocated again, we release the old C2GraphicAllocation after the new
- // one has been created.
- mAllocationsToBeMigrated.erase(iter);
-
- return OK;
+ return status;
}
- void onBufferDequeued(slot_t slotId) {
- ALOGV("%s(slotId=%d)", __func__, slotId);
- unique_id_t uniqueId;
- std::tie(uniqueId, std::ignore) = getSlotBuffer(slotId);
+ status_t getUniqueId(uint64_t* outId) const {
+ Return<uint64_t> transResult = mBase->getUniqueId();
- moveBufferToRegistered(uniqueId);
- if (mMigrateLostBufferCounter > 0) {
- --mMigrateLostBufferCounter;
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
- }
- size_t size() const { return mAllocationsRegistered.size() + mAllocationsToBeMigrated.size(); }
-
- std::string debugString() const {
- std::stringstream ss;
- ss << "tracked size: " << size() << std::endl;
- ss << " registered uniqueIds: ";
- for (const auto& pair : mAllocationsRegistered) {
- ss << pair.first << ", ";
+ *outId = static_cast<uint64_t>(transResult);
+ return android::NO_ERROR;
+ }
+
+ // android::IProducerListener cannot be depended on by vendor libraries, so we use HProducerListener
+ // directly.
+ status_t connect(sp<HProducerListener> const& hListener, int32_t api,
+ bool producerControlledByApp) {
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ // hack(b/146409777): We pass a self-defined api value, so we don't use b2h() here.
+ Return<void> transResult = mBase->connect(
+ hListener, static_cast<HConnectionType>(api), producerControlledByApp,
+ [&converted, &status](HStatus hStatus, HQueueBufferOutput const& /* hOutput */) {
+ converted = h2b(hStatus, &status);
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
- ss << std::endl;
- ss << " to-be-migrated uniqueIds: ";
- for (const auto& pair : mAllocationsToBeMigrated) {
- ss << pair.first << ", ";
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
}
- ss << std::endl;
- ss << " Count down for lost buffer migration: " << mMigrateLostBufferCounter;
- return ss.str();
+ return status;
}
-private:
- bool moveBufferToRegistered(unique_id_t uniqueId) {
- ALOGV("%s(uniqueId=%u)", __func__, uniqueId);
- auto iter = mAllocationsToBeMigrated.find(uniqueId);
- if (iter == mAllocationsToBeMigrated.end()) {
- return false;
+ status_t setDequeueTimeout(nsecs_t timeout) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult = mBase->setDequeueTimeout(static_cast<int64_t>(timeout));
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
}
- if (!mAllocationsRegistered.insert(*iter).second) {
- ALOGE("%s() duplicated uniqueId=%u", __func__, uniqueId);
- return false;
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
}
- mAllocationsToBeMigrated.erase(iter);
-
- return true;
+ return status;
}
- // Mapping from IGBP slots to the corresponding graphic buffers.
- std::map<slot_t, std::pair<unique_id_t, sp<GraphicBuffer>>> mSlotId2GraphicBuffer;
+private:
+ const sp<HGraphicBufferProducer> mBase;
+};
- // Mapping from IGBP slots to the corresponding pool data.
- std::map<slot_t, std::weak_ptr<C2BufferQueueBlockPoolData>> mSlotId2PoolData;
+// This class is used to notify the listener when a certain event happens.
+class EventNotifier : public virtual android::RefBase {
+public:
+ class Listener {
+ public:
+ virtual ~Listener() = default;
- // Track the buffers registered at the current producer.
- std::map<unique_id_t, std::shared_ptr<C2GraphicAllocation>> mAllocationsRegistered;
+ // Called by EventNotifier when a certain event happens.
+ virtual void onEventNotified() = 0;
+ };
- // Track the buffers that should be migrated to the current producer.
- std::map<unique_id_t, std::shared_ptr<C2GraphicAllocation>> mAllocationsToBeMigrated;
+ explicit EventNotifier(const std::shared_ptr<Listener>& listener) : mListener(listener) {}
+ virtual ~EventNotifier() = default;
- // The counter for migrating lost buffers. Count down when a buffer is
- // dequeued from IGBP. When it reaches 0, we treat the remaining
- // buffers in |mAllocationsToBeMigrated| as lost, and migrate them to
- // the current IGBP.
- size_t mMigrateLostBufferCounter = 0;
+protected:
+ void notify() {
+ ALOGV("%s()", __func__);
+ std::shared_ptr<Listener> listener = mListener.lock();
+ if (listener) {
+ listener->onEventNotified();
+ }
+ }
- // The generation and usage of the current IGBP, used to migrate buffers.
- uint32_t mGenerationToBeMigrated = 0;
- uint64_t mUsageToBeMigrated = 0;
+ std::weak_ptr<Listener> mListener;
};
-class DrmHandleManager {
+// Notifies the listener when the connected IGBP releases buffers.
+class BufferReleasedNotifier : public EventNotifier, public HProducerListener {
public:
- DrmHandleManager() { mRenderFd = openRenderFd(); }
+ using EventNotifier::EventNotifier;
+ ~BufferReleasedNotifier() override = default;
- ~DrmHandleManager() {
- closeAllHandles();
- if (mRenderFd) {
- close(*mRenderFd);
+ // HProducerListener implementation
+ Return<void> onBuffersReleased(uint32_t count) override {
+ ALOGV("%s(%u)", __func__, count);
+ if (count > 0) {
+ notify();
}
+ return {};
}
+};
- std::optional<unique_id_t> getHandle(int primeFd) {
- if (!mRenderFd) {
- return std::nullopt;
- }
+/**
+ * BlockPoolData implementation for C2VdaBqBlockPool. The life cycle of this object should be as
+ * long as that of its accompanying C2GraphicBlock.
+ *
+ * When C2VdaBqBlockPoolData is created, |mShared| is false, and the owner of the accompanying
+ * C2GraphicBlock is the component that called fetchGraphicBlock(). If this is released before
+ * sharing, the destructor will call detachBuffer() on the BufferQueue to free the slot.
+ *
+ * When the accompanying C2GraphicBlock is about to be shared from the component to the client,
+ * the component should call MarkBlockPoolDataAsShared() to set |mShared| to true; this object is
+ * then released after the C2GraphicBlock transits across the HIDL interface. At that point, the
+ * destructor will not call detachBuffer().
+ */
+struct C2VdaBqBlockPoolData : public _C2BlockPoolData {
+ // This type should be a value different from those already defined in _C2BlockPoolData::type_t.
+ static constexpr int kTypeVdaBufferQueue = TYPE_BUFFERQUEUE + 256;
+
+ C2VdaBqBlockPoolData(uint64_t producerId, int32_t slotId,
+ const std::shared_ptr<C2VdaBqBlockPool::Impl>& pool);
+ C2VdaBqBlockPoolData() = delete;
+
+ // If |mShared| is false, calls detachBuffer() on the BufferQueue via |mPool|.
+ virtual ~C2VdaBqBlockPoolData() override;
+
+ type_t getType() const override { return static_cast<type_t>(kTypeVdaBufferQueue); }
+
+ bool mShared = false; // Whether the block is shared from component to client.
+ const uint64_t mProducerId;
+ const int32_t mSlotId;
+ const std::shared_ptr<C2VdaBqBlockPool::Impl> mPool;
+};
- std::optional<unique_id_t> handle = getDrmHandle(*mRenderFd, primeFd);
- // Defer closing the handle until the buffer is no longer needed, so that the returned DRM
- // handle stays the same.
- if (handle) {
- mHandles.insert(*handle);
- }
- return handle;
+c2_status_t MarkBlockPoolDataAsShared(const C2ConstGraphicBlock& sharedBlock) {
+ std::shared_ptr<_C2BlockPoolData> data = _C2BlockFactory::GetGraphicBlockPoolData(sharedBlock);
+ if (!data || data->getType() != C2VdaBqBlockPoolData::kTypeVdaBufferQueue) {
+ // Skip this function if |sharedBlock| is not fetched from C2VdaBqBlockPool.
+ return C2_OMITTED;
}
-
- void closeAllHandles() {
- if (!mRenderFd) {
- return;
- }
-
- for (const unique_id_t& handle : mHandles) {
- closeDrmHandle(*mRenderFd, handle);
- }
- mHandles.clear();
+ const std::shared_ptr<C2VdaBqBlockPoolData> poolData =
+ std::static_pointer_cast<C2VdaBqBlockPoolData>(data);
+ if (poolData->mShared) {
+ ALOGE("C2VdaBqBlockPoolData(id=%" PRIu64 ", slot=%d) is already marked as shared...",
+ poolData->mProducerId, poolData->mSlotId);
+ return C2_BAD_STATE;
}
+ poolData->mShared = true;
+ return C2_OK;
+}
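A simplified model of the sharing contract implemented above, using hypothetical types rather than the real Codec2 classes: pool data starts out un-shared, and its destructor frees the slot unless MarkBlockPoolDataAsShared() was called first:

#include <cstdio>
#include <memory>

struct FakePoolData {  // Hypothetical stand-in for C2VdaBqBlockPoolData.
    bool shared = false;
    ~FakePoolData() {
        if (!shared) {
            std::printf("detaching slot back to the BufferQueue\n");  // Models detachBuffer().
        }
    }
};

void demo() {
    auto localBlock = std::make_shared<FakePoolData>();
    // Block stays inside the component: destruction detaches the slot.

    auto sharedBlock = std::make_shared<FakePoolData>();
    sharedBlock->shared = true;  // Models MarkBlockPoolDataAsShared().
    // Block crosses the HIDL boundary to the client: no detach on release.
}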
-private:
- std::optional<int> mRenderFd;
- std::set<unique_id_t> mHandles;
-};
+// static
+std::optional<uint32_t> C2VdaBqBlockPool::getBufferIdFromGraphicBlock(const C2Block2D& block) {
+ uint32_t width, height, format, stride, igbp_slot, generation;
+ uint64_t usage, igbp_id;
+ android::_UnwrapNativeCodec2GrallocMetadata(block.handle(), &width, &height, &format, &usage,
+ &stride, &generation, &igbp_id, &igbp_slot);
+ ALOGV("Unwrap Metadata: igbp[%" PRIu64 ", %u] (%u*%u, fmt %#x, usage %" PRIx64 ", stride %u)",
+ igbp_id, igbp_slot, width, height, format, usage, stride);
+ return igbp_slot;
+}
class C2VdaBqBlockPool::Impl : public std::enable_shared_from_this<C2VdaBqBlockPool::Impl>,
public EventNotifier::Listener {
@@ -466,12 +452,15 @@ public:
std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
void setRenderCallback(const C2BufferQueueBlockPool::OnRenderCallback& renderCallback);
void configureProducer(const sp<HGraphicBufferProducer>& producer);
- c2_status_t requestNewBufferSet(int32_t bufferCount, uint32_t width, uint32_t height,
- uint32_t format, C2MemoryUsage usage);
+ c2_status_t requestNewBufferSet(int32_t bufferCount);
+ c2_status_t updateGraphicBlock(bool willCancel, uint32_t oldSlot, uint32_t* newSlot,
+ std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
+ c2_status_t getMinBuffersForDisplay(size_t* bufferCount);
bool setNotifyBlockAvailableCb(::base::OnceClosure cb);
- std::optional<unique_id_t> getBufferIdFromGraphicBlock(const C2Block2D& block);
private:
+ friend struct C2VdaBqBlockPoolData;
+
// Requested buffer formats.
struct BufferFormat {
BufferFormat(uint32_t width, uint32_t height, uint32_t pixelFormat,
@@ -485,145 +474,133 @@ private:
C2AndroidMemoryUsage mUsage = C2MemoryUsage(0);
};
- status_t getFreeSlotLocked(uint32_t width, uint32_t height, uint32_t format,
- C2MemoryUsage usage, slot_t* slot, sp<Fence>* fence);
+ // For C2VdaBqBlockPoolData to detach corresponding slot buffer from BufferQueue.
+ void detachBuffer(uint64_t producerId, int32_t slotId);
// Queries the generation and usage flags from the given producer by dequeuing and requesting a
// buffer (the buffer is then detached and freed).
- status_t queryGenerationAndUsageLocked(uint32_t width, uint32_t height, uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage, uint32_t* generation,
- uint64_t* usage);
-
- // Wait the fence. If any error occurs, cancel the buffer back to the producer.
- status_t waitFence(slot_t slot, sp<Fence> fence);
+ c2_status_t queryGenerationAndUsage(H2BGraphicBufferProducer* const producer, uint32_t width,
+ uint32_t height, uint32_t pixelFormat,
+ C2AndroidMemoryUsage androidUsage, uint32_t* generation,
+ uint64_t* usage);
- // Call mProducer's allowAllocation if needed.
- status_t allowAllocation(bool allow);
+ // Switches producer and transfers allocated buffers from old producer to the new one.
+ bool switchProducer(H2BGraphicBufferProducer* const newProducer, uint64_t newProducerId);
const std::shared_ptr<C2Allocator> mAllocator;
std::unique_ptr<H2BGraphicBufferProducer> mProducer;
- uint64_t mProducerId = 0;
- bool mAllowAllocation = false;
-
+ uint64_t mProducerId;
C2BufferQueueBlockPool::OnRenderCallback mRenderCallback;
// Function mutex to lock at the start of each API function call for protecting the
// synchronization of all member variables.
std::mutex mMutex;
-
- TrackedGraphicBuffers mTrackedGraphicBuffers;
-
- // We treat DRM handle as uniqueId of GraphicBuffer.
- DrmHandleManager mDrmHandleManager;
-
+ // The mutex that makes the procedures of configuring the producer and allocating buffers
+ // mutually exclusive. A timeout is set for acquiring the lock in case of any deadlock.
+ // Configuring producer: configureProducer() called by CCodec.
+ // Allocating buffers: requestNewBufferSet(), then a loop of fetchGraphicBlock() called by the
+ // component until |mSlotAllocations|.size() equals |mBuffersRequested|.
+ std::timed_mutex mConfigureProducerAndAllocateBuffersMutex;
+ // The unique lock for the buffer allocation procedure. It is locked at the beginning of
+ // requestNewBufferSet() and unlocked at the end of the fetchGraphicBlock() loop. Note that
+ // all calls should be made on the same thread.
+ std::unique_lock<std::timed_mutex> mAllocateBuffersLock;
+
+ // The map from slot index to the corresponding restored C2GraphicAllocation.
+ std::map<int32_t, std::shared_ptr<C2GraphicAllocation>> mSlotAllocations;
// Number of buffers requested on requestNewBufferSet() call.
- size_t mBuffersRequested = 0u;
+ size_t mBuffersRequested;
// Currently requested buffer formats.
BufferFormat mBufferFormat;
+ // The map recording the slot index mapping from the old producer to the new producer.
+ std::map<int32_t, int32_t> mProducerChangeSlotMap;
+ // The counter representing the number of buffers held by the client. Only used in the
+ // producer switching case. It is reset in switchProducer(), and accumulated in the
+ // updateGraphicBlock() routine.
+ uint32_t mBuffersInClient = 0u;
+ // The indicator recording whether the producer has been switched. Set to true when the
+ // producer is switched; toggled off when requestNewBufferSet() is called. We forcibly detach
+ // all slots to make sure all slots are available, except the ones owned by the client.
+ bool mProducerSwitched = false;
// Listener for buffer release events.
sp<EventNotifier> mFetchBufferNotifier;
std::mutex mBufferReleaseMutex;
- // Set to true when the buffer release event is triggered after dequeueing buffer from IGBP
- // times out. Reset when fetching new slot times out, or |mNotifyBlockAvailableCb| is executed.
+ // Set to true when the buffer release event is triggered after dequeueing
+ // buffer from IGBP times out.
bool mBufferReleasedAfterTimedOut GUARDED_BY(mBufferReleaseMutex) = false;
// The callback to notify the caller the buffer is available.
::base::OnceClosure mNotifyBlockAvailableCb GUARDED_BY(mBufferReleaseMutex);
-
- // Set to true if any error occurs at previous configureProducer().
- bool mConfigureProducerError = false;
};
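The two lock members above encode a cross-call protocol: requestNewBufferSet() locks the timed mutex with a timeout, the final fetchGraphicBlock() of the allocation loop unlocks it, and configureProducer() contends for the same mutex. A minimal sketch of that pattern under hypothetical names; kLockTimeout mirrors kTimedMutexTimeoutMs, and all AllocationGate calls are assumed to happen on one thread, as the comment above requires:

#include <chrono>
#include <mutex>

constexpr std::chrono::milliseconds kLockTimeout{500};

class AllocationGate {
public:
    AllocationGate() : mAllocateLock(mMutex, std::defer_lock) {}

    // Models requestNewBufferSet(): acquire with a timeout so a wedged
    // configure step cannot deadlock the caller forever.
    bool beginAllocation() { return mAllocateLock.try_lock_for(kLockTimeout); }

    // Models the last fetchGraphicBlock() of the allocation loop.
    void endAllocation() {
        if (mAllocateLock.owns_lock()) mAllocateLock.unlock();
    }

    // Models configureProducer(): mutually exclusive with allocation.
    bool tryConfigure() {
        std::unique_lock<std::timed_mutex> lock(mMutex, kLockTimeout);
        return lock.owns_lock();
    }

private:
    std::timed_mutex mMutex;
    std::unique_lock<std::timed_mutex> mAllocateLock;
};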
C2VdaBqBlockPool::Impl::Impl(const std::shared_ptr<C2Allocator>& allocator)
- : mAllocator(allocator) {}
+ : mAllocator(allocator),
+ mAllocateBuffersLock(mConfigureProducerAndAllocateBuffersMutex, std::defer_lock),
+ mBuffersRequested(0u) {}
c2_status_t C2VdaBqBlockPool::Impl::fetchGraphicBlock(
uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
- ALOGV("%s(%ux%u)", __func__, width, height);
+ ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mMutex);
- if (width != mBufferFormat.mWidth || height != mBufferFormat.mHeight ||
- format != mBufferFormat.mPixelFormat || usage.expected != mBufferFormat.mUsage.expected) {
- ALOGE("%s(): buffer format (%ux%u, format=%u, usage=%" PRIx64
- ") is different from requested format (%ux%u, format=%u, usage=%" PRIx64 ")",
- __func__, width, height, format, usage.expected, mBufferFormat.mWidth,
- mBufferFormat.mHeight, mBufferFormat.mPixelFormat, mBufferFormat.mUsage.expected);
- return C2_BAD_VALUE;
+ if (!mProducer) {
+ // Producer will not be configured in byte-buffer mode. Allocate buffers from allocator
+ // directly as a basic graphic block pool.
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ c2_status_t err = mAllocator->newGraphicAllocation(width, height, format, usage, &alloc);
+ if (err != C2_OK) {
+ return err;
+ }
+ *block = _C2BlockFactory::CreateGraphicBlock(alloc);
+ return C2_OK;
}
- if (mConfigureProducerError || !mProducer) {
- ALOGE("%s(): error occurred at previous configureProducer()", __func__);
- return C2_CORRUPTED;
+
+ // A non-empty |mProducerChangeSlotMap| indicates the producer was just switched. Use return
+ // code C2_BAD_STATE to inform the component that it should handle the producer change
+ // procedure.
+ // TODO(johnylin): consider informing the component of producer changes in an active way.
+ if (!mProducerChangeSlotMap.empty()) {
+ return C2_BAD_STATE;
}
- slot_t slot;
+ C2AndroidMemoryUsage androidUsage = usage;
+ uint32_t pixelFormat = format;
+ int32_t slot;
sp<Fence> fence = new Fence();
- const auto status = getFreeSlotLocked(width, height, format, usage, &slot, &fence);
- if (status != OK) {
+ status_t status =
+ mProducer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
+ // The C2VdaBqBlockPool does not fully own the bufferqueue. After buffers are dequeued here,
+ // they are passed into the codec2 framework, processed, and eventually queued into the
+ // bufferqueue. The C2VdaBqBlockPool cannot determine exactly when a buffer gets queued.
+ // However, if every buffer is being processed by the codec2 framework, then dequeueBuffer()
+ // will return INVALID_OPERATION because of an attempt to dequeue too many buffers.
+ // The C2VdaBqBlockPool cannot prevent this from happening, so just map it to TIMED_OUT
+ // and let the C2VdaBqBlockPool's caller's timeout retry logic handle the failure.
+ if (status == android::INVALID_OPERATION) {
+ status = android::TIMED_OUT;
+ }
+ if (status == android::TIMED_OUT) {
+ std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
+ mBufferReleasedAfterTimedOut = false;
+ }
+ if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION) {
return asC2Error(status);
}
- unique_id_t uniqueId;
- sp<GraphicBuffer> slotBuffer;
- std::tie(uniqueId, slotBuffer) = mTrackedGraphicBuffers.getSlotBuffer(slot);
- ALOGV("%s(): dequeued slot=%d uniqueId=%u", __func__, slot, uniqueId);
-
- if (!mTrackedGraphicBuffers.hasUniqueId(uniqueId)) {
- if (mTrackedGraphicBuffers.size() >= mBuffersRequested) {
- // The dequeued slot has a pre-allocated buffer whose size and format are the same as
- // currently requested (but it was not dequeued during the allocation cycle). Just detach it
- // to free this slot, and try dequeueBuffer again.
- ALOGD("dequeued a new slot %d but already allocated enough buffers. Detach it.", slot);
-
- if (mProducer->detachBuffer(slot) != OK) {
+ // Wait for acquire fence if we get one.
+ if (fence) {
+ status_t fenceStatus = fence->wait(kFenceWaitTimeMs);
+ if (fenceStatus != android::NO_ERROR) {
+ if (mProducer->cancelBuffer(slot, fence) != android::NO_ERROR) {
return C2_CORRUPTED;
}
- const auto allocationStatus = allowAllocation(false);
- if (allocationStatus != OK) {
- return asC2Error(allocationStatus);
+ if (fenceStatus == -ETIME) { // fence wait timed out
+ ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
+ return C2_TIMED_OUT;
}
- return C2_TIMED_OUT;
- }
-
- std::shared_ptr<C2GraphicAllocation> allocation =
- ConvertGraphicBuffer2C2Allocation(slotBuffer, mProducerId, slot, mAllocator.get());
- if (!allocation) {
- return C2_CORRUPTED;
- }
- mTrackedGraphicBuffers.registerUniqueId(uniqueId, std::move(allocation));
-
- ALOGV("%s(): mTrackedGraphicBuffers.size=%zu", __func__, mTrackedGraphicBuffers.size());
- if (mTrackedGraphicBuffers.size() == mBuffersRequested) {
- ALOGV("Tracked IGBP slots: %s", mTrackedGraphicBuffers.debugString().c_str());
- // Already allocated enough buffers, set allowAllocation to false to restrict the
- // eligible slots to allocated ones for future dequeue.
- const auto allocationStatus = allowAllocation(false);
- if (allocationStatus != OK) {
- return asC2Error(allocationStatus);
- }
- }
- }
-
- std::shared_ptr<C2SurfaceSyncMemory> syncMem;
- std::shared_ptr<C2GraphicAllocation> allocation =
- mTrackedGraphicBuffers.getRegisteredAllocation(uniqueId);
- auto poolData = std::make_shared<C2BufferQueueBlockPoolData>(
- slotBuffer->getGenerationNumber(), mProducerId, slot,
- mProducer->getBase(), syncMem, 0);
- mTrackedGraphicBuffers.updatePoolData(slot, poolData);
- *block = _C2BlockFactory::CreateGraphicBlock(std::move(allocation), std::move(poolData));
- if (*block == nullptr) {
- ALOGE("failed to create GraphicBlock: no memory");
- return C2_NO_MEMORY;
- }
-
- // Wait for acquire fence at the last point of returning buffer.
- if (fence) {
- const auto fenceStatus = waitFence(slot, fence);
- if (fenceStatus != OK) {
+ ALOGE("buffer fence wait error: %d", fenceStatus);
return asC2Error(fenceStatus);
}
@@ -637,54 +614,73 @@ c2_status_t C2VdaBqBlockPool::Impl::fetchGraphicBlock(
}
}
- return C2_OK;
-}
+ auto iter = mSlotAllocations.find(slot);
+ if (iter == mSlotAllocations.end()) {
+ if (mSlotAllocations.size() >= mBuffersRequested) {
+ // The dequeued slot has a pre-allocated buffer whose size and format are the same as
+ // currently requested (but it was not dequeued during the allocation cycle). Just detach it
+ // to free this slot, and try dequeueBuffer again.
+ ALOGD("dequeued a new slot index but already allocated enough buffers. Detach it.");
-status_t C2VdaBqBlockPool::Impl::getFreeSlotLocked(uint32_t width, uint32_t height, uint32_t format,
- C2MemoryUsage usage, slot_t* slot,
- sp<Fence>* fence) {
- if (mTrackedGraphicBuffers.needMigrateLostBuffers()) {
- slot_t newSlot;
- if (mTrackedGraphicBuffers.migrateLostBuffer(mAllocator.get(), mProducer.get(), mProducerId,
- &newSlot) == OK) {
- ALOGV("%s(): migrated buffer: slot=%d", __func__, newSlot);
- *slot = newSlot;
- return OK;
+ if (mProducer->detachBuffer(slot) != android::NO_ERROR) {
+ return C2_CORRUPTED;
+ }
+ return C2_TIMED_OUT;
+ }
+ if (status != BUFFER_NEEDS_REALLOCATION) {
+ // The dequeued slot has a pre-allocated buffer whose size and format are the same as
+ // currently requested, so there is no BUFFER_NEEDS_REALLOCATION flag. However, since the
+ // buffer reference has already been dropped, we still call requestBuffer to re-allocate.
+ // Add a debug note here for tracking.
+ ALOGD("dequeued a new slot index without BUFFER_NEEDS_REALLOCATION flag.");
}
- }
-
- // Dequeue a free slot from IGBP.
- ALOGV("%s(): try to dequeue free slot from IGBP.", __func__);
- const auto dequeueStatus = mProducer->dequeueBuffer(width, height, format, usage, slot, fence);
- if (dequeueStatus == TIMED_OUT) {
- std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
- mBufferReleasedAfterTimedOut = false;
- }
- if (dequeueStatus != OK && dequeueStatus != BUFFER_NEEDS_REALLOCATION) {
- return dequeueStatus;
- }
- // Call requestBuffer to update GraphicBuffer for the slot and obtain the reference.
- if (!mTrackedGraphicBuffers.hasSlotId(*slot) || dequeueStatus == BUFFER_NEEDS_REALLOCATION) {
+ // Call requestBuffer to allocate buffer for the slot and obtain the reference.
sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
- const auto requestStatus = mProducer->requestBuffer(*slot, &slotBuffer);
- if (requestStatus != OK) {
- mProducer->cancelBuffer(*slot, *fence);
- return requestStatus;
+ status = mProducer->requestBuffer(slot, &slotBuffer);
+ if (status != android::NO_ERROR) {
+ if (mProducer->cancelBuffer(slot, fence) != android::NO_ERROR) {
+ return C2_CORRUPTED;
+ }
+ return asC2Error(status);
}
- const auto uniqueId = mDrmHandleManager.getHandle(slotBuffer->handle->data[0]);
- if (!uniqueId) {
- ALOGE("%s(): failed to get uniqueId of GraphicBuffer from slot=%d", __func__, *slot);
- return UNKNOWN_ERROR;
+ // Convert GraphicBuffer to C2GraphicAllocation and wrap producer id and slot index
+ ALOGV("buffer wraps { producer id: %" PRIu64 ", slot: %d }", mProducerId, slot);
+ C2Handle* c2Handle = android::WrapNativeCodec2GrallocHandle(
+ slotBuffer->handle, slotBuffer->width, slotBuffer->height, slotBuffer->format,
+ slotBuffer->usage, slotBuffer->stride, slotBuffer->getGenerationNumber(),
+ mProducerId, slot);
+ if (!c2Handle) {
+ ALOGE("WrapNativeCodec2GrallocHandle failed");
+ return C2_NO_MEMORY;
+ }
+
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
+ if (err != C2_OK) {
+ ALOGE("priorGraphicAllocation failed: %d", err);
+ return err;
+ }
+
+ mSlotAllocations[slot] = std::move(alloc);
+ if (mSlotAllocations.size() == mBuffersRequested) {
+ // Already allocated enough buffers, set allowAllocation to false to restrict the
+ // eligible slots to allocated ones for future dequeue.
+ status = mProducer->allowAllocation(false);
+ if (status != android::NO_ERROR) {
+ return asC2Error(status);
+ }
+ // Store buffer formats for future usage.
+ mBufferFormat = BufferFormat(width, height, pixelFormat, androidUsage);
+ ALOG_ASSERT(mAllocateBuffersLock.owns_lock());
+ mAllocateBuffersLock.unlock();
}
- mTrackedGraphicBuffers.updateSlotBuffer(*slot, *uniqueId, std::move(slotBuffer));
}
- ALOGV("%s(%ux%u): dequeued slot=%d", __func__, mBufferFormat.mWidth, mBufferFormat.mHeight,
- *slot);
- mTrackedGraphicBuffers.onBufferDequeued(*slot);
- return OK;
+ auto poolData = std::make_shared<C2VdaBqBlockPoolData>(mProducerId, slot, shared_from_this());
+ *block = _C2BlockFactory::CreateGraphicBlock(mSlotAllocations[slot], std::move(poolData));
+ return C2_OK;
}
void C2VdaBqBlockPool::Impl::onEventNotified() {
@@ -695,7 +691,6 @@ void C2VdaBqBlockPool::Impl::onEventNotified() {
mBufferReleasedAfterTimedOut = true;
if (mNotifyBlockAvailableCb) {
- mBufferReleasedAfterTimedOut = false;
outputCb = std::move(mNotifyBlockAvailableCb);
}
}
@@ -706,62 +701,56 @@ void C2VdaBqBlockPool::Impl::onEventNotified() {
}
}
-status_t C2VdaBqBlockPool::Impl::queryGenerationAndUsageLocked(uint32_t width, uint32_t height,
- uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage,
- uint32_t* generation,
- uint64_t* usage) {
- ALOGV("%s()", __func__);
-
+c2_status_t C2VdaBqBlockPool::Impl::queryGenerationAndUsage(
+ H2BGraphicBufferProducer* const producer, uint32_t width, uint32_t height,
+ uint32_t pixelFormat, C2AndroidMemoryUsage androidUsage, uint32_t* generation,
+ uint64_t* usage) {
+ ALOGV("queryGenerationAndUsage");
sp<Fence> fence = new Fence();
- slot_t slot;
- const auto dequeueStatus =
- mProducer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
- if (dequeueStatus != OK && dequeueStatus != BUFFER_NEEDS_REALLOCATION) {
- return dequeueStatus;
+ int32_t status;
+ int32_t slot;
+
+ status = producer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
+ if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION) {
+ return asC2Error(status);
+ }
+
+ // Wait for acquire fence if we get one.
+ if (fence) {
+ status_t fenceStatus = fence->wait(kFenceWaitTimeMs);
+ if (fenceStatus != android::NO_ERROR) {
+ if (producer->cancelBuffer(slot, fence) != android::NO_ERROR) {
+ return C2_CORRUPTED;
+ }
+ if (fenceStatus == -ETIME) { // fence wait timed out
+ ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
+ return C2_TIMED_OUT;
+ }
+ ALOGE("buffer fence wait error: %d", fenceStatus);
+ return asC2Error(fenceStatus);
+ }
}
// Call requestBuffer to allocate buffer for the slot and obtain the reference.
// Get generation number here.
sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
- const auto requestStatus = mProducer->requestBuffer(slot, &slotBuffer);
+ status = producer->requestBuffer(slot, &slotBuffer);
// Detach and delete the temporary buffer.
- const auto detachStatus = mProducer->detachBuffer(slot);
- if (detachStatus != OK) {
- return detachStatus;
+ if (producer->detachBuffer(slot) != android::NO_ERROR) {
+ return C2_CORRUPTED;
}
// Check requestBuffer return flag.
- if (requestStatus != OK) {
- return requestStatus;
+ if (status != android::NO_ERROR) {
+ return asC2Error(status);
}
// Get generation number and usage from the slot buffer.
*usage = slotBuffer->getUsage();
*generation = slotBuffer->getGenerationNumber();
ALOGV("Obtained from temp buffer: generation = %u, usage = %" PRIu64 "", *generation, *usage);
- return OK;
-}
-
-status_t C2VdaBqBlockPool::Impl::waitFence(slot_t slot, sp<Fence> fence) {
- const auto fenceStatus = fence->wait(kFenceWaitTimeMs);
- if (fenceStatus == OK) {
- return OK;
- }
-
- const auto cancelStatus = mProducer->cancelBuffer(slot, fence);
- if (cancelStatus != OK) {
- ALOGE("%s(): failed to cancelBuffer(slot=%d)", __func__, slot);
- return cancelStatus;
- }
-
- if (fenceStatus == -ETIME) { // fence wait timed out
- ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
- return TIMED_OUT;
- }
- ALOGE("buffer fence wait error: %d", fenceStatus);
- return fenceStatus;
+ return C2_OK;
}
void C2VdaBqBlockPool::Impl::setRenderCallback(
@@ -771,135 +760,323 @@ void C2VdaBqBlockPool::Impl::setRenderCallback(
mRenderCallback = renderCallback;
}
-c2_status_t C2VdaBqBlockPool::Impl::requestNewBufferSet(int32_t bufferCount, uint32_t width,
- uint32_t height, uint32_t format,
- C2MemoryUsage usage) {
- ALOGV("%s(bufferCount=%d, size=%ux%u, format=0x%x, usage=%" PRIu64 ")", __func__, bufferCount,
- width, height, format, usage.expected);
-
+c2_status_t C2VdaBqBlockPool::Impl::requestNewBufferSet(int32_t bufferCount) {
if (bufferCount <= 0) {
ALOGE("Invalid requested buffer count = %d", bufferCount);
return C2_BAD_VALUE;
}
+ if (!mAllocateBuffersLock.try_lock_for(kTimedMutexTimeoutMs)) {
+ ALOGE("Cannot acquire allocate buffers / configure producer lock over %" PRId64 " ms...",
+ static_cast<int64_t>(kTimedMutexTimeoutMs.count()));
+ return C2_BLOCKING;
+ }
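+ // The timed mutex stays locked here; the allocation path unlocks it once |mBuffersRequested|
+ // buffers have been dequeued and stored into |mSlotAllocations|.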
+
std::lock_guard<std::mutex> lock(mMutex);
if (!mProducer) {
ALOGD("No HGraphicBufferProducer is configured...");
return C2_NO_INIT;
}
- if (mBuffersRequested == static_cast<size_t>(bufferCount) && mBufferFormat.mWidth == width &&
- mBufferFormat.mHeight == height && mBufferFormat.mPixelFormat == format &&
- mBufferFormat.mUsage.expected == usage.expected) {
- ALOGD("%s() Request the same format and amount of buffers, skip", __func__);
- return C2_OK;
+
+ if (mProducerSwitched) {
+ // Some slots may still be occupied by buffers transferred from the old producer. They will
+ // not be used by the current producer, so free those slots here. However, we cannot tell
+ // which slot is associated with a stale buffer, so we free every slot whose associated
+ // buffer is not owned by the client.
+ ALOGI("requestNewBufferSet: forcibly detach buffers from all slots");
+ for (int32_t slot = 0; slot < static_cast<int32_t>(NUM_BUFFER_SLOTS); ++slot) {
+ if (mSlotAllocations.find(slot) != mSlotAllocations.end()) {
+ // Skip detaching the buffer which is owned by client now.
+ continue;
+ }
+ status_t status = mProducer->detachBuffer(slot);
+ if (status == android::NO_INIT) {
+ // No more active buffer slot. Break the loop now.
+ break;
+ } else if (status != android::NO_ERROR) {
+ return C2_CORRUPTED;
+ }
+ }
+ mProducerSwitched = false;
}
- const auto status = allowAllocation(true);
- if (status != OK) {
+ ALOGV("Requested new buffer count: %d, still dequeued buffer count: %zu", bufferCount,
+ mSlotAllocations.size());
+
+ // The remaining slot indices in |mSlotAllocations| are still dequeued (unavailable).
+ // maxDequeuedBufferCount should be set to "newly requested buffer count" + "still-dequeued
+ // buffer count" to make sure there are enough available slots to request buffers from.
+ status_t status = mProducer->setMaxDequeuedBufferCount(bufferCount + mSlotAllocations.size());
+ if (status != android::NO_ERROR) {
return asC2Error(status);
}
// Release all remaining slot buffer references here. CCodec should either cancel or queue its
// owned buffers from this set before the next resolution change.
- mTrackedGraphicBuffers.reset();
- mDrmHandleManager.closeAllHandles();
-
+ mSlotAllocations.clear();
+ mProducerChangeSlotMap.clear();
mBuffersRequested = static_cast<size_t>(bufferCount);
- // Store buffer formats for future usage.
- mBufferFormat = BufferFormat(width, height, format, C2AndroidMemoryUsage(usage));
-
+ status = mProducer->allowAllocation(true);
+ if (status != android::NO_ERROR) {
+ return asC2Error(status);
+ }
return C2_OK;
}
void C2VdaBqBlockPool::Impl::configureProducer(const sp<HGraphicBufferProducer>& producer) {
- ALOGV("%s(producer=%p)", __func__, producer.get());
-
- std::lock_guard<std::mutex> lock(mMutex);
+ ALOGV("configureProducer");
if (producer == nullptr) {
- ALOGI("input producer is nullptr...");
+ ALOGE("input producer is nullptr...");
+ return;
+ }
- mProducer = nullptr;
- mProducerId = 0;
- mTrackedGraphicBuffers.reset();
- mDrmHandleManager.closeAllHandles();
+ std::unique_lock<std::timed_mutex> configureProducerLock(
+ mConfigureProducerAndAllocateBuffersMutex, std::defer_lock);
+ if (!configureProducerLock.try_lock_for(kTimedMutexTimeoutMs)) {
+ ALOGE("Cannot acquire configure producer / allocate buffers lock over %" PRId64 " ms...",
+ static_cast<int64_t>(kTimedMutexTimeoutMs.count()));
return;
}
+ std::lock_guard<std::mutex> lock(mMutex);
auto newProducer = std::make_unique<H2BGraphicBufferProducer>(producer);
- uint64_t newProducerId;
- if (newProducer->getUniqueId(&newProducerId) != OK) {
- ALOGE("%s(): failed to get IGBP ID", __func__);
- mConfigureProducerError = true;
+ uint64_t producerId;
+ if (newProducer->getUniqueId(&producerId) != android::NO_ERROR) {
return;
}
- if (newProducerId == mProducerId) {
- ALOGI("%s(): configure the same producer, ignore", __func__);
+
+ if (mProducer && mProducerId != producerId) {
+ ALOGI("Producer (Surface) is going to switch... ( %" PRIu64 " -> %" PRIu64 " )",
+ mProducerId, producerId);
+ if (!switchProducer(newProducer.get(), producerId)) {
+ mProducerChangeSlotMap.clear();
+ return;
+ }
+ } else {
+ mSlotAllocations.clear();
+ }
+
+ if (newProducer->setDequeueTimeout(0) != android::NO_ERROR) {
+ ALOGE("%s(): failed to setDequeueTimeout(0)", __func__);
return;
}
- ALOGI("Producer (Surface) is going to switch... ( 0x%" PRIx64 " -> 0x%" PRIx64 " )",
- mProducerId, newProducerId);
+ // hack(b/146409777): Try to connect the ARC-specific listener first.
+ sp<BufferReleasedNotifier> listener = new BufferReleasedNotifier(shared_from_this());
+ if (newProducer->connect(listener, 'ARC\0', false) == android::NO_ERROR) {
+ ALOGI("connected to ARC-specific IGBP listener.");
+ mFetchBufferNotifier = listener;
+ }
+
+ // The HGraphicBufferProducer could (and should) be replaced if the client has set a new
+ // generation number on the producer. The old HGraphicBufferProducer will then be disconnected
+ // and deprecated.
mProducer = std::move(newProducer);
- mProducerId = newProducerId;
- mConfigureProducerError = false;
- mAllowAllocation = false;
+ mProducerId = producerId;
+}
- // Set allowAllocation to new producer.
- if (allowAllocation(true) != OK) {
- ALOGE("%s(): failed to allowAllocation(true)", __func__);
- mConfigureProducerError = true;
- return;
+bool C2VdaBqBlockPool::Impl::switchProducer(H2BGraphicBufferProducer* const newProducer,
+ uint64_t newProducerId) {
+ if (mAllocator->getId() == android::V4L2AllocatorId::SECURE_GRAPHIC) {
+ // TODO(johnylin): support this if the use case arises in the future.
+ ALOGE("Switching producers for secure buffers is not supported...");
+ return false;
}
- if (mProducer->setDequeueTimeout(0) != OK) {
- ALOGE("%s(): failed to setDequeueTimeout(0)", __func__);
- mConfigureProducerError = true;
- return;
+
+ // Set maxDequeuedBufferCount on the new producer.
+ // Just like in requestNewBufferSet(), maxDequeuedBufferCount should be set to "requested
+ // buffer count" + "buffer count in client" to make sure there are enough available slots to
+ // request buffers from.
+ // The "requested buffer count" can be obtained from the size of |mSlotAllocations|. However,
+ // the block pool has no way of knowing the "buffer count in client". The alternative is to
+ // assume the worst case first, i.e. a client count equal to the size of |mSlotAllocations|
+ // (hence the factor of two below). At the end of the updateGraphicBlock() routine we can
+ // derive the actual "buffer count in client" by counting the calls to
+ // updateGraphicBlock(willCancel=true), and then set maxDequeuedBufferCount again to the
+ // correct value.
+ if (newProducer->setMaxDequeuedBufferCount(mSlotAllocations.size() * 2) != android::NO_ERROR) {
+ return false;
}
- if (mProducer->setMaxDequeuedBufferCount(kMaxDequeuedBufferCount) != OK) {
- ALOGE("%s(): failed to setMaxDequeuedBufferCount(%d)", __func__, kMaxDequeuedBufferCount);
- mConfigureProducerError = true;
- return;
+
+ // Reset "buffer count in client". It will be accumulated in updateGraphicBlock() routine.
+ mBuffersInClient = 0;
+
+ // Set allowAllocation on the new producer.
+ if (newProducer->allowAllocation(true) != android::NO_ERROR) {
+ return false;
}
- // Migrate existing buffers to the new producer.
- if (mTrackedGraphicBuffers.size() > 0) {
- uint32_t newGeneration = 0;
- uint64_t newUsage = 0;
- const status_t err = queryGenerationAndUsageLocked(
- mBufferFormat.mWidth, mBufferFormat.mHeight, mBufferFormat.mPixelFormat,
- mBufferFormat.mUsage, &newGeneration, &newUsage);
- if (err != OK) {
- ALOGE("failed to query generation and usage: %d", err);
- mConfigureProducerError = true;
- return;
+ // Dequeue a buffer from the new producer to obtain its generation number and usage. When
+ // attaching buffers, the generation number and usage must match the producer's.
+ uint32_t newGeneration;
+ uint64_t newUsage;
+ c2_status_t err = queryGenerationAndUsage(newProducer, mBufferFormat.mWidth,
+ mBufferFormat.mHeight, mBufferFormat.mPixelFormat,
+ mBufferFormat.mUsage, &newGeneration, &newUsage);
+ if (err != C2_OK) {
+ ALOGE("queryGenerationAndUsage failed: %d", err);
+ return false;
+ }
+
+ // Attach all buffers to new producer.
+ mProducerChangeSlotMap.clear();
+ int32_t slot;
+ std::map<int32_t, std::shared_ptr<C2GraphicAllocation>> newSlotAllocations;
+ for (auto iter = mSlotAllocations.begin(); iter != mSlotAllocations.end(); ++iter) {
+ // Convert C2GraphicAllocation to GraphicBuffer.
+ uint32_t width, height, format, stride, igbp_slot, generation;
+ uint64_t usage, igbp_id;
+ android::_UnwrapNativeCodec2GrallocMetadata(iter->second->handle(), &width, &height,
+ &format, &usage, &stride, &generation, &igbp_id,
+ &igbp_slot);
+ native_handle_t* grallocHandle =
+ android::UnwrapNativeCodec2GrallocHandle(iter->second->handle());
+
+ // Update generation number and usage.
+ sp<GraphicBuffer> graphicBuffer =
+ new GraphicBuffer(grallocHandle, GraphicBuffer::CLONE_HANDLE, width, height, format,
+ 1, newUsage, stride);
+ if (graphicBuffer->initCheck() != android::NO_ERROR) {
+ ALOGE("Failed to create GraphicBuffer: %d", graphicBuffer->initCheck());
+ return false;
}
+ graphicBuffer->setGenerationNumber(newGeneration);
+ native_handle_delete(grallocHandle);
- if (!mTrackedGraphicBuffers.migrateLocalBuffers(mProducer.get(), mProducerId, newGeneration,
- newUsage)) {
- ALOGE("%s(): failed to migrateLocalBuffers()", __func__);
- mConfigureProducerError = true;
- return;
+ if (newProducer->attachBuffer(graphicBuffer, &slot) != android::NO_ERROR) {
+ return false;
+ }
+ // Convert back to C2GraphicAllocation wrapping new producer id, generation number, usage
+ // and slot index.
+ ALOGV("buffer wraps { producer id: %" PRIu64 ", slot: %d }", newProducerId, slot);
+ C2Handle* c2Handle = android::WrapNativeCodec2GrallocHandle(
+ graphicBuffer->handle, width, height, format, newUsage, stride, newGeneration,
+ newProducerId, slot);
+ if (!c2Handle) {
+ ALOGE("WrapNativeCodec2GrallocHandle failed");
+ return false;
+ }
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
+ if (err != C2_OK) {
+ ALOGE("priorGraphicAllocation failed: %d", err);
+ return false;
}
- if (mTrackedGraphicBuffers.size() == mBuffersRequested) {
- if (allowAllocation(false) != OK) {
- ALOGE("%s(): failed to allowAllocation(false)", __func__);
- mConfigureProducerError = true;
- return;
- }
+ // Store into |newSlotAllocations| and also record the old-to-new producer slot mapping.
+ ALOGV("Transferred buffer from old producer to new, slot prev: %d -> new %d", iter->first,
+ slot);
+ newSlotAllocations[slot] = std::move(alloc);
+ mProducerChangeSlotMap[iter->first] = slot;
+ }
+
+ // Set allowAllocation to false so the producer cannot allocate new buffers.
+ if (newProducer->allowAllocation(false) != android::NO_ERROR) {
+ ALOGE("allowAllocation(false) failed");
+ return false;
+ }
+
+ // Try to detach all buffers from old producer.
+ for (const auto& slotAllocation : mSlotAllocations) {
+ status_t status = mProducer->detachBuffer(slotAllocation.first);
+ if (status != android::NO_ERROR) {
+ ALOGW("detachBuffer slot=%d from old producer failed: %d", slotAllocation.first,
+ status);
}
}
- // hack(b/146409777): Try to connect ARC-specific listener first.
- sp<BufferReleasedNotifier> listener = new BufferReleasedNotifier(weak_from_this());
- if (mProducer->connect(listener, 'ARC\0', false) == OK) {
- ALOGI("connected to ARC-specific IGBP listener.");
- mFetchBufferNotifier = listener;
+ mSlotAllocations = std::move(newSlotAllocations);
+ return true;
+}
+
+c2_status_t C2VdaBqBlockPool::Impl::updateGraphicBlock(
+ bool willCancel, uint32_t oldSlot, uint32_t* newSlot,
+ std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ if (mProducerChangeSlotMap.empty()) {
+ ALOGD("A new buffer set is requested right after producer change, no more update needed.");
+ return C2_CANCELED;
+ }
+
+ auto it = mProducerChangeSlotMap.find(static_cast<int32_t>(oldSlot));
+ if (it == mProducerChangeSlotMap.end()) {
+ ALOGE("Cannot find old slot = %u in map...", oldSlot);
+ return C2_NOT_FOUND;
}
- // There might be free buffers at the new producer, notify the client if needed.
- onEventNotified();
+ int32_t slot = it->second;
+ *newSlot = static_cast<uint32_t>(slot);
+ mProducerChangeSlotMap.erase(it);
+
+ if (willCancel) {
+ sp<Fence> fence = new Fence();
+ // The old C2GraphicBlock might be owned by client. Cancel this slot.
+ if (mProducer->cancelBuffer(slot, fence) != android::NO_ERROR) {
+ return C2_CORRUPTED;
+ }
+ // The client might try to attach the old buffer to the current producer on its end, even
+ // though the buffer is useless to us now. It will still occupy an available slot.
+ mBuffersInClient++;
+ } else {
+ // The old C2GraphicBlock is still owned by the component; replace it with the new one and
+ // keep this slot dequeued.
+ auto poolData =
+ std::make_shared<C2VdaBqBlockPoolData>(mProducerId, slot, shared_from_this());
+ *block = _C2BlockFactory::CreateGraphicBlock(mSlotAllocations[slot], std::move(poolData));
+ }
+
+ if (mProducerChangeSlotMap.empty()) {
+ // The updateGraphicBlock() routine is about to finish.
+ // Set the correct maxDequeuedBufferCount on the producer, which is "requested buffer count" +
+ // "buffer count in client".
+ ALOGV("Requested buffer count: %zu, buffer count in client: %u", mSlotAllocations.size(),
+ mBuffersInClient);
+ if (mProducer->setMaxDequeuedBufferCount(mSlotAllocations.size() + mBuffersInClient) !=
+ android::NO_ERROR) {
+ return C2_CORRUPTED;
+ }
+ mProducerSwitched = true;
+ }
+
+ return C2_OK;
+}
+
+c2_status_t C2VdaBqBlockPool::Impl::getMinBuffersForDisplay(size_t* bufferCount) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (!mProducer) {
+ ALOGD("No HGraphicBufferProducer is configured...");
+ return C2_NO_INIT;
+ }
+
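+ // NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS is the number of buffers the consumer side keeps,
+ // i.e. buffers that the producer cannot dequeue at any given time.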
+ int32_t status, value;
+ status = mProducer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &value);
+ if (status != android::NO_ERROR) {
+ ALOGE("query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS) failed: %d", status);
+ return asC2Error(status);
+ }
+ if (value <= 0) {
+ ALOGE("Illegal value of NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS = %d", value);
+ return C2_BAD_VALUE;
+ }
+ *bufferCount = static_cast<size_t>(value);
+ return C2_OK;
+}
+
+void C2VdaBqBlockPool::Impl::detachBuffer(uint64_t producerId, int32_t slotId) {
+ ALOGV("detachBuffer: producer id = %" PRIu64 ", slot = %d", producerId, slotId);
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (producerId == mProducerId && mProducer) {
+ if (mProducer->detachBuffer(slotId) != android::NO_ERROR) {
+ return;
+ }
+
+ auto it = mSlotAllocations.find(slotId);
+ // It may happen that the slot is not included in |mSlotAllocations|, which means it was
+ // released after a resolution change.
+ if (it != mSlotAllocations.end()) {
+ mSlotAllocations.erase(it);
+ }
+ }
}
bool C2VdaBqBlockPool::Impl::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
@@ -915,7 +1092,6 @@ bool C2VdaBqBlockPool::Impl::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
// If there is any buffer released after dequeueBuffer() timed out, then we could notify the
// caller directly.
if (mBufferReleasedAfterTimedOut) {
- mBufferReleasedAfterTimedOut = false;
outputCb = std::move(cb);
} else {
mNotifyBlockAvailableCb = std::move(cb);
@@ -929,29 +1105,6 @@ bool C2VdaBqBlockPool::Impl::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
return true;
}
-std::optional<unique_id_t> C2VdaBqBlockPool::Impl::getBufferIdFromGraphicBlock(
- const C2Block2D& block) {
- return mDrmHandleManager.getHandle(block.handle()->data[0]);
-}
-
-status_t C2VdaBqBlockPool::Impl::allowAllocation(bool allow) {
- ALOGV("%s(%d)", __func__, allow);
-
- if (!mProducer) {
- ALOGW("%s() mProducer is not initiailzed", __func__);
- return NO_INIT;
- }
- if (mAllowAllocation == allow) {
- return OK;
- }
-
- const auto status = mProducer->allowAllocation(allow);
- if (status == OK) {
- mAllowAllocation = allow;
- }
- return status;
-}
-
C2VdaBqBlockPool::C2VdaBqBlockPool(const std::shared_ptr<C2Allocator>& allocator,
const local_id_t localId)
: C2BufferQueueBlockPool(allocator, localId), mLocalId(localId), mImpl(new Impl(allocator)) {}
@@ -972,11 +1125,9 @@ void C2VdaBqBlockPool::setRenderCallback(
}
}
-c2_status_t C2VdaBqBlockPool::requestNewBufferSet(int32_t bufferCount, uint32_t width,
- uint32_t height, uint32_t format,
- C2MemoryUsage usage) {
+c2_status_t C2VdaBqBlockPool::requestNewBufferSet(int32_t bufferCount) {
if (mImpl) {
- return mImpl->requestNewBufferSet(bufferCount, width, height, format, usage);
+ return mImpl->requestNewBufferSet(bufferCount);
}
return C2_NO_INIT;
}
@@ -987,6 +1138,22 @@ void C2VdaBqBlockPool::configureProducer(const sp<HGraphicBufferProducer>& produ
}
}
+c2_status_t C2VdaBqBlockPool::updateGraphicBlock(
+ bool willCancel, uint32_t oldSlot, uint32_t* newSlot,
+ std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
+ if (mImpl) {
+ return mImpl->updateGraphicBlock(willCancel, oldSlot, newSlot, block);
+ }
+ return C2_NO_INIT;
+}
+
+c2_status_t C2VdaBqBlockPool::getMinBuffersForDisplay(size_t* bufferCount) {
+ if (mImpl) {
+ return mImpl->getMinBuffersForDisplay(bufferCount);
+ }
+ return C2_NO_INIT;
+}
+
bool C2VdaBqBlockPool::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
if (mImpl) {
return mImpl->setNotifyBlockAvailableCb(std::move(cb));
@@ -994,11 +1161,15 @@ bool C2VdaBqBlockPool::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
return false;
}
-std::optional<unique_id_t> C2VdaBqBlockPool::getBufferIdFromGraphicBlock(const C2Block2D& block) {
- if (mImpl) {
- return mImpl->getBufferIdFromGraphicBlock(block);
+C2VdaBqBlockPoolData::C2VdaBqBlockPoolData(uint64_t producerId, int32_t slotId,
+ const std::shared_ptr<C2VdaBqBlockPool::Impl>& pool)
+ : mProducerId(producerId), mSlotId(slotId), mPool(pool) {}
+
+C2VdaBqBlockPoolData::~C2VdaBqBlockPoolData() {
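+ // Skip detaching if the block was marked as shared (i.e. passed to the client) or the pool
+ // has already been released.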
+ if (mShared || !mPool) {
+ return;
}
- return std::nullopt;
+ mPool->detachBuffer(mProducerId, mSlotId);
}
} // namespace android
diff --git a/plugin_store/C2VdaPooledBlockPool.cpp b/plugin_store/C2VdaPooledBlockPool.cpp
index 48cc2e5..08fdfa0 100644
--- a/plugin_store/C2VdaPooledBlockPool.cpp
+++ b/plugin_store/C2VdaPooledBlockPool.cpp
@@ -9,6 +9,7 @@
#include <time.h>
+#include <C2AllocatorGralloc.h>
#include <C2BlockInternal.h>
#include <bufferpool/BufferPoolTypes.h>
#include <log/log.h>
diff --git a/plugin_store/DrmGrallocHelpers.cpp b/plugin_store/DrmGrallocHelpers.cpp
deleted file mode 100644
index 0565a69..0000000
--- a/plugin_store/DrmGrallocHelpers.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "DrmGrallocHelper"
-
-#include <v4l2_codec2/plugin_store/DrmGrallocHelpers.h>
-
-#include <fcntl.h>
-#include <string.h>
-
-#include <drm/drm.h>
-#include <log/log.h>
-
-namespace android {
-
-std::optional<int> openRenderFd() {
- const char kVirglName[] = "virtio_gpu";
-
- for (uint32_t i = 128; i < 192; i++) {
- char devName[32];
- snprintf(devName, sizeof(devName), "/dev/dri/renderD%d", i);
-
- int fd = open(devName, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
- continue;
- }
-
- char name[32];
- struct drm_version v;
- memset(&v, 0, sizeof(v));
- v.name = name;
- v.name_len = sizeof(name);
-
- if (ioctl(fd, static_cast<int>(DRM_IOCTL_VERSION), &v)) {
- close(fd);
- continue;
- }
- if (v.name_len != sizeof(kVirglName) - 1 || memcmp(name, kVirglName, v.name_len)) {
- close(fd);
- continue;
- }
- return fd;
- }
- return std::nullopt;
-}
-
-std::optional<uint32_t> getDrmHandle(int renderFd, int primeFd) {
- ALOGV("%s(renderFd=%d, primeFd=%u)", __func__, renderFd, primeFd);
-
- struct drm_prime_handle prime;
- memset(&prime, 0, sizeof(prime));
- prime.fd = primeFd;
-
- if (ioctl(renderFd, static_cast<int>(DRM_IOCTL_PRIME_FD_TO_HANDLE), &prime)) {
- ALOGE("Can't translate prime fd %d to handle", prime.fd);
- return std::nullopt;
- }
- return prime.handle;
-}
-
-void closeDrmHandle(int renderFd, uint32_t handle) {
- ALOGV("%s(renderFd=%d, handle=%u)", __func__, renderFd, handle);
-
- struct drm_gem_close gem;
- memset(&gem, 0, sizeof(gem));
- gem.handle = handle;
-
- ioctl(renderFd, DRM_IOCTL_GEM_CLOSE, &gem);
-}
-
-} // namespace android
diff --git a/plugin_store/H2BGraphicBufferProducer.cpp b/plugin_store/H2BGraphicBufferProducer.cpp
deleted file mode 100644
index 95251de..0000000
--- a/plugin_store/H2BGraphicBufferProducer.cpp
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "H2BGraphicBuferProducer"
-
-#include <v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h>
-
-#include <log/log.h>
-#include <types.h>
-#include <ui/BufferQueueDefs.h>
-
-namespace android {
-
-using ::android::BufferQueueDefs::BUFFER_NEEDS_REALLOCATION;
-using ::android::BufferQueueDefs::RELEASE_ALL_BUFFERS;
-using ::android::hardware::Return;
-
-using HBuffer = ::android::hardware::graphics::common::V1_2::HardwareBuffer;
-using HStatus = ::android::hardware::graphics::bufferqueue::V2_0::Status;
-using HConnectionType = hardware::graphics::bufferqueue::V2_0::ConnectionType;
-using HQueueBufferOutput =
- ::android::hardware::graphics::bufferqueue::V2_0::IGraphicBufferProducer::QueueBufferOutput;
-
-using ::android::hardware::graphics::bufferqueue::V2_0::utils::b2h;
-using ::android::hardware::graphics::bufferqueue::V2_0::utils::h2b;
-using ::android::hardware::graphics::bufferqueue::V2_0::utils::HFenceWrapper;
-
-H2BGraphicBufferProducer::H2BGraphicBufferProducer(sp<HGraphicBufferProducer> base) : mBase(base) {}
-
-status_t H2BGraphicBufferProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
- bool converted = false;
- status_t status = UNKNOWN_ERROR;
- Return<void> transResult = mBase->requestBuffer(
- slot, [&converted, &status, buf](HStatus hStatus, HBuffer const& hBuffer,
- uint32_t generationNumber) {
- converted = h2b(hStatus, &status) && h2b(hBuffer, buf);
- if (*buf) {
- (*buf)->setGenerationNumber(generationNumber);
- }
- });
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!converted) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- if (status != OK) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
- status_t status = UNKNOWN_ERROR;
- Return<HStatus> transResult =
- mBase->setMaxDequeuedBufferCount(static_cast<int32_t>(maxDequeuedBuffers));
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!h2b(static_cast<HStatus>(transResult), &status)) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- if (status != OK) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::dequeueBuffer(uint32_t width, uint32_t height,
- uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage, int* slot,
- sp<Fence>* fence) {
- using Input = HGraphicBufferProducer::DequeueBufferInput;
- using Output = HGraphicBufferProducer::DequeueBufferOutput;
- Input input{width, height, pixelFormat, androidUsage.asGrallocUsage()};
-
- bool converted = false;
- status_t status = UNKNOWN_ERROR;
- Return<void> transResult = mBase->dequeueBuffer(
- input, [&converted, &status, &slot, &fence](HStatus hStatus, int32_t hSlot,
- Output const& hOutput) {
- converted = h2b(hStatus, &status);
- if (!converted || status != OK) {
- return;
- }
-
- *slot = hSlot;
- if (hOutput.bufferNeedsReallocation) {
- status = BUFFER_NEEDS_REALLOCATION;
- }
- converted = h2b(hOutput.fence, fence);
- });
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!converted) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- // The C2VdaBqBlockPool does not fully own the bufferqueue. After buffers are dequeued here,
- // they are passed into the codec2 framework, processed, and eventually queued into the
- // bufferqueue. The C2VdaBqBlockPool cannot determine exactly when a buffer gets queued.
- // However, if every buffer is being processed by the codec2 framework, then dequeueBuffer()
- // will return INVALID_OPERATION because of an attempt to dequeue too many buffers.
- // The C2VdaBqBlockPool cannot prevent this from happening, so just map it to TIMED_OUT
- // and let the C2VdaBqBlockPool's caller's timeout retry logic handle the failure.
- if (status == INVALID_OPERATION) {
- status = TIMED_OUT;
- }
- if (status != OK && status != BUFFER_NEEDS_REALLOCATION && status != TIMED_OUT) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::detachBuffer(int slot) {
- status_t status = UNKNOWN_ERROR;
- Return<HStatus> transResult = mBase->detachBuffer(static_cast<int32_t>(slot));
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!h2b(static_cast<HStatus>(transResult), &status)) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- if (status != OK) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::attachBuffer(const sp<GraphicBuffer>& buffer, int* outSlot) {
- HBuffer hBuffer;
- uint32_t hGenerationNumber;
- if (!b2h(buffer, &hBuffer, &hGenerationNumber)) {
- ALOGE("%s: invalid input buffer.", __func__);
- return BAD_VALUE;
- }
-
- bool converted = false;
- status_t status = UNKNOWN_ERROR;
- Return<void> transResult = mBase->attachBuffer(
- hBuffer, hGenerationNumber,
- [&converted, &status, outSlot](HStatus hStatus, int32_t hSlot, bool releaseAllBuffers) {
- converted = h2b(hStatus, &status);
- *outSlot = static_cast<int>(hSlot);
- if (converted && releaseAllBuffers && status == OK) {
- status = RELEASE_ALL_BUFFERS;
- }
- });
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!converted) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- if (status != OK) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
- HFenceWrapper hFenceWrapper;
- if (!b2h(fence, &hFenceWrapper)) {
- ALOGE("%s(): corrupted input fence.", __func__);
- return UNKNOWN_ERROR;
- }
-
- status_t status = UNKNOWN_ERROR;
- Return<HStatus> transResult =
- mBase->cancelBuffer(static_cast<int32_t>(slot), hFenceWrapper.getHandle());
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!h2b(static_cast<HStatus>(transResult), &status)) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- if (status != OK) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-int H2BGraphicBufferProducer::query(int what, int* value) {
- int result = 0;
- Return<void> transResult =
- mBase->query(static_cast<int32_t>(what), [&result, value](int32_t r, int32_t v) {
- result = static_cast<int>(r);
- *value = static_cast<int>(v);
- });
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- return result;
-}
-
-status_t H2BGraphicBufferProducer::allowAllocation(bool allow) {
- status_t status = UNKNOWN_ERROR;
- Return<HStatus> transResult = mBase->allowAllocation(allow);
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!h2b(static_cast<HStatus>(transResult), &status)) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- if (status != OK) {
- ALOGD("%s() failed: %d", __func__, status);
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::getUniqueId(uint64_t* outId) const {
- Return<uint64_t> transResult = mBase->getUniqueId();
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
-
- *outId = static_cast<uint64_t>(transResult);
- return OK;
-}
-
-// android::IProducerListener cannot be depended by vendor library, so we use HProducerListener
-// directly.
-status_t H2BGraphicBufferProducer::connect(sp<HProducerListener> const& hListener, int32_t api,
- bool producerControlledByApp) {
- bool converted = false;
- status_t status = UNKNOWN_ERROR;
- // hack(b/146409777): We pass self-defined api, so we don't use b2h() here.
- Return<void> transResult = mBase->connect(
- hListener, static_cast<HConnectionType>(api), producerControlledByApp,
- [&converted, &status](HStatus hStatus, HQueueBufferOutput const& /* hOutput */) {
- converted = h2b(hStatus, &status);
- });
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!converted) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- return status;
-}
-
-status_t H2BGraphicBufferProducer::setDequeueTimeout(nsecs_t timeout) {
- status_t status = UNKNOWN_ERROR;
- Return<HStatus> transResult = mBase->setDequeueTimeout(static_cast<int64_t>(timeout));
-
- if (!transResult.isOk()) {
- ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
- return FAILED_TRANSACTION;
- }
- if (!h2b(static_cast<HStatus>(transResult), &status)) {
- ALOGE("%s(): corrupted transaction.", __func__);
- return FAILED_TRANSACTION;
- }
- return status;
-}
-
-} // namespace android
diff --git a/plugin_store/V4L2PluginStore.cpp b/plugin_store/V4L2PluginStore.cpp
index 2d53c5f..4475e2f 100644
--- a/plugin_store/V4L2PluginStore.cpp
+++ b/plugin_store/V4L2PluginStore.cpp
@@ -32,7 +32,7 @@ C2Allocator* createAllocator(C2Allocator::id_t allocatorId) {
return sAllocatorLoader->createAllocator(allocatorId);
}
- ALOGI("%s(): Fallback to create C2AllocatorGralloc(id=%u)", __func__, allocatorId);
+ ALOGI("%s(): Fallback to create C2AllocatorGralloc (id=%u)", __func__, allocatorId);
return new C2AllocatorGralloc(allocatorId, true);
}
diff --git a/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h b/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h
index fde6299..fd524d2 100644
--- a/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h
+++ b/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h
@@ -17,6 +17,14 @@
namespace android {
/**
+ * Marks the BlockPoolData in |sharedBlock| as shared. The destructor of BlockPoolData will not
+ * call detachBuffer on the BufferQueue if it is shared.
+ *
+ * \param sharedBlock the C2ConstGraphicBlock which is about to pass to client.
+ */
+c2_status_t MarkBlockPoolDataAsShared(const C2ConstGraphicBlock& sharedBlock);
+
+/**
* The BufferQueue-backed block pool design, which supports requesting an arbitrary number of
* graphic buffers from IGBP and sharing this buffer set between the codec component and the client.
*
@@ -31,6 +39,18 @@ public:
~C2VdaBqBlockPool() override = default;
/**
+ * Extracts the slot index, used as the pool ID, from the graphic block.
+ *
+ * \note C2VdaBqBlockPool-specific function
+ *
+ * \param block the graphic block allocated by the bufferqueue block pool.
+ *
+ * \return the buffer's slot index in the bufferqueue if extraction is successful,
+ * std::nullopt otherwise.
+ */
+ static std::optional<uint32_t> getBufferIdFromGraphicBlock(const C2Block2D& block);
+
+ /**
* This is a trick: return C2PlatformAllocatorStore::BUFFERQUEUE instead of the ID of the
* backing allocator for the client's query, because on the platform side this ID is recognized
* as a BufferQueue-backed block pool, which is the only kind allowed to have a surface set.
@@ -75,8 +95,42 @@ public:
* \retval C2_BAD_VALUE |bufferCount| is not greater than zero.
* \retval C2_CORRUPTED some unknown, unrecoverable error occurred during operation (unexpected).
*/
- c2_status_t requestNewBufferSet(int32_t bufferCount, uint32_t width, uint32_t height,
- uint32_t format, C2MemoryUsage usage);
+ c2_status_t requestNewBufferSet(int32_t bufferCount);
+
+ /**
+ * Updates a buffer after a producer switch.
+ *
+ * \note C2VdaBqBlockPool-specific function
+ *
+ * \param willCancel if true, the corresponding slot will be canceled on the new producer.
+ * Otherwise the new graphic block will be returned via |block|.
+ * \param oldSlot the caller-provided slot index from the old producer.
+ * \param newSlot filled with the corresponding slot index of the new producer.
+ * \param block if |willCancel| is false, the new graphic block is stored here.
+ *
+ * \retval C2_OK the operation was successful.
+ * \retval C2_NO_INIT this class is not initialized.
+ * \retval C2_NOT_FOUND cannot find |oldSlot| in the slot changing map.
+ * \retval C2_CANCELED indicates the buffer format changed and a new buffer set was allocated;
+ * no more updates are needed.
+ * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during operation (unexpected).
+ */
+ c2_status_t updateGraphicBlock(bool willCancel, uint32_t oldSlot, uint32_t* newSlot,
+ std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
+
+ /**
+ * Gets minimum undequeued buffer count for display from producer.
+ *
+ * \note C2VdaBqBlockPool-specific function
+ *
+ * \param bufferCount filled with the minimum undequeued buffer count for display.
+ *
+ * \retval C2_OK the operation was successful.
+ * \retval C2_NO_INIT this class is not initialized, or producer is not assigned.
+ * \retval C2_BAD_VALUE the queried value is illegal (not greater than zero).
+ * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during operation (unexpected).
+ */
+ c2_status_t getMinBuffersForDisplay(size_t* bufferCount);
/**
* Set the callback that will be triggered when there is block available.
@@ -90,8 +144,6 @@ public:
*/
bool setNotifyBlockAvailableCb(base::OnceClosure cb);
- std::optional<uint32_t> getBufferIdFromGraphicBlock(const C2Block2D& block);
-
private:
friend struct C2VdaBqBlockPoolData;
class Impl;
diff --git a/plugin_store/include/v4l2_codec2/plugin_store/DrmGrallocHelpers.h b/plugin_store/include/v4l2_codec2/plugin_store/DrmGrallocHelpers.h
deleted file mode 100644
index 46d2967..0000000
--- a/plugin_store/include/v4l2_codec2/plugin_store/DrmGrallocHelpers.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_PLUGIN_STORE_STORE_DRM_GRALLOC_HELPERS_H
-#define ANDROID_V4L2_CODEC2_PLUGIN_STORE_STORE_DRM_GRALLOC_HELPERS_H
-
-#include <stdint.h>
-
-#include <optional>
-
-namespace android {
-
-std::optional<int> openRenderFd();
-std::optional<uint32_t> getDrmHandle(int renderFd, int primeFd);
-void closeDrmHandle(int renderFd, uint32_t handle);
-
-} // namespace android
-#endif // ANDROID_V4L2_CODEC2_PLUGIN_STORE_STORE_DRM_GRALLOC_HELPERS_H
diff --git a/plugin_store/include/v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h b/plugin_store/include/v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h
deleted file mode 100644
index 11185bb..0000000
--- a/plugin_store/include/v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_PLUGIN_STORE_H2B_GRAPHIC_BUFFER_PRODUCER_H
-#define ANDROID_V4L2_CODEC2_PLUGIN_STORE_H2B_GRAPHIC_BUFFER_PRODUCER_H
-
-#include <C2Buffer.h>
-#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
-#include <android/hardware/graphics/bufferqueue/2.0/IProducerListener.h>
-#include <ui/Fence.h>
-#include <ui/GraphicBuffer.h>
-#include <utils/StrongPointer.h>
-
-namespace android {
-
-class H2BGraphicBufferProducer {
-public:
- using HGraphicBufferProducer =
- ::android::hardware::graphics::bufferqueue::V2_0::IGraphicBufferProducer;
- using HProducerListener = ::android::hardware::graphics::bufferqueue::V2_0::IProducerListener;
-
- explicit H2BGraphicBufferProducer(sp<HGraphicBufferProducer> base);
- ~H2BGraphicBufferProducer() = default;
-
- // Convert HIDL interface of IGraphicBufferProducer.
- status_t requestBuffer(int slot, sp<GraphicBuffer>* buf);
- status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers);
- status_t dequeueBuffer(uint32_t width, uint32_t height, uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage, int* slot, sp<Fence>* fence);
- status_t detachBuffer(int slot);
- status_t attachBuffer(const sp<GraphicBuffer>& buffer, int* outSlot);
- status_t cancelBuffer(int slot, const sp<Fence>& fence);
- int query(int what, int* value);
- status_t allowAllocation(bool allow);
- status_t getUniqueId(uint64_t* outId) const;
- status_t connect(sp<HProducerListener> const& hListener, int32_t api,
- bool producerControlledByApp);
- status_t setDequeueTimeout(nsecs_t timeout);
-
- sp<HGraphicBufferProducer> getBase() { return mBase; }
-
-private:
- const sp<HGraphicBufferProducer> mBase;
-};
-
-} // namespace android
-#endif // ANDROID_V4L2_CODEC2_PLUGIN_STORE_H2B_GRAPHIC_BUFFER_PRODUCER_H
diff --git a/service/Android.bp b/service/Android.bp
index 29ee3ff..3fcc66c 100644
--- a/service/Android.bp
+++ b/service/Android.bp
@@ -1,12 +1,3 @@
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "external_v4l2_codec2_license"
- // to get the below license kinds:
- // SPDX-license-identifier-BSD
- default_applicable_licenses: ["external_v4l2_codec2_license"],
-}
-
cc_binary {
name: "android.hardware.media.c2@1.0-service-v4l2",
@@ -21,8 +12,10 @@ cc_binary {
"service.cpp",
],
+ init_rc: ["android.hardware.media.c2@1.0-service-v4l2.rc"],
+
shared_libs: [
- "libv4l2_codec2_components",
+ "libv4l2_codec2_store",
"libavservices_minijail_vendor",
"libchrome",
"libcutils",
@@ -31,17 +24,6 @@ cc_binary {
"libutils",
],
- required: ["android.hardware.media.c2@1.2-default-seccomp_policy"],
-
- compile_multilib: "both",
- multilib: {
- lib32: {
- suffix: "-32",
- init_rc: ["android.hardware.media.c2@1.0-service-v4l2-32.rc"],
- },
- lib64: {
- suffix: "-64",
- init_rc: ["android.hardware.media.c2@1.0-service-v4l2-64.rc"],
- },
- },
+ required: ["android.hardware.media.c2@1.1-default-seccomp_policy"],
+ compile_multilib: "32",
}
diff --git a/service/android.hardware.media.c2@1.0-service-v4l2-64.rc b/service/android.hardware.media.c2@1.0-service-v4l2-64.rc
deleted file mode 100644
index a9e1c6f..0000000
--- a/service/android.hardware.media.c2@1.0-service-v4l2-64.rc
+++ /dev/null
@@ -1,7 +0,0 @@
-service android-hardware-media-c2-v4l2-hal-1-0 /vendor/bin/hw/android.hardware.media.c2@1.0-service-v4l2-64
- class hal
- user media
- group mediadrm drmrpc
- ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
- setenv MESA_GLSL_CACHE_DISABLE 1
diff --git a/service/android.hardware.media.c2@1.0-service-v4l2-32.rc b/service/android.hardware.media.c2@1.0-service-v4l2.rc
index 266a73e..bec4604 100644
--- a/service/android.hardware.media.c2@1.0-service-v4l2-32.rc
+++ b/service/android.hardware.media.c2@1.0-service-v4l2.rc
@@ -1,4 +1,4 @@
-service android-hardware-media-c2-v4l2-hal-1-0 /vendor/bin/hw/android.hardware.media.c2@1.0-service-v4l2-32
+service android-hardware-media-c2-v4l2-hal-1-0 /vendor/bin/hw/android.hardware.media.c2@1.0-service-v4l2
class hal
user media
group mediadrm drmrpc
diff --git a/service/service.cpp b/service/service.cpp
index 616e641..ba78759 100644
--- a/service/service.cpp
+++ b/service/service.cpp
@@ -12,11 +12,11 @@
#include <log/log.h>
#include <minijail.h>
-#include <v4l2_codec2/components/V4L2ComponentStore.h>
+#include <v4l2_codec2/store/V4L2ComponentStore.h>
// Default policy for codec2.0 service.
static constexpr char kBaseSeccompPolicyPath[] =
- "/vendor/etc/seccomp_policy/android.hardware.media.c2@1.2-default-seccomp_policy";
+ "/vendor/etc/seccomp_policy/android.hardware.media.c2@1.1-default-seccomp_policy";
// Additional device-specific seccomp permissions can be added in this file.
static constexpr char kExtSeccompPolicyPath[] =
diff --git a/store/Android.bp b/store/Android.bp
new file mode 100644
index 0000000..c2a6169
--- /dev/null
+++ b/store/Android.bp
@@ -0,0 +1,30 @@
+cc_library_shared {
+ name: "libv4l2_codec2_store",
+ vendor: true,
+
+ defaults: [
+ "libcodec2-impl-defaults",
+ ],
+
+ srcs: [
+ "V4L2ComponentStore.cpp",
+ ],
+ export_include_dirs: [
+ "include",
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+ static_libs: [
+ "libv4l2_codec2_common",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wthread-safety", // Check thread annotation at build time.
+ ],
+}
+
diff --git a/components/V4L2ComponentStore.cpp b/store/V4L2ComponentStore.cpp
index 4004ce5..8942866 100644
--- a/components/V4L2ComponentStore.cpp
+++ b/store/V4L2ComponentStore.cpp
@@ -5,8 +5,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "V4L2ComponentStore"
-#include <v4l2_codec2/components/V4L2ComponentStore.h>
+#include <v4l2_codec2/store/V4L2ComponentStore.h>
+#include <dlfcn.h>
#include <stdint.h>
#include <memory>
@@ -15,31 +16,16 @@
#include <C2.h>
#include <C2Config.h>
#include <log/log.h>
-#include <media/stagefright/foundation/MediaDefs.h>
#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/components/V4L2ComponentFactory.h>
namespace android {
namespace {
-const uint32_t kComponentRank = 0x80;
-
-std::string getMediaTypeFromComponentName(const std::string& name) {
- if (name == V4L2ComponentName::kH264Decoder || name == V4L2ComponentName::kH264SecureDecoder ||
- name == V4L2ComponentName::kH264Encoder) {
- return MEDIA_MIMETYPE_VIDEO_AVC;
- }
- if (name == V4L2ComponentName::kVP8Decoder || name == V4L2ComponentName::kVP8SecureDecoder ||
- name == V4L2ComponentName::kVP8Encoder) {
- return MEDIA_MIMETYPE_VIDEO_VP8;
- }
- if (name == V4L2ComponentName::kVP9Decoder || name == V4L2ComponentName::kVP9SecureDecoder ||
- name == V4L2ComponentName::kVP9Encoder) {
- return MEDIA_MIMETYPE_VIDEO_VP9;
- }
- return "";
-}
+const char* kLibPath = "libv4l2_codec2_components.so";
+const char* kCreateFactoryFuncName = "CreateCodec2Factory";
+const char* kDestroyFactoryFuncName = "DestroyCodec2Factory";
+const uint32_t kComponentRank = 0x80;
} // namespace
// static
@@ -53,12 +39,32 @@ std::shared_ptr<C2ComponentStore> V4L2ComponentStore::Create() {
std::shared_ptr<C2ComponentStore> store = platformStore.lock();
if (store != nullptr) return store;
- store = std::shared_ptr<C2ComponentStore>(new V4L2ComponentStore());
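+ // Load the component implementation library at runtime and resolve its factory entry points,
+ // so the store does not link directly against libv4l2_codec2_components.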
+ void* libHandle = dlopen(kLibPath, RTLD_NOW | RTLD_NODELETE);
+ if (!libHandle) {
+ ALOGE("Failed to load library: %s", kLibPath);
+ return nullptr;
+ }
+
+ auto createFactoryFunc = (CreateV4L2FactoryFunc)dlsym(libHandle, kCreateFactoryFuncName);
+ auto destroyFactoryFunc = (DestroyV4L2FactoryFunc)dlsym(libHandle, kDestroyFactoryFuncName);
+ if (!createFactoryFunc || !destroyFactoryFunc) {
+ ALOGE("Failed to load functions: %s, %s", kCreateFactoryFuncName, kDestroyFactoryFuncName);
+ dlclose(libHandle);
+ return nullptr;
+ }
+
+ store = std::shared_ptr<C2ComponentStore>(
+ new V4L2ComponentStore(libHandle, createFactoryFunc, destroyFactoryFunc));
platformStore = store;
return store;
}
-V4L2ComponentStore::V4L2ComponentStore() : mReflector(std::make_shared<C2ReflectorHelper>()) {
+V4L2ComponentStore::V4L2ComponentStore(void* libHandle, CreateV4L2FactoryFunc createFactoryFunc,
+ DestroyV4L2FactoryFunc destroyFactoryFunc)
+ : mLibHandle(libHandle),
+ mCreateFactoryFunc(createFactoryFunc),
+ mDestroyFactoryFunc(destroyFactoryFunc),
+ mReflector(std::make_shared<C2ReflectorHelper>()) {
ALOGV("%s()", __func__);
}
@@ -66,7 +72,10 @@ V4L2ComponentStore::~V4L2ComponentStore() {
ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mCachedFactoriesLock);
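+ // Destroy every cached factory via the library-provided destroy function before unloading it.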
+ for (const auto& kv : mCachedFactories) mDestroyFactoryFunc(kv.second);
mCachedFactories.clear();
+
+ dlclose(mLibHandle);
}
C2String V4L2ComponentStore::getName() const {
@@ -77,11 +86,6 @@ c2_status_t V4L2ComponentStore::createComponent(C2String name,
std::shared_ptr<C2Component>* const component) {
ALOGV("%s(%s)", __func__, name.c_str());
- if (!V4L2ComponentName::isValid(name.c_str())) {
- ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
- return C2_NOT_FOUND;
- }
-
auto factory = GetFactory(name);
if (factory == nullptr) return C2_CORRUPTED;
@@ -93,11 +97,6 @@ c2_status_t V4L2ComponentStore::createInterface(
C2String name, std::shared_ptr<C2ComponentInterface>* const interface) {
ALOGV("%s(%s)", __func__, name.c_str());
- if (!V4L2ComponentName::isValid(name.c_str())) {
- ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
- return C2_NOT_FOUND;
- }
-
auto factory = GetFactory(name);
if (factory == nullptr) return C2_CORRUPTED;
@@ -112,10 +111,8 @@ std::vector<std::shared_ptr<const C2Component::Traits>> V4L2ComponentStore::list
ret.push_back(GetTraits(V4L2ComponentName::kH264Encoder));
ret.push_back(GetTraits(V4L2ComponentName::kH264Decoder));
ret.push_back(GetTraits(V4L2ComponentName::kH264SecureDecoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP8Encoder));
ret.push_back(GetTraits(V4L2ComponentName::kVP8Decoder));
ret.push_back(GetTraits(V4L2ComponentName::kVP8SecureDecoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP9Encoder));
ret.push_back(GetTraits(V4L2ComponentName::kVP9Decoder));
ret.push_back(GetTraits(V4L2ComponentName::kVP9SecureDecoder));
return ret;
@@ -158,22 +155,24 @@ c2_status_t V4L2ComponentStore::querySupportedValues_sm(
::C2ComponentFactory* V4L2ComponentStore::GetFactory(const C2String& name) {
ALOGV("%s(%s)", __func__, name.c_str());
- ALOG_ASSERT(V4L2ComponentName::isValid(name.c_str()));
+
+ if (!V4L2ComponentName::isValid(name.c_str())) {
+ ALOGE("Invalid component name: %s", name.c_str());
+ return nullptr;
+ }
std::lock_guard<std::mutex> lock(mCachedFactoriesLock);
const auto it = mCachedFactories.find(name);
- if (it != mCachedFactories.end()) return it->second.get();
+ if (it != mCachedFactories.end()) return it->second;
- std::unique_ptr<::C2ComponentFactory> factory = V4L2ComponentFactory::create(
- name, std::static_pointer_cast<C2ReflectorHelper>(getParamReflector()));
+ ::C2ComponentFactory* factory = mCreateFactoryFunc(name.c_str());
if (factory == nullptr) {
ALOGE("Failed to create factory for %s", name.c_str());
return nullptr;
}
- auto ret = factory.get();
- mCachedFactories.emplace(name, std::move(factory));
- return ret;
+ mCachedFactories.emplace(name, factory);
+ return factory;
}
std::shared_ptr<const C2Component::Traits> V4L2ComponentStore::GetTraits(const C2String& name) {
@@ -188,13 +187,39 @@ std::shared_ptr<const C2Component::Traits> V4L2ComponentStore::GetTraits(const C
auto it = mCachedTraits.find(name);
if (it != mCachedTraits.end()) return it->second;
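+ // Build the traits by creating a component interface and querying its port media type, rather
+ // than hard-coding the media type for each component name.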
+ std::shared_ptr<C2ComponentInterface> intf;
+ auto res = createInterface(name, &intf);
+ if (res != C2_OK) {
+ ALOGE("failed to create interface for %s: %d", name.c_str(), res);
+ return nullptr;
+ }
+
+ bool isEncoder = V4L2ComponentName::isEncoder(name.c_str());
+ uint32_t mediaTypeIndex = isEncoder ? C2PortMediaTypeSetting::output::PARAM_TYPE
+ : C2PortMediaTypeSetting::input::PARAM_TYPE;
+ std::vector<std::unique_ptr<C2Param>> params;
+ res = intf->query_vb({}, {mediaTypeIndex}, C2_MAY_BLOCK, &params);
+ if (res != C2_OK) {
+ ALOGE("failed to query interface: %d", res);
+ return nullptr;
+ }
+ if (params.size() != 1u) {
+ ALOGE("failed to query interface: unexpected vector size: %zu", params.size());
+ return nullptr;
+ }
+
+ C2PortMediaTypeSetting* mediaTypeConfig = (C2PortMediaTypeSetting*)(params[0].get());
+ if (mediaTypeConfig == nullptr) {
+ ALOGE("failed to query media type");
+ return nullptr;
+ }
+
auto traits = std::make_shared<C2Component::Traits>();
- traits->name = name;
+ traits->name = intf->getName();
traits->domain = C2Component::DOMAIN_VIDEO;
+ traits->kind = isEncoder ? C2Component::KIND_ENCODER : C2Component::KIND_DECODER;
+ traits->mediaType = mediaTypeConfig->m.value;
traits->rank = kComponentRank;
- traits->mediaType = getMediaTypeFromComponentName(name);
- traits->kind = V4L2ComponentName::isEncoder(name.c_str()) ? C2Component::KIND_ENCODER
- : C2Component::KIND_DECODER;
mCachedTraits.emplace(name, traits);
return traits;
diff --git a/components/include/v4l2_codec2/components/V4L2ComponentStore.h b/store/include/v4l2_codec2/store/V4L2ComponentStore.h
index bfec407..8013f55 100644
--- a/components/include/v4l2_codec2/components/V4L2ComponentStore.h
+++ b/store/include/v4l2_codec2/store/V4L2ComponentStore.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_STORE_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_STORE_H
+#ifndef ANDROID_V4L2_CODEC2_STORE_V4L2_COMPONENT_STORE_H
+#define ANDROID_V4L2_CODEC2_STORE_V4L2_COMPONENT_STORE_H
#include <map>
#include <mutex>
+#include <android-base/thread_annotations.h>
#include <C2Component.h>
#include <C2ComponentFactory.h>
-#include <android-base/thread_annotations.h>
#include <util/C2InterfaceHelper.h>
namespace android {
@@ -41,21 +41,27 @@ public:
std::vector<C2FieldSupportedValuesQuery>& fields) const override;
private:
- V4L2ComponentStore();
+ using CreateV4L2FactoryFunc = ::C2ComponentFactory* (*)(const char* /* componentName */);
+ using DestroyV4L2FactoryFunc = void (*)(::C2ComponentFactory*);
+
+ V4L2ComponentStore(void* libHandle, CreateV4L2FactoryFunc createFactoryFunc,
+ DestroyV4L2FactoryFunc destroyFactoryFunc);
::C2ComponentFactory* GetFactory(const C2String& name);
std::shared_ptr<const C2Component::Traits> GetTraits(const C2String& name);
+ void* mLibHandle;
+ CreateV4L2FactoryFunc mCreateFactoryFunc;
+ DestroyV4L2FactoryFunc mDestroyFactoryFunc;
+
std::shared_ptr<C2ReflectorHelper> mReflector;
std::mutex mCachedFactoriesLock;
- std::map<C2String, std::unique_ptr<::C2ComponentFactory>> mCachedFactories
- GUARDED_BY(mCachedFactoriesLock);
+ std::map<C2String, ::C2ComponentFactory*> mCachedFactories GUARDED_BY(mCachedFactoriesLock);
std::mutex mCachedTraitsLock;
- std::map<C2String, std::shared_ptr<const C2Component::Traits>> mCachedTraits
- GUARDED_BY(mCachedTraitsLock);
+ std::map<C2String, std::shared_ptr<const C2Component::Traits>> mCachedTraits GUARDED_BY(mCachedTraitsLock);
};
} // namespace android
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_STORE_H
+#endif // ANDROID_V4L2_CODEC2_STORE_V4L2_COMPONENT_STORE_H
diff --git a/tests/c2_comp_intf/Android.bp b/tests/c2_comp_intf/Android.bp
index 1d08f6b..bc7ae83 100644
--- a/tests/c2_comp_intf/Android.bp
+++ b/tests/c2_comp_intf/Android.bp
@@ -1,12 +1,3 @@
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "external_v4l2_codec2_license"
- // to get the below license kinds:
- // SPDX-license-identifier-BSD
- default_applicable_licenses: ["external_v4l2_codec2_license"],
-}
-
cc_test {
name: "C2VEACompIntf_test",
vendor: true,
@@ -24,9 +15,11 @@ cc_test {
"liblog",
"libui",
"libutils",
+ "libv4l2_codec2_accel",
"libv4l2_codec2_components",
],
include_dirs: [
+ "external/v4l2_codec2/accel",
"external/v4l2_codec2/common/include",
"external/v4l2_codec2/components/include",
"frameworks/av/media/codec2/components/base/include",
diff --git a/tests/c2_e2e_test/Android.mk b/tests/c2_e2e_test/Android.mk
index aa97741..b12af4b 100644
--- a/tests/c2_e2e_test/Android.mk
+++ b/tests/c2_e2e_test/Android.mk
@@ -16,9 +16,6 @@ LOCAL_RESOURCE_DIR := \
LOCAL_MULTILIB := both
LOCAL_PACKAGE_NAME := C2E2ETest
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-BSD legacy_unencumbered
-LOCAL_LICENSE_CONDITIONS := notice unencumbered
-LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../NOTICE
LOCAL_JNI_SHARED_LIBRARIES := libcodectest
LOCAL_MODULE_TAGS := tests
diff --git a/tests/c2_e2e_test/jni/Android.mk b/tests/c2_e2e_test/jni/Android.mk
index 890a74b..1cbcdd4 100644
--- a/tests/c2_e2e_test/jni/Android.mk
+++ b/tests/c2_e2e_test/jni/Android.mk
@@ -5,6 +5,9 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+ system/core/include \
+
LOCAL_SRC_FILES := \
video_encoder_e2e_test.cpp \
video_decoder_e2e_test.cpp \
@@ -23,17 +26,12 @@ LOCAL_SHARED_LIBRARIES := \
libmediandk \
libandroid \
-LOCAL_HEADER_LIBRARIES := liblog_headers
-
LOCAL_SDK_VERSION := 28
LOCAL_NDK_STL_VARIANT := c++_static
LOCAL_STATIC_LIBRARIES := libgtest_ndk_c++
LOCAL_MODULE := libcodectest
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-BSD legacy_unencumbered
-LOCAL_LICENSE_CONDITIONS := notice unencumbered
-LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../../NOTICE
# TODO(stevensd): Fix and reenable warnings
LOCAL_CFLAGS += -Wno-everything
diff --git a/tests/c2_e2e_test/jni/common.cpp b/tests/c2_e2e_test/jni/common.cpp
index 50f4cdf..9f1a1b0 100644
--- a/tests/c2_e2e_test/jni/common.cpp
+++ b/tests/c2_e2e_test/jni/common.cpp
@@ -15,7 +15,7 @@
#include <numeric>
#include <sstream>
-#include <log/log.h>
+#include <utils/Log.h>
namespace android {
@@ -46,22 +46,14 @@ void InputFile::Rewind() {
file_.seekg(0);
}
-CachedInputFileStream::CachedInputFileStream(std::string file_path)
- : InputFile(file_path, std::ifstream::binary) {
- if (IsValid()) {
- data_.resize(GetLength());
- file_.read(data_.data(), GetLength());
- }
-}
+InputFileStream::InputFileStream(std::string file_path)
+ : InputFile(file_path, std::ifstream::binary) {}
-size_t CachedInputFileStream::Read(char* buffer, size_t size) {
- memcpy(buffer, data_.data() + position_, size);
- position_ += size;
- return size;
-}
+size_t InputFileStream::Read(char* buffer, size_t size) {
+ file_.read(buffer, size);
+ if (file_.fail()) return -1;
-void CachedInputFileStream::Rewind() {
- position_ = 0;
+ return file_.gcount();
}
InputFileASCII::InputFileASCII(std::string file_path) : InputFile(file_path) {}
@@ -77,100 +69,6 @@ bool InputFileASCII::ReadLine(std::string* line) {
return false; // no more lines
}
-IVFWriter::IVFWriter(std::ofstream* output_file, VideoCodecType codec)
- : output_file_(output_file), codec_(codec) {}
-
-bool IVFWriter::WriteHeader(const Size& resolution, uint32_t frame_rate, uint32_t num_frames) {
- constexpr uint16_t kIVFHeaderSize = 32;
- char header[kIVFHeaderSize];
-
- // Helper lambdas to write 16bit and 32bit data, expects the device to use little endian.
- auto write16 = [&header](int i, uint16_t data) { memcpy(&header[i], &data, sizeof(data)); };
- auto write32 = [&header](int i, uint32_t data) { memcpy(&header[i], &data, sizeof(data)); };
-
- const char* codec_str;
- switch (codec_) {
- case VideoCodecType::VP8:
- codec_str = "VP80";
- break;
- case VideoCodecType::VP9:
- codec_str = "VP90";
- break;
- default:
- printf("[ERR] Unknown codec: \n");
- return false;
- }
-
- strcpy(&header[0], "DKIF"); // Bytes 0-3 of an IVF file header always contain 'DKIF' signature.
- constexpr uint16_t kVersion = 0;
- write16(4, kVersion);
- write16(6, kIVFHeaderSize);
- strcpy(&header[8], codec_str);
- write16(12, resolution.width);
- write16(14, resolution.height);
- write32(16, frame_rate);
- write32(20, 1);
- write32(24, num_frames);
- write32(28, 0); // Reserved.
-
- output_file_->write(header, kIVFHeaderSize);
- return !output_file_->bad();
-}
-
-bool IVFWriter::WriteFrame(const uint8_t* data, uint32_t data_size, uint64_t timestamp) {
- constexpr size_t kIVFFrameHeaderSize = 12;
- char frame_header[kIVFFrameHeaderSize];
- memcpy(&frame_header[0], &data_size, sizeof(data_size));
- memcpy(&frame_header[4], &timestamp, sizeof(timestamp));
- output_file_->write(frame_header, kIVFFrameHeaderSize);
- output_file_->write(reinterpret_cast<const char*>(data), data_size);
- return !output_file_->bad();
-}
-
-bool IVFWriter::SetNumFrames(uint32_t num_frames) {
- output_file_->seekp(24);
- output_file_->write(reinterpret_cast<const char*>(&num_frames), sizeof(num_frames));
- return !output_file_->bad();
-}
-
-bool OutputFile::Open(const std::string& file_path, VideoCodecType codec) {
- output_file_.open(file_path, std::ofstream::binary);
- if (!output_file_.is_open()) {
- return false;
- }
-
- if ((codec == VideoCodecType::VP8) || (codec == VideoCodecType::VP9)) {
- ivf_writer_ = std::make_unique<IVFWriter>(&output_file_, codec);
- }
- return true;
-}
-
-void OutputFile::Close() {
- if (ivf_writer_) {
- ivf_writer_->SetNumFrames(frame_index_);
- ivf_writer_.reset();
- }
- output_file_.close();
-}
-
-bool OutputFile::IsOpen() {
- return output_file_.is_open();
-}
-
-// Write the file header.
-bool OutputFile::WriteHeader(const Size& resolution, uint32_t frame_rate, uint32_t num_frames) {
- return !ivf_writer_ || ivf_writer_->WriteHeader(resolution, frame_rate, num_frames);
-}
-
-bool OutputFile::WriteFrame(uint32_t data_size, const uint8_t* data) {
- if (ivf_writer_) {
- return (ivf_writer_->WriteFrame(data, data_size, frame_index_++));
- } else {
- output_file_.write(reinterpret_cast<const char*>(data), data_size);
- return (output_file_.fail());
- }
-}
-
bool FPSCalculator::RecordFrameTimeDiff() {
int64_t now_us = GetNowUs();
if (last_frame_time_us_ != 0) {
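The replacement InputFileStream reads straight from the std::ifstream instead of a cached copy of the file. One subtlety worth noting: std::istream::read() sets failbit whenever fewer than the requested bytes are available, so a trailing partial read also takes the error path in the code above. A minimal standalone sketch of the same pattern, with the EOF case separated out (a variation for illustration, not what this patch does):

    #include <fstream>

    // Returns bytes read, or -1 on a hard stream error. Unlike the patch's
    // Read(), a short read at EOF still reports its byte count here.
    static long ReadChunk(std::ifstream& file, char* buffer, std::streamsize size) {
        file.read(buffer, size);
        if (file.bad()) return -1;                // unrecoverable stream error
        return static_cast<long>(file.gcount());  // bytes actually read
    }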
diff --git a/tests/c2_e2e_test/jni/common.h b/tests/c2_e2e_test/jni/common.h
index ea8d212..b402a8b 100644
--- a/tests/c2_e2e_test/jni/common.h
+++ b/tests/c2_e2e_test/jni/common.h
@@ -7,7 +7,6 @@
#include <fstream>
#include <ios>
-#include <memory>
#include <string>
#include <vector>
@@ -71,26 +70,20 @@ public:
// Get the size of the file.
size_t GetLength();
// Set position to the beginning of the file.
- virtual void Rewind();
+ void Rewind();
protected:
std::ifstream file_;
};
// Wrapper of std::ifstream for reading binary file.
-class CachedInputFileStream : public InputFile {
+class InputFileStream : public InputFile {
public:
- explicit CachedInputFileStream(std::string file_path);
+ explicit InputFileStream(std::string file_path);
// Read the given number of bytes to the buffer. Return the number of bytes
// read or -1 on error.
size_t Read(char* buffer, size_t size);
-
- void Rewind() override;
-
-private:
- std::vector<char> data_;
- size_t position_ = 0;
};
// Wrapper of std::ifstream for reading ASCII file.
@@ -102,40 +95,6 @@ public:
bool ReadLine(std::string* line);
};
-// IVF file writer, can be used to write an encoded VP8/9 video to disk.
-class IVFWriter {
-public:
- IVFWriter(std::ofstream* output_file, VideoCodecType codec);
-
- // Write the IVF file header.
- bool WriteHeader(const Size& resolution, uint32_t frame_rate, uint32_t num_frames);
- // Append the specified frame data to the IVF file.
- bool WriteFrame(const uint8_t* data, uint32_t data_size, uint64_t timestamp);
- // Set the number of video frames in the IVF file header.
- bool SetNumFrames(uint32_t num_frames);
-
-private:
- std::ofstream* output_file_;
- VideoCodecType codec_ = VideoCodecType::UNKNOWN;
-};
-
-class OutputFile {
-public:
- bool Open(const std::string& file_path, VideoCodecType codec);
- void Close();
- bool IsOpen();
-
- // Write the video file header.
- bool WriteHeader(const Size& resolution, uint32_t frame_rate, uint32_t num_frames);
- // Append the specified frame data to the video file.
- bool WriteFrame(uint32_t data_size, const uint8_t* data);
-
-private:
- std::ofstream output_file_;
- std::unique_ptr<IVFWriter> ivf_writer_;
- uint64_t frame_index_ = 0;
-};
-
// The helper class to calculate FPS.
class FPSCalculator {
public:
diff --git a/tests/c2_e2e_test/jni/e2e_test_jni.cpp b/tests/c2_e2e_test/jni/e2e_test_jni.cpp
index 841cb83..9535fe7 100644
--- a/tests/c2_e2e_test/jni/e2e_test_jni.cpp
+++ b/tests/c2_e2e_test/jni/e2e_test_jni.cpp
@@ -10,7 +10,7 @@
#include <sstream>
#include <android/native_window_jni.h>
-#include <log/log.h>
+#include <utils/Log.h>
#include "e2e_test_jni.h"
#include "mediacodec_decoder.h"
@@ -25,10 +25,10 @@ public:
static constexpr char* kClassName = "org/chromium/c2/test/E2eTestActivity";
- void OnCodecReady(void* codec) override {
+ void OnDecoderReady(void* decoder) override {
jclass cls = env_->FindClass(kClassName);
- jmethodID methodid = env_->GetMethodID(cls, "onCodecReady", "(J)V");
- env_->CallVoidMethod(thiz_, methodid, (jlong)codec);
+ jmethodID methodid = env_->GetMethodID(cls, "onDecoderReady", "(J)V");
+ env_->CallVoidMethod(thiz_, methodid, (jlong)decoder);
}
void OnSizeChanged(int width, int height) override {
@@ -78,10 +78,10 @@ JNIEXPORT jint JNICALL Java_org_chromium_c2_test_E2eTestActivity_c2VideoTest(
ANativeWindow* native_window = ANativeWindow_fromSurface(env, surface);
int res;
- JniConfigureCallback cb(env, thiz);
if (encode) {
- res = RunEncoderTests(final_args, test_args_count + 1, &cb);
+ res = RunEncoderTests(final_args, test_args_count + 1);
} else {
+ JniConfigureCallback cb(env, thiz);
res = RunDecoderTests(final_args, test_args_count + 1, native_window, &cb);
}
delete[] final_args;
diff --git a/tests/c2_e2e_test/jni/e2e_test_jni.h b/tests/c2_e2e_test/jni/e2e_test_jni.h
index 90b7976..14a63bb 100644
--- a/tests/c2_e2e_test/jni/e2e_test_jni.h
+++ b/tests/c2_e2e_test/jni/e2e_test_jni.h
@@ -12,8 +12,8 @@ namespace android {
// Callback to communicate from the test back to the Activity.
class ConfigureCallback {
public:
- // Provides a reference to the current test's codec, or clears the reference.
- virtual void OnCodecReady(void* codec) = 0;
+ // Provides a reference to the current test's decoder, or clears the reference.
+ virtual void OnDecoderReady(void* decoder) = 0;
// Configures the surface with the size of the current video.
virtual void OnSizeChanged(int width, int height) = 0;
virtual ~ConfigureCallback() = default;
@@ -24,6 +24,6 @@ public:
int RunDecoderTests(char** test_args, int test_args_count, ANativeWindow* window,
android::ConfigureCallback* cb);
-int RunEncoderTests(char** test_args, int test_args_count, android::ConfigureCallback* cb);
+int RunEncoderTests(char** test_args, int test_args_count);
#endif // C2_E2E_TEST_E2E_TEST_JNI_H_
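The renamed OnDecoderReady() callback crosses from the native test back into the Activity through the standard JNI up-call sequence. A minimal sketch of that sequence, using the class and method names visible in this patch; env and activity are assumed valid on the calling thread:

    #include <jni.h>

    void NotifyDecoderReady(JNIEnv* env, jobject activity, void* decoder) {
        jclass cls = env->FindClass("org/chromium/c2/test/E2eTestActivity");
        if (cls == nullptr) return;  // lookup failed; a Java exception is pending
        jmethodID mid = env->GetMethodID(cls, "onDecoderReady", "(J)V");
        if (mid == nullptr) return;
        env->CallVoidMethod(activity, mid, reinterpret_cast<jlong>(decoder));
    }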
diff --git a/tests/c2_e2e_test/jni/encoded_data_helper.cpp b/tests/c2_e2e_test/jni/encoded_data_helper.cpp
index 0ced022..d17e7cb 100644
--- a/tests/c2_e2e_test/jni/encoded_data_helper.cpp
+++ b/tests/c2_e2e_test/jni/encoded_data_helper.cpp
@@ -12,7 +12,7 @@
#include <utility>
-#include <log/log.h>
+#include <utils/Log.h>
namespace android {
@@ -95,7 +95,7 @@ std::string GetBytesForNextFrame(const std::string& data, size_t* next_pos) {
EncodedDataHelper::EncodedDataHelper(const std::string& file_path, VideoCodecType type)
: type_(type) {
- CachedInputFileStream input(file_path);
+ InputFileStream input(file_path);
if (!input.IsValid()) {
ALOGE("Failed to open file: %s", file_path.c_str());
return;
diff --git a/tests/c2_e2e_test/jni/mediacodec_decoder.cpp b/tests/c2_e2e_test/jni/mediacodec_decoder.cpp
index b14a841..1b835c0 100644
--- a/tests/c2_e2e_test/jni/mediacodec_decoder.cpp
+++ b/tests/c2_e2e_test/jni/mediacodec_decoder.cpp
@@ -13,8 +13,8 @@
#include <utility>
#include <vector>
-#include <log/log.h>
#include <media/NdkMediaFormat.h>
+#include <utils/Log.h>
namespace android {
namespace {
@@ -31,8 +31,6 @@ constexpr int kTimeoutWaitForInputUs = 1000; // 1 millisecond
constexpr size_t kTimeoutMaxRetries = 500;
// Helper function to get possible C2 hardware decoder names from |type|.
-// Note: A single test APK is built for both ARC++ and ARCVM, so both the VDA decoder and the new
-// V4L2 decoder names need to be specified here.
std::vector<const char*> GetC2VideoDecoderNames(VideoCodecType type) {
switch (type) {
case VideoCodecType::H264:
@@ -47,16 +45,14 @@ std::vector<const char*> GetC2VideoDecoderNames(VideoCodecType type) {
}
// Helper function to get possible software decoder names from |type|.
-// Note: A single test APK is built for both ARC++ and ARCVM, so both the OMX decoder used on
-// Android P and the c2.android decoder used on Android R need to be specified here.
std::vector<const char*> GetSwVideoDecoderNames(VideoCodecType type) {
switch (type) {
case VideoCodecType::H264:
- return {"c2.android.avc.decoder", "OMX.google.h264.decoder"};
+ return {"OMX.google.h264.decoder"};
case VideoCodecType::VP8:
- return {"c2.android.vp8.decoder", "OMX.google.vp8.decoder"};
+ return {"OMX.google.vp8.decoder"};
case VideoCodecType::VP9:
- return {"c2.android.vp9.decoder", "OMX.google.vp9.decoder"};
+ return {"OMX.google.vp9.decoder"};
default: // unknown type
return {};
}
@@ -78,10 +74,12 @@ int64_t RoundUp(int64_t n, int64_t multiple) {
} // namespace
// static
-std::unique_ptr<MediaCodecDecoder> MediaCodecDecoder::Create(
- const std::string& input_path, VideoCodecProfile profile, bool use_sw_decoder,
- const Size& video_size, int frame_rate, ANativeWindow* surface, bool render_on_release,
- bool loop, bool use_fake_renderer) {
+std::unique_ptr<MediaCodecDecoder> MediaCodecDecoder::Create(const std::string& input_path,
+ VideoCodecProfile profile,
+ bool use_sw_decoder,
+ const Size& video_size, int frame_rate,
+ ANativeWindow* surface,
+ bool render_on_release, bool loop) {
if (video_size.IsEmpty()) {
ALOGE("Size is not valid: %dx%d", video_size.width, video_size.height);
return nullptr;
@@ -112,7 +110,7 @@ std::unique_ptr<MediaCodecDecoder> MediaCodecDecoder::Create(
auto ret = std::unique_ptr<MediaCodecDecoder>(
new MediaCodecDecoder(codec, std::move(encoded_data_helper), type, video_size,
- frame_rate, surface, render_on_release, loop, use_fake_renderer));
+ frame_rate, surface, render_on_release, loop));
AMediaCodecOnAsyncNotifyCallback cb{
.onAsyncInputAvailable =
@@ -150,8 +148,7 @@ std::unique_ptr<MediaCodecDecoder> MediaCodecDecoder::Create(
MediaCodecDecoder::MediaCodecDecoder(AMediaCodec* codec,
std::unique_ptr<EncodedDataHelper> encoded_data_helper,
VideoCodecType type, const Size& size, int frame_rate,
- ANativeWindow* surface, bool render_on_release, bool loop,
- bool use_fake_renderer)
+ ANativeWindow* surface, bool render_on_release, bool loop)
: codec_(codec),
encoded_data_helper_(std::move(encoded_data_helper)),
type_(type),
@@ -159,17 +156,12 @@ MediaCodecDecoder::MediaCodecDecoder(AMediaCodec* codec,
frame_rate_(frame_rate),
surface_(surface),
render_on_release_(render_on_release),
- looping_(loop),
- fake_renderer_running_(use_fake_renderer),
- fake_render_thread_([](MediaCodecDecoder* dec) { dec->FakeRenderLoop(); }, this) {}
+ looping_(loop) {}
MediaCodecDecoder::~MediaCodecDecoder() {
if (codec_ != nullptr) {
AMediaCodec_delete(codec_);
}
- fake_renderer_running_ = false;
- fake_render_cv_.notify_one();
- fake_render_thread_.join();
}
void MediaCodecDecoder::AddOutputBufferReadyCb(const OutputBufferReadyCb& cb) {
@@ -252,13 +244,6 @@ bool MediaCodecDecoder::Decode() {
case FORMAT_CHANGED:
success = GetOutputFormat();
break;
- case FAKE_FRAME_RENDERED:
- media_status_t status = AMediaCodec_releaseOutputBuffer(codec_, evt.idx, false);
- if (status != AMEDIA_OK) {
- ALOGE("Failed to releaseOutputBuffer(index=%zu): %d", evt.idx, status);
- success = false;
- }
- break;
}
assert(success);
}
@@ -300,9 +285,8 @@ bool MediaCodecDecoder::DequeueOutputBuffer(int32_t index, AMediaCodecBufferInfo
base_timestamp_ns_ = now;
} else if (now > GetReleaseTimestampNs(received_outputs_)) {
drop_frame_count_++;
- ALOGD("Drop frame #%d: frame %d deadline %" PRIu64 "us, actual %" PRIu64 "us",
- drop_frame_count_, received_outputs_, (received_outputs_ * 1000000ull / frame_rate_),
- (now - base_timestamp_ns_) / 1000);
+ ALOGD("Drop frame #%d: deadline %" PRIu64 "us, actual %" PRIu64 "us", drop_frame_count_,
+ (received_outputs_ * 1000000 / frame_rate_), (now - base_timestamp_ns_) / 1000);
render_frame = false; // We don't render the dropped frame.
}
@@ -371,7 +355,7 @@ bool MediaCodecDecoder::FeedEOSInputBuffer(size_t index) {
return true;
}
-bool MediaCodecDecoder::ReceiveOutputBuffer(int32_t index, const AMediaCodecBufferInfo& info,
+bool MediaCodecDecoder::ReceiveOutputBuffer(size_t index, const AMediaCodecBufferInfo& info,
bool render_buffer) {
size_t out_size = 0;
uint8_t* buf = nullptr;
@@ -393,50 +377,17 @@ bool MediaCodecDecoder::ReceiveOutputBuffer(int32_t index, const AMediaCodecBuff
callback(buf, info.size, received_outputs_);
}
- if (fake_renderer_running_) {
- std::lock_guard<std::mutex> lock(fake_render_mut_);
- fake_render_frames_.emplace(index, GetReleaseTimestampNs(received_outputs_));
- fake_render_cv_.notify_one();
- } else {
- media_status_t status =
- render_buffer ? AMediaCodec_releaseOutputBufferAtTime(
- codec_, index, GetReleaseTimestampNs(received_outputs_))
- : AMediaCodec_releaseOutputBuffer(codec_, index, false /* render */);
- if (status != AMEDIA_OK) {
- ALOGE("Failed to releaseOutputBuffer(index=%zu): %d", index, status);
- return false;
- }
+ media_status_t status =
+ render_buffer ? AMediaCodec_releaseOutputBufferAtTime(
+ codec_, index, GetReleaseTimestampNs(received_outputs_))
+ : AMediaCodec_releaseOutputBuffer(codec_, index, false /* render */);
+ if (status != AMEDIA_OK) {
+ ALOGE("Failed to releaseOutputBuffer(index=%zu): %d", index, status);
+ return false;
}
return true;
}
-void MediaCodecDecoder::FakeRenderLoop() {
- while (fake_renderer_running_) {
- std::pair<int32_t, int64_t> next_frame;
- {
- std::unique_lock<std::mutex> lock(fake_render_mut_);
- fake_render_cv_.wait(lock, [&]() {
- return !fake_renderer_running_ || !fake_render_frames_.empty();
- });
- if (!fake_renderer_running_) {
- break;
- }
-
- next_frame = fake_render_frames_.front();
- fake_render_frames_.pop();
- }
-
- const uint64_t now = GetCurrentTimeNs();
- if (now < next_frame.second) {
- usleep((next_frame.second - now) / 1000);
- }
-
- std::lock_guard<std::mutex> lock(event_queue_mut_);
- event_queue_.push({.type = FAKE_FRAME_RENDERED, .idx = next_frame.first});
- event_queue_cv_.notify_one();
- }
-}
-
bool MediaCodecDecoder::GetOutputFormat() {
AMediaFormat* format = AMediaCodec_getOutputFormat(codec_);
bool success = true;
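With the fake renderer removed, every decoded buffer is released directly back to MediaCodec: rendered frames through AMediaCodec_releaseOutputBufferAtTime(), which queues the frame for presentation at a CLOCK_MONOTONIC timestamp in nanoseconds, and dropped frames through AMediaCodec_releaseOutputBuffer() with render=false. A minimal sketch of that release step in isolation, with |release_time_ns| precomputed by the caller:

    #include <media/NdkMediaCodec.h>

    // Release one output buffer; returns false if the NDK call failed.
    bool ReleaseFrame(AMediaCodec* codec, size_t index, bool render,
                      int64_t release_time_ns) {
        media_status_t status =
                render ? AMediaCodec_releaseOutputBufferAtTime(codec, index, release_time_ns)
                       : AMediaCodec_releaseOutputBuffer(codec, index, false /* render */);
        return status == AMEDIA_OK;
    }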
diff --git a/tests/c2_e2e_test/jni/mediacodec_decoder.h b/tests/c2_e2e_test/jni/mediacodec_decoder.h
index d5e916f..4f4fe60 100644
--- a/tests/c2_e2e_test/jni/mediacodec_decoder.h
+++ b/tests/c2_e2e_test/jni/mediacodec_decoder.h
@@ -10,7 +10,6 @@
#include <mutex>
#include <queue>
#include <string>
-#include <thread>
#include <vector>
#include <media/NdkMediaCodec.h>
@@ -28,7 +27,7 @@ public:
VideoCodecProfile profile, bool use_sw_decoder,
const Size& video_size, int frame_rate,
ANativeWindow* surface, bool renderOnRelease,
- bool loop, bool use_fake_renderer);
+ bool loop);
MediaCodecDecoder() = delete;
~MediaCodecDecoder();
@@ -71,10 +70,8 @@ public:
void OnAsyncOutputAvailable(int32_t idx, AMediaCodecBufferInfo* info);
void OnAsyncFormatChanged(AMediaFormat* format);
- void FakeRenderLoop();
-
private:
- enum CodecEventType { INPUT_AVAILABLE, OUTPUT_AVAILABLE, FORMAT_CHANGED, FAKE_FRAME_RENDERED };
+ enum CodecEventType { INPUT_AVAILABLE, OUTPUT_AVAILABLE, FORMAT_CHANGED };
struct CodecEvent {
CodecEventType type;
int32_t idx;
@@ -83,7 +80,7 @@ private:
MediaCodecDecoder(AMediaCodec* codec, std::unique_ptr<EncodedDataHelper> encoded_data_helper,
VideoCodecType type, const Size& size, int frame_rate, ANativeWindow* surface,
- bool renderOnRelease, bool loop, bool use_fake_renderer);
+ bool renderOnRelease, bool loop);
// Enum class of the status of dequeueing output buffer.
enum class DequeueStatus { RETRY, SUCCESS, FAILURE };
@@ -106,7 +103,7 @@ private:
// Receive the output buffer and make mOutputBufferReadyCb callback if given.
// |index| is the index of the target output buffer.
// |info| is the buffer info of the target output buffer.
- bool ReceiveOutputBuffer(int32_t index, const AMediaCodecBufferInfo& info, bool render_buffer);
+ bool ReceiveOutputBuffer(size_t index, const AMediaCodecBufferInfo& info, bool render_buffer);
// Get output format by AMediaCodec_getOutputFormat and make
// |output_format_changed_cb_| callback if given.
@@ -156,12 +153,6 @@ private:
std::queue<CodecEvent> event_queue_; // guarded by event_queue_mut_
std::mutex event_queue_mut_;
std::condition_variable event_queue_cv_;
-
- std::atomic<bool> fake_renderer_running_;
- std::thread fake_render_thread_;
- std::mutex fake_render_mut_;
- std::condition_variable fake_render_cv_;
- std::queue<std::pair<int32_t, int64_t>> fake_render_frames_;
};
} // namespace android
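What remains after this change is the single mutex-guarded event queue that the NDK async callbacks feed and the decode loop drains. A minimal standalone sketch of that handoff, with the member names simplified from the header above:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>
    #include <queue>

    struct Event { int type; int32_t idx; };

    class EventQueue {
    public:
        // Called from the AMediaCodec async callbacks.
        void Push(Event evt) {
            std::lock_guard<std::mutex> lock(mut_);
            queue_.push(evt);
            cv_.notify_one();
        }
        // Called on the decode loop thread; blocks until an event arrives.
        Event Pop() {
            std::unique_lock<std::mutex> lock(mut_);
            cv_.wait(lock, [this] { return !queue_.empty(); });
            Event evt = queue_.front();
            queue_.pop();
            return evt;
        }

    private:
        std::queue<Event> queue_;
        std::mutex mut_;
        std::condition_variable cv_;
    };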
diff --git a/tests/c2_e2e_test/jni/mediacodec_encoder.cpp b/tests/c2_e2e_test/jni/mediacodec_encoder.cpp
index d15f285..e4c90a4 100644
--- a/tests/c2_e2e_test/jni/mediacodec_encoder.cpp
+++ b/tests/c2_e2e_test/jni/mediacodec_encoder.cpp
@@ -12,18 +12,15 @@
#include <utility>
#include <vector>
-#include <log/log.h>
#include <media/NdkMediaFormat.h>
+#include <utils/Log.h>
namespace android {
namespace {
-// These values are defined at
+// The values are defined at
// <android_root>/frameworks/base/media/java/android/media/MediaCodecInfo.java.
constexpr int32_t COLOR_FormatYUV420Planar = 19;
constexpr int32_t BITRATE_MODE_CBR = 2;
-constexpr int32_t AVCProfileBaseline = 0x01;
-constexpr int32_t VP8ProfileMain = 0x01;
-constexpr int32_t VP9Profile0 = 0x01;
// The time interval between two key frames.
constexpr int32_t kIFrameIntervalSec = 10;
@@ -36,58 +33,23 @@ constexpr int kTimeoutUs = 1000; // 1ms.
// output buffer.
constexpr int kBufferPeriodTimeoutUs = 1000000; // 1 sec
-// Helper function to get possible C2 hardware encoder names from |type|.
+// Helper function to get possible encoder names from |type|.
// Note: A single test APK is built for both ARC++ and ARCVM, so both the C2 VEA encoder and the new
// V4L2 encoder names need to be specified here.
-std::vector<const char*> GetHWVideoEncoderNames(VideoCodecType type) {
+std::vector<const char*> GetArcVideoEncoderNames(VideoCodecType type) {
switch (type) {
case VideoCodecType::H264:
return {"c2.v4l2.avc.encoder", "c2.vea.avc.encoder"};
- case VideoCodecType::VP8:
- return {"c2.v4l2.vp8.encoder"}; // Only supported on ARCVM
- case VideoCodecType::VP9:
- return {"c2.v4l2.vp9.encoder"}; // Only supported on ARCVM
- default:
+ default: // unsupported type: VP8, VP9, or unknown
return {};
}
}
-// Helper function to get possible software encoder names from |type|.
-// Note: A single test APK is built for both ARC++ and ARCVM, so both the OMX encoder used on
-// Android P and the c2.android encoder used on Android R need to be specified here.
-std::vector<const char*> GetSWVideoEncoderNames(VideoCodecType type) {
- switch (type) {
- case VideoCodecType::H264:
- return {"c2.android.avc.encoder", "OMX.google.h264.encoder"};
- case VideoCodecType::VP8:
- return {"c2.android.vp8.encoder", "OMX.google.vp8.encoder"};
- case VideoCodecType::VP9:
- return {"c2.android.vp9.encoder", "OMX.google.vp9.encoder"};
- default:
- return {};
- }
-}
-
-// Helper function to get the profile associated with the specified codec.
-int32_t GetProfile(VideoCodecType type) {
- switch (type) {
- case VideoCodecType::H264:
- return AVCProfileBaseline;
- case VideoCodecType::VP8:
- return VP8ProfileMain;
- case VideoCodecType::VP9:
- return VP9Profile0;
- default:
- return AVCProfileBaseline;
- }
-}
-
} // namespace
// static
std::unique_ptr<MediaCodecEncoder> MediaCodecEncoder::Create(std::string input_path,
- VideoCodecType type, Size visible_size,
- bool use_sw_encoder) {
+ Size visible_size) {
if (visible_size.width <= 0 || visible_size.height <= 0 || visible_size.width % 2 == 1 ||
visible_size.height % 2 == 1) {
ALOGE("Size is not valid: %dx%d", visible_size.width, visible_size.height);
@@ -95,7 +57,7 @@ std::unique_ptr<MediaCodecEncoder> MediaCodecEncoder::Create(std::string input_p
}
size_t buffer_size = visible_size.width * visible_size.height * 3 / 2;
- std::unique_ptr<CachedInputFileStream> input_file(new CachedInputFileStream(input_path));
+ std::unique_ptr<InputFileStream> input_file(new InputFileStream(input_path));
if (!input_file->IsValid()) {
ALOGE("Failed to open file: %s", input_path.c_str());
return nullptr;
@@ -108,8 +70,8 @@ std::unique_ptr<MediaCodecEncoder> MediaCodecEncoder::Create(std::string input_p
}
AMediaCodec* codec = nullptr;
- auto encoder_names =
- use_sw_encoder ? GetSWVideoEncoderNames(type) : GetHWVideoEncoderNames(type);
+ // Only H264 is supported now.
+ auto encoder_names = GetArcVideoEncoderNames(VideoCodecType::H264);
for (const auto& encoder_name : encoder_names) {
codec = AMediaCodec_createCodecByName(encoder_name);
if (codec) {
@@ -122,19 +84,17 @@ std::unique_ptr<MediaCodecEncoder> MediaCodecEncoder::Create(std::string input_p
return nullptr;
}
- return std::unique_ptr<MediaCodecEncoder>(
- new MediaCodecEncoder(codec, type, std::move(input_file), visible_size, buffer_size,
- file_size / buffer_size));
+ return std::unique_ptr<MediaCodecEncoder>(new MediaCodecEncoder(
+ codec, std::move(input_file), visible_size, buffer_size, file_size / buffer_size));
}
-MediaCodecEncoder::MediaCodecEncoder(AMediaCodec* codec, VideoCodecType type,
- std::unique_ptr<CachedInputFileStream> input_file, Size size,
+MediaCodecEncoder::MediaCodecEncoder(AMediaCodec* codec,
+ std::unique_ptr<InputFileStream> input_file, Size size,
size_t buffer_size, size_t num_total_frames)
: kVisibleSize(size),
kBufferSize(buffer_size),
kNumTotalFrames(num_total_frames),
codec_(codec),
- type_(type),
num_encoded_frames_(num_total_frames),
input_file_(std::move(input_file)) {}
@@ -160,8 +120,7 @@ void MediaCodecEncoder::Rewind() {
bool MediaCodecEncoder::Configure(int32_t bitrate, int32_t framerate) {
ALOGV("Configure encoder bitrate=%d, framerate=%d", bitrate, framerate);
AMediaFormat* format = AMediaFormat_new();
- AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, GetMimeType(type_));
- AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_PROFILE, GetProfile(type_));
+    AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");  // H264 only
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_COLOR_FORMAT, COLOR_FormatYUV420Planar);
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BITRATE_MODE, BITRATE_MODE_CBR);
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, kIFrameIntervalSec);
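After dropping the profile key, the encoder format carries only the MIME type plus color format, bitrate mode, and I-frame interval, with width, height, bitrate, and framerate set elsewhere in Configure(). A minimal sketch of the resulting AMediaFormat setup; the width/height/bitrate/framerate parameters here are placeholders:

    #include <media/NdkMediaFormat.h>

    AMediaFormat* MakeAvcEncoderFormat(int32_t width, int32_t height,
                                       int32_t bitrate, int32_t framerate) {
        AMediaFormat* format = AMediaFormat_new();
        AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, width);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, height);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_COLOR_FORMAT,
                              19 /* COLOR_FormatYUV420Planar */);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BITRATE_MODE, 2 /* CBR */);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, bitrate);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, framerate);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 10);
        return format;  // caller: AMediaCodec_configure(...), then AMediaFormat_delete()
    }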
diff --git a/tests/c2_e2e_test/jni/mediacodec_encoder.h b/tests/c2_e2e_test/jni/mediacodec_encoder.h
index 53e6fdf..8580061 100644
--- a/tests/c2_e2e_test/jni/mediacodec_encoder.h
+++ b/tests/c2_e2e_test/jni/mediacodec_encoder.h
@@ -18,8 +18,7 @@ namespace android {
class MediaCodecEncoder {
public:
// Checks the argument and create MediaCodecEncoder instance.
- static std::unique_ptr<MediaCodecEncoder> Create(std::string input_path, VideoCodecType type,
- Size visible_size, bool use_sw_encoder);
+ static std::unique_ptr<MediaCodecEncoder> Create(std::string input_path, Size visible_size);
MediaCodecEncoder() = delete;
~MediaCodecEncoder();
@@ -54,8 +53,7 @@ public:
void set_run_at_fps(bool run_at_fps);
private:
- MediaCodecEncoder(AMediaCodec* codec, VideoCodecType type,
- std::unique_ptr<CachedInputFileStream> inputFile, Size size,
+ MediaCodecEncoder(AMediaCodec* codec, std::unique_ptr<InputFileStream> inputFile, Size size,
size_t bufferSize, size_t numTotalFrames);
// Read the content from the |input_file_| and feed into the input buffer.
@@ -81,13 +79,11 @@ private:
// The target mediacodec encoder.
AMediaCodec* codec_;
- // The output codec type.
- VideoCodecType type_;
// The number of frames to encode.
size_t num_encoded_frames_;
// The input video raw stream file. The file size must be the multiple of
// |kBufferSize|.
- std::unique_ptr<CachedInputFileStream> input_file_;
+ std::unique_ptr<InputFileStream> input_file_;
// The target output bitrate.
int bitrate_ = 192000;
// The target output framerate.
diff --git a/tests/c2_e2e_test/jni/video_decoder_e2e_test.cpp b/tests/c2_e2e_test/jni/video_decoder_e2e_test.cpp
index 46b497f..1251625 100644
--- a/tests/c2_e2e_test/jni/video_decoder_e2e_test.cpp
+++ b/tests/c2_e2e_test/jni/video_decoder_e2e_test.cpp
@@ -14,7 +14,7 @@
#include <vector>
#include <gtest/gtest.h>
-#include <log/log.h>
+#include <utils/Log.h>
#include "common.h"
#include "e2e_test_jni.h"
@@ -33,12 +33,11 @@ C2VideoDecoderTestEnvironment* g_env;
class C2VideoDecoderTestEnvironment : public testing::Environment {
public:
- C2VideoDecoderTestEnvironment(bool loop, bool use_sw_decoder, bool use_fake_renderer,
- const std::string& data, const std::string& output_frames_path,
- ANativeWindow* surface, ConfigureCallback* cb)
+ C2VideoDecoderTestEnvironment(bool loop, bool use_sw_decoder, const std::string& data,
+ const std::string& output_frames_path, ANativeWindow* surface,
+ ConfigureCallback* cb)
: loop_(loop),
use_sw_decoder_(use_sw_decoder),
- use_fake_renderer_(use_fake_renderer),
test_video_data_(data),
output_frames_path_(output_frames_path),
surface_(surface),
@@ -104,14 +103,12 @@ public:
ConfigureCallback* configure_cb() const { return configure_cb_; }
bool loop() const { return loop_; }
bool use_sw_decoder() const { return use_sw_decoder_; }
- bool use_fake_renderer() const { return use_fake_renderer_; }
ANativeWindow* surface() const { return surface_; }
protected:
bool loop_;
bool use_sw_decoder_;
- bool use_fake_renderer_;
std::string test_video_data_;
std::string output_frames_path_;
@@ -256,10 +253,10 @@ protected:
decoder_ = MediaCodecDecoder::Create(g_env->input_file_path(), g_env->video_codec_profile(),
g_env->use_sw_decoder(), g_env->visible_size(),
g_env->frame_rate(), surface, renderOnRelease(),
- g_env->loop(), g_env->use_fake_renderer());
+ g_env->loop());
ASSERT_TRUE(decoder_);
- g_env->configure_cb()->OnCodecReady(decoder_.get());
+ g_env->configure_cb()->OnDecoderReady(decoder_.get());
decoder_->Rewind();
@@ -289,7 +286,7 @@ protected:
EXPECT_EQ(g_env->num_frames(), decoded_frames_);
}
- g_env->configure_cb()->OnCodecReady(nullptr);
+ g_env->configure_cb()->OnDecoderReady(nullptr);
decoder_.reset();
}
@@ -373,14 +370,13 @@ TEST_F(C2VideoDecoderSurfaceNoRenderE2ETest, TestFPS) {
} // namespace android
bool GetOption(int argc, char** argv, std::string* test_video_data, std::string* output_frames_path,
- bool* loop, bool* use_sw_decoder, bool* use_fake_renderer) {
+ bool* loop, bool* use_sw_decoder) {
const char* const optstring = "t:o:";
static const struct option opts[] = {
{"test_video_data", required_argument, nullptr, 't'},
{"output_frames_path", required_argument, nullptr, 'o'},
{"loop", no_argument, nullptr, 'l'},
{"use_sw_decoder", no_argument, nullptr, 's'},
- {"fake_renderer", no_argument, nullptr, 'f'},
{nullptr, 0, nullptr, 0},
};
@@ -400,9 +396,6 @@ bool GetOption(int argc, char** argv, std::string* test_video_data, std::string*
case 's':
*use_sw_decoder = true;
break;
- case 'f':
- *use_fake_renderer = true;
- break;
default:
printf("[WARN] Unknown option: getopt_long() returned code 0x%x.\n", opt);
break;
@@ -422,9 +415,8 @@ int RunDecoderTests(char** test_args, int test_args_count, ANativeWindow* surfac
std::string output_frames_path;
bool loop = false;
bool use_sw_decoder = false;
- bool use_fake_renderer = false;
if (!GetOption(test_args_count, test_args, &test_video_data, &output_frames_path, &loop,
- &use_sw_decoder, &use_fake_renderer)) {
+ &use_sw_decoder)) {
ALOGE("GetOption failed");
return EXIT_FAILURE;
}
@@ -432,8 +424,7 @@ int RunDecoderTests(char** test_args, int test_args_count, ANativeWindow* surfac
if (android::g_env == nullptr) {
android::g_env = reinterpret_cast<android::C2VideoDecoderTestEnvironment*>(
testing::AddGlobalTestEnvironment(new android::C2VideoDecoderTestEnvironment(
- loop, use_sw_decoder, use_fake_renderer, test_video_data,
- output_frames_path, surface, cb)));
+ loop, use_sw_decoder, test_video_data, output_frames_path, surface, cb)));
} else {
ALOGE("Trying to reuse test process");
return EXIT_FAILURE;
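After the -f/--fake_renderer switch is removed, the remaining decoder-test flags are parsed with the usual getopt_long() loop. A minimal sketch mirroring the option table above:

    #include <getopt.h>
    #include <string>

    bool ParseDecoderArgs(int argc, char** argv, std::string* data, std::string* out,
                          bool* loop, bool* use_sw_decoder) {
        static const struct option opts[] = {
                {"test_video_data", required_argument, nullptr, 't'},
                {"output_frames_path", required_argument, nullptr, 'o'},
                {"loop", no_argument, nullptr, 'l'},
                {"use_sw_decoder", no_argument, nullptr, 's'},
                {nullptr, 0, nullptr, 0},
        };
        int opt;
        while ((opt = getopt_long(argc, argv, "t:o:", opts, nullptr)) != -1) {
            switch (opt) {
            case 't': *data = optarg; break;
            case 'o': *out = optarg; break;
            case 'l': *loop = true; break;
            case 's': *use_sw_decoder = true; break;
            default: break;  // the tests only warn on unknown options
            }
        }
        return !data->empty();
    }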
diff --git a/tests/c2_e2e_test/jni/video_encoder_e2e_test.cpp b/tests/c2_e2e_test/jni/video_encoder_e2e_test.cpp
index 5215052..be3a489 100644
--- a/tests/c2_e2e_test/jni/video_encoder_e2e_test.cpp
+++ b/tests/c2_e2e_test/jni/video_encoder_e2e_test.cpp
@@ -15,10 +15,9 @@
#include <vector>
#include <gtest/gtest.h>
-#include <log/log.h>
+#include <utils/Log.h>
#include "common.h"
-#include "e2e_test_jni.h"
#include "mediacodec_encoder.h"
namespace android {
@@ -53,13 +52,11 @@ struct CmdlineArgs {
std::string test_stream_data;
bool run_at_fps = false;
size_t num_encoded_frames = 0;
- bool use_sw_encoder = false;
};
class C2VideoEncoderTestEnvironment : public testing::Environment {
public:
- explicit C2VideoEncoderTestEnvironment(const CmdlineArgs& args, ConfigureCallback* cb)
- : args_(args), configure_cb_(cb) {}
+ explicit C2VideoEncoderTestEnvironment(const CmdlineArgs& args) : args_(args) {}
void SetUp() override { ParseTestStreamData(); }
@@ -71,6 +68,7 @@ public:
// (see http://www.fourcc.org/yuv.php#IYUV).
// - |width| and |height| are in pixels.
// - |profile| to encode into (values of VideoCodecProfile).
+    // NOTE: Only H264PROFILE_MAIN(1) is supported; this value is currently ignored.
// - |output_file_path| filename to save the encoded stream to (optional).
// The format for H264 is Annex-B byte stream.
// - |requested_bitrate| requested bitrate in bits per second.
@@ -97,21 +95,7 @@ public:
if (fields.size() >= 4 && !fields[3].empty()) {
int profile = stoi(fields[3]);
- switch (profile) {
- case VideoCodecProfile::H264PROFILE_MAIN:
- codec_ = VideoCodecType::H264;
- break;
- case VideoCodecProfile::VP8PROFILE_ANY:
- codec_ = VideoCodecType::VP8;
- break;
- case VideoCodecProfile::VP9PROFILE_PROFILE0:
- codec_ = VideoCodecType::VP9;
- break;
- default:
- printf("[WARN] Only H264PROFILE_MAIN, VP8PROFILE_ANY and VP9PROFILE_PROFILE0 are"
- "supported.\n");
- codec_ = VideoCodecType::H264;
- }
+ if (profile != 1) printf("[WARN] Only H264PROFILE_MAIN(1) is supported.\n");
}
if (fields.size() >= 5 && !fields[4].empty()) {
@@ -154,7 +138,6 @@ public:
}
Size visible_size() const { return visible_size_; }
- VideoCodecType codec() const { return codec_; }
std::string input_file_path() const { return input_file_path_; }
std::string output_file_path() const { return output_file_path_; }
int requested_bitrate() const { return requested_bitrate_; }
@@ -164,16 +147,11 @@ public:
bool run_at_fps() const { return args_.run_at_fps; }
size_t num_encoded_frames() const { return args_.num_encoded_frames; }
- bool use_sw_encoder() const { return args_.use_sw_encoder; }
-
- ConfigureCallback* configure_cb() const { return configure_cb_; }
private:
const CmdlineArgs args_;
- ConfigureCallback* configure_cb_;
Size visible_size_;
- VideoCodecType codec_;
std::string input_file_path_;
std::string output_file_path_;
@@ -186,10 +164,12 @@ private:
class C2VideoEncoderE2ETest : public testing::Test {
public:
// Callback functions of getting output buffers from encoder.
- void WriteOutputBufferToFile(VideoCodecType type, const uint8_t* data,
- const AMediaCodecBufferInfo& info) {
- if (output_file_.IsOpen() && !output_file_.WriteFrame(info.size, data)) {
- printf("[ERR] Failed to write encoded buffer into file.\n");
+ void WriteOutputBufferToFile(const uint8_t* data, const AMediaCodecBufferInfo& info) {
+ if (output_file_.is_open()) {
+ output_file_.write(reinterpret_cast<const char*>(data), info.size);
+ if (output_file_.fail()) {
+ printf("[ERR] Failed to write encoded buffer into file.\n");
+ }
}
}
@@ -199,11 +179,8 @@ public:
protected:
void SetUp() override {
- encoder_ = MediaCodecEncoder::Create(g_env->input_file_path(), g_env->codec(),
- g_env->visible_size(), g_env->use_sw_encoder());
+ encoder_ = MediaCodecEncoder::Create(g_env->input_file_path(), g_env->visible_size());
ASSERT_TRUE(encoder_);
- g_env->configure_cb()->OnCodecReady(encoder_.get());
-
encoder_->Rewind();
ASSERT_TRUE(encoder_->Configure(static_cast<int32_t>(g_env->requested_bitrate()),
@@ -214,22 +191,18 @@ protected:
void TearDown() override {
EXPECT_TRUE(encoder_->Stop());
- output_file_.Close();
+ output_file_.close();
encoder_.reset();
}
bool CreateOutputFile() {
if (g_env->output_file_path().empty()) return false;
- if (!output_file_.Open(g_env->output_file_path(), g_env->codec())) {
+ output_file_.open(g_env->output_file_path(), std::ofstream::binary);
+ if (!output_file_.is_open()) {
printf("[ERR] Failed to open file: %s\n", g_env->output_file_path().c_str());
return false;
}
- if (!output_file_.WriteHeader(g_env->visible_size(), g_env->requested_framerate(), 0)) {
- printf("[ERR] Failed to write file header\n");
- return false;
- }
-
return true;
}
@@ -241,7 +214,7 @@ protected:
std::unique_ptr<MediaCodecEncoder> encoder_;
// The output file to write the encoded video bitstream.
- OutputFile output_file_;
+ std::ofstream output_file_;
// Used to accumulate the output buffer size.
size_t total_output_buffer_size_;
};
@@ -287,7 +260,7 @@ TEST_F(C2VideoEncoderE2ETest, TestSimpleEncode) {
// Write the output buffers to file.
if (CreateOutputFile()) {
encoder_->SetOutputBufferReadyCb(std::bind(&C2VideoEncoderE2ETest::WriteOutputBufferToFile,
- this, g_env->codec(), std::placeholders::_1,
+ this, std::placeholders::_1,
std::placeholders::_2));
}
encoder_->set_run_at_fps(g_env->run_at_fps());
@@ -352,7 +325,6 @@ bool GetOption(int argc, char** argv, android::CmdlineArgs* args) {
{"test_stream_data", required_argument, nullptr, 't'},
{"run_at_fps", no_argument, nullptr, 'r'},
{"num_encoded_frames", required_argument, nullptr, 'n'},
- {"use_sw_encoder", no_argument, nullptr, 's'},
{nullptr, 0, nullptr, 0},
};
@@ -368,9 +340,6 @@ bool GetOption(int argc, char** argv, android::CmdlineArgs* args) {
case 'n':
args->num_encoded_frames = static_cast<size_t>(atoi(optarg));
break;
- case 's':
- args->use_sw_encoder = true;
- break;
default:
printf("[WARN] Unknown option: getopt_long() returned code 0x%x.\n", opt);
break;
@@ -384,13 +353,12 @@ bool GetOption(int argc, char** argv, android::CmdlineArgs* args) {
return true;
}
-int RunEncoderTests(char** test_args, int test_args_count, android::ConfigureCallback* cb) {
+int RunEncoderTests(char** test_args, int test_args_count) {
android::CmdlineArgs args;
if (!GetOption(test_args_count, test_args, &args)) return EXIT_FAILURE;
android::g_env = reinterpret_cast<android::C2VideoEncoderTestEnvironment*>(
- testing::AddGlobalTestEnvironment(
- new android::C2VideoEncoderTestEnvironment(args, cb)));
+ testing::AddGlobalTestEnvironment(new android::C2VideoEncoderTestEnvironment(args)));
testing::InitGoogleTest(&test_args_count, test_args);
return RUN_ALL_TESTS();
}
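RunEncoderTests() registers its environment through googletest's global-environment hook: AddGlobalTestEnvironment() takes ownership of the pointer and calls SetUp() once before the first test runs (here, to parse the test stream description). A minimal sketch of that registration pattern, with a simplified stand-in environment:

    #include <gtest/gtest.h>

    class StreamDataEnv : public testing::Environment {
    public:
        void SetUp() override { /* parse test stream data once for all tests */ }
    };

    int RunTests(int argc, char** argv) {
        testing::AddGlobalTestEnvironment(new StreamDataEnv());  // gtest takes ownership
        testing::InitGoogleTest(&argc, argv);
        return RUN_ALL_TESTS();
    }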
diff --git a/tests/c2_e2e_test/jni/video_frame.cpp b/tests/c2_e2e_test/jni/video_frame.cpp
index 6a9b361..a15fe27 100644
--- a/tests/c2_e2e_test/jni/video_frame.cpp
+++ b/tests/c2_e2e_test/jni/video_frame.cpp
@@ -9,7 +9,7 @@
#include "video_frame.h"
-#include <log/log.h>
+#include <utils/Log.h>
namespace android {
diff --git a/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java b/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java
index 6b24ea6..140ff82 100644
--- a/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java
+++ b/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java
@@ -7,10 +7,7 @@
package org.chromium.c2.test;
import android.app.Activity;
-import android.content.BroadcastReceiver;
-import android.content.Context;
import android.content.Intent;
-import android.content.IntentFilter;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.Handler;
@@ -37,9 +34,7 @@ public class E2eTestActivity extends Activity implements SurfaceHolder.Callback
private Size mExpectedSize;
private CountDownLatch mLatch;
- private long mCodecPtr = 0;
-
- private BroadcastReceiver mCodecConfigReceiver;
+ private long mDecoderPtr;
@Override
protected void onCreate(Bundle savedInstanceState) {
@@ -53,17 +48,11 @@ public class E2eTestActivity extends Activity implements SurfaceHolder.Callback
mSurfaceView.getHolder().addCallback(this);
mCanStartTest = !getIntent().getBooleanExtra("delay-start", false);
-
- mCodecConfigReceiver = new CodecReadyReceiver();
- registerReceiver(
- mCodecConfigReceiver,
- new IntentFilter("org.chromium.c2.test.CHECK_CODEC_CONFIGURED"));
}
@Override
protected void onDestroy() {
super.onDestroy();
- unregisterReceiver(mCodecConfigReceiver);
// gtest can't reuse a process
System.exit(0);
}
@@ -110,9 +99,9 @@ public class E2eTestActivity extends Activity implements SurfaceHolder.Callback
});
}
- void onCodecReady(long codecPtr) {
+ void onDecoderReady(long decoderPtr) {
synchronized (this) {
- mCodecPtr = codecPtr;
+ mDecoderPtr = decoderPtr;
}
}
@@ -125,8 +114,8 @@ public class E2eTestActivity extends Activity implements SurfaceHolder.Callback
}
synchronized (this) {
- if (mCodecPtr != 0) {
- stopDecoderLoop(mCodecPtr);
+ if (mDecoderPtr != 0) {
+ stopDecoderLoop(mDecoderPtr);
}
}
}
@@ -177,17 +166,6 @@ public class E2eTestActivity extends Activity implements SurfaceHolder.Callback
}
}
- class CodecReadyReceiver extends BroadcastReceiver {
- @Override
- public void onReceive(Context ctx, Intent intent) {
- boolean ready;
- synchronized (E2eTestActivity.this) {
- ready = mCodecPtr != 0;
- }
- setResultCode(ready ? 1 : 0);
- }
- }
-
public native int c2VideoTest(
boolean encode, String[] testArgs, int testArgsCount, Surface surface, String tmpFile);