Merge e4981672c31aacfc7bd0160edc0d513dd9918d0e on remote branch
Change-Id: I9eff3085bea8863bf675431adf11a3492ecba912
diff --git a/Android.mk b/Android.mk
deleted file mode 100644
index 31188ed..0000000
--- a/Android.mk
+++ /dev/null
@@ -1,79 +0,0 @@
-# Build only if vendor/google_arc/libs/codec2 is
-# visible; otherwise, don't build any target under this repository.
-ifneq (,$(findstring vendor/google_arc/libs/codec2,$(PRODUCT_SOONG_NAMESPACES)))
-
-LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- C2EncoderInterface.cpp \
- C2VDAComponent.cpp \
- C2VDAAdaptor.cpp \
-
-LOCAL_C_INCLUDES += \
- $(TOP)/external/libchrome \
- $(TOP)/external/gtest/include \
- $(TOP)/external/v4l2_codec2/accel \
- $(TOP)/external/v4l2_codec2/common \
- $(TOP)/external/v4l2_codec2/include \
- $(TOP)/frameworks/av/media/codec2/components/base/include \
- $(TOP)/frameworks/av/media/codec2/core/include \
- $(TOP)/frameworks/av/media/codec2/vndk/include \
- $(TOP)/frameworks/av/media/libstagefright/include \
-
-LOCAL_MODULE:= libv4l2_codec2
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES := android.hardware.graphics.common@1.0 \
- libarc_c2componentstore \
- libbinder \
- libc2plugin_store \
- libchrome \
- libcodec2 \
- libcodec2_soft_common \
- libcodec2_vndk \
- libcutils \
- liblog \
- libmedia \
- libsfplugin_ccodec_utils \
- libstagefright \
- libstagefright_bufferqueue_helper \
- libstagefright_foundation \
- libui \
- libutils \
- libv4l2_codec2_accel \
- libvda_c2_pixelformat \
-
-LOCAL_STATIC_LIBRARIES := libv4l2_codec2_common \
-
-# -Wno-unused-parameter is needed for libchrome/base codes
-LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter
-LOCAL_CFLAGS += -Wno-unused-lambda-capture -Wno-unknown-warning-option
-LOCAL_CLANG := true
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
-
-LOCAL_LDFLAGS := -Wl,-Bsymbolic
-
-# Enable input format converter from C2VEAComponent.
-LOCAL_CFLAGS += -DUSE_VEA_FORMAT_CONVERTER
-
-# Build C2VDAAdaptorProxy only for ARC++ case.
-ifneq (,$(findstring cheets_,$(TARGET_PRODUCT)))
-LOCAL_CFLAGS += -DV4L2_CODEC2_ARC
-LOCAL_SRC_FILES += \
- C2VDAAdaptorProxy.cpp \
- C2VEAAdaptorProxy.cpp \
-
-LOCAL_SRC_FILES := $(filter-out C2VDAAdaptor.cpp, $(LOCAL_SRC_FILES))
-LOCAL_SHARED_LIBRARIES += libarcbridge \
- libarcbridgeservice \
- libcodec2_arcva_factory \
- libmojo \
-
-endif # ifneq (,$(findstring cheets_,$(TARGET_PRODUCT)))
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
-
-endif #ifneq (,$(findstring vendor/google_arc/libs/codec2,$(PRODUCT_SOONG_NAMESPACES)))
diff --git a/C2VDAAdaptor.cpp b/C2VDAAdaptor.cpp
deleted file mode 100644
index a045188..0000000
--- a/C2VDAAdaptor.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2VDAAdaptor"
-
-#include <C2VDAAdaptor.h>
-
-#include <bitstream_buffer.h>
-#include <native_pixmap_handle.h>
-#include <v4l2_device.h>
-#include <v4l2_video_decode_accelerator.h>
-#include <video_pixel_format.h>
-
-#include <utils/Log.h>
-
-namespace android {
-
-C2VDAAdaptor::C2VDAAdaptor() : mNumOutputBuffers(0u) {}
-
-C2VDAAdaptor::~C2VDAAdaptor() {
- if (mVDA) {
- destroy();
- }
-}
-
-VideoDecodeAcceleratorAdaptor::Result C2VDAAdaptor::initialize(
- media::VideoCodecProfile profile, bool secureMode,
- VideoDecodeAcceleratorAdaptor::Client* client) {
- // TODO: use secureMode here, or ignore?
- if (mVDA) {
- ALOGE("Re-initialize() is not allowed");
- return ILLEGAL_STATE;
- }
-
- media::VideoDecodeAccelerator::Config config;
- config.profile = profile;
- config.output_mode = media::VideoDecodeAccelerator::Config::OutputMode::IMPORT;
-
- // TODO(johnylin): may need to implement factory to create VDA if there are multiple VDA
- // implementations in the future.
- scoped_refptr<media::V4L2Device> device = media::V4L2Device::Create();
- std::unique_ptr<media::VideoDecodeAccelerator> vda(
- new media::V4L2VideoDecodeAccelerator(device));
- if (!vda->Initialize(config, this)) {
- ALOGE("Failed to initialize VDA");
- return PLATFORM_FAILURE;
- }
-
- mVDA = std::move(vda);
- mClient = client;
-
- return SUCCESS;
-}
-
-void C2VDAAdaptor::decode(int32_t bitstreamId, int ashmemFd, off_t offset, uint32_t bytesUsed) {
- CHECK(mVDA);
- ::base::SharedMemoryHandle shmHandle(::base::FileDescriptor(ashmemFd, true), 0u,
- ::base::UnguessableToken::Create());
- mVDA->Decode(media::BitstreamBuffer(bitstreamId, shmHandle, bytesUsed, offset));
-}
-
-void C2VDAAdaptor::assignPictureBuffers(uint32_t numOutputBuffers, const media::Size& size) {
- CHECK(mVDA);
- std::vector<media::PictureBuffer> buffers;
- for (uint32_t id = 0; id < numOutputBuffers; ++id) {
- buffers.push_back(media::PictureBuffer(static_cast<int32_t>(id), size));
- }
- mVDA->AssignPictureBuffers(buffers);
- mNumOutputBuffers = numOutputBuffers;
-}
-
-void C2VDAAdaptor::importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> dmabufFds,
- const std::vector<VideoFramePlane>& planes) {
- CHECK(mVDA);
- CHECK_LT(pictureBufferId, static_cast<int32_t>(mNumOutputBuffers));
-
- media::VideoPixelFormat pixelFormat;
- switch (format) {
- case HalPixelFormat::YV12:
- pixelFormat = media::PIXEL_FORMAT_YV12;
- break;
- case HalPixelFormat::NV12:
- pixelFormat = media::PIXEL_FORMAT_NV12;
- break;
- default:
- LOG_ALWAYS_FATAL("Unsupported format: 0x%x", format);
- return;
- }
-
- media::NativePixmapHandle handle;
- for (auto& fd: dmabufFds)
- handle.fds.emplace_back(::base::FileDescriptor(fd.release(), true));
-
- for (const auto& plane : planes) {
- handle.planes.emplace_back(plane.mStride, plane.mOffset, 0, 0);
- }
- mVDA->ImportBufferForPicture(pictureBufferId, pixelFormat, handle);
-}
-
-void C2VDAAdaptor::reusePictureBuffer(int32_t pictureBufferId) {
- CHECK(mVDA);
- CHECK_LT(pictureBufferId, static_cast<int32_t>(mNumOutputBuffers));
-
- mVDA->ReusePictureBuffer(pictureBufferId);
-}
-
-void C2VDAAdaptor::flush() {
- CHECK(mVDA);
- mVDA->Flush();
-}
-
-void C2VDAAdaptor::reset() {
- CHECK(mVDA);
- mVDA->Reset();
-}
-
-void C2VDAAdaptor::destroy() {
- mVDA.reset(nullptr);
- mNumOutputBuffers = 0u;
-}
-
-//static
-media::VideoDecodeAccelerator::SupportedProfiles C2VDAAdaptor::GetSupportedProfiles(
- InputCodec inputCodec) {
- // TODO(johnylin): use factory function to determine whether V4L2 stream or slice API is.
- uint32_t inputFormatFourcc;
- if (inputCodec == InputCodec::H264) {
- inputFormatFourcc = V4L2_PIX_FMT_H264;
- } else if (inputCodec == InputCodec::VP8) {
- inputFormatFourcc = V4L2_PIX_FMT_VP8;
- } else { // InputCodec::VP9
- inputFormatFourcc = V4L2_PIX_FMT_VP9;
- }
-
- media::VideoDecodeAccelerator::SupportedProfiles supportedProfiles;
- auto allProfiles = media::V4L2VideoDecodeAccelerator::GetSupportedProfiles();
- for (const auto& profile : allProfiles) {
- if (inputFormatFourcc ==
- media::V4L2Device::VideoCodecProfileToV4L2PixFmt(profile.profile, false)) {
- supportedProfiles.push_back(profile);
- }
- }
- return supportedProfiles;
-}
-
-void C2VDAAdaptor::ProvidePictureBuffers(uint32_t requested_num_of_buffers,
- media::VideoPixelFormat output_format,
- const media::Size& dimensions) {
- // per change ag/3262504, output_format from VDA is no longer used, component side always
- // allocate graphic buffers for flexible YUV format.
- (void)output_format;
-
- mClient->providePictureBuffers(requested_num_of_buffers, dimensions);
- mPictureSize = dimensions;
-}
-
-void C2VDAAdaptor::DismissPictureBuffer(int32_t picture_buffer_id) {
- mClient->dismissPictureBuffer(picture_buffer_id);
-}
-
-void C2VDAAdaptor::PictureReady(const media::Picture& picture) {
- mClient->pictureReady(picture.picture_buffer_id(), picture.bitstream_buffer_id(),
- picture.visible_rect());
-}
-
-void C2VDAAdaptor::NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) {
- mClient->notifyEndOfBitstreamBuffer(bitstream_buffer_id);
-}
-
-void C2VDAAdaptor::NotifyFlushDone() {
- mClient->notifyFlushDone();
-}
-
-void C2VDAAdaptor::NotifyResetDone() {
- mClient->notifyResetDone();
-}
-
-static VideoDecodeAcceleratorAdaptor::Result convertErrorCode(
- media::VideoDecodeAccelerator::Error error) {
- switch (error) {
- case media::VideoDecodeAccelerator::ILLEGAL_STATE:
- return VideoDecodeAcceleratorAdaptor::ILLEGAL_STATE;
- case media::VideoDecodeAccelerator::INVALID_ARGUMENT:
- return VideoDecodeAcceleratorAdaptor::INVALID_ARGUMENT;
- case media::VideoDecodeAccelerator::UNREADABLE_INPUT:
- return VideoDecodeAcceleratorAdaptor::UNREADABLE_INPUT;
- case media::VideoDecodeAccelerator::PLATFORM_FAILURE:
- return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
- default:
- ALOGE("Unknown error code: %d", static_cast<int>(error));
- return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
- }
-}
-
-void C2VDAAdaptor::NotifyError(media::VideoDecodeAccelerator::Error error) {
- mClient->notifyError(convertErrorCode(error));
-}
-
-} // namespace android
diff --git a/C2VDAAdaptorProxy.cpp b/C2VDAAdaptorProxy.cpp
deleted file mode 100644
index f18abeb..0000000
--- a/C2VDAAdaptorProxy.cpp
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "C2VDAAdaptorProxy"
-
-#include <C2ArcVideoAcceleratorFactory.h>
-#include <C2VDAAdaptorProxy.h>
-
-#include <arc/MojoProcessSupport.h>
-#include <arc/MojoThread.h>
-#include <base/bind.h>
-#include <base/files/scoped_file.h>
-#include <ui/gfx/geometry/size.h>
-#include <mojo/public/cpp/platform/platform_handle.h>
-#include <mojo/public/cpp/system/platform_handle.h>
-
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
-
-namespace mojo {
-template <>
-struct TypeConverter<::arc::VideoFramePlane, android::VideoFramePlane> {
- static ::arc::VideoFramePlane Convert(const android::VideoFramePlane& plane) {
- return ::arc::VideoFramePlane{static_cast<int32_t>(plane.mOffset),
- static_cast<int32_t>(plane.mStride)};
- }
-};
-} // namespace mojo
-
-namespace android {
-namespace arc {
-C2VDAAdaptorProxy::C2VDAAdaptorProxy()
- : C2VDAAdaptorProxy(::arc::MojoProcessSupport::getLeakyInstance()) {}
-
-C2VDAAdaptorProxy::C2VDAAdaptorProxy(::arc::MojoProcessSupport* mojoProcessSupport)
- : mClient(nullptr),
- mMojoTaskRunner(mojoProcessSupport->mojo_thread().getTaskRunner()),
- mBinding(this),
- mRelay(new ::arc::CancellationRelay()) {}
-
-C2VDAAdaptorProxy::~C2VDAAdaptorProxy() {}
-
-void C2VDAAdaptorProxy::onConnectionError(const std::string& pipeName) {
- ALOGE("onConnectionError (%s)", pipeName.c_str());
- mRelay->cancel();
- NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
-}
-
-bool C2VDAAdaptorProxy::establishChannel() {
- ALOGV("establishChannel");
- auto future = ::arc::Future<bool>::make_shared(mRelay);
- mMojoTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAAdaptorProxy::establishChannelOnMojoThread,
- ::base::Unretained(this), future));
- return future->wait() && future->get();
-}
-
-void C2VDAAdaptorProxy::establishChannelOnMojoThread(std::shared_ptr<::arc::Future<bool>> future) {
- auto& factory = ::android::GetC2ArcVideoAcceleratorFactory();
-
- if (!factory.createVideoDecodeAccelerator(mojo::MakeRequest(&mVDAPtr))) {
- future->set(false);
- return;
- }
- mVDAPtr.set_connection_error_handler(::base::Bind(&C2VDAAdaptorProxy::onConnectionError,
- ::base::Unretained(this),
- std::string("mVDAPtr (vda pipe)")));
- mVDAPtr.QueryVersion(::base::Bind(&C2VDAAdaptorProxy::onVersionReady, ::base::Unretained(this),
- std::move(future)));
-}
-
-void C2VDAAdaptorProxy::onVersionReady(std::shared_ptr<::arc::Future<bool>> future, uint32_t version) {
- ALOGI("VideoDecodeAccelerator ready (version=%d)", version);
-
- future->set(true);
-}
-
-void C2VDAAdaptorProxy::ProvidePictureBuffersDeprecated(::arc::mojom::PictureBufferFormatPtr format) {
- ALOGV("ProvidePictureBuffersDeprecated");
- mClient->providePictureBuffers(
- format->min_num_buffers,
- media::Size(format->coded_size.width(), format->coded_size.height()));
-}
-
-void C2VDAAdaptorProxy::ProvidePictureBuffers(::arc::mojom::PictureBufferFormatPtr format,
- const gfx::Rect& visible_rect) {
- ALOGV("ProvidePictureBuffers");
- mClient->providePictureBuffers(
- format->min_num_buffers,
- media::Size(format->coded_size.width(), format->coded_size.height()));
-}
-
-void C2VDAAdaptorProxy::PictureReady(::arc::mojom::PicturePtr picture) {
- ALOGV("PictureReady");
- const auto& rect = picture->crop_rect;
- mClient->pictureReady(picture->picture_buffer_id, picture->bitstream_id,
- media::Rect(rect.x(), rect.y(), rect.right(), rect.bottom()));
-}
-
-static VideoDecodeAcceleratorAdaptor::Result convertErrorCode(
- ::arc::mojom::VideoDecodeAccelerator::Result error) {
- switch (error) {
- case ::arc::mojom::VideoDecodeAccelerator::Result::ILLEGAL_STATE:
- return VideoDecodeAcceleratorAdaptor::ILLEGAL_STATE;
- case ::arc::mojom::VideoDecodeAccelerator::Result::INVALID_ARGUMENT:
- return VideoDecodeAcceleratorAdaptor::INVALID_ARGUMENT;
- case ::arc::mojom::VideoDecodeAccelerator::Result::UNREADABLE_INPUT:
- return VideoDecodeAcceleratorAdaptor::UNREADABLE_INPUT;
- case ::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE:
- return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
- case ::arc::mojom::VideoDecodeAccelerator::Result::INSUFFICIENT_RESOURCES:
- return VideoDecodeAcceleratorAdaptor::INSUFFICIENT_RESOURCES;
-
- default:
- ALOGE("Unknown error code: %d", static_cast<int>(error));
- return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
- }
-}
-
-void C2VDAAdaptorProxy::NotifyError(::arc::mojom::VideoDecodeAccelerator::Result error) {
- ALOGE("NotifyError %d", static_cast<int>(error));
- mClient->notifyError(convertErrorCode(error));
-}
-
-void C2VDAAdaptorProxy::NotifyEndOfBitstreamBuffer(int32_t bitstream_id) {
- ALOGV("NotifyEndOfBitstreamBuffer");
- mClient->notifyEndOfBitstreamBuffer(bitstream_id);
-}
-
-void C2VDAAdaptorProxy::NotifyResetDone(::arc::mojom::VideoDecodeAccelerator::Result result) {
- ALOGV("NotifyResetDone");
- // Always notify reset done to component even if result is not success. On shutdown, MediaCodec
- // will wait on shutdown complete notification despite any error. If no notification, it will be
- // hanging until timeout and force release.
- if (result != ::arc::mojom::VideoDecodeAccelerator::Result::SUCCESS) {
- ALOGE("Reset is done incorrectly.");
- NotifyError(result);
- }
- mClient->notifyResetDone();
-}
-
-void C2VDAAdaptorProxy::NotifyFlushDone(::arc::mojom::VideoDecodeAccelerator::Result result) {
- ALOGV("NotifyFlushDone");
- if (result == ::arc::mojom::VideoDecodeAccelerator::Result::CANCELLED) {
- // Flush is cancelled by a succeeding Reset(). A client expects this behavior.
- ALOGE("Flush is canceled.");
- return;
- }
- if (result != ::arc::mojom::VideoDecodeAccelerator::Result::SUCCESS) {
- ALOGE("Flush is done incorrectly.");
- NotifyError(result);
- return;
- }
- mClient->notifyFlushDone();
-}
-
-//static
-media::VideoDecodeAccelerator::SupportedProfiles C2VDAAdaptorProxy::GetSupportedProfiles(
- InputCodec inputCodec) {
- media::VideoDecodeAccelerator::SupportedProfiles profiles(1);
- profiles[0].min_resolution = media::Size(16, 16);
- profiles[0].max_resolution = media::Size(4096, 4096);
- switch (inputCodec) {
- case InputCodec::H264:
- profiles[0].profile = media::H264PROFILE_MAIN;
- break;
- case InputCodec::VP8:
- profiles[0].profile = media::VP8PROFILE_ANY;
- break;
- case InputCodec::VP9:
- profiles[0].profile = media::VP9PROFILE_PROFILE0;
- break;
- default:
- ALOGE("Unknown input codec: %d", inputCodec);
- return {};
- }
- return profiles;
-}
-
-VideoDecodeAcceleratorAdaptor::Result C2VDAAdaptorProxy::initialize(
- media::VideoCodecProfile profile, bool secureMode,
- VideoDecodeAcceleratorAdaptor::Client* client) {
- ALOGV("initialize(profile=%d, secureMode=%d)", static_cast<int>(profile),
- static_cast<int>(secureMode));
- DCHECK(client);
- DCHECK(!mClient);
- mClient = client;
-
- if (!establishChannel()) {
- ALOGE("establishChannel failed");
- return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
- }
-
- auto future = ::arc::Future<::arc::mojom::VideoDecodeAccelerator::Result>::make_shared(mRelay);
- mMojoTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAAdaptorProxy::initializeOnMojoThread,
- ::base::Unretained(this), profile, secureMode,
- ::arc::FutureCallback(future)));
-
- if (!future->wait()) {
- ALOGE("Connection lost");
- return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
- }
- return static_cast<VideoDecodeAcceleratorAdaptor::Result>(future->get());
-}
-
-void C2VDAAdaptorProxy::initializeOnMojoThread(
- const media::VideoCodecProfile profile, const bool secureMode,
- const ::arc::mojom::VideoDecodeAccelerator::InitializeCallback& cb) {
- // base::Unretained is safe because we own |mBinding|.
- mojo::InterfacePtr<::arc::mojom::VideoDecodeClient> client;
- mBinding.Bind(mojo::MakeRequest(&client));
- mBinding.set_connection_error_handler(::base::Bind(&C2VDAAdaptorProxy::onConnectionError,
- ::base::Unretained(this),
- std::string("mBinding (client pipe)")));
-
- ::arc::mojom::VideoDecodeAcceleratorConfigPtr arcConfig =
- ::arc::mojom::VideoDecodeAcceleratorConfig::New();
- arcConfig->secure_mode = secureMode;
- arcConfig->profile = static_cast<::arc::mojom::VideoCodecProfile>(profile);
- mVDAPtr->Initialize(std::move(arcConfig), std::move(client), cb);
-}
-
-void C2VDAAdaptorProxy::decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t size) {
- ALOGV("decode");
- mMojoTaskRunner->PostTask(
- FROM_HERE, ::base::Bind(&C2VDAAdaptorProxy::decodeOnMojoThread, ::base::Unretained(this),
- bitstreamId, handleFd, offset, size));
-}
-
-void C2VDAAdaptorProxy::decodeOnMojoThread(int32_t bitstreamId, int handleFd, off_t offset,
- uint32_t size) {
- mojo::ScopedHandle wrappedHandle =
- mojo::WrapPlatformHandle(mojo::PlatformHandle(::base::ScopedFD(handleFd)));
- if (!wrappedHandle.is_valid()) {
- ALOGE("failed to wrap handle");
- NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
- return;
- }
- auto bufferPtr = ::arc::mojom::BitstreamBuffer::New();
- bufferPtr->bitstream_id = bitstreamId;
- bufferPtr->handle_fd = std::move(wrappedHandle);
- bufferPtr->offset = offset;
- bufferPtr->bytes_used = size;
- mVDAPtr->Decode(std::move(bufferPtr));
-}
-
-void C2VDAAdaptorProxy::assignPictureBuffers(uint32_t numOutputBuffers,
- const media::Size& size) {
- ALOGV("assignPictureBuffers: %d", numOutputBuffers);
- mMojoTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAAdaptorProxy::assignPictureBuffersOnMojoThread,
- ::base::Unretained(this), numOutputBuffers, size));
-}
-
-void C2VDAAdaptorProxy::assignPictureBuffersOnMojoThread(uint32_t numOutputBuffers,
- const media::Size& size) {
- // TODO(crbug.com/982172): Pass size to Chrome.
- mVDAPtr->AssignPictureBuffers(numOutputBuffers);
-}
-
-void C2VDAAdaptorProxy::importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> handleFds,
- const std::vector<VideoFramePlane>& planes) {
- ALOGV("importBufferForPicture");
- mMojoTaskRunner->PostTask(
- FROM_HERE,
- ::base::BindOnce(&C2VDAAdaptorProxy::importBufferForPictureOnMojoThread,
- ::base::Unretained(this), pictureBufferId, format,
- std::move(handleFds), planes));
-}
-
-void C2VDAAdaptorProxy::importBufferForPictureOnMojoThread(
- int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> handleFds,
- const std::vector<VideoFramePlane>& planes) {
- // TODO(hiroh): Pass all the fds to Chrome.
- mojo::ScopedHandle wrappedHandle =
- mojo::WrapPlatformHandle(mojo::PlatformHandle(std::move(handleFds[0])));
- if (!wrappedHandle.is_valid()) {
- ALOGE("failed to wrap handle");
- NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
- return;
- }
-
- mVDAPtr->ImportBufferForPicture(pictureBufferId,
- static_cast<::arc::mojom::HalPixelFormat>(format),
- std::move(wrappedHandle),
- mojo::ConvertTo<std::vector<::arc::VideoFramePlane>>(planes));
-}
-
-void C2VDAAdaptorProxy::reusePictureBuffer(int32_t pictureBufferId) {
- ALOGV("reusePictureBuffer: %d", pictureBufferId);
- mMojoTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAAdaptorProxy::reusePictureBufferOnMojoThread,
- ::base::Unretained(this), pictureBufferId));
-}
-
-void C2VDAAdaptorProxy::reusePictureBufferOnMojoThread(int32_t pictureBufferId) {
- mVDAPtr->ReusePictureBuffer(pictureBufferId);
-}
-
-void C2VDAAdaptorProxy::flush() {
- ALOGV("flush");
- mMojoTaskRunner->PostTask(
- FROM_HERE, ::base::Bind(&C2VDAAdaptorProxy::flushOnMojoThread, ::base::Unretained(this)));
-}
-
-void C2VDAAdaptorProxy::flushOnMojoThread() {
- mVDAPtr->Flush(::base::Bind(&C2VDAAdaptorProxy::NotifyFlushDone, ::base::Unretained(this)));
-}
-
-void C2VDAAdaptorProxy::reset() {
- ALOGV("reset");
- mMojoTaskRunner->PostTask(
- FROM_HERE, ::base::Bind(&C2VDAAdaptorProxy::resetOnMojoThread, ::base::Unretained(this)));
-}
-
-void C2VDAAdaptorProxy::resetOnMojoThread() {
- mVDAPtr->Reset(::base::Bind(&C2VDAAdaptorProxy::NotifyResetDone, ::base::Unretained(this)));
-}
-
-void C2VDAAdaptorProxy::destroy() {
- ALOGV("destroy");
- ::arc::Future<void> future;
- ::arc::PostTaskAndSetFutureWithResult(
- mMojoTaskRunner.get(), FROM_HERE,
- ::base::Bind(&C2VDAAdaptorProxy::closeChannelOnMojoThread, ::base::Unretained(this)),
- &future);
- future.get();
-}
-
-void C2VDAAdaptorProxy::closeChannelOnMojoThread() {
- if (mBinding.is_bound()) mBinding.Close();
- mVDAPtr.reset();
-}
-
-} // namespace arc
-} // namespace android
diff --git a/C2VDAComponent.cpp b/C2VDAComponent.cpp
deleted file mode 100644
index c80b9c1..0000000
--- a/C2VDAComponent.cpp
+++ /dev/null
@@ -1,1974 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2VDAComponent"
-
-#ifdef V4L2_CODEC2_ARC
-#include <C2VDAAdaptorProxy.h>
-#else
-#include <C2VDAAdaptor.h>
-#endif
-
-#define __C2_GENERATE_GLOBAL_VARS__
-#include <C2ArcSupport.h> // to getParamReflector from arc store
-#include <C2VDAAllocatorStore.h>
-#include <C2VDAComponent.h>
-#include <C2VDAPixelFormat.h>
-#include <C2VdaBqBlockPool.h>
-#include <C2VdaPooledBlockPool.h>
-
-#include <h264_parser.h>
-
-#include <C2AllocatorGralloc.h>
-#include <C2ComponentFactory.h>
-#include <C2PlatformSupport.h>
-#include <Codec2Mapper.h>
-
-#include <base/bind.h>
-#include <base/bind_helpers.h>
-
-#include <android/hardware/graphics/common/1.0/types.h>
-#include <cutils/native_handle.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/foundation/ColorUtils.h>
-#include <ui/GraphicBuffer.h>
-#include <utils/Log.h>
-#include <utils/misc.h>
-
-#include <inttypes.h>
-#include <string.h>
-#include <algorithm>
-#include <string>
-
-#define UNUSED(expr) \
- do { \
- (void)(expr); \
- } while (0)
-
-using android::hardware::graphics::common::V1_0::BufferUsage;
-
-namespace android {
-
-namespace {
-
-// Mask against 30 bits to avoid (undefined) wraparound on signed integer.
-int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
- return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
-}
-
-// Get android_ycbcr by lockYCbCr() from block handle which uses usage without SW_READ/WRITE bits.
-android_ycbcr getGraphicBlockInfo(const C2GraphicBlock& block) {
- uint32_t width, height, format, stride, igbp_slot, generation;
- uint64_t usage, igbp_id;
- android::_UnwrapNativeCodec2GrallocMetadata(block.handle(), &width, &height,
- &format, &usage, &stride, &generation, &igbp_id,
- &igbp_slot);
- native_handle_t* grallocHandle = android::UnwrapNativeCodec2GrallocHandle(block.handle());
- sp<GraphicBuffer> buf = new GraphicBuffer(grallocHandle, GraphicBuffer::CLONE_HANDLE, width,
- height, format, 1, usage, stride);
- native_handle_delete(grallocHandle);
-
- android_ycbcr ycbcr = {};
- constexpr uint32_t kNonSWLockUsage = 0;
- int32_t status = buf->lockYCbCr(kNonSWLockUsage, &ycbcr);
- if (status != OK)
- ALOGE("lockYCbCr is failed: %d", (int) status);
- buf->unlock();
- return ycbcr;
-}
-
-// Get frame size (stride, height) of a buffer owned by |block|.
-media::Size getFrameSizeFromC2GraphicBlock(const C2GraphicBlock& block) {
- android_ycbcr ycbcr = getGraphicBlockInfo(block);
- return media::Size(ycbcr.ystride, block.height());
-}
-
-// Use basic graphic block pool/allocator as default.
-const C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_GRAPHIC;
-
-const C2String kH264DecoderName = "c2.vda.avc.decoder";
-const C2String kVP8DecoderName = "c2.vda.vp8.decoder";
-const C2String kVP9DecoderName = "c2.vda.vp9.decoder";
-const C2String kH264SecureDecoderName = "c2.vda.avc.decoder.secure";
-const C2String kVP8SecureDecoderName = "c2.vda.vp8.decoder.secure";
-const C2String kVP9SecureDecoderName = "c2.vda.vp9.decoder.secure";
-
-const uint32_t kDpbOutputBufferExtraCount = 3; // Use the same number as ACodec.
-const int kDequeueRetryDelayUs = 10000; // Wait time of dequeue buffer retry in microseconds.
-const int32_t kAllocateBufferMaxRetries = 10; // Max retry time for fetchGraphicBlock timeout.
-} // namespace
-
-static c2_status_t adaptorResultToC2Status(VideoDecodeAcceleratorAdaptor::Result result) {
- switch (result) {
- case VideoDecodeAcceleratorAdaptor::Result::SUCCESS:
- return C2_OK;
- case VideoDecodeAcceleratorAdaptor::Result::ILLEGAL_STATE:
- ALOGE("Got error: ILLEGAL_STATE");
- return C2_BAD_STATE;
- case VideoDecodeAcceleratorAdaptor::Result::INVALID_ARGUMENT:
- ALOGE("Got error: INVALID_ARGUMENT");
- return C2_BAD_VALUE;
- case VideoDecodeAcceleratorAdaptor::Result::UNREADABLE_INPUT:
- ALOGE("Got error: UNREADABLE_INPUT");
- return C2_BAD_VALUE;
- case VideoDecodeAcceleratorAdaptor::Result::PLATFORM_FAILURE:
- ALOGE("Got error: PLATFORM_FAILURE");
- return C2_CORRUPTED;
- case VideoDecodeAcceleratorAdaptor::Result::INSUFFICIENT_RESOURCES:
- ALOGE("Got error: INSUFFICIENT_RESOURCES");
- return C2_NO_MEMORY;
- default:
- ALOGE("Unrecognizable adaptor result (value = %d)...", result);
- return C2_CORRUPTED;
- }
-}
-
-// static
-C2R C2VDAComponent::IntfImpl::ProfileLevelSetter(bool mayBlock,
- C2P<C2StreamProfileLevelInfo::input>& info) {
- (void)mayBlock;
- return info.F(info.v.profile)
- .validatePossible(info.v.profile)
- .plus(info.F(info.v.level).validatePossible(info.v.level));
-}
-
-// static
-C2R C2VDAComponent::IntfImpl::SizeSetter(bool mayBlock,
- C2P<C2StreamPictureSizeInfo::output>& videoSize) {
- (void)mayBlock;
- // TODO: maybe apply block limit?
- return videoSize.F(videoSize.v.width)
- .validatePossible(videoSize.v.width)
- .plus(videoSize.F(videoSize.v.height).validatePossible(videoSize.v.height));
-}
-
-// static
-template <typename T>
-C2R C2VDAComponent::IntfImpl::DefaultColorAspectsSetter(bool mayBlock, C2P<T>& def) {
- (void)mayBlock;
- if (def.v.range > C2Color::RANGE_OTHER) {
- def.set().range = C2Color::RANGE_OTHER;
- }
- if (def.v.primaries > C2Color::PRIMARIES_OTHER) {
- def.set().primaries = C2Color::PRIMARIES_OTHER;
- }
- if (def.v.transfer > C2Color::TRANSFER_OTHER) {
- def.set().transfer = C2Color::TRANSFER_OTHER;
- }
- if (def.v.matrix > C2Color::MATRIX_OTHER) {
- def.set().matrix = C2Color::MATRIX_OTHER;
- }
- return C2R::Ok();
-}
-
-// static
-C2R C2VDAComponent::IntfImpl::MergedColorAspectsSetter(
- bool mayBlock, C2P<C2StreamColorAspectsInfo::output>& merged,
- const C2P<C2StreamColorAspectsTuning::output>& def,
- const C2P<C2StreamColorAspectsInfo::input>& coded) {
- (void)mayBlock;
- // Take coded values for all specified fields, and default values for unspecified ones.
- merged.set().range = coded.v.range == RANGE_UNSPECIFIED ? def.v.range : coded.v.range;
- merged.set().primaries =
- coded.v.primaries == PRIMARIES_UNSPECIFIED ? def.v.primaries : coded.v.primaries;
- merged.set().transfer =
- coded.v.transfer == TRANSFER_UNSPECIFIED ? def.v.transfer : coded.v.transfer;
- merged.set().matrix = coded.v.matrix == MATRIX_UNSPECIFIED ? def.v.matrix : coded.v.matrix;
- return C2R::Ok();
-}
-
-C2VDAComponent::IntfImpl::IntfImpl(C2String name, const std::shared_ptr<C2ReflectorHelper>& helper)
- : C2InterfaceHelper(helper), mInitStatus(C2_OK) {
- setDerivedInstance(this);
-
- // TODO(johnylin): use factory function to determine whether V4L2 stream or slice API is.
- char inputMime[128];
- if (name == kH264DecoderName || name == kH264SecureDecoderName) {
- strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_AVC);
- mInputCodec = InputCodec::H264;
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::input(
- 0u, C2Config::PROFILE_AVC_MAIN, C2Config::LEVEL_AVC_4))
- .withFields(
- {C2F(mProfileLevel, profile)
- .oneOf({C2Config::PROFILE_AVC_BASELINE,
- C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
- C2Config::PROFILE_AVC_MAIN,
- C2Config::PROFILE_AVC_HIGH,
- C2Config::PROFILE_AVC_CONSTRAINED_HIGH}),
- C2F(mProfileLevel, level)
- .oneOf({C2Config::LEVEL_AVC_1, C2Config::LEVEL_AVC_1B,
- C2Config::LEVEL_AVC_1_1, C2Config::LEVEL_AVC_1_2,
- C2Config::LEVEL_AVC_1_3, C2Config::LEVEL_AVC_2,
- C2Config::LEVEL_AVC_2_1, C2Config::LEVEL_AVC_2_2,
- C2Config::LEVEL_AVC_3, C2Config::LEVEL_AVC_3_1,
- C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
- C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_4_2,
- C2Config::LEVEL_AVC_5, C2Config::LEVEL_AVC_5_1,
- C2Config::LEVEL_AVC_5_2})})
- .withSetter(ProfileLevelSetter)
- .build());
- } else if (name == kVP8DecoderName || name == kVP8SecureDecoderName) {
- strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_VP8);
- mInputCodec = InputCodec::VP8;
- addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withConstValue(new C2StreamProfileLevelInfo::input(
- 0u, C2Config::PROFILE_UNUSED, C2Config::LEVEL_UNUSED))
- .build());
- } else if (name == kVP9DecoderName || name == kVP9SecureDecoderName) {
- strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_VP9);
- mInputCodec = InputCodec::VP9;
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::input(
- 0u, C2Config::PROFILE_VP9_0, C2Config::LEVEL_VP9_5))
- .withFields({C2F(mProfileLevel, profile).oneOf({C2Config::PROFILE_VP9_0}),
- C2F(mProfileLevel, level)
- .oneOf({C2Config::LEVEL_VP9_1, C2Config::LEVEL_VP9_1_1,
- C2Config::LEVEL_VP9_2, C2Config::LEVEL_VP9_2_1,
- C2Config::LEVEL_VP9_3, C2Config::LEVEL_VP9_3_1,
- C2Config::LEVEL_VP9_4, C2Config::LEVEL_VP9_4_1,
- C2Config::LEVEL_VP9_5})})
- .withSetter(ProfileLevelSetter)
- .build());
- } else {
- ALOGE("Invalid component name: %s", name.c_str());
- mInitStatus = C2_BAD_VALUE;
- return;
- }
- // Get supported profiles from VDA.
- // TODO: re-think the suitable method of getting supported profiles for both pure Android and
- // ARC++.
- media::VideoDecodeAccelerator::SupportedProfiles supportedProfiles;
-#ifdef V4L2_CODEC2_ARC
- supportedProfiles = arc::C2VDAAdaptorProxy::GetSupportedProfiles(mInputCodec);
-#else
- supportedProfiles = C2VDAAdaptor::GetSupportedProfiles(mInputCodec);
-#endif
- if (supportedProfiles.empty()) {
- ALOGE("No supported profile from input codec: %d", mInputCodec);
- mInitStatus = C2_BAD_VALUE;
- return;
- }
-
- mCodecProfile = supportedProfiles[0].profile;
-
- auto minSize = supportedProfiles[0].min_resolution;
- auto maxSize = supportedProfiles[0].max_resolution;
-
- addParameter(
- DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
- .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
- .build());
- addParameter(
- DefineParam(mInputMemoryUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
- .withConstValue(new C2StreamUsageTuning::input(
- 0u, static_cast<uint64_t>(android::hardware::graphics::common::V1_0::
- BufferUsage::VIDEO_DECODER)))
- .build());
-
- addParameter(DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
- .withConstValue(
- new C2StreamBufferTypeSetting::output(0u, C2BufferData::GRAPHIC))
- .build());
-
- addParameter(
- DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
- .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(inputMime))
- .build());
-
- addParameter(DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
- .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
- MEDIA_MIMETYPE_VIDEO_RAW))
- .build());
-
- addParameter(DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
- .withDefault(new C2StreamPictureSizeInfo::output(0u, 176, 144))
- .withFields({
- C2F(mSize, width).inRange(minSize.width(), maxSize.width(), 16),
- C2F(mSize, height).inRange(minSize.height(), maxSize.height(), 16),
- })
- .withSetter(SizeSetter)
- .build());
-
- // App may set a smaller value for maximum of input buffer size than actually required
- // by mistake. C2VDAComponent overrides it if the value specified by app is smaller than
- // the calculated value in MaxSizeCalculator().
- // This value is the default maximum of linear buffer size (kLinearBufferSize) in
- // CCodecBufferChannel.cpp.
- constexpr static size_t kLinearBufferSize = 1048576;
- struct LocalCalculator {
- static C2R MaxSizeCalculator(bool mayBlock, C2P<C2StreamMaxBufferSizeInfo::input>& me,
- const C2P<C2StreamPictureSizeInfo::output>& size) {
- (void)mayBlock;
- // TODO: Need larger size?
- me.set().value = kLinearBufferSize;
- const uint32_t width = size.v.width;
- const uint32_t height = size.v.height;
- // Enlarge the input buffer for 4k video
- if ((width > 1920 && height > 1080)) {
- me.set().value = 4 * kLinearBufferSize;
- }
- return C2R::Ok();
- }
- };
- addParameter(DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
- .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kLinearBufferSize))
- .withFields({
- C2F(mMaxInputSize, value).any(),
- })
- .calculatedAs(LocalCalculator::MaxSizeCalculator, mSize)
- .build());
-
- bool secureMode = name.find(".secure") != std::string::npos;
- C2Allocator::id_t inputAllocators[] = {secureMode ? C2VDAAllocatorStore::SECURE_LINEAR
- : C2PlatformAllocatorStore::BLOB};
-
- C2Allocator::id_t outputAllocators[] = {C2VDAAllocatorStore::V4L2_BUFFERPOOL};
-
- C2Allocator::id_t surfaceAllocator = secureMode ? C2VDAAllocatorStore::SECURE_GRAPHIC
- : C2VDAAllocatorStore::V4L2_BUFFERQUEUE;
-
- addParameter(
- DefineParam(mInputAllocatorIds, C2_PARAMKEY_INPUT_ALLOCATORS)
- .withConstValue(C2PortAllocatorsTuning::input::AllocShared(inputAllocators))
- .build());
-
- addParameter(
- DefineParam(mOutputAllocatorIds, C2_PARAMKEY_OUTPUT_ALLOCATORS)
- .withConstValue(C2PortAllocatorsTuning::output::AllocShared(outputAllocators))
- .build());
-
- addParameter(DefineParam(mOutputSurfaceAllocatorId, C2_PARAMKEY_OUTPUT_SURFACE_ALLOCATOR)
- .withConstValue(new C2PortSurfaceAllocatorTuning::output(surfaceAllocator))
- .build());
-
- C2BlockPool::local_id_t outputBlockPools[] = {kDefaultOutputBlockPool};
-
- addParameter(
- DefineParam(mOutputBlockPoolIds, C2_PARAMKEY_OUTPUT_BLOCK_POOLS)
- .withDefault(C2PortBlockPoolsTuning::output::AllocShared(outputBlockPools))
- .withFields({C2F(mOutputBlockPoolIds, m.values[0]).any(),
- C2F(mOutputBlockPoolIds, m.values).inRange(0, 1)})
- .withSetter(Setter<C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
- .build());
-
- addParameter(
- DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
- .withDefault(new C2StreamColorAspectsTuning::output(
- 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
- .withFields(
- {C2F(mDefaultColorAspects, range)
- .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
- C2F(mDefaultColorAspects, primaries)
- .inRange(C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::PRIMARIES_OTHER),
- C2F(mDefaultColorAspects, transfer)
- .inRange(C2Color::TRANSFER_UNSPECIFIED,
- C2Color::TRANSFER_OTHER),
- C2F(mDefaultColorAspects, matrix)
- .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
- .withSetter(DefaultColorAspectsSetter)
- .build());
-
- addParameter(
- DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
- .withDefault(new C2StreamColorAspectsInfo::input(
- 0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
- .withFields(
- {C2F(mCodedColorAspects, range)
- .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
- C2F(mCodedColorAspects, primaries)
- .inRange(C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::PRIMARIES_OTHER),
- C2F(mCodedColorAspects, transfer)
- .inRange(C2Color::TRANSFER_UNSPECIFIED,
- C2Color::TRANSFER_OTHER),
- C2F(mCodedColorAspects, matrix)
- .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
- .withSetter(DefaultColorAspectsSetter)
- .build());
-
- addParameter(
- DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
- .withDefault(new C2StreamColorAspectsInfo::output(
- 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
- .withFields(
- {C2F(mColorAspects, range)
- .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
- C2F(mColorAspects, primaries)
- .inRange(C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::PRIMARIES_OTHER),
- C2F(mColorAspects, transfer)
- .inRange(C2Color::TRANSFER_UNSPECIFIED,
- C2Color::TRANSFER_OTHER),
- C2F(mColorAspects, matrix)
- .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
- .withSetter(MergedColorAspectsSetter, mDefaultColorAspects, mCodedColorAspects)
- .build());
-}
-
-////////////////////////////////////////////////////////////////////////////////
-#define RETURN_ON_UNINITIALIZED_OR_ERROR() \
- do { \
- if (mHasError || mComponentState == ComponentState::UNINITIALIZED) \
- return; \
- } while (0)
-
-C2VDAComponent::VideoFormat::VideoFormat(HalPixelFormat pixelFormat, uint32_t minNumBuffers,
- media::Size codedSize, media::Rect visibleRect)
- : mPixelFormat(pixelFormat),
- mMinNumBuffers(minNumBuffers),
- mCodedSize(codedSize),
- mVisibleRect(visibleRect) {}
-
-C2VDAComponent::C2VDAComponent(C2String name, c2_node_id_t id,
- const std::shared_ptr<C2ReflectorHelper>& helper)
- : mIntfImpl(std::make_shared<IntfImpl>(name, helper)),
- mIntf(std::make_shared<SimpleInterface<IntfImpl>>(name.c_str(), id, mIntfImpl)),
- mThread("C2VDAComponentThread"),
- mDequeueThread("C2VDAComponentDequeueThread"),
- mVDAInitResult(VideoDecodeAcceleratorAdaptor::Result::ILLEGAL_STATE),
- mComponentState(ComponentState::UNINITIALIZED),
- mPendingOutputEOS(false),
- mPendingColorAspectsChange(false),
- mPendingColorAspectsChangeFrameIndex(0),
- mCodecProfile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
- mState(State::UNLOADED),
- mWeakThisFactory(this) {
- // TODO(johnylin): the client may need to know if init is failed.
- if (mIntfImpl->status() != C2_OK) {
- ALOGE("Component interface init failed (err code = %d)", mIntfImpl->status());
- return;
- }
-
- mSecureMode = name.find(".secure") != std::string::npos;
- if (!mThread.Start()) {
- ALOGE("Component thread failed to start.");
- return;
- }
- mTaskRunner = mThread.task_runner();
- mState.store(State::LOADED);
-}
-
-C2VDAComponent::~C2VDAComponent() {
- if (mThread.IsRunning()) {
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onDestroy, ::base::Unretained(this)));
- mThread.Stop();
- }
-}
-
-void C2VDAComponent::onDestroy() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onDestroy");
- if (mVDAAdaptor.get()) {
- mVDAAdaptor->destroy();
- mVDAAdaptor.reset(nullptr);
- }
- stopDequeueThread();
-}
-
-void C2VDAComponent::onStart(media::VideoCodecProfile profile, ::base::WaitableEvent* done) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onStart");
- CHECK_EQ(mComponentState, ComponentState::UNINITIALIZED);
-
-#ifdef V4L2_CODEC2_ARC
- mVDAAdaptor.reset(new arc::C2VDAAdaptorProxy());
-#else
- mVDAAdaptor.reset(new C2VDAAdaptor());
-#endif
-
- mVDAInitResult = mVDAAdaptor->initialize(profile, mSecureMode, this);
- if (mVDAInitResult == VideoDecodeAcceleratorAdaptor::Result::SUCCESS) {
- mComponentState = ComponentState::STARTED;
- mHasError = false;
- }
-
- if (!mSecureMode && mIntfImpl->getInputCodec() == InputCodec::H264) {
- // Get default color aspects on start.
- updateColorAspects();
- mPendingColorAspectsChange = false;
- }
-
- done->Signal();
-}
-
-void C2VDAComponent::onQueueWork(std::unique_ptr<C2Work> work) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onQueueWork: flags=0x%x, index=%llu, timestamp=%llu", work->input.flags,
- work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- uint32_t drainMode = NO_DRAIN;
- if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
- drainMode = DRAIN_COMPONENT_WITH_EOS;
- }
- mQueue.push({std::move(work), drainMode});
- // TODO(johnylin): set a maximum size of mQueue and check if mQueue is already full.
-
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onDequeueWork, ::base::Unretained(this)));
-}
-
-void C2VDAComponent::onDequeueWork() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onDequeueWork");
- RETURN_ON_UNINITIALIZED_OR_ERROR();
- if (mQueue.empty()) {
- return;
- }
- if (mComponentState == ComponentState::DRAINING ||
- mComponentState == ComponentState::FLUSHING) {
- ALOGV("Temporarily stop dequeueing works since component is draining/flushing.");
- return;
- }
- if (mComponentState != ComponentState::STARTED) {
- ALOGE("Work queue should be empty if the component is not in STARTED state.");
- return;
- }
-
- // Dequeue a work from mQueue.
- std::unique_ptr<C2Work> work(std::move(mQueue.front().mWork));
- auto drainMode = mQueue.front().mDrainMode;
- mQueue.pop();
-
- CHECK_LE(work->input.buffers.size(), 1u);
- bool isEmptyCSDWork = false;
- // Use frameIndex as bitstreamId.
- int32_t bitstreamId = frameIndexToBitstreamId(work->input.ordinal.frameIndex);
- if (work->input.buffers.empty()) {
- // Client may queue a work with no input buffer for either it's EOS or empty CSD, otherwise
- // every work must have one input buffer.
- isEmptyCSDWork = work->input.flags & C2FrameData::FLAG_CODEC_CONFIG;
- CHECK(drainMode != NO_DRAIN || isEmptyCSDWork);
- // Emplace a nullptr to unify the check for work done.
- ALOGV("Got a work with no input buffer! Emplace a nullptr inside.");
- work->input.buffers.emplace_back(nullptr);
- } else {
- // If input.buffers is not empty, the buffer should have meaningful content inside.
- C2ConstLinearBlock linearBlock = work->input.buffers.front()->data().linearBlocks().front();
- CHECK_GT(linearBlock.size(), 0u);
-
- // Call parseCodedColorAspects() to try to parse color aspects from bitstream only if:
- // 1) This is non-secure decoding.
- // 2) This is H264 codec.
- // 3) This input is CSD buffer (with flags FLAG_CODEC_CONFIG).
- if (!mSecureMode && (mIntfImpl->getInputCodec() == InputCodec::H264) &&
- (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
- if (parseCodedColorAspects(linearBlock)) {
- // Record current frame index, color aspects should be updated only for output
- // buffers whose frame indices are not less than this one.
- mPendingColorAspectsChange = true;
- mPendingColorAspectsChangeFrameIndex = work->input.ordinal.frameIndex.peeku();
- }
- }
- // Send input buffer to VDA for decode.
- sendInputBufferToAccelerator(linearBlock, bitstreamId);
- }
-
- CHECK_EQ(work->worklets.size(), 1u);
- work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
- work->worklets.front()->output.buffers.clear();
- work->worklets.front()->output.ordinal = work->input.ordinal;
-
- if (drainMode != NO_DRAIN) {
- mVDAAdaptor->flush();
- mComponentState = ComponentState::DRAINING;
- mPendingOutputEOS = drainMode == DRAIN_COMPONENT_WITH_EOS;
- }
-
- // Put work to mPendingWorks.
- mPendingWorks.emplace_back(std::move(work));
- if (isEmptyCSDWork) {
- // Directly report the empty CSD work as finished.
- reportWorkIfFinished(bitstreamId);
- }
-
- if (!mQueue.empty()) {
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onDequeueWork,
- ::base::Unretained(this)));
- }
-}
-
-void C2VDAComponent::onInputBufferDone(int32_t bitstreamId) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onInputBufferDone: bitstream id=%d", bitstreamId);
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- C2Work* work = getPendingWorkByBitstreamId(bitstreamId);
- if (!work) {
- reportError(C2_CORRUPTED);
- return;
- }
-
- // When the work is done, the input buffer shall be reset by component.
- work->input.buffers.front().reset();
-
- reportWorkIfFinished(bitstreamId);
-}
-
-void C2VDAComponent::onOutputBufferReturned(std::shared_ptr<C2GraphicBlock> block,
- uint32_t poolId) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onOutputBufferReturned: pool id=%u", poolId);
- if (mComponentState == ComponentState::UNINITIALIZED) {
- // Output buffer is returned from client after component is stopped. Just let the buffer be
- // released.
- return;
- }
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- if (block->width() != static_cast<uint32_t>(mOutputFormat.mCodedSize.width()) ||
- block->height() != static_cast<uint32_t>(mOutputFormat.mCodedSize.height())) {
- // Output buffer is returned after we changed output resolution. Just let the buffer be
- // released.
- ALOGV("Discard obsolete graphic block: pool id=%u", poolId);
- return;
- }
-
- GraphicBlockInfo* info = getGraphicBlockByPoolId(poolId);
- if (!info) {
- reportError(C2_CORRUPTED);
- return;
- }
- if (info->mState != GraphicBlockInfo::State::OWNED_BY_CLIENT) {
- ALOGE("Graphic block (id=%d) should be owned by client on return", info->mBlockId);
- reportError(C2_BAD_STATE);
- return;
- }
- info->mGraphicBlock = std::move(block);
- info->mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
-
- if (mPendingOutputFormat) {
- tryChangeOutputFormat();
- } else {
- // Do not pass the ownership to accelerator if this buffer will still be reused under
- // |mPendingBuffersToWork|.
- auto existingFrame = std::find_if(
- mPendingBuffersToWork.begin(), mPendingBuffersToWork.end(),
- [id = info->mBlockId](const OutputBufferInfo& o) { return o.mBlockId == id; });
- bool ownByAccelerator = existingFrame == mPendingBuffersToWork.end();
- sendOutputBufferToAccelerator(info, ownByAccelerator);
- sendOutputBufferToWorkIfAny(false /* dropIfUnavailable */);
- }
-}
-
-void C2VDAComponent::onOutputBufferDone(int32_t pictureBufferId, int32_t bitstreamId) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onOutputBufferDone: picture id=%d, bitstream id=%d", pictureBufferId, bitstreamId);
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- GraphicBlockInfo* info = getGraphicBlockById(pictureBufferId);
- if (!info) {
- reportError(C2_CORRUPTED);
- return;
- }
-
- if (info->mState == GraphicBlockInfo::State::OWNED_BY_ACCELERATOR) {
- info->mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
- }
- mPendingBuffersToWork.push_back({bitstreamId, pictureBufferId});
- sendOutputBufferToWorkIfAny(false /* dropIfUnavailable */);
-}
-
-c2_status_t C2VDAComponent::sendOutputBufferToWorkIfAny(bool dropIfUnavailable) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
-
- while (!mPendingBuffersToWork.empty()) {
- auto nextBuffer = mPendingBuffersToWork.front();
- GraphicBlockInfo* info = getGraphicBlockById(nextBuffer.mBlockId);
- if (info->mState == GraphicBlockInfo::State::OWNED_BY_ACCELERATOR) {
- ALOGE("Graphic block (id=%d) should not be owned by accelerator", info->mBlockId);
- reportError(C2_BAD_STATE);
- return C2_BAD_STATE;
- }
-
- C2Work* work = getPendingWorkByBitstreamId(nextBuffer.mBitstreamId);
- if (!work) {
- reportError(C2_CORRUPTED);
- return C2_CORRUPTED;
- }
-
- if (info->mState == GraphicBlockInfo::State::OWNED_BY_CLIENT) {
- // This buffer is the existing frame and still owned by client.
- if (!dropIfUnavailable &&
- std::find(mUndequeuedBlockIds.begin(), mUndequeuedBlockIds.end(),
- nextBuffer.mBlockId) == mUndequeuedBlockIds.end()) {
- ALOGV("Still waiting for existing frame returned from client...");
- return C2_TIMED_OUT;
- }
- ALOGV("Drop this frame...");
- sendOutputBufferToAccelerator(info, false /* ownByAccelerator */);
- work->worklets.front()->output.flags = C2FrameData::FLAG_DROP_FRAME;
- } else {
- // This buffer is ready to push into the corresponding work.
- // Output buffer will be passed to client soon along with mListener->onWorkDone_nb().
- info->mState = GraphicBlockInfo::State::OWNED_BY_CLIENT;
- mBuffersInClient++;
- updateUndequeuedBlockIds(info->mBlockId);
-
- // Attach output buffer to the work corresponded to bitstreamId.
- C2ConstGraphicBlock constBlock = info->mGraphicBlock->share(
- C2Rect(mOutputFormat.mVisibleRect.width(),
- mOutputFormat.mVisibleRect.height()),
- C2Fence());
- MarkBlockPoolDataAsShared(constBlock);
-
- std::shared_ptr<C2Buffer> buffer = C2Buffer::CreateGraphicBuffer(std::move(constBlock));
- if (mPendingColorAspectsChange &&
- work->input.ordinal.frameIndex.peeku() >= mPendingColorAspectsChangeFrameIndex) {
- updateColorAspects();
- mPendingColorAspectsChange = false;
- }
- if (mCurrentColorAspects) {
- buffer->setInfo(mCurrentColorAspects);
- }
- work->worklets.front()->output.buffers.emplace_back(std::move(buffer));
- info->mGraphicBlock.reset();
- }
-
- // Check no-show frame by timestamps for VP8/VP9 cases before reporting the current work.
- if (mIntfImpl->getInputCodec() == InputCodec::VP8 ||
- mIntfImpl->getInputCodec() == InputCodec::VP9) {
- detectNoShowFrameWorksAndReportIfFinished(&(work->input.ordinal));
- }
-
- reportWorkIfFinished(nextBuffer.mBitstreamId);
- mPendingBuffersToWork.pop_front();
- }
- return C2_OK;
-}
-
-void C2VDAComponent::updateUndequeuedBlockIds(int32_t blockId) {
- // The size of |mUndequedBlockIds| will always be the minimum buffer count for display.
- mUndequeuedBlockIds.push_back(blockId);
- mUndequeuedBlockIds.pop_front();
-}
-
-void C2VDAComponent::onDrain(uint32_t drainMode) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onDrain: mode = %u", drainMode);
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- if (!mQueue.empty()) {
- // Mark last queued work as "drain-till-here" by setting drainMode. Do not change drainMode
- // if last work already has one.
- if (mQueue.back().mDrainMode == NO_DRAIN) {
- mQueue.back().mDrainMode = drainMode;
- }
- } else if (!mPendingWorks.empty()) {
- // Neglect drain request if component is not in STARTED mode. Otherwise, enters DRAINING
- // mode and signal VDA flush immediately.
- if (mComponentState == ComponentState::STARTED) {
- mVDAAdaptor->flush();
- mComponentState = ComponentState::DRAINING;
- mPendingOutputEOS = drainMode == DRAIN_COMPONENT_WITH_EOS;
- } else {
- ALOGV("Neglect drain. Component in state: %d", mComponentState);
- }
- } else {
- // Do nothing.
- ALOGV("No buffers in VDA, drain takes no effect.");
- }
-}
-
-void C2VDAComponent::onDrainDone() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onDrainDone");
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- if (mComponentState == ComponentState::DRAINING) {
- mComponentState = ComponentState::STARTED;
- } else if (mComponentState == ComponentState::STOPPING) {
- // The client signals stop right before VDA notifies drain done. Let stop process goes.
- return;
- } else if (mComponentState != ComponentState::FLUSHING) {
- // It is reasonable to get onDrainDone in FLUSHING, which means flush is already signaled
- // and component should still expect onFlushDone callback from VDA.
- ALOGE("Unexpected state while onDrainDone(). State=%d", mComponentState);
- reportError(C2_BAD_STATE);
- return;
- }
-
- // Drop all pending existing frames and return all finished works before drain done.
- if (sendOutputBufferToWorkIfAny(true /* dropIfUnavailable */) != C2_OK) {
- return;
- }
-
- if (mPendingOutputEOS) {
- // Return EOS work.
- if (reportEOSWork() != C2_OK) {
- return;
- }
- }
-
- // Work dequeueing was stopped while component draining. Restart it.
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onDequeueWork, ::base::Unretained(this)));
-}
-
-void C2VDAComponent::onFlush() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onFlush");
- if (mComponentState == ComponentState::FLUSHING ||
- mComponentState == ComponentState::STOPPING) {
- return; // Ignore other flush request when component is flushing or stopping.
- }
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- mVDAAdaptor->reset();
- // Pop all works in mQueue and put into mAbandonedWorks.
- while (!mQueue.empty()) {
- mAbandonedWorks.emplace_back(std::move(mQueue.front().mWork));
- mQueue.pop();
- }
- mComponentState = ComponentState::FLUSHING;
-}
-
-void C2VDAComponent::onStop(::base::WaitableEvent* done) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onStop");
- // Stop call should be processed even if component is in error state.
- CHECK_NE(mComponentState, ComponentState::UNINITIALIZED);
-
- // Pop all works in mQueue and put into mAbandonedWorks.
- while (!mQueue.empty()) {
- mAbandonedWorks.emplace_back(std::move(mQueue.front().mWork));
- mQueue.pop();
- }
-
- mStopDoneEvent = done; // restore done event which shoud be signaled in onStopDone().
- mComponentState = ComponentState::STOPPING;
-
- // Immediately release VDA by calling onStopDone() if component is in error state. Otherwise,
- // send reset request to VDA and wait for callback to stop the component gracefully.
- if (mHasError) {
- ALOGV("Component is in error state. Immediately call onStopDone().");
- onStopDone();
- } else if (mComponentState != ComponentState::FLUSHING) {
- // Do not request VDA reset again before the previous one is done. If reset is already sent
- // by onFlush(), just regard the following NotifyResetDone callback as for stopping.
- mVDAAdaptor->reset();
- }
-}
-
-void C2VDAComponent::onResetDone() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- if (mComponentState == ComponentState::UNINITIALIZED) {
- return; // component is already stopped.
- }
- if (mComponentState == ComponentState::FLUSHING) {
- onFlushDone();
- } else if (mComponentState == ComponentState::STOPPING) {
- onStopDone();
- } else {
- reportError(C2_CORRUPTED);
- }
-}
-
-void C2VDAComponent::onFlushDone() {
- ALOGV("onFlushDone");
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- reportAbandonedWorks();
- mPendingBuffersToWork.clear();
- mComponentState = ComponentState::STARTED;
-
- // Work dequeueing was stopped while component flushing. Restart it.
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onDequeueWork, ::base::Unretained(this)));
-}
-
-void C2VDAComponent::onStopDone() {
- ALOGV("onStopDone");
- CHECK(mStopDoneEvent);
-
- // TODO(johnylin): At this moment, there may be C2Buffer still owned by client, do we need to
- // do something for them?
- reportAbandonedWorks();
- mPendingOutputFormat.reset();
- mPendingBuffersToWork.clear();
- if (mVDAAdaptor.get()) {
- mVDAAdaptor->destroy();
- mVDAAdaptor.reset(nullptr);
- }
-
- stopDequeueThread();
- mGraphicBlocks.clear();
-
- mStopDoneEvent->Signal();
- mStopDoneEvent = nullptr;
- mComponentState = ComponentState::UNINITIALIZED;
-}
-
-c2_status_t C2VDAComponent::setListener_vb(const std::shared_ptr<C2Component::Listener>& listener,
- c2_blocking_t mayBlock) {
- UNUSED(mayBlock);
- // TODO(johnylin): API says this method must be supported in all states, however I'm quite not
- // sure what is the use case.
- if (mState.load() != State::LOADED) {
- return C2_BAD_STATE;
- }
- mListener = listener;
- return C2_OK;
-}
-
-void C2VDAComponent::sendInputBufferToAccelerator(const C2ConstLinearBlock& input,
- int32_t bitstreamId) {
- ALOGV("sendInputBufferToAccelerator");
- int dupFd = dup(input.handle()->data[0]);
- if (dupFd < 0) {
- ALOGE("Failed to dup(%d) input buffer (bitstreamId=%d), errno=%d", input.handle()->data[0],
- bitstreamId, errno);
- reportError(C2_CORRUPTED);
- return;
- }
- ALOGV("Decode bitstream ID: %d, offset: %u size: %u", bitstreamId, input.offset(),
- input.size());
- mVDAAdaptor->decode(bitstreamId, dupFd, input.offset(), input.size());
-}
-
-std::deque<std::unique_ptr<C2Work>>::iterator C2VDAComponent::findPendingWorkByBitstreamId(
- int32_t bitstreamId) {
- return std::find_if(mPendingWorks.begin(), mPendingWorks.end(),
- [bitstreamId](const std::unique_ptr<C2Work>& w) {
- return frameIndexToBitstreamId(w->input.ordinal.frameIndex) ==
- bitstreamId;
- });
-}
-
-C2Work* C2VDAComponent::getPendingWorkByBitstreamId(int32_t bitstreamId) {
- auto workIter = findPendingWorkByBitstreamId(bitstreamId);
- if (workIter == mPendingWorks.end()) {
- ALOGE("Can't find pending work by bitstream ID: %d", bitstreamId);
- return nullptr;
- }
- return workIter->get();
-}
-
-C2VDAComponent::GraphicBlockInfo* C2VDAComponent::getGraphicBlockById(int32_t blockId) {
- if (blockId < 0 || blockId >= static_cast<int32_t>(mGraphicBlocks.size())) {
- ALOGE("getGraphicBlockById failed: id=%d", blockId);
- return nullptr;
- }
- return &mGraphicBlocks[blockId];
-}
-
-C2VDAComponent::GraphicBlockInfo* C2VDAComponent::getGraphicBlockByPoolId(uint32_t poolId) {
- auto blockIter = std::find_if(mGraphicBlocks.begin(), mGraphicBlocks.end(),
- [poolId](const GraphicBlockInfo& gb) {
- return gb.mPoolId == poolId;
- });
-
- if (blockIter == mGraphicBlocks.end()) {
- ALOGE("getGraphicBlockByPoolId failed: poolId=%u", poolId);
- return nullptr;
- }
- return &(*blockIter);
-}
-
-void C2VDAComponent::onOutputFormatChanged(std::unique_ptr<VideoFormat> format) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onOutputFormatChanged");
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- ALOGV("New output format(pixel_format=0x%x, min_num_buffers=%u, coded_size=%s, crop_rect=%s)",
- static_cast<uint32_t>(format->mPixelFormat), format->mMinNumBuffers,
- format->mCodedSize.ToString().c_str(), format->mVisibleRect.ToString().c_str());
-
- for (auto& info : mGraphicBlocks) {
- if (info.mState == GraphicBlockInfo::State::OWNED_BY_ACCELERATOR)
- info.mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
- }
-
- CHECK(!mPendingOutputFormat);
- mPendingOutputFormat = std::move(format);
- tryChangeOutputFormat();
-}
-
-void C2VDAComponent::tryChangeOutputFormat() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("tryChangeOutputFormat");
- CHECK(mPendingOutputFormat);
-
- // At this point, all output buffers should not be owned by accelerator. The component is not
- // able to know when a client will release all owned output buffers by now. But it is ok to
- // leave them to client since componenet won't own those buffers anymore.
- // TODO(johnylin): we may also set a parameter for component to keep dequeueing buffers and
- // change format only after the component owns most buffers. This may prevent
- // too many buffers are still on client's hand while component starts to
- // allocate more buffers. However, it leads latency on output format change.
- for (const auto& info : mGraphicBlocks) {
- if (info.mState == GraphicBlockInfo::State::OWNED_BY_ACCELERATOR) {
- ALOGE("Graphic block (id=%d) should not be owned by accelerator while changing format",
- info.mBlockId);
- reportError(C2_BAD_STATE);
- return;
- }
- }
-
- // Drop all pending existing frames and return all finished works before changing output format.
- if (sendOutputBufferToWorkIfAny(true /* dropIfUnavailable */) != C2_OK) {
- return;
- }
-
- CHECK_EQ(mPendingOutputFormat->mPixelFormat, HalPixelFormat::YCbCr_420_888);
-
- mOutputFormat.mPixelFormat = mPendingOutputFormat->mPixelFormat;
- mOutputFormat.mMinNumBuffers = mPendingOutputFormat->mMinNumBuffers;
- mOutputFormat.mCodedSize = mPendingOutputFormat->mCodedSize;
-
- setOutputFormatCrop(mPendingOutputFormat->mVisibleRect);
-
- c2_status_t err = allocateBuffersFromBlockAllocator(
- mPendingOutputFormat->mCodedSize,
- static_cast<uint32_t>(mPendingOutputFormat->mPixelFormat));
- if (err != C2_OK) {
- reportError(err);
- return;
- }
-
- for (auto& info : mGraphicBlocks) {
- sendOutputBufferToAccelerator(&info, true /* ownByAccelerator */);
- }
- mPendingOutputFormat.reset();
-}
-
-c2_status_t C2VDAComponent::allocateBuffersFromBlockAllocator(const media::Size& size,
- uint32_t pixelFormat) {
- ALOGV("allocateBuffersFromBlockAllocator(%s, 0x%x)", size.ToString().c_str(), pixelFormat);
-
- stopDequeueThread();
-
- size_t bufferCount = mOutputFormat.mMinNumBuffers + kDpbOutputBufferExtraCount;
-
- // Get block pool ID configured from the client.
- std::shared_ptr<C2BlockPool> blockPool;
- auto poolId = mIntfImpl->getBlockPoolId();
- ALOGI("Using C2BlockPool ID = %" PRIu64 " for allocating output buffers", poolId);
- auto err = GetCodec2BlockPool(poolId, shared_from_this(), &blockPool);
- if (err != C2_OK) {
- ALOGE("Graphic block allocator is invalid");
- reportError(err);
- return err;
- }
-
- mGraphicBlocks.clear();
-
- bool useBufferQueue = blockPool->getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE;
- size_t minBuffersForDisplay = 0;
- if (useBufferQueue) {
- ALOGV("Bufferqueue-backed block pool is used.");
- // Set requested buffer count to C2VdaBqBlockPool.
- std::shared_ptr<C2VdaBqBlockPool> bqPool =
- std::static_pointer_cast<C2VdaBqBlockPool>(blockPool);
- if (bqPool) {
- err = bqPool->requestNewBufferSet(static_cast<int32_t>(bufferCount));
- if (err != C2_OK) {
- ALOGE("failed to request new buffer set to block pool: %d", err);
- reportError(err);
- return err;
- }
- err = bqPool->getMinBuffersForDisplay(&minBuffersForDisplay);
- if (err != C2_OK) {
- ALOGE("failed to query minimum undequeued buffer count from block pool: %d", err);
- reportError(err);
- return err;
- }
- } else {
- ALOGE("static_pointer_cast C2VdaBqBlockPool failed...");
- reportError(C2_CORRUPTED);
- return C2_CORRUPTED;
- }
- } else {
- ALOGV("Bufferpool-backed block pool is used.");
- // Set requested buffer count to C2VdaPooledBlockPool.
- std::shared_ptr<C2VdaPooledBlockPool> bpPool =
- std::static_pointer_cast<C2VdaPooledBlockPool>(blockPool);
- if (bpPool) {
- err = bpPool->requestNewBufferSet(static_cast<int32_t>(bufferCount));
- if (err != C2_OK) {
- ALOGE("failed to request new buffer set to block pool: %d", err);
- reportError(err);
- return err;
- }
- minBuffersForDisplay = 0; // no undequeued buffer restriction for bufferpool.
- } else {
- ALOGE("static_pointer_cast C2VdaPooledBlockPool failed...");
- reportError(C2_CORRUPTED);
- return C2_CORRUPTED;
- }
- }
-
- ALOGV("Minimum undequeued buffer count = %zu", minBuffersForDisplay);
- mUndequeuedBlockIds.resize(minBuffersForDisplay, -1);
-
- for (size_t i = 0; i < bufferCount; ++i) {
- std::shared_ptr<C2GraphicBlock> block;
- C2MemoryUsage usage = {
- mSecureMode ? C2MemoryUsage::READ_PROTECTED : C2MemoryUsage::CPU_READ,
- static_cast<uint64_t>(BufferUsage::VIDEO_DECODER)};
-
- int32_t retries_left = kAllocateBufferMaxRetries;
- err = C2_NO_INIT;
- while (err != C2_OK) {
- err = blockPool->fetchGraphicBlock(size.width(), size.height(), pixelFormat, usage,
- &block);
- if (err == C2_TIMED_OUT && retries_left > 0) {
- ALOGD("allocate buffer timeout, %d retry time(s) left...", retries_left);
- retries_left--;
- } else if (err != C2_OK) {
- mGraphicBlocks.clear();
- ALOGE("failed to allocate buffer: %d", err);
- reportError(err);
- return err;
- }
- }
-
- uint32_t poolId;
- if (useBufferQueue) {
- err = C2VdaBqBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
- } else { // use bufferpool
- err = C2VdaPooledBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
- }
- if (err != C2_OK) {
- mGraphicBlocks.clear();
- ALOGE("failed to getPoolIdFromGraphicBlock: %d", err);
- reportError(err);
- return err;
- }
-
- if (i == 0) {
- // Allocate the output buffers.
- mVDAAdaptor->assignPictureBuffers(bufferCount, getFrameSizeFromC2GraphicBlock(*block));
- }
- if (mSecureMode) {
- appendSecureOutputBuffer(std::move(block), poolId);
- } else {
- appendOutputBuffer(std::move(block), poolId);
- }
- }
- mOutputFormat.mMinNumBuffers = bufferCount;
-
- if (!startDequeueThread(size, pixelFormat, std::move(blockPool),
- true /* resetBuffersInClient */)) {
- reportError(C2_CORRUPTED);
- return C2_CORRUPTED;
- }
- return C2_OK;
-}
-
-void C2VDAComponent::appendOutputBuffer(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId) {
- GraphicBlockInfo info;
- info.mBlockId = static_cast<int32_t>(mGraphicBlocks.size());
- info.mGraphicBlock = std::move(block);
- info.mPoolId = poolId;
-
- ALOGV("allocate graphic buffer: %p, id: %d, size: %dx%d", info.mGraphicBlock->handle(),
- info.mBlockId, info.mGraphicBlock->width(), info.mGraphicBlock->height());
-
- auto ycbcr = getGraphicBlockInfo(*info.mGraphicBlock);
- // lockYCbCr() stores offsets into the pointers
- // if given usage does not contain SW_READ/WRITE bits.
- std::vector<uint32_t> offsets = {
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ycbcr.y)),
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ycbcr.cb)),
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ycbcr.cr)),
- };
- std::vector<uint32_t> strides = {
- static_cast<uint32_t>(ycbcr.ystride),
- static_cast<uint32_t>(ycbcr.cstride),
- static_cast<uint32_t>(ycbcr.cstride),
- };
-
- bool crcb = false;
- if (offsets[C2PlanarLayout::PLANE_U] > offsets[C2PlanarLayout::PLANE_V]) {
- std::swap(offsets[C2PlanarLayout::PLANE_U], offsets[C2PlanarLayout::PLANE_V]);
- crcb = true;
- }
-
- bool semiplanar = false;
- if (ycbcr.chroma_step > offsets[C2PlanarLayout::PLANE_V] - offsets[C2PlanarLayout::PLANE_U]) {
- offsets.pop_back();
- strides.pop_back();
- semiplanar = true;
- }
-
- const uint32_t numPlanes = 3 - semiplanar;
- for (uint32_t i = 0; i < numPlanes; ++i) {
- ALOGV("plane %u: stride: %d, offset: %u", i, strides[i], offsets[i]);
- }
- info.mPixelFormat = resolveBufferFormat(crcb, semiplanar);
- ALOGV("HAL pixel format: 0x%x", static_cast<uint32_t>(info.mPixelFormat));
-
- std::vector<::base::ScopedFD> fds;
- const C2Handle* const handle = info.mGraphicBlock->handle();
- for (int i = 0; i < handle->numFds; i++) {
- fds.emplace_back(dup(handle->data[i]));
- if (!fds.back().is_valid()) {
- ALOGE("Failed to dup(%d), errno=%d", handle->data[i], errno);
- reportError(C2_CORRUPTED);
- return;
- }
- }
- ALOGV("The number of fds of output buffer: %zu", fds.size());
-
- std::vector<VideoFramePlane> passedPlanes;
- for (uint32_t i = 0; i < numPlanes; ++i) {
- CHECK_GT(strides[i], 0u);
- passedPlanes.push_back({offsets[i], strides[i]});
- }
- info.mHandles = std::move(fds);
- info.mPlanes = std::move(passedPlanes);
-
- mGraphicBlocks.push_back(std::move(info));
-}
-
-void C2VDAComponent::appendSecureOutputBuffer(std::shared_ptr<C2GraphicBlock> block,
- uint32_t poolId) {
-#ifdef V4L2_CODEC2_ARC
- android::HalPixelFormat pixelFormat = getPlatformPixelFormat();
- if (pixelFormat == android::HalPixelFormat::UNKNOWN) {
- ALOGE("Failed to get pixel format on platform.");
- reportError(C2_CORRUPTED);
- return;
- }
- CHECK(pixelFormat == android::HalPixelFormat::YV12 ||
- pixelFormat == android::HalPixelFormat::NV12);
- ALOGV("HAL pixel format: 0x%x", static_cast<uint32_t>(pixelFormat));
-
- std::vector<::base::ScopedFD> fds;
- const C2Handle* const handle = block->handle();
- for (int i = 0; i < handle->numFds; i++) {
- fds.emplace_back(dup(handle->data[i]));
- if (!fds.back().is_valid()) {
- ALOGE("Failed to dup(%d), errno=%d", handle->data[i], errno);
- reportError(C2_CORRUPTED);
- return;
- }
- }
- ALOGV("The number of fds of output buffer: %zu", fds.size());
-
- GraphicBlockInfo info;
- info.mBlockId = static_cast<int32_t>(mGraphicBlocks.size());
- info.mGraphicBlock = std::move(block);
- info.mPoolId = poolId;
- info.mPixelFormat = pixelFormat;
- info.mHandles = std::move(fds);
-
- // In secure mode, since planes are not referred in Chrome side, empty plane is valid.
- info.mPlanes.clear();
- mGraphicBlocks.push_back(std::move(info));
-#else
- ALOGE("appendSecureOutputBuffer() is not supported...");
- reportError(C2_OMITTED);
-#endif // V4L2_CODEC2_ARC
-}
-
-void C2VDAComponent::sendOutputBufferToAccelerator(GraphicBlockInfo* info, bool ownByAccelerator) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("sendOutputBufferToAccelerator index=%d ownByAccelerator=%d", info->mBlockId,
- ownByAccelerator);
-
- if (ownByAccelerator) {
- CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_COMPONENT);
- info->mState = GraphicBlockInfo::State::OWNED_BY_ACCELERATOR;
- }
-
- // mHandles is not empty for the first time the buffer is passed to VDA. In that case, VDA needs
- // to import the buffer first.
- if (!info->mHandles.empty()) {
- mVDAAdaptor->importBufferForPicture(info->mBlockId, info->mPixelFormat,
- std::move(info->mHandles), info->mPlanes);
- } else {
- mVDAAdaptor->reusePictureBuffer(info->mBlockId);
- }
-}
-
-bool C2VDAComponent::parseCodedColorAspects(const C2ConstLinearBlock& input) {
- C2ReadView view = input.map().get();
- const uint8_t* data = view.data();
- const uint32_t size = view.capacity();
-
- std::unique_ptr<media::H264Parser> h264Parser = std::make_unique<media::H264Parser>();
- h264Parser->SetStream(data, static_cast<off_t>(size));
- media::H264NALU nalu;
- media::H264Parser::Result parRes = h264Parser->AdvanceToNextNALU(&nalu);
- if (parRes != media::H264Parser::kEOStream && parRes != media::H264Parser::kOk) {
- ALOGE("H264 AdvanceToNextNALU error: %d", static_cast<int>(parRes));
- return false;
- }
- if (nalu.nal_unit_type != media::H264NALU::kSPS) {
- ALOGV("NALU is not SPS");
- return false;
- }
-
- int spsId;
- parRes = h264Parser->ParseSPS(&spsId);
- if (parRes != media::H264Parser::kEOStream && parRes != media::H264Parser::kOk) {
- ALOGE("H264 ParseSPS error: %d", static_cast<int>(parRes));
- return false;
- }
-
- // Parse ISO color aspects from H264 SPS bitstream.
- const media::H264SPS* sps = h264Parser->GetSPS(spsId);
- if (!sps->colour_description_present_flag) {
- ALOGV("No Color Description in SPS");
- return false;
- }
- int32_t primaries = sps->colour_primaries;
- int32_t transfer = sps->transfer_characteristics;
- int32_t coeffs = sps->matrix_coefficients;
- bool fullRange = sps->video_full_range_flag;
-
- // Convert ISO color aspects to ColorUtils::ColorAspects.
- ColorAspects colorAspects;
- ColorUtils::convertIsoColorAspectsToCodecAspects(primaries, transfer, coeffs, fullRange,
- colorAspects);
- ALOGV("Parsed ColorAspects from bitstream: (R:%d, P:%d, M:%d, T:%d)", colorAspects.mRange,
- colorAspects.mPrimaries, colorAspects.mMatrixCoeffs, colorAspects.mTransfer);
-
- // Map ColorUtils::ColorAspects to C2StreamColorAspectsInfo::input parameter.
- C2StreamColorAspectsInfo::input codedAspects = {0u};
- if (!C2Mapper::map(colorAspects.mPrimaries, &codedAspects.primaries)) {
- codedAspects.primaries = C2Color::PRIMARIES_UNSPECIFIED;
- }
- if (!C2Mapper::map(colorAspects.mRange, &codedAspects.range)) {
- codedAspects.range = C2Color::RANGE_UNSPECIFIED;
- }
- if (!C2Mapper::map(colorAspects.mMatrixCoeffs, &codedAspects.matrix)) {
- codedAspects.matrix = C2Color::MATRIX_UNSPECIFIED;
- }
- if (!C2Mapper::map(colorAspects.mTransfer, &codedAspects.transfer)) {
- codedAspects.transfer = C2Color::TRANSFER_UNSPECIFIED;
- }
- // Configure to interface.
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- c2_status_t status = mIntfImpl->config({&codedAspects}, C2_MAY_BLOCK, &failures);
- if (status != C2_OK) {
- ALOGE("Failed to config color aspects to interface, error: %d", status);
- return false;
- }
- return true;
-}
-
-c2_status_t C2VDAComponent::updateColorAspects() {
- ALOGV("updateColorAspects");
- std::unique_ptr<C2StreamColorAspectsInfo::output> colorAspects =
- std::make_unique<C2StreamColorAspectsInfo::output>(
- 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED);
- c2_status_t status = mIntfImpl->query({colorAspects.get()}, {}, C2_DONT_BLOCK, nullptr);
- if (status != C2_OK) {
- ALOGE("Failed to query color aspects, error: %d", status);
- return status;
- }
- mCurrentColorAspects = std::move(colorAspects);
- return C2_OK;
-}
-
-void C2VDAComponent::onVisibleRectChanged(const media::Rect& cropRect) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onVisibleRectChanged");
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- // We should make sure there is no pending output format change. That is, the input cropRect is
- // corresponding to current output format.
- CHECK(mPendingOutputFormat == nullptr);
- setOutputFormatCrop(cropRect);
-}
-
-void C2VDAComponent::setOutputFormatCrop(const media::Rect& cropRect) {
- ALOGV("setOutputFormatCrop(%dx%d)", cropRect.width(), cropRect.height());
- // This visible rect should be set as crop window for each C2ConstGraphicBlock passed to
- // framework.
- mOutputFormat.mVisibleRect = cropRect;
-}
-
-void C2VDAComponent::onSurfaceChanged() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- ALOGV("onSurfaceChanged");
-
- if (mComponentState == ComponentState::UNINITIALIZED) {
- return; // Component is already stopped, no need to update graphic blocks.
- }
- RETURN_ON_UNINITIALIZED_OR_ERROR();
-
- stopDequeueThread();
-
- // Get block pool ID configured from the client.
- std::shared_ptr<C2BlockPool> blockPool;
- auto blockPoolId = mIntfImpl->getBlockPoolId();
- ALOGI("Retrieving C2BlockPool ID = %" PRIu64 " for updating output buffers", blockPoolId);
- auto err = GetCodec2BlockPool(blockPoolId, shared_from_this(), &blockPool);
- if (err != C2_OK) {
- ALOGE("Graphic block allocator is invalid");
- reportError(err);
- return;
- }
- if (blockPool->getAllocatorId() != C2PlatformAllocatorStore::BUFFERQUEUE) {
- ALOGE("Only Bufferqueue-backed block pool would need to change surface.");
- reportError(C2_CORRUPTED);
- return;
- }
-
- std::shared_ptr<C2VdaBqBlockPool> bqPool =
- std::static_pointer_cast<C2VdaBqBlockPool>(blockPool);
- if (!bqPool) {
- ALOGE("static_pointer_cast C2VdaBqBlockPool failed...");
- reportError(C2_CORRUPTED);
- return;
- }
-
- size_t minBuffersForDisplay = 0;
- err = bqPool->getMinBuffersForDisplay(&minBuffersForDisplay);
- if (err != C2_OK) {
- ALOGE("failed to query minimum undequeued buffer count from block pool: %d", err);
- reportError(err);
- return;
- }
- ALOGV("Minimum undequeued buffer count = %zu", minBuffersForDisplay);
- mUndequeuedBlockIds.resize(minBuffersForDisplay, -1);
-
- for (auto& info : mGraphicBlocks) {
- bool willCancel = (info.mGraphicBlock == nullptr);
- uint32_t oldSlot = info.mPoolId;
- ALOGV("Updating graphic block #%d: slot = %u, willCancel = %d", info.mBlockId, oldSlot,
- willCancel);
- uint32_t newSlot;
- std::shared_ptr<C2GraphicBlock> block;
- err = bqPool->updateGraphicBlock(willCancel, oldSlot, &newSlot, &block);
- if (err == C2_CANCELED) {
- // There may be a chance that a task in task runner before onSurfaceChange triggers
- // output format change. If so, block pool will return C2_CANCELED and no need to
- // updateGraphicBlock anymore.
- return;
- }
- if (err != C2_OK) {
- ALOGE("failed to update graphic block from block pool: %d", err);
- reportError(err);
- return;
- }
-
- // Update slot index.
- info.mPoolId = newSlot;
- // Update C2GraphicBlock if |willCancel| is false. Note that although the old C2GraphicBlock
- // will be released, the block pool data destructor won't do detachBuffer to new surface
- // because the producer ID is not matched.
- if (!willCancel) {
- info.mGraphicBlock = std::move(block);
- }
- }
-
- if (!startDequeueThread(mOutputFormat.mCodedSize,
- static_cast<uint32_t>(mOutputFormat.mPixelFormat), std::move(blockPool),
- false /* resetBuffersInClient */)) {
- reportError(C2_CORRUPTED);
- }
-}
-
-c2_status_t C2VDAComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
- if (mState.load() != State::RUNNING) {
- return C2_BAD_STATE;
- }
- while (!items->empty()) {
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onQueueWork, ::base::Unretained(this),
- ::base::Passed(&items->front())));
- items->pop_front();
- }
- return C2_OK;
-}
-
-c2_status_t C2VDAComponent::announce_nb(const std::vector<C2WorkOutline>& items) {
- UNUSED(items);
- return C2_OMITTED; // Tunneling is not supported by now
-}
-
-c2_status_t C2VDAComponent::flush_sm(flush_mode_t mode,
- std::list<std::unique_ptr<C2Work>>* const flushedWork) {
- if (mode != FLUSH_COMPONENT) {
- return C2_OMITTED; // Tunneling is not supported by now
- }
- if (mState.load() != State::RUNNING) {
- return C2_BAD_STATE;
- }
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onFlush,
- ::base::Unretained(this)));
- // Instead of |flushedWork|, abandoned works will be returned via onWorkDone_nb() callback.
- return C2_OK;
-}
-
-c2_status_t C2VDAComponent::drain_nb(drain_mode_t mode) {
- if (mode != DRAIN_COMPONENT_WITH_EOS && mode != DRAIN_COMPONENT_NO_EOS) {
- return C2_OMITTED; // Tunneling is not supported by now
- }
- if (mState.load() != State::RUNNING) {
- return C2_BAD_STATE;
- }
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onDrain, ::base::Unretained(this),
- static_cast<uint32_t>(mode)));
- return C2_OK;
-}
-
-c2_status_t C2VDAComponent::start() {
- // Use mStartStopLock to block other asynchronously start/stop calls.
- std::lock_guard<std::mutex> lock(mStartStopLock);
-
- if (mState.load() != State::LOADED) {
- return C2_BAD_STATE; // start() is only supported when component is in LOADED state.
- }
-
- mCodecProfile = mIntfImpl->getCodecProfile();
- ALOGI("get parameter: mCodecProfile = %d", static_cast<int>(mCodecProfile));
-
- ::base::WaitableEvent done(::base::WaitableEvent::ResetPolicy::AUTOMATIC,
- ::base::WaitableEvent::InitialState::NOT_SIGNALED);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onStart, ::base::Unretained(this),
- mCodecProfile, &done));
- done.Wait();
- c2_status_t c2Status;
- if (mVDAInitResult == VideoDecodeAcceleratorAdaptor::Result::PLATFORM_FAILURE) {
- // Regard unexpected VDA initialization failure as no more resources, because we still don't
- // have a formal way to obtain the max capable number of concurrent decoders.
- c2Status = C2_NO_MEMORY;
- } else {
- c2Status = adaptorResultToC2Status(mVDAInitResult);
- }
-
- if (c2Status != C2_OK) {
- ALOGE("Failed to start component due to VDA error...");
- return c2Status;
- }
- mState.store(State::RUNNING);
- return C2_OK;
-}
-
-// Stop call should be valid in all states (even in error).
-c2_status_t C2VDAComponent::stop() {
- // Use mStartStopLock to block other asynchronously start/stop calls.
- std::lock_guard<std::mutex> lock(mStartStopLock);
-
- auto state = mState.load();
- if (!(state == State::RUNNING || state == State::ERROR)) {
- return C2_OK; // Component is already in stopped state.
- }
-
- ::base::WaitableEvent done(::base::WaitableEvent::ResetPolicy::AUTOMATIC,
- ::base::WaitableEvent::InitialState::NOT_SIGNALED);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onStop, ::base::Unretained(this), &done));
- done.Wait();
- mState.store(State::LOADED);
- return C2_OK;
-}
-
-c2_status_t C2VDAComponent::reset() {
- return stop();
- // TODO(johnylin): reset is different than stop that it could be called in any state.
- // TODO(johnylin): when reset is called, set ComponentInterface to default values.
-}
-
-c2_status_t C2VDAComponent::release() {
- return reset();
-}
-
-std::shared_ptr<C2ComponentInterface> C2VDAComponent::intf() {
- return mIntf;
-}
-
-void C2VDAComponent::providePictureBuffers(uint32_t minNumBuffers, const media::Size& codedSize) {
- // Always use fexible pixel 420 format YCbCr_420_888 in Android.
- // Uses coded size for crop rect while it is not available.
- auto format = std::make_unique<VideoFormat>(HalPixelFormat::YCbCr_420_888, minNumBuffers,
- codedSize, media::Rect(codedSize));
-
- // Set mRequestedVisibleRect to default.
- mRequestedVisibleRect = media::Rect();
-
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onOutputFormatChanged,
- ::base::Unretained(this),
- ::base::Passed(&format)));
-}
-
-void C2VDAComponent::dismissPictureBuffer(int32_t pictureBufferId) {
- UNUSED(pictureBufferId);
- // no ops
-}
-
-void C2VDAComponent::pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
- const media::Rect& cropRect) {
- UNUSED(pictureBufferId);
- UNUSED(bitstreamId);
-
- if (mRequestedVisibleRect != cropRect) {
- mRequestedVisibleRect = cropRect;
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onVisibleRectChanged,
- ::base::Unretained(this), cropRect));
- }
-
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onOutputBufferDone,
- ::base::Unretained(this),
- pictureBufferId, bitstreamId));
-}
-
-void C2VDAComponent::notifyEndOfBitstreamBuffer(int32_t bitstreamId) {
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onInputBufferDone,
- ::base::Unretained(this), bitstreamId));
-}
-
-void C2VDAComponent::notifyFlushDone() {
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onDrainDone, ::base::Unretained(this)));
-}
-
-void C2VDAComponent::notifyResetDone() {
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onResetDone, ::base::Unretained(this)));
-}
-
-void C2VDAComponent::notifyError(VideoDecodeAcceleratorAdaptor::Result error) {
- ALOGE("Got notifyError from VDA...");
- c2_status_t err = adaptorResultToC2Status(error);
- if (err == C2_OK) {
- ALOGW("Shouldn't get SUCCESS err code in NotifyError(). Skip it...");
- return;
- }
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::reportError, ::base::Unretained(this), err));
-}
-
-void C2VDAComponent::detectNoShowFrameWorksAndReportIfFinished(
- const C2WorkOrdinalStruct* currOrdinal) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- std::vector<int32_t> noShowFrameBitstreamIds;
-
- for (auto& work : mPendingWorks) {
- // A work in mPendingWorks would be considered to have no-show frame if there is no
- // corresponding output buffer returned while the one of the work with latter timestamp is
- // already returned. (VDA is outputted in display order.)
- // Note: this fix is workable but not most appropriate because we rely on timestamps which
- // may wrap around or be uncontinuous in adaptive skip-back case. The ideal fix should parse
- // show_frame flag for each frame by either framework, component, or VDA, and propogate
- // along the stack.
- // TODO(johnylin): Discuss with framework team to handle no-show frame properly.
- if (isNoShowFrameWork(work.get(), currOrdinal)) {
- // Mark FLAG_DROP_FRAME for no-show frame work.
- work->worklets.front()->output.flags = C2FrameData::FLAG_DROP_FRAME;
-
- // We need to call reportWorkIfFinished() for all detected no-show frame works. However,
- // we should do it after the detection loop since reportWorkIfFinished() may erase
- // entries in mPendingWorks.
- int32_t bitstreamId = frameIndexToBitstreamId(work->input.ordinal.frameIndex);
- noShowFrameBitstreamIds.push_back(bitstreamId);
- ALOGV("Detected no-show frame work index=%llu timestamp=%llu",
- work->input.ordinal.frameIndex.peekull(),
- work->input.ordinal.timestamp.peekull());
- }
- }
-
- for (int32_t bitstreamId : noShowFrameBitstreamIds) {
- // Try to report works with no-show frame.
- reportWorkIfFinished(bitstreamId);
- }
-}
-
-bool C2VDAComponent::isNoShowFrameWork(const C2Work* work,
- const C2WorkOrdinalStruct* currOrdinal) const {
- if (work->input.ordinal.timestamp >= currOrdinal->timestamp) {
- // Only consider no-show frame if the timestamp is less than the current ordinal.
- return false;
- }
- if (work->input.ordinal.frameIndex >= currOrdinal->frameIndex) {
- // Only consider no-show frame if the frame index is less than the current ordinal. This is
- // required to tell apart flushless skip-back case.
- return false;
- }
- if (!work->worklets.front()->output.buffers.empty()) {
- // The wrok already have the returned output buffer.
- return false;
- }
- if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) ||
- (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) ||
- (work->worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME)) {
- // No-show frame should not be EOS work, CSD work, or work with dropped frame.
- return false;
- }
- return true; // This work contains no-show frame.
-}
-
-void C2VDAComponent::reportWorkIfFinished(int32_t bitstreamId) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
-
- auto workIter = findPendingWorkByBitstreamId(bitstreamId);
- if (workIter == mPendingWorks.end()) {
- reportError(C2_CORRUPTED);
- return;
- }
-
- // EOS work will not be reported here. reportEOSWork() does it.
- auto work = workIter->get();
- if (isWorkDone(work)) {
- if (work->worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME) {
- // A work with neither flags nor output buffer would be treated as no-corresponding
- // output by C2 framework, and regain pipeline capacity immediately.
- // TODO(johnylin): output FLAG_DROP_FRAME flag after it could be handled correctly.
- work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
- }
- work->result = C2_OK;
- work->workletsProcessed = static_cast<uint32_t>(work->worklets.size());
-
- ALOGV("Reported finished work index=%llu", work->input.ordinal.frameIndex.peekull());
- std::list<std::unique_ptr<C2Work>> finishedWorks;
- finishedWorks.emplace_back(std::move(*workIter));
- mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
- mPendingWorks.erase(workIter);
- }
-}
-
-bool C2VDAComponent::isWorkDone(const C2Work* work) const {
- if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
- // This is EOS work and should be processed by reportEOSWork().
- return false;
- }
- if (work->input.buffers.front()) {
- // Input buffer is still owned by VDA.
- return false;
- }
- if (mPendingOutputEOS && mPendingWorks.size() == 1u) {
- // If mPendingOutputEOS is true, the last returned work should be marked EOS flag and
- // returned by reportEOSWork() instead.
- return false;
- }
- if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) &&
- !(work->worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME) &&
- work->worklets.front()->output.buffers.empty()) {
- // Unless the input is CSD or the output is dropped, this work is not done because the
- // output buffer is not returned from VDA yet.
- return false;
- }
- return true; // This work is done.
-}
-
-c2_status_t C2VDAComponent::reportEOSWork() {
- ALOGV("reportEOSWork");
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- // In this moment all works prior to EOS work should be done and returned to listener.
- if (mPendingWorks.size() != 1u) { // only EOS work left
- ALOGE("It shouldn't have remaining works in mPendingWorks except EOS work.");
- reportError(C2_CORRUPTED);
- return C2_CORRUPTED;
- }
-
- mPendingOutputEOS = false;
-
- std::unique_ptr<C2Work> eosWork(std::move(mPendingWorks.front()));
- mPendingWorks.pop_front();
- if (!eosWork->input.buffers.empty()) {
- eosWork->input.buffers.front().reset();
- }
- eosWork->result = C2_OK;
- eosWork->workletsProcessed = static_cast<uint32_t>(eosWork->worklets.size());
- eosWork->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
-
- std::list<std::unique_ptr<C2Work>> finishedWorks;
- finishedWorks.emplace_back(std::move(eosWork));
- mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
- return C2_OK;
-}
-
-void C2VDAComponent::reportAbandonedWorks() {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- std::list<std::unique_ptr<C2Work>> abandonedWorks;
-
- while (!mPendingWorks.empty()) {
- std::unique_ptr<C2Work> work(std::move(mPendingWorks.front()));
- mPendingWorks.pop_front();
-
- // TODO: correlate the definition of flushed work result to framework.
- work->result = C2_NOT_FOUND;
- // When the work is abandoned, buffer in input.buffers shall reset by component.
- if (!work->input.buffers.empty()) {
- work->input.buffers.front().reset();
- }
- abandonedWorks.emplace_back(std::move(work));
- }
-
- for (auto& work : mAbandonedWorks) {
- // TODO: correlate the definition of flushed work result to framework.
- work->result = C2_NOT_FOUND;
- // When the work is abandoned, buffer in input.buffers shall reset by component.
- if (!work->input.buffers.empty()) {
- work->input.buffers.front().reset();
- }
- abandonedWorks.emplace_back(std::move(work));
- }
- mAbandonedWorks.clear();
-
- // Pending EOS work will be abandoned here due to component flush if any.
- mPendingOutputEOS = false;
-
- if (!abandonedWorks.empty()) {
- mListener->onWorkDone_nb(shared_from_this(), std::move(abandonedWorks));
- }
-}
-
-void C2VDAComponent::reportError(c2_status_t error) {
- DCHECK(mTaskRunner->BelongsToCurrentThread());
- mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
- mHasError = true;
- mState.store(State::ERROR);
-}
-
-bool C2VDAComponent::startDequeueThread(const media::Size& size, uint32_t pixelFormat,
- std::shared_ptr<C2BlockPool> blockPool,
- bool resetBuffersInClient) {
- CHECK(!mDequeueThread.IsRunning());
- if (!mDequeueThread.Start()) {
- ALOGE("failed to start dequeue thread!!");
- return false;
- }
- mDequeueLoopStop.store(false);
- if (resetBuffersInClient) {
- mBuffersInClient.store(0u);
- }
- mDequeueThread.task_runner()->PostTask(
- FROM_HERE, ::base::Bind(&C2VDAComponent::dequeueThreadLoop, ::base::Unretained(this),
- size, pixelFormat, std::move(blockPool)));
- return true;
-}
-
-void C2VDAComponent::stopDequeueThread() {
- if (mDequeueThread.IsRunning()) {
- mDequeueLoopStop.store(true);
- mDequeueThread.Stop();
- }
-}
-
-void C2VDAComponent::dequeueThreadLoop(const media::Size& size, uint32_t pixelFormat,
- std::shared_ptr<C2BlockPool> blockPool) {
- ALOGV("dequeueThreadLoop starts");
- DCHECK(mDequeueThread.task_runner()->BelongsToCurrentThread());
-
- while (!mDequeueLoopStop.load()) {
- if (mBuffersInClient.load() == 0) {
- ::usleep(kDequeueRetryDelayUs); // wait for retry
- continue;
- }
- std::shared_ptr<C2GraphicBlock> block;
- C2MemoryUsage usage = {
- mSecureMode ? C2MemoryUsage::READ_PROTECTED : C2MemoryUsage::CPU_READ,
- static_cast<uint64_t>(BufferUsage::VIDEO_DECODER)};
- auto err = blockPool->fetchGraphicBlock(size.width(), size.height(), pixelFormat, usage,
- &block);
- if (err == C2_TIMED_OUT) {
- // Mutexes often do not care for FIFO. Practically the thread who is locking the mutex
- // usually will be granted to lock again right thereafter. To make this loop not too
- // bossy, the simpliest way is to add a short delay to the next time acquiring the
- // lock. TODO (b/118354314): replace this if there is better solution.
- ::usleep(1);
- continue; // wait for retry
- }
- if (err == C2_BAD_STATE) {
- ALOGV("Got informed from block pool surface is changed.");
- mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onSurfaceChanged,
- ::base::Unretained(this)));
- break; // terminate the loop, will be resumed after onSurfaceChanged().
- }
- if (err == C2_OK) {
- uint32_t poolId;
- if (blockPool->getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
- err = C2VdaBqBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
- } else { // bufferpool
- err = C2VdaPooledBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
- }
-
- if (err != C2_OK) {
- ALOGE("dequeueThreadLoop got error on getPoolIdFromGraphicBlock: %d", err);
- break;
- }
- mTaskRunner->PostTask(FROM_HERE,
- ::base::Bind(&C2VDAComponent::onOutputBufferReturned,
- ::base::Unretained(this), std::move(block), poolId));
- mBuffersInClient--;
- } else {
- ALOGE("dequeueThreadLoop got error: %d", err);
- break;
- }
- }
- ALOGV("dequeueThreadLoop terminates");
-}
-
-class C2VDAComponentFactory : public C2ComponentFactory {
-public:
- C2VDAComponentFactory(C2String decoderName)
- : mDecoderName(decoderName),
- mReflector(std::static_pointer_cast<C2ReflectorHelper>(
- GetCodec2ArcComponentStore()->getParamReflector())){};
-
- c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
- ComponentDeleter deleter) override {
- UNUSED(deleter);
- *component = std::shared_ptr<C2Component>(new C2VDAComponent(mDecoderName, id, mReflector));
- return C2_OK;
- }
- c2_status_t createInterface(c2_node_id_t id,
- std::shared_ptr<C2ComponentInterface>* const interface,
- InterfaceDeleter deleter) override {
- UNUSED(deleter);
- *interface =
- std::shared_ptr<C2ComponentInterface>(new SimpleInterface<C2VDAComponent::IntfImpl>(
- mDecoderName.c_str(), id,
- std::make_shared<C2VDAComponent::IntfImpl>(mDecoderName, mReflector)));
- return C2_OK;
- }
- ~C2VDAComponentFactory() override = default;
-
-private:
- const C2String mDecoderName;
- std::shared_ptr<C2ReflectorHelper> mReflector;
-};
-} // namespace android
-
-extern "C" ::C2ComponentFactory* CreateC2VDAH264Factory(bool secureMode) {
- ALOGV("in %s (secureMode=%d)", __func__, secureMode);
- return secureMode ? new ::android::C2VDAComponentFactory(android::kH264SecureDecoderName)
- : new ::android::C2VDAComponentFactory(android::kH264DecoderName);
-}
-
-extern "C" void DestroyC2VDAH264Factory(::C2ComponentFactory* factory) {
- ALOGV("in %s", __func__);
- delete factory;
-}
-
-extern "C" ::C2ComponentFactory* CreateC2VDAVP8Factory(bool secureMode) {
- ALOGV("in %s (secureMode=%d)", __func__, secureMode);
- return secureMode ? new ::android::C2VDAComponentFactory(android::kVP8SecureDecoderName)
- : new ::android::C2VDAComponentFactory(android::kVP8DecoderName);
-}
-
-extern "C" void DestroyC2VDAVP8Factory(::C2ComponentFactory* factory) {
- ALOGV("in %s", __func__);
- delete factory;
-}
-
-extern "C" ::C2ComponentFactory* CreateC2VDAVP9Factory(bool secureMode) {
- ALOGV("in %s (secureMode=%d)", __func__, secureMode);
- return secureMode ? new ::android::C2VDAComponentFactory(android::kVP9SecureDecoderName)
- : new ::android::C2VDAComponentFactory(android::kVP9DecoderName);
-}
-
-extern "C" void DestroyC2VDAVP9Factory(::C2ComponentFactory* factory) {
- ALOGV("in %s", __func__);
- delete factory;
-}
diff --git a/accel/Android.bp b/accel/Android.bp
index 214376a..1bf4805 100644
--- a/accel/Android.bp
+++ b/accel/Android.bp
@@ -38,7 +38,6 @@
"vp9_picture.cc",
"vp9_raw_bits_reader.cc",
"vp9_uncompressed_header_parser.cc",
- "unaligned_shared_memory.cc",
],
shared_libs: ["libchrome"],
diff --git a/accel/unaligned_shared_memory.cc b/accel/unaligned_shared_memory.cc
deleted file mode 100644
index 77d90c9..0000000
--- a/accel/unaligned_shared_memory.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 690c8b268457
-// Note: only necessary functions are ported.
-
-#include "unaligned_shared_memory.h"
-
-#include <limits>
-
-#include "base/logging.h"
-#include "base/sys_info.h"
-
-namespace media {
-
-namespace {
-
-bool CalculateMisalignmentAndOffset(size_t size,
- off_t offset,
- size_t* misalignment,
- off_t* adjusted_offset) {
- /* | | | | | | shm pages
- * | offset (may exceed max size_t)
- * |-----------| size
- * |-| misalignment
- * | adjusted offset
- * |-------------| requested mapping
- */
-
- // Note: result of % computation may be off_t or size_t, depending on the
- // relative ranks of those types. In any case we assume that
- // VMAllocationGranularity() fits in both types, so the final result does too.
- DCHECK_GE(offset, 0);
- *misalignment = offset % base::SysInfo::VMAllocationGranularity();
-
- // Above this |max_size|, |size| + |*misalignment| overflows.
- size_t max_size = std::numeric_limits<size_t>::max() - *misalignment;
- if (size > max_size) {
- DLOG(ERROR) << "Invalid size";
- return false;
- }
-
- *adjusted_offset = offset - static_cast<off_t>(*misalignment);
-
- return true;
-}
-
-} // namespace
-UnalignedSharedMemory::UnalignedSharedMemory(
- const base::SharedMemoryHandle& handle,
- size_t size,
- bool read_only)
- : shm_(handle, read_only), size_(size) {}
-
-UnalignedSharedMemory::~UnalignedSharedMemory() = default;
-
-bool UnalignedSharedMemory::MapAt(off_t offset, size_t size) {
- if (offset < 0) {
- DLOG(ERROR) << "Invalid offset";
- return false;
- }
-
- size_t misalignment;
- off_t adjusted_offset;
-
- if (!CalculateMisalignmentAndOffset(size, offset, &misalignment,
- &adjusted_offset)) {
- return false;
- }
-
- if (!shm_.MapAt(adjusted_offset, size + misalignment)) {
- DLOG(ERROR) << "Failed to map shared memory";
- return false;
- }
- mapping_ptr_ = static_cast<uint8_t*>(shm_.memory());
-
- DCHECK(mapping_ptr_);
- // There should be no way for the IsValid() checks above to succeed and yet
- // |mapping_ptr_| remain null. However, since an invalid but non-null pointer
- // could be disastrous an extra-careful check is done.
- if (mapping_ptr_)
- mapping_ptr_ += misalignment;
- return true;
-}
-
-} // namespace media
diff --git a/accel/unaligned_shared_memory.h b/accel/unaligned_shared_memory.h
deleted file mode 100644
index 88b594c..0000000
--- a/accel/unaligned_shared_memory.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Note: ported from Chromium commit head: 690c8b268457
-// Note: only necessary functions are ported.
-// Note: The version in Chromium has moved away from using base::SharedMemory.
-
-#ifndef UNALIGNED_SHARED_MEMORY_H_
-#define UNALIGNED_SHARED_MEMORY_H_
-
-#include <stdint.h>
-
-#include "base/macros.h"
-#include "base/memory/shared_memory.h"
-
-namespace media {
-
-// Wrapper over base::SharedMemory that can be mapped at unaligned offsets.
-// DEPRECATED! See https://crbug.com/795291.
-class UnalignedSharedMemory {
- public:
- // Creates an |UnalignedSharedMemory| instance from a
- // |SharedMemoryHandle|. |size| sets the maximum size that may be mapped. This
- // instance will own the handle.
- UnalignedSharedMemory(const base::SharedMemoryHandle& handle,
- size_t size,
- bool read_only);
-
- ~UnalignedSharedMemory();
-
- // Map the shared memory region. Note that the passed |size| parameter should
- // be less than or equal to |size()|.
- bool MapAt(off_t offset, size_t size);
- size_t size() const { return size_; }
- void* memory() const { return mapping_ptr_; }
-
- private:
- // Either |shm_| or the set |region_| and one of the mappings are active,
- // depending on which constructor was used and the value of read_only_. These
- // variables are held to keep the shared memory mapping valid for the lifetime
- // of this instance.
- base::SharedMemory shm_;
-
- // The size of the region associated with |shm_|.
- size_t size_;
-
- // Pointer to the unaligned data in the shared memory mapping.
- uint8_t* mapping_ptr_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(UnalignedSharedMemory);
-};
-
-} // namespace media
-
-#endif // UNALIGNED_SHARED_MEMORY_H_
diff --git a/accel/v4l2_device.cc b/accel/v4l2_device.cc
index 9826628..5c258ab 100644
--- a/accel/v4l2_device.cc
+++ b/accel/v4l2_device.cc
@@ -207,6 +207,8 @@
void ReturnBuffer(size_t buffer_id);
// Get any of the buffers in the list. There is no order guarantee whatsoever.
base::Optional<size_t> GetFreeBuffer();
+ // Get the buffer with specified index.
+ base::Optional<size_t> GetFreeBuffer(size_t requested_buffer_id);
// Number of buffers currently in this list.
size_t size() const;
@@ -241,6 +243,15 @@
return buffer_id;
}
+base::Optional<size_t> V4L2BuffersList::GetFreeBuffer(
+ size_t requested_buffer_id) {
+ base::AutoLock auto_lock(lock_);
+
+ return (free_buffers_.erase(requested_buffer_id) > 0)
+ ? base::make_optional(requested_buffer_id)
+ : base::nullopt;
+}
+
size_t V4L2BuffersList::size() const {
base::AutoLock auto_lock(lock_);
@@ -457,7 +468,19 @@
return std::move(self).DoQueue(request_ref);
}
-bool V4L2WritableBufferRef::QueueDMABuf(const std::vector<base::ScopedFD>& fds,
+bool V4L2WritableBufferRef::QueueDMABuf(const std::vector<base::ScopedFD>& scoped_fds,
+ V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ std::vector<int> fds;
+ fds.reserve(scoped_fds.size());
+ for (const base::ScopedFD& scoped_fd : scoped_fds)
+ fds.push_back(scoped_fd.get());
+
+ return std::move(*this).QueueDMABuf(fds, request_ref);
+}
+
+bool V4L2WritableBufferRef::QueueDMABuf(const std::vector<int>& fds,
V4L2RequestRef* request_ref) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
@@ -475,7 +498,7 @@
size_t num_planes = self.PlanesCount();
for (size_t i = 0; i < num_planes; i++)
- self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = fds[i].get();
+ self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = fds[i];
return std::move(self).DoQueue(request_ref);
}
@@ -883,6 +906,23 @@
weak_this_factory_.GetWeakPtr());
}
+base::Optional<V4L2WritableBufferRef> V4L2Queue::GetFreeBuffer(
+ size_t requested_buffer_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // No buffers allocated at the moment?
+ if (!free_buffers_)
+ return base::nullopt;
+
+ auto buffer_id = free_buffers_->GetFreeBuffer(requested_buffer_id);
+ if (!buffer_id.has_value())
+ return base::nullopt;
+
+ return V4L2BufferRefFactory::CreateWritableRef(
+ buffers_[buffer_id.value()]->v4l2_buffer(),
+ weak_this_factory_.GetWeakPtr());
+}
+
bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
diff --git a/accel/v4l2_device.h b/accel/v4l2_device.h
index 6c70203..f00a604 100644
--- a/accel/v4l2_device.h
+++ b/accel/v4l2_device.h
@@ -122,7 +122,19 @@
// so this reference becomes invalid.
// In case of error, false is returned and the buffer is returned to the free
// list.
- bool QueueDMABuf(const std::vector<base::ScopedFD>& fds,
+ bool QueueDMABuf(const std::vector<base::ScopedFD>& scoped_fds,
+ V4L2RequestRef* request_ref = nullptr) &&;
+ // Queue a DMABUF buffer, assigning |fds| as file descriptors for each plane.
+ // It is allowed the number of |fds| might be greater than the number of
+ // planes of this buffer. It happens when the v4l2 pixel format is single
+ // planar. The fd of the first plane is only used in that case.
+ // When requests are supported, a |request_ref| can be passed along this
+ // the buffer to be submitted.
+ // If successful, true is returned and the reference to the buffer is dropped
+ // so this reference becomes invalid.
+ // In case of error, false is returned and the buffer is returned to the free
+ // list.
+ bool QueueDMABuf(const std::vector<int>& fds,
V4L2RequestRef* request_ref = nullptr) &&;
// Returns the number of planes in this buffer.
@@ -294,6 +306,8 @@
// If the caller discards the returned reference, the underlying buffer is
// made available to clients again.
base::Optional<V4L2WritableBufferRef> GetFreeBuffer();
+ base::Optional<V4L2WritableBufferRef> GetFreeBuffer(
+ size_t requested_buffer_id);
// Attempt to dequeue a buffer, and return a reference to it if one was
// available.
diff --git a/common/Android.bp b/common/Android.bp
index e9d407b..45334d0 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -10,6 +10,7 @@
"EncodeHelpers.cpp",
"FormatConverter.cpp",
"V4L2ComponentCommon.cpp",
+ "VideoTypes.cpp",
],
export_include_dirs: [
diff --git a/common/FormatConverter.cpp b/common/FormatConverter.cpp
index 657676e..9ab9161 100644
--- a/common/FormatConverter.cpp
+++ b/common/FormatConverter.cpp
@@ -20,7 +20,7 @@
#include <ui/GraphicBuffer.h>
#include <utils/Log.h>
-#include <v4l2_codec2/common/Common.h> // for HalPixelFormat
+#include <v4l2_codec2/common/VideoTypes.h> // for HalPixelFormat
using android::hardware::graphics::common::V1_0::BufferUsage;
@@ -131,7 +131,7 @@
// conversion to perform I420.
halFormat = HalPixelFormat::YV12;
} else {
- halFormat = HalPixelFormat::YCbCr_420_888; // will allocate NV12 by minigbm.
+ halFormat = HalPixelFormat::YCBCR_420_888; // will allocate NV12 by minigbm.
}
uint32_t bufferCount = std::max(inputCount, kMinInputBufferCount);
diff --git a/components/VideoTypes.cpp b/common/VideoTypes.cpp
similarity index 88%
rename from components/VideoTypes.cpp
rename to common/VideoTypes.cpp
index 9b71db8..1ecceca 100644
--- a/components/VideoTypes.cpp
+++ b/common/VideoTypes.cpp
@@ -5,7 +5,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "VideoTypes"
-#include <v4l2_codec2/components/VideoTypes.h>
+#include <v4l2_codec2/common/VideoTypes.h>
#include <log/log.h>
@@ -24,6 +24,8 @@
const char* HalPixelFormatToString(HalPixelFormat format) {
switch (format) {
+ case HalPixelFormat::UNKNOWN:
+ return "Unknown";
case HalPixelFormat::YCBCR_420_888:
return "YCBCR_420_888";
case HalPixelFormat::YV12:
diff --git a/common/include/v4l2_codec2/common/Common.h b/common/include/v4l2_codec2/common/Common.h
index 1b816ef..650b7a7 100644
--- a/common/include/v4l2_codec2/common/Common.h
+++ b/common/include/v4l2_codec2/common/Common.h
@@ -15,20 +15,6 @@
uint32_t mStride;
};
-enum class HalPixelFormat : uint32_t {
- UNKNOWN = 0x0,
- // The pixel formats defined in Android but are used among C2VDAComponent.
- YCbCr_420_888 = 0x23,
- YV12 = 0x32315659,
- NV12 = 0x3231564e,
-};
-
-enum class InputCodec {
- H264,
- VP8,
- VP9,
-};
-
-} // namespace android
+} // namespace android
#endif // ANDROID_V4L2_CODEC2_COMMON_COMMON_H
diff --git a/common/include/v4l2_codec2/common/EncodeHelpers.h b/common/include/v4l2_codec2/common/EncodeHelpers.h
index ff6e00b..d152ba8 100644
--- a/common/include/v4l2_codec2/common/EncodeHelpers.h
+++ b/common/include/v4l2_codec2/common/EncodeHelpers.h
@@ -15,14 +15,6 @@
namespace android {
-// Specification of an encoding profile supported by an encoder.
-struct VideoEncodeProfile {
- media::VideoCodecProfile mProfile;
- media::Size mMaxResolution;
- uint32_t mMaxFramerateNumerator;
- uint32_t mMaxFramerateDenominator;
-};
-
// The encoder parameter set.
// |mInputFormat| is the pixel format of the input frames.
// |mInputVisibleSize| is the resolution of the input frames.
diff --git a/components/include/v4l2_codec2/components/VideoTypes.h b/common/include/v4l2_codec2/common/VideoTypes.h
similarity index 80%
rename from components/include/v4l2_codec2/components/VideoTypes.h
rename to common/include/v4l2_codec2/common/VideoTypes.h
index bcc9bc0..a5130d2 100644
--- a/components/include/v4l2_codec2/components/VideoTypes.h
+++ b/common/include/v4l2_codec2/common/VideoTypes.h
@@ -22,10 +22,11 @@
// Enumeration of supported pixel format. The value should be the same as
// ::android::hardware::graphics::common::V1_0::PixelFormat.
-using ::android::hardware::graphics::common::V1_0::PixelFormat;
+using HPixelFormat = ::android::hardware::graphics::common::V1_0::PixelFormat;
enum class HalPixelFormat : int32_t {
- YCBCR_420_888 = static_cast<int32_t>(PixelFormat::YCBCR_420_888),
- YV12 = static_cast<int32_t>(PixelFormat::YV12),
+ UNKNOWN = 0x0,
+ YCBCR_420_888 = static_cast<int32_t>(HPixelFormat::YCBCR_420_888),
+ YV12 = static_cast<int32_t>(HPixelFormat::YV12),
// NV12 is not defined at PixelFormat, follow the convention to use fourcc value.
NV12 = 0x3231564e,
};
diff --git a/components/Android.bp b/components/Android.bp
index 5e4e45d..8273412 100644
--- a/components/Android.bp
+++ b/components/Android.bp
@@ -13,8 +13,9 @@
"V4L2ComponentFactory.cpp",
"V4L2DecodeComponent.cpp",
"V4L2DecodeInterface.cpp",
+ "V4L2EncodeComponent.cpp",
+ "V4L2EncodeInterface.cpp",
"VideoDecoder.cpp",
- "VideoTypes.cpp",
],
export_include_dirs: [
"include",
@@ -31,6 +32,7 @@
"libstagefright_bufferqueue_helper",
"libstagefright_foundation",
"libv4l2_codec2_store",
+ "libui",
],
static_libs: [
"libv4l2_codec2_accel",
diff --git a/components/V4L2ComponentFactory.cpp b/components/V4L2ComponentFactory.cpp
index 1dca7ff..a535c34 100644
--- a/components/V4L2ComponentFactory.cpp
+++ b/components/V4L2ComponentFactory.cpp
@@ -15,6 +15,8 @@
#include <v4l2_codec2/common/V4L2ComponentCommon.h>
#include <v4l2_codec2/components/V4L2DecodeComponent.h>
#include <v4l2_codec2/components/V4L2DecodeInterface.h>
+#include <v4l2_codec2/components/V4L2EncodeComponent.h>
+#include <v4l2_codec2/components/V4L2EncodeInterface.h>
#include <v4l2_codec2/store/V4L2ComponentStore.h>
namespace android {
@@ -61,12 +63,11 @@
}
if (mIsEncoder) {
- // TODO(b/143333813): Fill the encoder component.
- return C2_BAD_VALUE;
+ *component = V4L2EncodeComponent::create(mComponentName, id, mReflector, deleter);
} else {
*component = V4L2DecodeComponent::create(mComponentName, id, mReflector, deleter);
- return *component ? C2_OK : C2_BAD_VALUE;
}
+ return *component ? C2_OK : C2_BAD_VALUE;
}
c2_status_t V4L2ComponentFactory::createInterface(
@@ -80,8 +81,12 @@
}
if (mIsEncoder) {
- // TODO(b/143333813): Fill the encoder component.
- return C2_BAD_VALUE;
+ *interface = std::shared_ptr<C2ComponentInterface>(
+ new SimpleInterface<V4L2EncodeInterface>(
+ mComponentName.c_str(), id,
+ std::make_shared<V4L2EncodeInterface>(mComponentName, mReflector)),
+ deleter);
+ return C2_OK;
} else {
*interface = std::shared_ptr<C2ComponentInterface>(
new SimpleInterface<V4L2DecodeInterface>(
diff --git a/components/V4L2DecodeComponent.cpp b/components/V4L2DecodeComponent.cpp
index 8a86466..1ea9a7b 100644
--- a/components/V4L2DecodeComponent.cpp
+++ b/components/V4L2DecodeComponent.cpp
@@ -24,9 +24,10 @@
#include <media/stagefright/foundation/ColorUtils.h>
#include <h264_parser.h>
+#include <v4l2_codec2/common/VideoTypes.h>
+#include <v4l2_codec2/components/BitstreamBuffer.h>
#include <v4l2_codec2/components/V4L2Decoder.h>
#include <v4l2_codec2/components/VideoFramePool.h>
-#include <v4l2_codec2/components/VideoTypes.h>
#include <v4l2_codec2/plugin_store/C2VdaBqBlockPool.h>
namespace android {
@@ -39,19 +40,6 @@
return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
}
-std::unique_ptr<VideoDecoder::BitstreamBuffer> C2BlockToBitstreamBuffer(
- const C2ConstLinearBlock& block, const int32_t bitstreamId) {
- const int fd = block.handle()->data[0];
- auto dupFd = ::base::ScopedFD(dup(fd));
- if (!dupFd.is_valid()) {
- ALOGE("Failed to dup(%d) input buffer (bitstreamId=%d), errno=%d", fd, bitstreamId, errno);
- return nullptr;
- }
-
- return std::make_unique<VideoDecoder::BitstreamBuffer>(bitstreamId, std::move(dupFd),
- block.offset(), block.size());
-}
-
bool parseCodedColorAspects(const C2ConstLinearBlock& input,
C2StreamColorAspectsInfo::input* codedAspects) {
C2ReadView view = input.map().get();
@@ -263,6 +251,16 @@
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+ // (b/157113946): Prevent malicious dynamic resolution change exhausts system memory.
+ constexpr int kMaximumSupportedArea = 4096 * 4096;
+ if (size.width() * size.height() > kMaximumSupportedArea) {
+ ALOGE("The output size (%dx%d) is larger than supported size (4096x4096)", size.width(),
+ size.height());
+ reportError(C2_BAD_VALUE);
+ *pool = nullptr;
+ return;
+ }
+
// Get block pool ID configured from the client.
auto poolId = mIntfImpl->getBlockPoolId();
ALOGI("Using C2BlockPool ID = %" PRIu64 " for allocating output buffers", poolId);
@@ -275,12 +273,7 @@
return;
}
- // TODO(b/160307705): Consider to remove the dependency of C2VdaBqBlockPool.
- if (blockPool->getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
- reinterpret_cast<C2VdaBqBlockPool*>(blockPool.get())->requestNewBufferSet(numBuffers);
- }
-
- *pool = VideoFramePool::Create(std::move(blockPool), size, pixelFormat, mIsSecure,
+ *pool = VideoFramePool::Create(std::move(blockPool), numBuffers, size, pixelFormat, mIsSecure,
mDecoderTaskRunner);
}
@@ -460,7 +453,9 @@
}
}
- auto buffer = C2BlockToBitstreamBuffer(linearBlock, bitstreamId);
+ std::unique_ptr<BitstreamBuffer> buffer =
+ std::make_unique<BitstreamBuffer>(bitstreamId, linearBlock.handle()->data[0],
+ linearBlock.offset(), linearBlock.size());
if (!buffer) {
reportError(C2_CORRUPTED);
return;
diff --git a/components/V4L2DecodeInterface.cpp b/components/V4L2DecodeInterface.cpp
index f975e95..a09fcc4 100644
--- a/components/V4L2DecodeInterface.cpp
+++ b/components/V4L2DecodeInterface.cpp
@@ -27,13 +27,6 @@
// Input bitstream buffer size for up to 4k streams.
constexpr size_t kInputBufferSizeFor4K = 4 * kInputBufferSizeFor1080p;
-// Supported V4L2 input formats. Currently we only support stateful API.
-constexpr uint32_t kSupportedInputFourccs[] = {
- V4L2_PIX_FMT_H264,
- V4L2_PIX_FMT_VP8,
- V4L2_PIX_FMT_VP9,
-};
-
std::optional<VideoCodec> getCodecFromComponentName(const std::string& name) {
if (name == V4L2ComponentName::kH264Decoder || name == V4L2ComponentName::kH264SecureDecoder)
return VideoCodec::H264;
@@ -200,18 +193,6 @@
break;
}
- auto device = media::V4L2Device::Create();
- const auto supportedProfiles = device->GetSupportedDecodeProfiles(
- base::size(kSupportedInputFourccs), kSupportedInputFourccs);
- if (supportedProfiles.empty()) {
- ALOGE("Failed to get supported profiles from V4L2 device.");
- mInitStatus = C2_BAD_VALUE;
- return;
- }
-
- mMinSize = supportedProfiles[0].min_resolution;
- mMaxSize = supportedProfiles[0].max_resolution;
-
addParameter(
DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
@@ -242,15 +223,17 @@
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
- addParameter(
- DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
- .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
- .withFields({
- C2F(mSize, width).inRange(mMinSize.width(), mMaxSize.width(), 16),
- C2F(mSize, height).inRange(mMinSize.height(), mMaxSize.height(), 16),
- })
- .withSetter(SizeSetter)
- .build());
+ // Note(b/165826281): The check is not used at Android framework currently.
+ // In order to fasten the bootup time, we use the maximum supported size instead of querying the
+ // capability from the V4L2 device.
+ addParameter(DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
+ .withFields({
+ C2F(mSize, width).inRange(16, 4096, 16),
+ C2F(mSize, height).inRange(16, 4096, 16),
+ })
+ .withSetter(SizeSetter)
+ .build());
addParameter(
DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
@@ -265,7 +248,7 @@
const C2Allocator::id_t inputAllocators[] = {secureMode ? V4L2AllocatorId::SECURE_LINEAR
: C2PlatformAllocatorStore::BLOB};
- const C2Allocator::id_t outputAllocators[] = {C2AllocatorStore::DEFAULT_GRAPHIC};
+ const C2Allocator::id_t outputAllocators[] = {V4L2AllocatorId::V4L2_BUFFERPOOL};
const C2Allocator::id_t surfaceAllocator =
secureMode ? V4L2AllocatorId::SECURE_GRAPHIC : V4L2AllocatorId::V4L2_BUFFERQUEUE;
const C2BlockPool::local_id_t outputBlockPools[] = {C2BlockPool::BASIC_GRAPHIC};
diff --git a/components/V4L2Decoder.cpp b/components/V4L2Decoder.cpp
index 275357e..d52bd6c 100644
--- a/components/V4L2Decoder.cpp
+++ b/components/V4L2Decoder.cpp
@@ -9,7 +9,10 @@
#include <stdint.h>
+#include <vector>
+
#include <base/bind.h>
+#include <base/files/scoped_file.h>
#include <base/memory/ptr_util.h>
#include <log/log.h>
@@ -267,8 +270,9 @@
auto request = std::move(mDecodeRequests.front());
mDecodeRequests.pop();
- ALOGV("QBUF to input queue, bitstreadId=%d", request.buffer->id);
- inputBuffer->SetTimeStamp({.tv_sec = request.buffer->id});
+ const int32_t bitstreamId = request.buffer->id;
+ ALOGV("QBUF to input queue, bitstreadId=%d", bitstreamId);
+ inputBuffer->SetTimeStamp({.tv_sec = bitstreamId});
size_t planeSize = inputBuffer->GetPlaneSize(0);
if (request.buffer->size > planeSize) {
ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
@@ -281,11 +285,15 @@
request.buffer->offset);
inputBuffer->SetPlaneDataOffset(0, request.buffer->offset);
inputBuffer->SetPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
- std::vector<::base::ScopedFD> fds;
+ std::vector<int> fds;
fds.push_back(std::move(request.buffer->dmabuf_fd));
- std::move(*inputBuffer).QueueDMABuf(fds);
+ if (!std::move(*inputBuffer).QueueDMABuf(fds)) {
+ ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
+ onError();
+ return;
+ }
- mPendingDecodeCbs.insert(std::make_pair(request.buffer->id, std::move(request.decodeCb)));
+ mPendingDecodeCbs.insert(std::make_pair(bitstreamId, std::move(request.decodeCb)));
}
}
@@ -311,13 +319,16 @@
std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
}
- // Streamoff V4L2 queues to drop input and output buffers.
+ // Streamoff both V4L2 queues to drop input and output buffers.
mDevice->StopPolling();
mOutputQueue->Streamoff();
+ mFrameAtDevice.clear();
mInputQueue->Streamoff();
- // Streamon input queue again.
+ // Streamon both V4L2 queues.
mInputQueue->Streamon();
+ mOutputQueue->Streamon();
+
if (!mDevice->StartPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
ALOGE("Failed to start polling V4L2 device.");
@@ -381,15 +392,43 @@
outputDequeued = true;
+ const size_t bufferId = dequeuedBuffer->BufferId();
+ const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->GetTimeStamp().tv_sec);
+ const size_t bytesUsed = dequeuedBuffer->GetPlaneBytesUsed(0);
+ const bool isLast = dequeuedBuffer->IsLast();
ALOGV("DQBUF from output queue, bufferId=%zu, corresponding bitstreamId=%d, bytesused=%zu",
- dequeuedBuffer->BufferId(),
- static_cast<int32_t>(dequeuedBuffer->GetTimeStamp().tv_sec),
- dequeuedBuffer->GetPlaneBytesUsed(0));
- if (dequeuedBuffer->GetPlaneBytesUsed(0) > 0) {
- sendOutputBuffer(dequeuedBuffer);
+ bufferId, bitstreamId, bytesUsed);
+
+ // Get the corresponding VideoFrame of the dequeued buffer.
+ auto it = mFrameAtDevice.find(bufferId);
+ ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice",
+ bufferId);
+ auto frame = std::move(it->second);
+ mFrameAtDevice.erase(it);
+
+ if (bytesUsed > 0) {
+ ALOGV("Send output frame(bitstreamId=%d) to client", bitstreamId);
+ frame->setBitstreamId(bitstreamId);
+ frame->setVisibleRect(mVisibleRect);
+ mOutputCb.Run(std::move(frame));
+ } else {
+ // Workaround(b/168750131): If the buffer is not enqueued before the next drain is done,
+ // then the driver will fail to notify EOS. So we recycle the buffer immediately.
+ ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
+ dequeuedBuffer.reset();
+ auto outputBuffer = mOutputQueue->GetFreeBuffer(bufferId);
+ ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);
+
+ if (!std::move(*outputBuffer).QueueDMABuf(frame->getFDs())) {
+ ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
+ onError();
+ return;
+ }
+ mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
}
- if (mDrainCb && dequeuedBuffer->IsLast()) {
- ALOGD("All buffers are drained.");
+
+ if (mDrainCb && isLast) {
+ ALOGV("All buffers are drained.");
sendV4L2DecoderCmd(true);
std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
setState(State::Idle);
@@ -416,21 +455,6 @@
}
}
-void V4L2Decoder::sendOutputBuffer(media::V4L2ReadableBufferRef buffer) {
- ALOGV("%s(bufferId=%zu)", __func__, buffer->BufferId());
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- size_t bufferId = buffer->BufferId();
- auto it = mFrameAtDevice.find(bufferId);
- ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice", bufferId);
- auto block = std::move(it->second);
- mFrameAtDevice.erase(it);
-
- block->setBitstreamId(buffer->GetTimeStamp().tv_sec);
- block->setVisibleRect(mVisibleRect);
- mOutputCb.Run(std::move(block));
-}
-
bool V4L2Decoder::dequeueResolutionChangeEvent() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
@@ -468,6 +492,8 @@
mOutputQueue->Streamoff();
mOutputQueue->DeallocateBuffers();
+ mFrameAtDevice.clear();
+ mBlockIdToV4L2Id.clear();
if (mOutputQueue->AllocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF) == 0) {
ALOGE("Failed to allocate output buffer.");
@@ -494,37 +520,72 @@
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
ALOG_ASSERT(mVideoFramePool, "mVideoFramePool is null, haven't get the instance yet?");
- if (mState == State::Idle) return;
-
- if (mVideoFramePool->hasPendingRequests()) {
- ALOGD("Previous callback is running, ignore.");
+ if (mOutputQueue->FreeBuffersCount() == 0) {
+ ALOGD("No free V4L2 output buffers, ignore.");
return;
}
- auto outputBuffer = mOutputQueue->GetFreeBuffer();
- if (!outputBuffer) {
- ALOGD("No free output buffer.");
- return;
+ if (!mVideoFramePool->getVideoFrame(
+ ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis))) {
+ ALOGV("%s(): Previous callback is running, ignore.", __func__);
}
- mVideoFramePool->getVideoFrame(
- ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis, std::move(*outputBuffer)));
}
-void V4L2Decoder::onVideoFrameReady(media::V4L2WritableBufferRef outputBuffer,
- std::unique_ptr<VideoFrame> frame) {
+void V4L2Decoder::onVideoFrameReady(
+ std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- if (!frame) {
- ALOGE("Get nullptr VideoFrame.");
+ if (!frameWithBlockId) {
+ ALOGE("Got nullptr VideoFrame.");
onError();
return;
}
- size_t bufferId = outputBuffer.BufferId();
- ALOGV("QBUF to output queue, bufferId=%zu", bufferId);
- std::move(outputBuffer).QueueDMABuf(frame->getFDs());
- mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
+ // Unwrap our arguments.
+ std::unique_ptr<VideoFrame> frame;
+ uint32_t blockId;
+ std::tie(frame, blockId) = std::move(*frameWithBlockId);
+
+ ::base::Optional<media::V4L2WritableBufferRef> outputBuffer;
+ // Find the V4L2 buffer that is associated with this block.
+ auto iter = mBlockIdToV4L2Id.find(blockId);
+ if (iter != mBlockIdToV4L2Id.end()) {
+ // If we have met this block in the past, reuse the same V4L2 buffer.
+ outputBuffer = mOutputQueue->GetFreeBuffer(iter->second);
+ } else if (mBlockIdToV4L2Id.size() < mOutputQueue->AllocatedBuffersCount()) {
+ // If this is the first time we see this block, give it the next
+ // available V4L2 buffer.
+ const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
+ mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
+ outputBuffer = mOutputQueue->GetFreeBuffer(v4l2BufferId);
+ } else {
+ // If this happens, this is a bug in VideoFramePool. It should never
+ // provide more blocks than we have V4L2 buffers.
+ ALOGE("Got more different blocks than we have V4L2 buffers for.");
+ }
+
+ if (!outputBuffer) {
+ ALOGE("V4L2 buffer not available.");
+ onError();
+ return;
+ }
+
+ uint32_t v4l2Id = outputBuffer->BufferId();
+ ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);
+
+ if (!std::move(*outputBuffer).QueueDMABuf(frame->getFDs())) {
+ ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
+ v4l2Id);
+ onError();
+ return;
+ }
+ if (mFrameAtDevice.find(v4l2Id) != mFrameAtDevice.end()) {
+ ALOGE("%s(): V4L2 buffer %d already enqueued.", __func__, v4l2Id);
+ onError();
+ return;
+ }
+ mFrameAtDevice.insert(std::make_pair(v4l2Id, std::move(frame)));
tryFetchVideoFrame();
}
diff --git a/components/V4L2EncodeComponent.cpp b/components/V4L2EncodeComponent.cpp
new file mode 100644
index 0000000..ab2230e
--- /dev/null
+++ b/components/V4L2EncodeComponent.cpp
@@ -0,0 +1,1774 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "V4L2EncodeComponent"
+
+#include <v4l2_codec2/components/V4L2EncodeComponent.h>
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <utility>
+
+#include <C2AllocatorGralloc.h>
+#include <C2PlatformSupport.h>
+#include <C2Work.h>
+#include <android/hardware/graphics/common/1.0/types.h>
+#include <base/bind.h>
+#include <base/bind_helpers.h>
+#include <log/log.h>
+#include <media/stagefright/MediaDefs.h>
+#include <ui/GraphicBuffer.h>
+
+#include <fourcc.h>
+#include <h264_parser.h>
+#include <rect.h>
+#include <v4l2_codec2/common/Common.h>
+#include <v4l2_codec2/common/EncodeHelpers.h>
+#include <v4l2_device.h>
+#include <video_pixel_format.h>
+
+using android::hardware::graphics::common::V1_0::BufferUsage;
+
+namespace android {
+
+namespace {
+
+const media::VideoPixelFormat kInputPixelFormat = media::VideoPixelFormat::PIXEL_FORMAT_NV12;
+
+// Get the video frame layout from the specified |inputBlock|.
+// TODO(dstaessens): Clean up code extracting layout from a C2GraphicBlock.
+std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGraphicBlock& block,
+ media::VideoPixelFormat* format) {
+ ALOGV("%s()", __func__);
+
+ // Get the C2PlanarLayout from the graphics block. The C2GraphicView returned by block.map()
+ // needs to be released before calling getGraphicBlockInfo(), or the lockYCbCr() call will block
+    // indefinitely.
+ C2PlanarLayout layout = block.map().get().layout();
+
+    // The above layout() cannot fill in the layout information (the layout is zeroed instead) if
+    // the input format is IMPLEMENTATION_DEFINED and its backing format is RGB. In that case we
+    // fill in the layout using ImplDefinedToRGBXMap.
+ if (layout.type == C2PlanarLayout::TYPE_UNKNOWN) {
+ std::unique_ptr<ImplDefinedToRGBXMap> idMap = ImplDefinedToRGBXMap::Create(block);
+ if (idMap == nullptr) {
+ ALOGE("Unable to parse RGBX_8888 from IMPLEMENTATION_DEFINED");
+ return std::nullopt;
+ }
+ layout.type = C2PlanarLayout::TYPE_RGB;
+        // These parameters are used in the TYPE_RGB case below.
+ layout.numPlanes = 3; // same value as in C2AllocationGralloc::map()
+ layout.rootPlanes = 1; // same value as in C2AllocationGralloc::map()
+ layout.planes[C2PlanarLayout::PLANE_R].offset = idMap->offset();
+ layout.planes[C2PlanarLayout::PLANE_R].rowInc = idMap->rowInc();
+ }
+
+ std::vector<uint32_t> offsets(layout.numPlanes, 0u);
+ std::vector<uint32_t> strides(layout.numPlanes, 0u);
+ switch (layout.type) {
+ case C2PlanarLayout::TYPE_YUV: {
+ android_ycbcr ycbcr = getGraphicBlockInfo(block);
+ offsets[C2PlanarLayout::PLANE_Y] =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ycbcr.y));
+ offsets[C2PlanarLayout::PLANE_U] =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ycbcr.cb));
+ offsets[C2PlanarLayout::PLANE_V] =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ycbcr.cr));
+ strides[C2PlanarLayout::PLANE_Y] = static_cast<uint32_t>(ycbcr.ystride);
+ strides[C2PlanarLayout::PLANE_U] = static_cast<uint32_t>(ycbcr.cstride);
+ strides[C2PlanarLayout::PLANE_V] = static_cast<uint32_t>(ycbcr.cstride);
+
+ bool crcb = false;
+ if (offsets[C2PlanarLayout::PLANE_U] > offsets[C2PlanarLayout::PLANE_V]) {
+ // Swap offsets, no need to swap strides as they are identical for both chroma planes.
+ std::swap(offsets[C2PlanarLayout::PLANE_U], offsets[C2PlanarLayout::PLANE_V]);
+ crcb = true;
+ }
+
+ bool semiplanar = false;
+ if (ycbcr.chroma_step >
+ offsets[C2PlanarLayout::PLANE_V] - offsets[C2PlanarLayout::PLANE_U]) {
+ semiplanar = true;
+ }
+
+ if (!crcb && !semiplanar) {
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_I420;
+ } else if (!crcb && semiplanar) {
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_NV12;
+ } else if (crcb && !semiplanar) {
+ // HACK: pretend YV12 is I420 now since VEA only accepts I420. (YV12 will be used
+ // for input byte-buffer mode).
+ // TODO(dstaessens): Is this hack still necessary now we're not using the VEA directly?
+ //format = media::VideoPixelFormat::PIXEL_FORMAT_YV12;
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_I420;
+ } else {
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_NV21;
+ }
+ break;
+ }
+ case C2PlanarLayout::TYPE_RGB: {
+ offsets[C2PlanarLayout::PLANE_R] = layout.planes[C2PlanarLayout::PLANE_R].offset;
+ strides[C2PlanarLayout::PLANE_R] =
+ static_cast<uint32_t>(layout.planes[C2PlanarLayout::PLANE_R].rowInc);
+ *format = media::VideoPixelFormat::PIXEL_FORMAT_ARGB;
+ break;
+ }
+ default:
+ ALOGW("Unknown layout type: %u", static_cast<uint32_t>(layout.type));
+ return std::nullopt;
+ }
+
+ std::vector<VideoFramePlane> planes;
+ for (uint32_t i = 0; i < layout.rootPlanes; ++i) {
+ planes.push_back({offsets[i], strides[i]});
+ }
+ return planes;
+}
+
+// The maximum size for output buffer, which is chosen empirically for a 1080p video.
+constexpr size_t kMaxBitstreamBufferSizeInBytes = 2 * 1024 * 1024; // 2MB
+// The frame size for 1080p (FHD) video in pixels.
+constexpr int k1080PSizeInPixels = 1920 * 1080;
+// The frame size for 1440p (QHD) video in pixels.
+constexpr int k1440PSizeInPixels = 2560 * 1440;
+
+// Use quadruple size of kMaxBitstreamBufferSizeInBytes when the input frame size is larger than
+// 1440p, double if larger than 1080p. This is chosen empirically for some 4k encoding use cases and
+// the Android CTS VideoEncoderTest (crbug.com/927284).
+size_t GetMaxOutputBufferSize(const media::Size& size) {
+ if (size.GetArea() > k1440PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 4;
+ if (size.GetArea() > k1080PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 2;
+ return kMaxBitstreamBufferSizeInBytes;
+}
+
+// These are rather subjectively tuned.
+constexpr size_t kInputBufferCount = 2;
+constexpr size_t kOutputBufferCount = 2;
+
+// Define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR control code if not present in header files.
+#ifndef V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR
+#define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR (V4L2_CID_MPEG_BASE + 388)
+#endif
+
+} // namespace
+
+// static
+std::unique_ptr<V4L2EncodeComponent::InputFrame> V4L2EncodeComponent::InputFrame::Create(
+ const C2ConstGraphicBlock& block) {
+ std::vector<int> fds;
+ const C2Handle* const handle = block.handle();
+ for (int i = 0; i < handle->numFds; i++) {
+ fds.emplace_back(handle->data[i]);
+ }
+
+ return std::unique_ptr<InputFrame>(new InputFrame(std::move(fds)));
+}
+
+// static
+std::shared_ptr<C2Component> V4L2EncodeComponent::create(
+ C2String name, c2_node_id_t id, std::shared_ptr<C2ReflectorHelper> helper,
+ C2ComponentFactory::ComponentDeleter deleter) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+
+ auto interface = std::make_shared<V4L2EncodeInterface>(name, std::move(helper));
+ if (interface->status() != C2_OK) {
+ ALOGE("Component interface initialization failed (error code %d)", interface->status());
+ return nullptr;
+ }
+
+ return std::shared_ptr<C2Component>(new V4L2EncodeComponent(name, id, std::move(interface)),
+ deleter);
+}
+
+V4L2EncodeComponent::V4L2EncodeComponent(C2String name, c2_node_id_t id,
+ std::shared_ptr<V4L2EncodeInterface> interface)
+ : mName(name),
+ mId(id),
+ mInterface(std::move(interface)),
+ mComponentState(ComponentState::LOADED) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+}
+
+V4L2EncodeComponent::~V4L2EncodeComponent() {
+ ALOGV("%s()", __func__);
+
+ // Stop encoder thread and invalidate pointers if component wasn't stopped before destroying.
+ if (mEncoderThread.IsRunning()) {
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(
+ [](::base::WeakPtrFactory<V4L2EncodeComponent>* weakPtrFactory) {
+ weakPtrFactory->InvalidateWeakPtrs();
+ },
+ &mWeakThisFactory));
+ mEncoderThread.Stop();
+ }
+ ALOGV("%s(): done", __func__);
+}
+
+c2_status_t V4L2EncodeComponent::start() {
+ ALOGV("%s()", __func__);
+
+ // Lock while starting, to synchronize start/stop/reset/release calls.
+ std::lock_guard<std::mutex> lock(mComponentLock);
+
+ // According to the specification start() should only be called in the LOADED state.
+ if (mComponentState != ComponentState::LOADED) {
+ return C2_BAD_STATE;
+ }
+
+ if (!mEncoderThread.Start()) {
+ ALOGE("Failed to start encoder thread");
+ return C2_CORRUPTED;
+ }
+ mEncoderTaskRunner = mEncoderThread.task_runner();
+ mWeakThis = mWeakThisFactory.GetWeakPtr();
+
+ // Initialize the encoder on the encoder thread.
+ ::base::WaitableEvent done;
+ bool success = false;
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::Bind(&V4L2EncodeComponent::startTask, mWeakThis, &success, &done));
+ done.Wait();
+
+ if (!success) {
+ ALOGE("Failed to initialize encoder");
+ return C2_CORRUPTED;
+ }
+
+ setComponentState(ComponentState::RUNNING);
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::stop() {
+ ALOGV("%s()", __func__);
+
+ // Lock while stopping, to synchronize start/stop/reset/release calls.
+ std::lock_guard<std::mutex> lock(mComponentLock);
+
+ if (mComponentState != ComponentState::RUNNING && mComponentState != ComponentState::ERROR) {
+ return C2_BAD_STATE;
+ }
+
+ // Return immediately if the component is already stopped.
+ if (!mEncoderThread.IsRunning()) {
+ return C2_OK;
+ }
+
+ // Wait for the component to stop.
+ ::base::WaitableEvent done;
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::stopTask, mWeakThis, &done));
+ done.Wait();
+ mEncoderThread.Stop();
+
+ setComponentState(ComponentState::LOADED);
+
+ ALOGV("%s() - done", __func__);
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::reset() {
+ ALOGV("%s()", __func__);
+
+ // The interface specification says: "This method MUST be supported in all (including tripped)
+ // states other than released".
+ if (mComponentState == ComponentState::UNLOADED) {
+ return C2_BAD_STATE;
+ }
+
+ // TODO(dstaessens): Reset the component's interface to default values.
+ stop();
+
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::release() {
+ ALOGV("%s()", __func__);
+
+ // The interface specification says: "This method MUST be supported in stopped state.", but the
+ // release method seems to be called in other states as well.
+ reset();
+
+ setComponentState(ComponentState::UNLOADED);
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
+ ALOGV("%s()", __func__);
+
+ if (mComponentState != ComponentState::RUNNING) {
+ ALOGE("Trying to queue work item while component is not running");
+ return C2_BAD_STATE;
+ }
+
+ while (!items->empty()) {
+ mEncoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&V4L2EncodeComponent::queueTask, mWeakThis,
+ std::move(items->front())));
+ items->pop_front();
+ }
+
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::drain_nb(drain_mode_t mode) {
+ ALOGV("%s()", __func__);
+
+ if (mode == DRAIN_CHAIN) {
+ return C2_OMITTED; // Tunneling is not supported for now.
+ }
+
+ if (mComponentState != ComponentState::RUNNING) {
+ return C2_BAD_STATE;
+ }
+
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::drainTask, mWeakThis, mode));
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::flush_sm(flush_mode_t mode,
+ std::list<std::unique_ptr<C2Work>>* const flushedWork) {
+ ALOGV("%s()", __func__);
+
+ if (mode != FLUSH_COMPONENT) {
+        return C2_OMITTED;  // Tunneling is not supported for now
+ }
+
+ if (mComponentState != ComponentState::RUNNING) {
+ return C2_BAD_STATE;
+ }
+
+ // Work that can be immediately discarded should be returned in |flushedWork|. This method may
+ // be momentarily blocking but must return within 5ms, which should give us enough time to
+ // immediately abandon all non-started work on the encoder thread. We can return all work that
+ // can't be immediately discarded using onWorkDone() later.
+ ::base::WaitableEvent done;
+ mEncoderTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::flushTask,
+ mWeakThis, &done, flushedWork));
+ done.Wait();
+
+ return C2_OK;
+}
+
+c2_status_t V4L2EncodeComponent::announce_nb(const std::vector<C2WorkOutline>& items) {
+    return C2_OMITTED;  // Tunneling is not supported for now
+}
+
+c2_status_t V4L2EncodeComponent::setListener_vb(const std::shared_ptr<Listener>& listener,
+ c2_blocking_t mayBlock) {
+ ALOG_ASSERT(mComponentState != ComponentState::UNLOADED);
+
+ // Lock so we're sure the component isn't currently starting or stopping.
+ std::lock_guard<std::mutex> lock(mComponentLock);
+
+ // If the encoder thread is not running it's safe to update the listener directly.
+ if (!mEncoderThread.IsRunning()) {
+ mListener = listener;
+ return C2_OK;
+ }
+
+ // The listener should be updated before exiting this function. If called while the component is
+ // currently running we should be allowed to block, as we can only change the listener on the
+ // encoder thread.
+ ALOG_ASSERT(mayBlock == c2_blocking_t::C2_MAY_BLOCK);
+
+ ::base::WaitableEvent done;
+ mEncoderTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::setListenerTask,
+ mWeakThis, listener, &done));
+ done.Wait();
+
+ return C2_OK;
+}
+
+std::shared_ptr<C2ComponentInterface> V4L2EncodeComponent::intf() {
+ return std::make_shared<SimpleInterface<V4L2EncodeInterface>>(mName.c_str(), mId, mInterface);
+}
+
+void V4L2EncodeComponent::startTask(bool* success, ::base::WaitableEvent* done) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+
+ *success = initializeEncoder();
+ done->Signal();
+}
+
+void V4L2EncodeComponent::stopTask(::base::WaitableEvent* done) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ // Flushing the encoder will abort all pending work and stop polling and streaming on the V4L2
+ // device queues.
+ flush();
+
+ // Deallocate all V4L2 device input and output buffers.
+ destroyInputBuffers();
+ destroyOutputBuffers();
+
+ // Invalidate all weak pointers so no more functions will be executed on the encoder thread.
+ mWeakThisFactory.InvalidateWeakPtrs();
+
+ setEncoderState(EncoderState::UNINITIALIZED);
+ done->Signal();
+}
+
+void V4L2EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+
+ // If we're in the error state we can immediately return, freeing all buffers in the work item.
+ if (mEncoderState == EncoderState::ERROR) {
+ return;
+ }
+
+ ALOGV("Queued work item (index: %llu, timestamp: %llu, EOS: %d)",
+ work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull(),
+ work->input.flags & C2FrameData::FLAG_END_OF_STREAM);
+
+ mInputWorkQueue.push(std::move(work));
+
+ // If we were waiting for work, start encoding again.
+ if (mEncoderState == EncoderState::WAITING_FOR_INPUT) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE,
+ ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+ }
+}
+
+// TODO(dstaessens): Investigate improving drain logic after draining the virtio device is fixed.
+void V4L2EncodeComponent::drainTask(drain_mode_t /*drainMode*/) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ // We can only start draining if all the work in our input queue has been queued on the V4L2
+ // device input queue, so we mark the last item in the input queue as EOS.
+ if (!mInputWorkQueue.empty()) {
+ ALOGV("Marking last item in input work queue as EOS");
+ mInputWorkQueue.back()->input.flags = static_cast<C2FrameData::flags_t>(
+ mInputWorkQueue.back()->input.flags | C2FrameData::FLAG_END_OF_STREAM);
+ return;
+ }
+
+ // If the input queue is empty and there is only a single empty EOS work item in the output
+    // queue we can immediately consider draining done.
+ if ((mOutputWorkQueue.size() == 1) && mOutputWorkQueue.back()->input.buffers.empty()) {
+ ALOG_ASSERT(mOutputWorkQueue.back()->input.flags & C2FrameData::FLAG_END_OF_STREAM);
+ setEncoderState(EncoderState::DRAINING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::onDrainDone, mWeakThis, true));
+ return;
+ }
+
+ // If the input queue is empty all work that needs to be drained has already been queued in the
+ // V4L2 device, so we can immediately request a drain.
+ if (!mOutputWorkQueue.empty()) {
+ // Mark the last item in the output work queue as EOS, so we will only report it as
+ // finished after draining has completed.
+ ALOGV("Starting drain and marking last item in output work queue as EOS");
+ mOutputWorkQueue.back()->input.flags = C2FrameData::FLAG_END_OF_STREAM;
+ drain();
+ }
+}
+
+void V4L2EncodeComponent::onDrainDone(bool done) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::DRAINING || mEncoderState == EncoderState::ERROR);
+
+ if (mEncoderState == EncoderState::ERROR) {
+ return;
+ }
+
+ if (!done) {
+ ALOGE("draining the encoder failed");
+ reportError(C2_CORRUPTED);
+ return;
+ }
+
+ // The last work item in the output work queue should be an EOS request.
+ if (mOutputWorkQueue.empty() ||
+ !(mOutputWorkQueue.back()->input.flags & C2FrameData::FLAG_END_OF_STREAM)) {
+ ALOGE("The last item in the output work queue should be marked EOS");
+ reportError(C2_CORRUPTED);
+ return;
+ }
+
+ // Mark the last item in the output work queue as EOS done.
+ C2Work* eosWork = mOutputWorkQueue.back().get();
+ eosWork->worklets.back()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
+
+ // Draining is done which means all buffers on the device output queue have been returned, but
+ // not all buffers on the device input queue might have been returned yet.
+ if ((mOutputWorkQueue.size() > 1) || !isWorkDone(*eosWork)) {
+ ALOGV("Draining done, waiting for input buffers to be returned");
+ return;
+ }
+
+ ALOGV("Draining done");
+ reportWork(std::move(mOutputWorkQueue.front()));
+ mOutputWorkQueue.pop_front();
+
+ // Draining the encoder is now done, we can start encoding again.
+ if (!mInputWorkQueue.empty()) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE,
+ ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+ } else {
+ setEncoderState(EncoderState::WAITING_FOR_INPUT);
+ }
+}
+
+void V4L2EncodeComponent::flushTask(::base::WaitableEvent* done,
+ std::list<std::unique_ptr<C2Work>>* const flushedWork) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ // Move all work that can immediately be aborted to flushedWork, and notify the caller.
+ if (flushedWork) {
+ while (!mInputWorkQueue.empty()) {
+ std::unique_ptr<C2Work> work = std::move(mInputWorkQueue.front());
+ work->input.buffers.clear();
+ flushedWork->push_back(std::move(work));
+ mInputWorkQueue.pop();
+ }
+ }
+ done->Signal();
+
+ flush();
+}
+
+void V4L2EncodeComponent::setListenerTask(const std::shared_ptr<Listener>& listener,
+ ::base::WaitableEvent* done) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ mListener = listener;
+ done->Signal();
+}
+
+bool V4L2EncodeComponent::initializeEncoder() {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+
+ mVisibleSize = mInterface->getInputVisibleSize();
+ mKeyFramePeriod = mInterface->getKeyFramePeriod();
+ mKeyFrameCounter = 0;
+ mCSDSubmitted = false;
+
+ // Open the V4L2 device for encoding to the requested output format.
+ // TODO(dstaessens): Do we need to close the device first if already opened?
+ // TODO(dstaessens): Avoid conversion to VideoCodecProfile and use C2Config::profile_t directly.
+ media::VideoCodecProfile outputProfile =
+ c2ProfileToVideoCodecProfile(mInterface->getOutputProfile());
+ uint32_t outputPixelFormat =
+ media::V4L2Device::VideoCodecProfileToV4L2PixFmt(outputProfile, false);
+ if (!outputPixelFormat) {
+ ALOGE("Invalid output profile %s", media::GetProfileName(outputProfile).c_str());
+ return false;
+ }
+
+ mDevice = media::V4L2Device::Create();
+ if (!mDevice) {
+ ALOGE("Failed to create V4L2 device");
+ return false;
+ }
+
+ if (!mDevice->Open(media::V4L2Device::Type::kEncoder, outputPixelFormat)) {
+ ALOGE("Failed to open device for profile %s (%s)",
+ media::GetProfileName(outputProfile).c_str(),
+ media::FourccToString(outputPixelFormat).c_str());
+ return false;
+ }
+
+ // Make sure the device has all required capabilities (multi-planar Memory-To-Memory and
+ // streaming I/O), and whether flushing is supported.
+ if (!mDevice->HasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
+ ALOGE("Device doesn't have the required capabilities");
+ return false;
+ }
+ if (!mDevice->IsCommandSupported(V4L2_ENC_CMD_STOP)) {
+ ALOGE("Device does not support flushing (V4L2_ENC_CMD_STOP)");
+ return false;
+ }
+
+ // Get input/output queues so we can send encode request to the device and get back the results.
+ mInputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ mOutputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!mInputQueue || !mOutputQueue) {
+ ALOGE("Failed to get V4L2 device queues");
+ return false;
+ }
+
+ // First try to configure the specified output format, as changing the output format can affect
+ // the configured input format.
+ if (!configureOutputFormat(outputProfile)) return false;
+
+ // Configure the input format. If the device doesn't support the specified format we'll use one
+ // of the device's preferred formats in combination with an input format convertor.
+ if (!configureInputFormat(kInputPixelFormat)) return false;
+
+ // Create input and output buffers.
+ // TODO(dstaessens): Avoid allocating output buffers, encode directly into blockpool buffers.
+ if (!createInputBuffers() || !createOutputBuffers()) return false;
+
+ // Configure the device, setting all required controls.
+ uint8_t level = c2LevelToLevelIDC(mInterface->getOutputLevel());
+ if (!configureDevice(outputProfile, level)) return false;
+
+ // We're ready to start encoding now.
+ setEncoderState(EncoderState::WAITING_FOR_INPUT);
+
+    // As initialization is asynchronous, work might already have been queued.
+ if (!mInputWorkQueue.empty()) {
+ setEncoderState(EncoderState::ENCODING);
+ mEncoderTaskRunner->PostTask(
+ FROM_HERE, ::base::Bind(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+ }
+ return true;
+}
+
+bool V4L2EncodeComponent::configureInputFormat(media::VideoPixelFormat inputFormat) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+ ALOG_ASSERT(!mInputQueue->IsStreaming());
+ ALOG_ASSERT(!mVisibleSize.IsEmpty());
+ ALOG_ASSERT(!mInputFormatConverter);
+
+ // First try to use the requested pixel format directly.
+ ::base::Optional<struct v4l2_format> format;
+ auto fourcc = media::Fourcc::FromVideoPixelFormat(inputFormat, false);
+ if (fourcc) {
+ format = mInputQueue->SetFormat(fourcc->ToV4L2PixFmt(), mVisibleSize, 0);
+ }
+
+ // If the device doesn't support the requested input format we'll try the device's preferred
+ // input pixel formats and use a format convertor. We need to try all formats as some formats
+ // might not be supported for the configured output format.
+ if (!format) {
+ std::vector<uint32_t> preferredFormats =
+ mDevice->PreferredInputFormat(media::V4L2Device::Type::kEncoder);
+ for (uint32_t i = 0; !format && i < preferredFormats.size(); ++i) {
+ format = mInputQueue->SetFormat(preferredFormats[i], mVisibleSize, 0);
+ }
+ }
+
+ if (!format) {
+ ALOGE("Failed to set input format to %s",
+ media::VideoPixelFormatToString(inputFormat).c_str());
+ return false;
+ }
+
+ // Check whether the negotiated input format is valid. The coded size might be adjusted to match
+ // encoder minimums, maximums and alignment requirements of the currently selected formats.
+ auto layout = media::V4L2Device::V4L2FormatToVideoFrameLayout(*format);
+ if (!layout) {
+ ALOGE("Invalid input layout");
+ return false;
+ }
+
+ mInputLayout = layout.value();
+ if (!media::Rect(mInputLayout->coded_size()).Contains(media::Rect(mVisibleSize))) {
+ ALOGE("Input size %s exceeds encoder capability, encoder can handle %s",
+ mVisibleSize.ToString().c_str(), mInputLayout->coded_size().ToString().c_str());
+ return false;
+ }
+
+ // Calculate the input coded size from the format.
+ // TODO(dstaessens): How is this different from mInputLayout->coded_size()?
+ mInputCodedSize = media::V4L2Device::AllocatedSizeFromV4L2Format(*format);
+
+ // Add an input format convertor if the device doesn't support the requested input format.
+ // Note: The amount of input buffers in the convertor should match the amount of buffers on the
+ // device input queue, to simplify logic.
+ // TODO(dstaessens): Currently an input format convertor is always required. Mapping an input
+    // buffer always seems to fail unless we copy it into a new buffer first. As a temporary
+ // workaround the line below is commented, but this should be undone once the issue is fixed.
+ //if (mInputLayout->format() != inputFormat) {
+ ALOGV("Creating input format convertor (%s)",
+ media::VideoPixelFormatToString(mInputLayout->format()).c_str());
+ mInputFormatConverter =
+ FormatConverter::Create(inputFormat, mVisibleSize, kInputBufferCount, mInputCodedSize);
+ if (!mInputFormatConverter) {
+ ALOGE("Failed to created input format convertor");
+ return false;
+ }
+ //}
+
+ // The coded input size might be different from the visible size due to alignment requirements,
+    // so we need to specify the visible rectangle. Note that this rectangle might still be adjusted
+ // due to hardware limitations.
+ // TODO(dstaessens): Overwrite mVisibleSize with the adapted visible size here?
+ media::Rect visibleRectangle(mVisibleSize.width(), mVisibleSize.height());
+
+ struct v4l2_rect rect;
+ rect.left = visibleRectangle.x();
+ rect.top = visibleRectangle.y();
+ rect.width = visibleRectangle.width();
+ rect.height = visibleRectangle.height();
+
+ // Try to adjust the visible rectangle using the VIDIOC_S_SELECTION command. If this is not
+ // supported we'll try to use the VIDIOC_S_CROP command instead. The visible rectangle might be
+ // adjusted to conform to hardware limitations (e.g. round to closest horizontal and vertical
+ // offsets, width and height).
+ struct v4l2_selection selection_arg;
+ memset(&selection_arg, 0, sizeof(selection_arg));
+ selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ selection_arg.target = V4L2_SEL_TGT_CROP;
+ selection_arg.r = rect;
+ if (mDevice->Ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
+ visibleRectangle = media::Rect(selection_arg.r.left, selection_arg.r.top,
+ selection_arg.r.width, selection_arg.r.height);
+ } else {
+ struct v4l2_crop crop;
+ memset(&crop, 0, sizeof(v4l2_crop));
+ crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ crop.c = rect;
+ if (mDevice->Ioctl(VIDIOC_S_CROP, &crop) != 0 ||
+ mDevice->Ioctl(VIDIOC_G_CROP, &crop) != 0) {
+ ALOGE("Failed to crop to specified visible rectangle");
+ return false;
+ }
+ visibleRectangle = media::Rect(crop.c.left, crop.c.top, crop.c.width, crop.c.height);
+ }
+
+ ALOGV("Input format set to %s (size: %s, adjusted size: %dx%d, coded size: %s)",
+ media::VideoPixelFormatToString(mInputLayout->format()).c_str(),
+ mVisibleSize.ToString().c_str(), visibleRectangle.width(), visibleRectangle.height(),
+ mInputCodedSize.ToString().c_str());
+
+ mVisibleSize.SetSize(visibleRectangle.width(), visibleRectangle.height());
+ return true;
+}
+
+bool V4L2EncodeComponent::configureOutputFormat(media::VideoCodecProfile outputProfile) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED);
+ ALOG_ASSERT(!mOutputQueue->IsStreaming());
+ ALOG_ASSERT(!mVisibleSize.IsEmpty());
+
+ auto format = mOutputQueue->SetFormat(
+ media::V4L2Device::VideoCodecProfileToV4L2PixFmt(outputProfile, false), mVisibleSize,
+ GetMaxOutputBufferSize(mVisibleSize));
+ if (!format) {
+ ALOGE("Failed to set output format to %s", media::GetProfileName(outputProfile).c_str());
+ return false;
+ }
+
+ // The device might adjust the requested output buffer size to match hardware requirements.
+ mOutputBufferSize = ::base::checked_cast<size_t>(format->fmt.pix_mp.plane_fmt[0].sizeimage);
+
+ ALOGV("Output format set to %s (buffer size: %u)", media::GetProfileName(outputProfile).c_str(),
+ mOutputBufferSize);
+ return true;
+}
+
+bool V4L2EncodeComponent::configureDevice(media::VideoCodecProfile outputProfile,
+ std::optional<const uint8_t> outputH264Level) {
+ ALOGV("%s()", __func__);
+ ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+ // Enable frame-level bitrate control. This is the only mandatory general control.
+ if (!mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 1)})) {
+ ALOGW("Failed enabling bitrate control");
+ // TODO(b/161508368): V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE is currently not supported yet,
+ // assume the operation was successful for now.
+ }
+
+ // Additional optional controls:
+ // - Enable macroblock-level bitrate control.
+ // - Set GOP length to 0 to disable periodic key frames.
+ mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 1),
+ media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0)});
+
+ // All controls below are H.264-specific, so we can return here if the profile is not H.264.
+ if (outputProfile < media::H264PROFILE_MIN || outputProfile > media::H264PROFILE_MAX) {
+ return true;
+ }
+
+ // When encoding H.264 we want to prepend SPS and PPS to each IDR for resilience. Some
+ // devices support this through the V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR control.
+ // TODO(b/161495502): V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR is currently not supported
+ // yet, just log a warning if the operation was unsuccessful for now.
+ if (mDevice->IsCtrlExposed(V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR)) {
+ if (!mDevice->SetExtCtrls(
+ V4L2_CTRL_CLASS_MPEG,
+ {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR, 1)})) {
+ ALOGE("Failed to configure device to prepend SPS and PPS to each IDR");
+ return false;
+ }
+ ALOGV("Device supports prepending SPS and PPS to each IDR");
+ } else {
+ ALOGW("Device doesn't support prepending SPS and PPS to IDR");
+ }
+
+ std::vector<media::V4L2ExtCtrl> h264Ctrls;
+
+ // No B-frames, for lowest decoding latency.
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_B_FRAMES, 0);
+ // Quantization parameter maximum value (for variable bitrate control).
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 51);
+
+ // Set H.264 profile.
+ int32_t profile = media::V4L2Device::VideoCodecProfileToV4L2H264Profile(outputProfile);
+ if (profile < 0) {
+ ALOGE("Trying to set invalid H.264 profile");
+ return false;
+ }
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_PROFILE, profile);
+
+ // Set H.264 output level. Use Level 4.0 as fallback default.
+ // TODO(dstaessens): Investigate code added by hiroh@ recently to select level in Chrome VEA.
+ uint8_t h264Level = outputH264Level.value_or(media::H264SPS::kLevelIDC4p0);
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ media::V4L2Device::H264LevelIdcToV4L2H264Level(h264Level));
+
+ // Ask not to put SPS and PPS into separate bitstream buffers.
+ h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+
+ // Ignore return value as these controls are optional.
+ mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(h264Ctrls));
+
+ return true;
+}
+
+// Query the codec 2.0 interface for the currently requested dynamic encoding parameters
+// (bitrate, framerate, key frame request) and apply any changes to the V4L2 device.
+// Runs on the encoder task runner before each encode. Returns false (and reports an
+// error) only on a fatal interface query/config failure; device-side failures are
+// logged but tolerated (see the TODOs referencing unsupported controls).
+bool V4L2EncodeComponent::updateEncodingParameters() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Query the interface for the encoding parameters requested by the codec 2.0 framework.
+    C2StreamBitrateInfo::output bitrateInfo;
+    C2StreamFrameRateInfo::output framerateInfo;
+    c2_status_t status =
+            mInterface->query({&bitrateInfo, &framerateInfo}, {}, C2_DONT_BLOCK, nullptr);
+    if (status != C2_OK) {
+        ALOGE("Failed to query interface for encoding parameters (error code: %d)", status);
+        reportError(status);
+        return false;
+    }
+
+    // Ask device to change bitrate if it's different from the currently configured bitrate.
+    uint32_t bitrate = bitrateInfo.value;
+    if (mBitrate != bitrate) {
+        ALOG_ASSERT(bitrate > 0u);
+        ALOGV("Setting bitrate to %u", bitrate);
+        if (!mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+                                  {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, bitrate)})) {
+            // TODO(b/161495749): V4L2_CID_MPEG_VIDEO_BITRATE is currently not supported yet, assume
+            // the operation was successful for now.
+            ALOGW("Requesting bitrate change failed");
+        }
+        // Cache the new value even on failure so we don't retry the same change every frame.
+        mBitrate = bitrate;
+    }
+
+    // Ask device to change framerate if it's different from the currently configured framerate.
+    // TODO(dstaessens): Move IOCTL to device and use helper function.
+    uint32_t framerate = static_cast<uint32_t>(std::round(framerateInfo.value));
+    if (mFramerate != framerate) {
+        ALOG_ASSERT(framerate > 0u);
+        ALOGV("Setting framerate to %u", framerate);
+        struct v4l2_streamparm parms;
+        memset(&parms, 0, sizeof(v4l2_streamparm));
+        parms.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+        parms.parm.output.timeperframe.numerator = 1;
+        parms.parm.output.timeperframe.denominator = framerate;
+        if (mDevice->Ioctl(VIDIOC_S_PARM, &parms) != 0) {
+            // TODO(b/161499573): VIDIOC_S_PARM is currently not supported yet, assume the operation
+            // was successful for now.
+            ALOGW("Requesting framerate change failed");
+        }
+        // Cache the new value even on failure so we don't retry the same change every frame.
+        mFramerate = framerate;
+    }
+
+    // Check whether an explicit key frame was requested, if so reset the key frame counter to
+    // immediately request a key frame.
+    C2StreamRequestSyncFrameTuning::output requestKeyFrame;
+    status = mInterface->query({&requestKeyFrame}, {}, C2_DONT_BLOCK, nullptr);
+    if (status != C2_OK) {
+        ALOGE("Failed to query interface for key frame request (error code: %d)", status);
+        reportError(status);
+        return false;
+    }
+    if (requestKeyFrame.value == C2_TRUE) {
+        mKeyFrameCounter = 0;
+        // Clear the one-shot request on the interface so we don't force a key frame again
+        // on the next call.
+        requestKeyFrame.value = C2_FALSE;
+        std::vector<std::unique_ptr<C2SettingResult>> failures;
+        status = mInterface->config({&requestKeyFrame}, C2_MAY_BLOCK, &failures);
+        if (status != C2_OK) {
+            ALOGE("Failed to reset key frame request on interface (error code: %d)", status);
+            reportError(status);
+            return false;
+        }
+    }
+
+    // Request the next frame to be a key frame each time the counter reaches 0.
+    if (mKeyFrameCounter == 0) {
+        if (!mDevice->SetExtCtrls(V4L2_CTRL_CLASS_MPEG,
+                                  {media::V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME)})) {
+            // TODO(b/161498590): V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME is currently not supported
+            // yet, assume the operation was successful for now.
+            ALOGW("Failed requesting key frame");
+        }
+    }
+
+    return true;
+}
+
+// Pop the next work item from the input work queue and hand it to encode(). Handles
+// empty CSD requests, EOS-triggered draining, and transitions to the
+// WAITING_FOR_INPUT(_BUFFERS) states. Reposts itself while more input work is queued.
+// Runs on the encoder task runner.
+void V4L2EncodeComponent::scheduleNextEncodeTask() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mEncoderState == EncoderState::ENCODING || mEncoderState == EncoderState::ERROR);
+
+    // If we're in the error state we can immediately return.
+    if (mEncoderState == EncoderState::ERROR) {
+        return;
+    }
+
+    // Get the next work item. Currently only a single worklet per work item is supported. An input
+    // buffer should always be supplied unless this is a drain or CSD request.
+    ALOG_ASSERT(!mInputWorkQueue.empty());
+    C2Work* work = mInputWorkQueue.front().get();
+    ALOG_ASSERT(work->input.buffers.size() <= 1u && work->worklets.size() == 1u);
+
+    // Set the default values for the output worklet.
+    work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+
+    uint64_t index = work->input.ordinal.frameIndex.peeku();
+    int64_t timestamp = static_cast<int64_t>(work->input.ordinal.timestamp.peeku());
+    bool endOfStream = work->input.flags & C2FrameData::FLAG_END_OF_STREAM;
+    ALOGV("Scheduling next encode (index: %" PRIu64 ", timestamp: %" PRId64 ", EOS: %d)", index,
+          timestamp, endOfStream);
+
+    if (!work->input.buffers.empty()) {
+        // Check if the device has free input buffers available. If not we'll switch to the
+        // WAITING_FOR_INPUT_BUFFERS state, and resume encoding once we're notified buffers are
+        // available in the onInputBufferDone() task. Note: The input buffers are not copied into
+        // the device's input buffers, but rather a memory pointer is imported. We still have to
+        // throttle the number of enqueues queued simultaneously on the device however.
+        if (mInputQueue->FreeBuffersCount() == 0) {
+            ALOGV("Waiting for device to return input buffers");
+            setEncoderState(EncoderState::WAITING_FOR_INPUT_BUFFERS);
+            return;
+        }
+
+        C2ConstGraphicBlock inputBlock =
+                work->input.buffers.front()->data().graphicBlocks().front();
+
+        // If encoding fails, we'll wait for an event (e.g. input buffers available) to start
+        // encoding again.
+        if (!encode(inputBlock, index, timestamp)) {
+            return;
+        }
+    }
+
+    // The codec 2.0 framework might queue an empty CSD request, but this is currently not
+    // supported. We will return the CSD with the first encoded buffer work.
+    // TODO(dstaessens): Avoid doing this, store CSD request work at start of output queue.
+    if (work->input.buffers.empty() && !endOfStream) {
+        ALOGV("Discarding empty CSD request");
+        reportWork(std::move(mInputWorkQueue.front()));
+    } else {
+        // Move the work item to the output queue; it will be reported once its input buffer
+        // is returned and (if applicable) its encoded output is produced.
+        mOutputWorkQueue.push_back(std::move(mInputWorkQueue.front()));
+    }
+    mInputWorkQueue.pop();
+
+    // Drain the encoder if required.
+    if (endOfStream) {
+        drainTask(C2Component::DRAIN_COMPONENT_WITH_EOS);
+    }
+
+    if (mEncoderState == EncoderState::DRAINING) {
+        return;
+    } else if (mInputWorkQueue.empty()) {
+        setEncoderState(EncoderState::WAITING_FOR_INPUT);
+        return;
+    }
+
+    // Queue the next work item to be encoded.
+    mEncoderTaskRunner->PostTask(
+            FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+}
+
+// Encode a single graphic block: update dynamic encoding parameters, optionally convert
+// the input to the device's pixel format, wrap it in an InputFrame and enqueue it on the
+// V4L2 input queue. Also starts streaming/polling on first use and keeps the output
+// queue saturated with buffers for the encoded bitstream. Returns false on failure.
+bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+
+    // Update dynamic encoding parameters (bitrate, framerate, key frame) if requested.
+    if (!updateEncodingParameters()) return false;
+
+    mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
+
+    // If required convert the data to the V4L2 device's configured input pixel format. We
+    // allocate the same amount of buffers on the device input queue and the format convertor,
+    // so we should never run out of conversion buffers if there are free buffers in the input
+    // queue.
+    if (mInputFormatConverter) {
+        if (!mInputFormatConverter->isReady()) {
+            ALOGE("Input format convertor ran out of buffers");
+            reportError(C2_CORRUPTED);
+            return false;
+        }
+
+        ALOGV("Converting input block (index: %" PRIu64 ")", index);
+        c2_status_t status = C2_CORRUPTED;
+        block = mInputFormatConverter->convertBlock(index, block, &status);
+        if (status != C2_OK) {
+            ALOGE("Failed to convert input block (index: %" PRIu64 ")", index);
+            reportError(status);
+            return false;
+        }
+    }
+
+    // Note: width()/height() are unsigned, so use %u rather than %d.
+    ALOGV("Encoding input block (index: %" PRIu64 ", timestamp: %" PRId64 ", size: %ux%u)", index,
+          timestamp, block.width(), block.height());
+
+    // Create a video frame from the graphic block.
+    std::unique_ptr<InputFrame> frame = InputFrame::Create(block);
+    if (!frame) {
+        ALOGE("Failed to create video frame from input block (index: %" PRIu64
+              ", timestamp: %" PRId64 ")",
+              index, timestamp);
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    // Get the video frame layout and pixel format from the graphic block.
+    // TODO(dstaessens) Integrate getVideoFrameLayout() into InputFrame::Create()
+    media::VideoPixelFormat format;
+    std::optional<std::vector<VideoFramePlane>> planes = getVideoFrameLayout(block, &format);
+    if (!planes) {
+        ALOGE("Failed to get input block's layout");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    if (!enqueueInputBuffer(std::move(frame), format, *planes, index, timestamp)) {
+        ALOGE("Failed to enqueue video frame (index: %" PRIu64 ", timestamp: %" PRId64 ")", index,
+              timestamp);
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    // Start streaming on the input and output queue if required.
+    if (!mInputQueue->IsStreaming()) {
+        ALOG_ASSERT(!mOutputQueue->IsStreaming());
+        if (!mOutputQueue->Streamon() || !mInputQueue->Streamon()) {
+            ALOGE("Failed to start streaming on input and output queue");
+            reportError(C2_CORRUPTED);
+            return false;
+        }
+        // Start polling on the V4L2 device.
+        startDevicePoll();
+    }
+
+    // Queue all buffers on the output queue. These buffers will be used to store the encoded
+    // bitstreams.
+    while (mOutputQueue->FreeBuffersCount() > 0) {
+        if (!enqueueOutputBuffer()) return false;
+    }
+
+    return true;
+}
+
+// Ask the V4L2 device to drain all pending encode work by issuing V4L2_ENC_CMD_STOP.
+// Completion is signaled asynchronously: dequeueOutputBuffer() calls onDrainDone(true)
+// when a buffer flagged "last" is dequeued while in the DRAINING state.
+void V4L2EncodeComponent::drain() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Already draining, or in an unrecoverable error state: nothing to do.
+    if (mEncoderState == EncoderState::DRAINING || mEncoderState == EncoderState::ERROR) {
+        return;
+    }
+
+    ALOG_ASSERT(mInputQueue->IsStreaming() && mOutputQueue->IsStreaming());
+    ALOG_ASSERT(!mOutputWorkQueue.empty());
+
+    // TODO(dstaessens): Move IOCTL to device class.
+    struct v4l2_encoder_cmd cmd;
+    memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
+    cmd.cmd = V4L2_ENC_CMD_STOP;
+    if (mDevice->Ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
+        ALOGE("Failed to stop encoder");
+        onDrainDone(false);
+        return;
+    }
+    ALOGV("%s(): Sent STOP command to encoder", __func__);
+
+    setEncoderState(EncoderState::DRAINING);
+}
+
+// Abort all in-flight work: stop polling and streaming, return conversion buffers,
+// and report every queued work item to the listener as aborted (C2_NOT_FOUND).
+// Streaming resumes lazily when new encode work arrives (see encode()).
+void V4L2EncodeComponent::flush() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Stop the device poll thread.
+    stopDevicePoll();
+
+    // Stop streaming on the V4L2 device, which stops all currently queued work and releases all
+    // buffers currently in use by the device.
+    // TODO(b/160540027): Calling streamoff currently results in a bug.
+    for (auto& queue : {mInputQueue, mOutputQueue}) {
+        if (queue && queue->IsStreaming() && !queue->Streamoff()) {
+            ALOGE("Failed to stop streaming on the device queue");
+            reportError(C2_CORRUPTED);
+        }
+    }
+
+    // Return all buffers to the input format convertor and clear all references to graphic blocks
+    // in the input queue. We don't need to clear the output map as those buffers will still be
+    // used.
+    for (auto& it : mInputBuffersMap) {
+        if (mInputFormatConverter && it.second) {
+            mInputFormatConverter->returnBlock(it.first);
+        }
+        it.second = nullptr;
+    }
+
+    // Report all queued work items as aborted.
+    std::list<std::unique_ptr<C2Work>> abortedWorkItems;
+    while (!mInputWorkQueue.empty()) {
+        std::unique_ptr<C2Work> work = std::move(mInputWorkQueue.front());
+        work->result = C2_NOT_FOUND;
+        work->input.buffers.clear();
+        abortedWorkItems.push_back(std::move(work));
+        mInputWorkQueue.pop();
+    }
+    while (!mOutputWorkQueue.empty()) {
+        std::unique_ptr<C2Work> work = std::move(mOutputWorkQueue.front());
+        work->result = C2_NOT_FOUND;
+        work->input.buffers.clear();
+        abortedWorkItems.push_back(std::move(work));
+        mOutputWorkQueue.pop_front();
+    }
+    if (!abortedWorkItems.empty())
+        mListener->onWorkDone_nb(shared_from_this(), std::move(abortedWorkItems));
+
+    // Streaming and polling on the V4L2 device input and output queues will be resumed once new
+    // encode work is queued.
+}
+
+// Fetch a linear block of mOutputBufferSize bytes from the output block pool, to hold
+// an encoded bitstream. Returns nullptr (after reporting the error) on failure.
+std::shared_ptr<C2LinearBlock> V4L2EncodeComponent::fetchOutputBlock() {
+    // TODO(dstaessens): fetchLinearBlock() might be blocking.
+    ALOGV("Fetching linear block (size: %u)", mOutputBufferSize);
+    std::shared_ptr<C2LinearBlock> outputBlock;
+    c2_status_t status = mOutputBlockPool->fetchLinearBlock(
+            mOutputBufferSize,
+            C2MemoryUsage(C2MemoryUsage::CPU_READ |
+                          static_cast<uint64_t>(BufferUsage::VIDEO_ENCODER)),
+            &outputBlock);
+    if (status != C2_OK) {
+        ALOGE("Failed to fetch linear block (error: %d)", status);
+        reportError(status);
+        return nullptr;
+    }
+
+    return outputBlock;
+}
+
+// Called (on the encoder task runner) when the device is done with the input buffer
+// identified by |index|. Releases the client's input block (and returns the conversion
+// buffer when a format convertor is used), reports any work items that are now complete,
+// and resumes encoding if we were blocked waiting for input buffers.
+void V4L2EncodeComponent::onInputBufferDone(uint64_t index) {
+    ALOGV("%s(): Input buffer done (index: %" PRIu64 ")", __func__, index);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+
+    // There are no guarantees the input buffers are returned in order, so we need to find the work
+    // item which this buffer belongs to.
+    C2Work* work = getWorkByIndex(index);
+    if (!work) {
+        ALOGE("Failed to find work associated with input buffer %" PRIu64, index);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+
+    // We're done using the input block, release reference to return the block to the client. If
+    // using an input format convertor, we also need to return the block to the convertor.
+    // Note: use ALOG_ASSERT for consistency with the rest of this file.
+    ALOG_ASSERT(!work->input.buffers.empty());
+    work->input.buffers.front().reset();
+    if (mInputFormatConverter) {
+        c2_status_t status = mInputFormatConverter->returnBlock(index);
+        if (status != C2_OK) {
+            reportError(status);
+            return;
+        }
+    }
+
+    // Return all completed work items. The work item might have been waiting for its input buffer
+    // to be returned, in which case we can report it as completed now. As input buffers are not
+    // necessarily returned in order we might be able to return multiple ready work items now.
+    while (!mOutputWorkQueue.empty() && isWorkDone(*mOutputWorkQueue.front())) {
+        reportWork(std::move(mOutputWorkQueue.front()));
+        mOutputWorkQueue.pop_front();
+    }
+
+    // We might have been waiting for input buffers to be returned after draining finished.
+    if (mEncoderState == EncoderState::DRAINING && mOutputWorkQueue.empty()) {
+        ALOGV("Draining done");
+        mEncoderState = EncoderState::WAITING_FOR_INPUT_BUFFERS;
+    }
+
+    // If we previously used up all input queue buffers we can start encoding again now.
+    if ((mEncoderState == EncoderState::WAITING_FOR_INPUT_BUFFERS) && !mInputWorkQueue.empty()) {
+        setEncoderState(EncoderState::ENCODING);
+        mEncoderTaskRunner->PostTask(
+                FROM_HERE,
+                ::base::BindOnce(&V4L2EncodeComponent::scheduleNextEncodeTask, mWeakThis));
+    }
+}
+
+// Called (on the encoder task runner) when the device produced an encoded bitstream of
+// |payloadSize| bytes in |outputBlock|. Extracts and attaches CSD on first output,
+// attaches the encoded buffer to the work item matching |timestamp|, and reports any
+// work items that are now fully complete.
+void V4L2EncodeComponent::onOutputBufferDone(uint32_t payloadSize, bool keyFrame, int64_t timestamp,
+                                             std::shared_ptr<C2LinearBlock> outputBlock) {
+    ALOGV("%s(): output buffer done (timestamp: %" PRId64 ", size: %u, key frame: %d)", __func__,
+          timestamp, payloadSize, keyFrame);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mEncoderState == EncoderState::ERROR) {
+        return;
+    }
+
+    C2ConstLinearBlock constBlock =
+            outputBlock->share(outputBlock->offset(), payloadSize, C2Fence());
+
+    // If no CSD (content-specific-data, e.g. SPS for H.264) has been submitted yet, we expect this
+    // output block to contain CSD. We only submit the CSD once, even if it's attached to each key
+    // frame.
+    if (!mCSDSubmitted) {
+        ALOGV("No CSD submitted yet, extracting CSD");
+        std::unique_ptr<C2StreamInitDataInfo::output> csd;
+        C2ReadView view = constBlock.map().get();
+        extractCSDInfo(&csd, view.data(), view.capacity());
+        if (!csd) {
+            ALOGE("Failed to extract CSD");
+            reportError(C2_CORRUPTED);
+            return;
+        }
+
+        // Attach the CSD to the first item in our output work queue.
+        // Note: use ALOG_ASSERT for consistency with the rest of this file.
+        ALOG_ASSERT(!mOutputWorkQueue.empty());
+        C2Work* work = mOutputWorkQueue.front().get();
+        work->worklets.front()->output.configUpdate.push_back(std::move(csd));
+        mCSDSubmitted = true;
+    }
+
+    // Get the work item associated with the timestamp.
+    C2Work* work = getWorkByTimestamp(timestamp);
+    if (!work) {
+        // It's possible we got an empty CSD request with timestamp 0, which we currently just
+        // discard.
+        // TODO(dstaessens): Investigate handling empty CSD requests.
+        if (timestamp != 0) {
+            reportError(C2_CORRUPTED);
+        }
+        return;
+    }
+
+    std::shared_ptr<C2Buffer> buffer = C2Buffer::CreateLinearBuffer(std::move(constBlock));
+    if (keyFrame) {
+        buffer->setInfo(
+                std::make_shared<C2StreamPictureTypeMaskInfo::output>(0u, C2Config::SYNC_FRAME));
+    }
+    work->worklets.front()->output.buffers.emplace_back(buffer);
+
+    // We can report the work item as completed if its associated input buffer has also been
+    // released. As output buffers are not necessarily returned in order we might be able to return
+    // multiple ready work items now.
+    while (!mOutputWorkQueue.empty() && isWorkDone(*mOutputWorkQueue.front())) {
+        reportWork(std::move(mOutputWorkQueue.front()));
+        mOutputWorkQueue.pop_front();
+    }
+}
+
+// Find the pending work item in the output work queue whose frame index matches |index|.
+// Returns a non-owning pointer, or nullptr if no such work item exists. The linear scan
+// is fine: the output work queue only ever holds a handful of items.
+C2Work* V4L2EncodeComponent::getWorkByIndex(uint64_t index) {
+    ALOGV("%s(): getting work item (index: %" PRIu64 ")", __func__, index);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    auto it = std::find_if(mOutputWorkQueue.begin(), mOutputWorkQueue.end(),
+                           [index](const std::unique_ptr<C2Work>& w) {
+                               return w->input.ordinal.frameIndex.peeku() == index;
+                           });
+    if (it == mOutputWorkQueue.end()) {
+        ALOGE("Failed to find work (index: %" PRIu64 ")", index);
+        return nullptr;
+    }
+    return it->get();
+}
+
+// Find the pending work item in the output work queue whose timestamp matches
+// |timestamp|, skipping empty EOS work items whose timestamps might clash. Returns a
+// non-owning pointer, or nullptr if no such work item exists.
+C2Work* V4L2EncodeComponent::getWorkByTimestamp(int64_t timestamp) {
+    ALOGV("%s(): getting work item (timestamp: %" PRId64 ")", __func__, timestamp);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(timestamp >= 0);
+
+    // Find the work with specified timestamp by looping over the output work queue. This should be
+    // very fast as the output work queue will never be longer then a few items. Ignore empty work
+    // items that are marked as EOS, as their timestamp might clash with other work items.
+    auto it = std::find_if(mOutputWorkQueue.begin(), mOutputWorkQueue.end(),
+                           [timestamp](const std::unique_ptr<C2Work>& w) {
+                               return !(w->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
+                                      w->input.ordinal.timestamp.peeku() ==
+                                              static_cast<uint64_t>(timestamp);
+                           });
+    if (it == mOutputWorkQueue.end()) {
+        // |timestamp| is signed, so use PRId64 (PRIu64 was a format specifier mismatch).
+        ALOGE("Failed to find work (timestamp: %" PRId64 ")", timestamp);
+        return nullptr;
+    }
+    return it->get();
+}
+
+// Return true if |work| is fully processed and can be reported to the listener: its EOS
+// flag (if set) has been propagated to the output, its input buffer has been returned by
+// the device, and (when input was supplied) an encoded output buffer has been attached.
+bool V4L2EncodeComponent::isWorkDone(const C2Work& work) const {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if ((work.input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
+        !(work.worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM)) {
+        ALOGV("Work item %" PRIu64 " is marked as EOS but draining has not finished yet",
+              work.input.ordinal.frameIndex.peeku());
+        return false;
+    }
+
+    // A non-null front input buffer means the device hasn't released it yet (it is reset
+    // in onInputBufferDone()).
+    if (!work.input.buffers.empty() && work.input.buffers.front()) {
+        ALOGV("Input buffer associated with work item %" PRIu64 " not returned yet",
+              work.input.ordinal.frameIndex.peeku());
+        return false;
+    }
+
+    // If the work item had an input buffer to be encoded, it should have an output buffer set.
+    if (!work.input.buffers.empty() && work.worklets.front()->output.buffers.empty()) {
+        ALOGV("Output buffer associated with work item %" PRIu64 " not returned yet",
+              work.input.ordinal.frameIndex.peeku());
+        return false;
+    }
+
+    return true;
+}
+
+// Report a single finished work item to the codec 2.0 listener with result C2_OK.
+// Takes ownership of |work|.
+void V4L2EncodeComponent::reportWork(std::unique_ptr<C2Work> work) {
+    ALOG_ASSERT(work);
+    ALOGV("%s(): Reporting work item as finished (index: %llu, timestamp: %llu)", __func__,
+          work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    work->result = C2_OK;
+    work->workletsProcessed = static_cast<uint32_t>(work->worklets.size());
+
+    std::list<std::unique_ptr<C2Work>> finishedWorkList;
+    finishedWorkList.emplace_back(std::move(work));
+    mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorkList));
+}
+
+// Start the device poll thread; serviceDeviceTask() is invoked on device activity and
+// onPollError() on poll failures. Returns false (after reporting an error) on failure.
+bool V4L2EncodeComponent::startDevicePoll() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if (!mDevice->StartPolling(
+                ::base::BindRepeating(&V4L2EncodeComponent::serviceDeviceTask, mWeakThis),
+                ::base::BindRepeating(&V4L2EncodeComponent::onPollError, mWeakThis))) {
+        ALOGE("Device poll thread failed to start");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    ALOGV("Device poll started");
+    return true;
+}
+
+// Stop the device poll thread. Returns false (after reporting an error) on failure.
+bool V4L2EncodeComponent::stopDevicePoll() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if (!mDevice->StopPolling()) {
+        ALOGE("Failed to stop polling on the device");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    ALOGV("Device poll stopped");
+    return true;
+}
+
+// Error callback for the device poll thread; moves the component into the error state.
+void V4L2EncodeComponent::onPollError() {
+    ALOGV("%s()", __func__);
+    reportError(C2_CORRUPTED);
+}
+
+// Poll-thread callback (bounced to the encoder task runner): dequeue all completed
+// input (VIDEO_OUTPUT) and output (VIDEO_CAPTURE) buffers from the device.
+void V4L2EncodeComponent::serviceDeviceTask(bool /*event*/) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+
+    if (mEncoderState == EncoderState::ERROR) {
+        return;
+    }
+
+    // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free list.
+    while (mInputQueue->QueuedBuffersCount() > 0) {
+        if (!dequeueInputBuffer()) break;
+    }
+
+    // Dequeue completed output (VIDEO_CAPTURE) buffers, and recycle to the free list.
+    while (mOutputQueue->QueuedBuffersCount() > 0) {
+        if (!dequeueOutputBuffer()) break;
+    }
+
+    ALOGV("%s() - done", __func__);
+}
+
+// Queue |frame| on the V4L2 input queue as a DMA-buf, tagging it with |timestamp| so the
+// matching encoded output can be identified later. The frame is kept alive in
+// mInputBuffersMap (keyed by V4L2 buffer id) until the device returns the buffer.
+bool V4L2EncodeComponent::enqueueInputBuffer(std::unique_ptr<InputFrame> frame,
+                                             media::VideoPixelFormat format,
+                                             const std::vector<VideoFramePlane>& planes,
+                                             int64_t index, int64_t timestamp) {
+    ALOGV("%s(): queuing input buffer (index: %" PRId64 ")", __func__, index);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mInputQueue->FreeBuffersCount() > 0);
+    ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+    ALOG_ASSERT(mInputLayout->format() == format);
+    ALOG_ASSERT(mInputLayout->planes().size() == planes.size());
+
+    auto buffer = mInputQueue->GetFreeBuffer();
+    if (!buffer) {
+        ALOGE("Failed to get free buffer from device input queue");
+        return false;
+    }
+
+    // Mark the buffer with the frame's timestamp so we can identify the associated output buffers.
+    buffer->SetTimeStamp(
+            {.tv_sec = static_cast<time_t>(timestamp / ::base::Time::kMicrosecondsPerSecond),
+             .tv_usec = static_cast<time_t>(timestamp % ::base::Time::kMicrosecondsPerSecond)});
+    size_t bufferId = buffer->BufferId();
+
+    for (size_t i = 0; i < planes.size(); ++i) {
+        // Single-buffer input format may have multiple color planes, so bytesUsed of the single
+        // buffer should be sum of each color planes' size.
+        size_t bytesUsed = 0;
+        if (planes.size() == 1) {
+            bytesUsed = media::VideoFrame::AllocationSize(format, mInputLayout->coded_size());
+        } else {
+            bytesUsed = ::base::checked_cast<size_t>(
+                    media::VideoFrame::PlaneSize(format, i, mInputLayout->coded_size()).GetArea());
+        }
+
+        // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
+        // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
+        // right interface, including any necessary validation and potential alignment.
+        buffer->SetPlaneDataOffset(i, planes[i].mOffset);
+        // The data offset is counted as part of bytesUsed/length below because V4L2 expects
+        // them to include the offset bytes.
+        bytesUsed += planes[i].mOffset;
+        // Workaround: filling length should not be needed. This is a bug of videobuf2 library.
+        buffer->SetPlaneSize(i, mInputLayout->planes()[i].size + planes[i].mOffset);
+        buffer->SetPlaneBytesUsed(i, bytesUsed);
+    }
+
+    std::move(*buffer).QueueDMABuf(frame->getFDs());
+
+    ALOGV("Queued buffer in input queue (index: %" PRId64 ", timestamp: %" PRId64
+          ", bufferId: %zu)",
+          index, timestamp, bufferId);
+
+    mInputBuffersMap[bufferId] = {index, std::move(frame)};
+
+    return true;
+}
+
+// Fetch a linear block from the output block pool and queue it on the V4L2 output queue
+// as a DMA-buf to receive an encoded bitstream. The block is tracked in
+// mOutputBuffersMap (keyed by V4L2 buffer id) until the device fills and returns it.
+bool V4L2EncodeComponent::enqueueOutputBuffer() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mOutputQueue->FreeBuffersCount() > 0);
+
+    auto buffer = mOutputQueue->GetFreeBuffer();
+    if (!buffer) {
+        ALOGE("Failed to get free buffer from device output queue");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    std::shared_ptr<C2LinearBlock> outputBlock = fetchOutputBlock();
+    if (!outputBlock) {
+        ALOGE("Failed to fetch output block");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    size_t bufferId = buffer->BufferId();
+
+    // The linear block is backed by a single dmabuf fd (first entry of the handle).
+    std::vector<int> fds;
+    fds.push_back(outputBlock->handle()->data[0]);
+    if (!std::move(*buffer).QueueDMABuf(fds)) {
+        ALOGE("Failed to queue output buffer using QueueDMABuf");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    ALOG_ASSERT(!mOutputBuffersMap[bufferId]);
+    mOutputBuffersMap[bufferId] = std::move(outputBlock);
+    ALOGV("%s(): Queued buffer in output queue (bufferId: %zu)", __func__, bufferId);
+    return true;
+}
+
+// Dequeue one completed buffer from the V4L2 input queue, release the associated
+// InputFrame and notify onInputBufferDone(). Returns false when no buffer was dequeued
+// (no more ready buffers, or a device error which is also reported).
+bool V4L2EncodeComponent::dequeueInputBuffer() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+    ALOG_ASSERT(mInputQueue->QueuedBuffersCount() > 0);
+
+    std::pair<bool, media::V4L2ReadableBufferRef> result = mInputQueue->DequeueBuffer();
+    if (!result.first) {
+        ALOGE("Failed to dequeue buffer from input queue");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+    if (!result.second) {
+        // No more buffers ready to be dequeued in input queue.
+        return false;
+    }
+
+    const media::V4L2ReadableBufferRef buffer = std::move(result.second);
+    uint64_t index = mInputBuffersMap[buffer->BufferId()].first;
+    int64_t timestamp = buffer->GetTimeStamp().tv_usec +
+                        buffer->GetTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond;
+    // |index| is unsigned, so use PRIu64 (PRId64 was a format specifier mismatch).
+    ALOGV("Dequeued buffer from input queue (index: %" PRIu64 ", timestamp: %" PRId64
+          ", bufferId: %zu)",
+          index, timestamp, buffer->BufferId());
+
+    // Drop our reference to the InputFrame, closing its dmabuf fds.
+    mInputBuffersMap[buffer->BufferId()].second = nullptr;
+    onInputBufferDone(index);
+
+    return true;
+}
+
+// Dequeue one completed buffer from the V4L2 output queue, hand the encoded bitstream to
+// onOutputBufferDone(), finish draining when the "last" buffer arrives, and queue a
+// replacement output buffer. Returns false when no buffer was dequeued or on error.
+bool V4L2EncodeComponent::dequeueOutputBuffer() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mEncoderState != EncoderState::UNINITIALIZED);
+    ALOG_ASSERT(mOutputQueue->QueuedBuffersCount() > 0);
+
+    std::pair<bool, media::V4L2ReadableBufferRef> result = mOutputQueue->DequeueBuffer();
+    if (!result.first) {
+        ALOGE("Failed to dequeue buffer from output queue");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+    if (!result.second) {
+        // No more buffers ready to be dequeued in output queue.
+        return false;
+    }
+
+    media::V4L2ReadableBufferRef buffer = std::move(result.second);
+    // The payload starts at the plane's data offset (see the data_offset workaround in
+    // enqueueInputBuffer()).
+    size_t encodedDataSize = buffer->GetPlaneBytesUsed(0) - buffer->GetPlaneDataOffset(0);
+    ::base::TimeDelta timestamp = ::base::TimeDelta::FromMicroseconds(
+            buffer->GetTimeStamp().tv_usec +
+            buffer->GetTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond);
+
+    ALOGV("Dequeued buffer from output queue (timestamp: %" PRId64
+          ", bufferId: %zu, data size: %zu, EOS: %d)",
+          timestamp.InMicroseconds(), buffer->BufferId(), encodedDataSize, buffer->IsLast());
+
+    if (!mOutputBuffersMap[buffer->BufferId()]) {
+        ALOGE("Failed to find output block associated with output buffer");
+        reportError(C2_CORRUPTED);
+        return false;
+    }
+
+    std::shared_ptr<C2LinearBlock> block = std::move(mOutputBuffersMap[buffer->BufferId()]);
+    if (encodedDataSize > 0) {
+        onOutputBufferDone(encodedDataSize, buffer->IsKeyframe(), timestamp.InMicroseconds(),
+                           std::move(block));
+    }
+
+    // If the buffer is marked as last and we were flushing the encoder, flushing is now done.
+    if ((mEncoderState == EncoderState::DRAINING) && buffer->IsLast()) {
+        onDrainDone(true);
+
+        // Start the encoder again.
+        struct v4l2_encoder_cmd cmd;
+        memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
+        cmd.cmd = V4L2_ENC_CMD_START;
+        if (mDevice->Ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
+            ALOGE("Failed to restart encoder after flushing (V4L2_ENC_CMD_START)");
+            reportError(C2_CORRUPTED);
+            return false;
+        }
+    }
+
+    // Queue a new output buffer to replace the one we dequeued.
+    // Release our reference first so the V4L2 buffer returns to the free list; failure of
+    // enqueueOutputBuffer() is reported internally, so its return value is ignored here.
+    buffer = nullptr;
+    enqueueOutputBuffer();
+
+    return true;
+}
+
+// Allocate kInputBufferCount DMABUF buffer slots on the V4L2 input queue and size the
+// bookkeeping map accordingly. No actual frame memory is allocated here.
+bool V4L2EncodeComponent::createInputBuffers() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(!mInputQueue->IsStreaming());
+    ALOG_ASSERT(mInputBuffersMap.empty());
+
+    // No memory is allocated here, we just generate a list of buffers on the input queue, which
+    // will hold memory handles to the real buffers.
+    if (mInputQueue->AllocateBuffers(kInputBufferCount, V4L2_MEMORY_DMABUF) < kInputBufferCount) {
+        ALOGE("Failed to create V4L2 input buffers.");
+        return false;
+    }
+
+    mInputBuffersMap.resize(mInputQueue->AllocatedBuffersCount());
+    return true;
+}
+
+// Fetch the output block pool from the interface and allocate kOutputBufferCount DMABUF
+// buffer slots on the V4L2 output queue. No actual bitstream memory is allocated here.
+bool V4L2EncodeComponent::createOutputBuffers() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(!mOutputQueue->IsStreaming());
+    ALOG_ASSERT(mOutputBuffersMap.empty());
+
+    // Fetch the output block pool.
+    C2BlockPool::local_id_t poolId = mInterface->getBlockPoolId();
+    c2_status_t status = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
+    if (status != C2_OK || !mOutputBlockPool) {
+        ALOGE("Failed to get output block pool, error: %d", status);
+        return false;
+    }
+
+    // No memory is allocated here, we just generate a list of buffers on the output queue, which
+    // will hold memory handles to the real buffers.
+    if (mOutputQueue->AllocateBuffers(kOutputBufferCount, V4L2_MEMORY_DMABUF) <
+        kOutputBufferCount) {
+        ALOGE("Failed to create V4L2 output buffers.");
+        return false;
+    }
+
+    mOutputBuffersMap.resize(mOutputQueue->AllocatedBuffersCount());
+    return true;
+}
+
+// Release all V4L2 input buffer slots and the associated InputFrame references.
+// Safe to call when the queue was never created or has no allocated buffers.
+void V4L2EncodeComponent::destroyInputBuffers() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Check the queue exists before it is dereferenced (the original asserted
+    // !mInputQueue->IsStreaming() before the null check, dereferencing a possibly-null
+    // pointer in debug builds).
+    if (!mInputQueue || mInputQueue->AllocatedBuffersCount() == 0) return;
+    ALOG_ASSERT(!mInputQueue->IsStreaming());
+
+    mInputQueue->DeallocateBuffers();
+    mInputBuffersMap.clear();
+}
+
+// Release all V4L2 output buffer slots, drop the tracked output blocks and the output
+// block pool. Safe to call when the queue was never created or has no allocated buffers.
+void V4L2EncodeComponent::destroyOutputBuffers() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Check the queue exists before it is dereferenced (the original asserted
+    // !mOutputQueue->IsStreaming() before the null check, dereferencing a possibly-null
+    // pointer in debug builds).
+    if (!mOutputQueue || mOutputQueue->AllocatedBuffersCount() == 0) return;
+    ALOG_ASSERT(!mOutputQueue->IsStreaming());
+
+    mOutputQueue->DeallocateBuffers();
+    mOutputBuffersMap.clear();
+    mOutputBlockPool.reset();
+}
+
+// Move both the component and the encoder into the error state and notify the listener.
+// Idempotent at the encoder level: the listener is only notified on the first error.
+void V4L2EncodeComponent::reportError(c2_status_t error) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    {
+        // mComponentState is shared with other threads, so take the component lock.
+        std::lock_guard<std::mutex> lock(mComponentLock);
+        setComponentState(ComponentState::ERROR);
+    }
+
+    // TODO(dstaessens): Report all pending work items as finished upon failure.
+    if (mEncoderState != EncoderState::ERROR) {
+        setEncoderState(EncoderState::ERROR);
+        mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
+    }
+}
+
+// Transition the component to |state|, asserting (in debug builds) that the transition
+// is one of the allowed state-machine edges. Caller must hold mComponentLock where the
+// state is accessed concurrently (see reportError()).
+void V4L2EncodeComponent::setComponentState(ComponentState state) {
+    // Check whether the state change is valid.
+    switch (state) {
+    case ComponentState::UNLOADED:
+        ALOG_ASSERT(mComponentState == ComponentState::LOADED);
+        break;
+    case ComponentState::LOADED:
+        ALOG_ASSERT(mComponentState == ComponentState::UNLOADED ||
+                    mComponentState == ComponentState::RUNNING ||
+                    mComponentState == ComponentState::ERROR);
+        break;
+    case ComponentState::RUNNING:
+        ALOG_ASSERT(mComponentState == ComponentState::LOADED);
+        break;
+    case ComponentState::ERROR:
+        // The error state can be entered from any state.
+        break;
+    }
+
+    ALOGV("Changed component state from %s to %s", componentStateToString(mComponentState),
+          componentStateToString(state));
+    mComponentState = state;
+}
+
+// Transition the encoder to |state|, asserting (in debug builds) that the transition is
+// one of the allowed state-machine edges. Must run on the encoder task runner.
+void V4L2EncodeComponent::setEncoderState(EncoderState state) {
+    ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Check whether the state change is valid.
+    switch (state) {
+    case EncoderState::UNINITIALIZED:
+        // TODO(dstaessens): Check all valid state changes.
+        break;
+    case EncoderState::WAITING_FOR_INPUT:
+        ALOG_ASSERT(mEncoderState == EncoderState::UNINITIALIZED ||
+                    mEncoderState == EncoderState::ENCODING ||
+                    mEncoderState == EncoderState::DRAINING);
+        break;
+    case EncoderState::WAITING_FOR_INPUT_BUFFERS:
+        ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+        break;
+    case EncoderState::ENCODING:
+        ALOG_ASSERT(mEncoderState == EncoderState::WAITING_FOR_INPUT ||
+                    mEncoderState == EncoderState::WAITING_FOR_INPUT_BUFFERS ||
+                    mEncoderState == EncoderState::DRAINING);
+        break;
+    case EncoderState::DRAINING:
+        ALOG_ASSERT(mEncoderState == EncoderState::ENCODING);
+        break;
+    case EncoderState::ERROR:
+        // The error state can be entered from any state.
+        break;
+    }
+
+    ALOGV("Changed encoder state from %s to %s", encoderStateToString(mEncoderState),
+          encoderStateToString(state));
+    mEncoderState = state;
+}
+
+// Convert a component state to a human-readable string for logging.
+const char* V4L2EncodeComponent::componentStateToString(V4L2EncodeComponent::ComponentState state) {
+    switch (state) {
+    case ComponentState::UNLOADED:
+        return "UNLOADED";
+    case ComponentState::LOADED:
+        return "LOADED";
+    case ComponentState::RUNNING:
+        return "RUNNING";
+    case ComponentState::ERROR:
+        return "ERROR";
+    }
+    // All enumerators are handled above; this fallback avoids falling off the end of a
+    // value-returning function (undefined behavior) if |state| ever holds an invalid value.
+    return "UNKNOWN";
+}
+
+// Convert an encoder state to a human-readable string for logging.
+const char* V4L2EncodeComponent::encoderStateToString(V4L2EncodeComponent::EncoderState state) {
+    switch (state) {
+    case EncoderState::UNINITIALIZED:
+        return "UNINITIALIZED";
+    case EncoderState::WAITING_FOR_INPUT:
+        return "WAITING_FOR_INPUT";
+    case EncoderState::WAITING_FOR_INPUT_BUFFERS:
+        return "WAITING_FOR_INPUT_BUFFERS";
+    case EncoderState::ENCODING:
+        return "ENCODING";
+    case EncoderState::DRAINING:
+        // Fixed: was "Draining", inconsistent with the ALL-CAPS names of all other states.
+        return "DRAINING";
+    case EncoderState::ERROR:
+        return "ERROR";
+    }
+    // All enumerators are handled above; this fallback avoids falling off the end of a
+    // value-returning function (undefined behavior) if |state| ever holds an invalid value.
+    return "UNKNOWN";
+}
+
+} // namespace android
diff --git a/C2EncoderInterface.cpp b/components/V4L2EncodeInterface.cpp
similarity index 84%
rename from C2EncoderInterface.cpp
rename to components/V4L2EncodeInterface.cpp
index 0f19a65..9e6b556 100644
--- a/C2EncoderInterface.cpp
+++ b/components/V4L2EncodeInterface.cpp
@@ -3,16 +3,24 @@
// found in the LICENSE file.
//#define LOG_NDEBUG 0
-#define LOG_TAG "C2EncoderInterface"
+#define LOG_TAG "V4L2EncodeInterface"
-#include <C2EncoderInterface.h>
+#include <v4l2_codec2/components/V4L2EncodeInterface.h>
+
+#include <inttypes.h>
+
#include <C2PlatformSupport.h>
#include <SimpleC2Interface.h>
-#include <accel/video_codecs.h>
-#include <inttypes.h>
+#include <android/hardware/graphics/common/1.0/types.h>
#include <media/stagefright/MediaDefs.h>
#include <utils/Log.h>
+#include <v4l2_device.h>
+#include <v4l2_codec2/common/V4L2ComponentCommon.h>
+#include <video_codecs.h>
+
+using android::hardware::graphics::common::V1_0::BufferUsage;
+
namespace android {
namespace {
@@ -66,14 +74,22 @@
}
}
+std::optional<media::VideoCodec> getCodecFromComponentName(const std::string& name) {
+ if (name == V4L2ComponentName::kH264Encoder)
+ return media::VideoCodec::kCodecH264;
+
+ ALOGE("Unknown name: %s", name.c_str());
+ return std::nullopt;
+}
+
} // namespace
// static
-C2R C2EncoderInterface::ProfileLevelSetter(bool mayBlock,
- C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& videoSize,
- const C2P<C2StreamFrameRateInfo::output>& frameRate,
- const C2P<C2StreamBitrateInfo::output>& bitrate) {
+C2R V4L2EncodeInterface::ProfileLevelSetter(bool mayBlock,
+ C2P<C2StreamProfileLevelInfo::output>& info,
+ const C2P<C2StreamPictureSizeInfo::input>& videoSize,
+ const C2P<C2StreamFrameRateInfo::output>& frameRate,
+ const C2P<C2StreamBitrateInfo::output>& bitrate) {
(void)mayBlock;
static C2Config::level_t lowestConfigLevel = C2Config::LEVEL_UNUSED;
@@ -195,7 +211,7 @@
}
// static
-C2R C2EncoderInterface::SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::input>& videoSize) {
+C2R V4L2EncodeInterface::SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::input>& videoSize) {
(void)mayBlock;
// TODO: maybe apply block limit?
return videoSize.F(videoSize.v.width)
@@ -204,8 +220,8 @@
}
// static
-C2R C2EncoderInterface::IntraRefreshPeriodSetter(bool mayBlock,
- C2P<C2StreamIntraRefreshTuning::output>& period) {
+C2R V4L2EncodeInterface::IntraRefreshPeriodSetter(bool mayBlock,
+ C2P<C2StreamIntraRefreshTuning::output>& period) {
(void)mayBlock;
if (period.v.period < 1) {
period.set().mode = C2Config::INTRA_REFRESH_DISABLED;
@@ -217,28 +233,39 @@
return C2R::Ok();
}
-C2EncoderInterface::C2EncoderInterface(const std::shared_ptr<C2ReflectorHelper>& helper)
- : C2InterfaceHelper(helper) {
+V4L2EncodeInterface::V4L2EncodeInterface(
+ const C2String& name, std::shared_ptr<C2ReflectorHelper> helper)
+ : C2InterfaceHelper(std::move(helper)) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+
setDerivedInstance(this);
+
+ Initialize(name);
}
-void C2EncoderInterface::Initialize(const C2String& name,
- const std::vector<VideoEncodeProfile>& supportedProfiles) {
+void V4L2EncodeInterface::Initialize(const C2String& name) {
+ scoped_refptr<media::V4L2Device> device = media::V4L2Device::Create();
+ if (!device) {
+ ALOGE("Failed to create V4L2 device");
+ mInitStatus = C2_CORRUPTED;
+ return;
+ }
+
// Use type=unsigned int here, otherwise it will cause compile error in
// C2F(mProfileLevel, profile).oneOf(profiles) since std::vector<C2Config::profile_t> cannot
// convert to std::vector<unsigned int>.
std::vector<unsigned int> profiles;
media::Size maxSize;
- for (const auto& supportedProfile : supportedProfiles) {
- C2Config::profile_t profile = videoCodecProfileToC2Profile(supportedProfile.mProfile);
+ for (const auto& supportedProfile : device->GetSupportedEncodeProfiles()) {
+ C2Config::profile_t profile = videoCodecProfileToC2Profile(supportedProfile.profile);
if (profile == C2Config::PROFILE_UNUSED) {
continue; // neglect unrecognizable profile
}
ALOGV("Queried c2_profile = 0x%x : max_size = %d x %d", profile,
- supportedProfile.mMaxResolution.width(), supportedProfile.mMaxResolution.height());
+ supportedProfile.max_resolution.width(), supportedProfile.max_resolution.height());
profiles.push_back(static_cast<unsigned int>(profile));
- maxSize.set_width(std::max(maxSize.width(), supportedProfile.mMaxResolution.width()));
- maxSize.set_height(std::max(maxSize.height(), supportedProfile.mMaxResolution.height()));
+ maxSize.set_width(std::max(maxSize.width(), supportedProfile.max_resolution.width()));
+ maxSize.set_height(std::max(maxSize.height(), supportedProfile.max_resolution.height()));
}
if (profiles.empty()) {
@@ -309,6 +336,18 @@
.withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
+ // TODO(b/167640667) Add VIDEO_ENCODER flag once input convertor is not enabled by default.
+ // When using the format convertor (which is currently always enabled) it's not useful to add
+ // the VIDEO_ENCODER buffer flag for input buffers here. Currently zero-copy is not supported
+ // yet, so when using this flag an additional buffer will be allocated on host side and a copy
+ // will be performed between the guest and host buffer to keep them in sync. This is wasteful as
+ // the buffer is only used on guest side by the format convertor which converts and copies the
+ // buffer into another buffer.
+ //addParameter(DefineParam(mInputMemoryUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+ // .withConstValue(new C2StreamUsageTuning::input(
+ // 0u, static_cast<uint64_t>(BufferUsage::VIDEO_ENCODER)))
+ // .build());
+
addParameter(
DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
@@ -369,9 +408,11 @@
C2F(mOutputBlockPoolIds, m.values).inRange(0, 1)})
.withSetter(Setter<C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
.build());
+
+ mInitStatus = C2_OK;
}
-uint32_t C2EncoderInterface::getKeyFramePeriod() const {
+uint32_t V4L2EncodeInterface::getKeyFramePeriod() const {
if (mKeyFramePeriodUs->value < 0 || mKeyFramePeriodUs->value == INT64_MAX) {
return 0;
}
diff --git a/components/VideoFrame.cpp b/components/VideoFrame.cpp
index bcdb283..cb5efb7 100644
--- a/components/VideoFrame.cpp
+++ b/components/VideoFrame.cpp
@@ -16,25 +16,21 @@
std::unique_ptr<VideoFrame> VideoFrame::Create(std::shared_ptr<C2GraphicBlock> block) {
if (!block) return nullptr;
- std::vector<::base::ScopedFD> fds;
+ std::vector<int> fds;
const C2Handle* const handle = block->handle();
for (int i = 0; i < handle->numFds; i++) {
- fds.emplace_back(dup(handle->data[i]));
- if (!fds.back().is_valid()) {
- ALOGE("Failed to dup(%d), errno=%d", handle->data[i], errno);
- return nullptr;
- }
+ fds.emplace_back(handle->data[i]);
}
return std::unique_ptr<VideoFrame>(new VideoFrame(std::move(block), std::move(fds)));
}
-VideoFrame::VideoFrame(std::shared_ptr<C2GraphicBlock> block, std::vector<::base::ScopedFD> fds)
- : mGraphicBlock(std::move(block)), mFds(std::move(fds)) {}
+VideoFrame::VideoFrame(std::shared_ptr<C2GraphicBlock> block, std::vector<int> fds)
+ : mGraphicBlock(std::move(block)), mFds(fds) {}
VideoFrame::~VideoFrame() = default;
-const std::vector<::base::ScopedFD>& VideoFrame::getFDs() const {
+const std::vector<int>& VideoFrame::getFDs() const {
return mFds;
}
diff --git a/components/VideoFramePool.cpp b/components/VideoFramePool.cpp
index 33e01e9..b6bbfab 100644
--- a/components/VideoFramePool.cpp
+++ b/components/VideoFramePool.cpp
@@ -16,21 +16,68 @@
#include <base/time/time.h>
#include <log/log.h>
-#include <v4l2_codec2/components/VideoTypes.h>
+#include <v4l2_codec2/common/VideoTypes.h>
+#include <v4l2_codec2/plugin_store/C2VdaBqBlockPool.h>
+#include <v4l2_codec2/plugin_store/C2VdaPooledBlockPool.h>
+#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
using android::hardware::graphics::common::V1_0::BufferUsage;
namespace android {
-namespace {
-// The number of times and timeout used between subsequent calls when fetching graphic blocks.
-constexpr size_t kAllocateBufferMaxRetries = 32;
-constexpr size_t kFetchRetryDelayUs = 1000;
-} // namespace
+
+// static
+std::optional<uint32_t> VideoFramePool::getBufferIdFromGraphicBlock(const C2BlockPool& blockPool,
+ const C2Block2D& block) {
+ ALOGV("%s() blockPool.getAllocatorId() = %u", __func__, blockPool.getAllocatorId());
+
+ if (blockPool.getAllocatorId() == android::V4L2AllocatorId::V4L2_BUFFERPOOL) {
+ return C2VdaPooledBlockPool::getBufferIdFromGraphicBlock(block);
+ } else if (blockPool.getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
+ return C2VdaBqBlockPool::getBufferIdFromGraphicBlock(block);
+ }
+
+ ALOGE("%s(): unknown allocator ID: %u", __func__, blockPool.getAllocatorId());
+ return std::nullopt;
+}
+
+// static
+c2_status_t VideoFramePool::requestNewBufferSet(C2BlockPool& blockPool, int32_t bufferCount) {
+ ALOGV("%s() blockPool.getAllocatorId() = %u", __func__, blockPool.getAllocatorId());
+
+ if (blockPool.getAllocatorId() == android::V4L2AllocatorId::V4L2_BUFFERPOOL) {
+ C2VdaPooledBlockPool* bpPool = static_cast<C2VdaPooledBlockPool*>(&blockPool);
+ return bpPool->requestNewBufferSet(bufferCount);
+ } else if (blockPool.getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
+ C2VdaBqBlockPool* bqPool = static_cast<C2VdaBqBlockPool*>(&blockPool);
+ return bqPool->requestNewBufferSet(bufferCount);
+ }
+
+ ALOGE("%s(): unknown allocator ID: %u", __func__, blockPool.getAllocatorId());
+ return C2_BAD_VALUE;
+}
+
+// static
+bool VideoFramePool::setNotifyBlockAvailableCb(C2BlockPool& blockPool, ::base::OnceClosure cb) {
+ ALOGV("%s() blockPool.getAllocatorId() = %u", __func__, blockPool.getAllocatorId());
+
+ if (blockPool.getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
+ C2VdaBqBlockPool* bqPool = static_cast<C2VdaBqBlockPool*>(&blockPool);
+ return bqPool->setNotifyBlockAvailableCb(std::move(cb));
+ }
+ return false;
+}
// static
std::unique_ptr<VideoFramePool> VideoFramePool::Create(
- std::shared_ptr<C2BlockPool> blockPool, const media::Size& size, HalPixelFormat pixelFormat,
- bool isSecure, scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
+ std::shared_ptr<C2BlockPool> blockPool, const size_t numBuffers, const media::Size& size,
+ HalPixelFormat pixelFormat, bool isSecure,
+ scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
+ ALOG_ASSERT(blockPool != nullptr);
+
+ if (requestNewBufferSet(*blockPool, numBuffers) != C2_OK) {
+ return nullptr;
+ }
+
std::unique_ptr<VideoFramePool> pool = ::base::WrapUnique(new VideoFramePool(
std::move(blockPool), size, pixelFormat, isSecure, std::move(taskRunner)));
if (!pool->initialize()) return nullptr;
@@ -85,71 +132,101 @@
mFetchWeakThisFactory.InvalidateWeakPtrs();
}
-void VideoFramePool::getVideoFrame(GetVideoFrameCB cb) {
+bool VideoFramePool::getVideoFrame(GetVideoFrameCB cb) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mClientTaskRunner->RunsTasksInCurrentSequence());
- ++mNumPendingRequests;
- mFetchTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&VideoFramePool::getVideoFrameTask,
- mFetchWeakThis, std::move(cb)));
+ if (mOutputCb) {
+ return false;
+ }
+
+ mOutputCb = std::move(cb);
+ mFetchTaskRunner->PostTask(
+ FROM_HERE, ::base::BindOnce(&VideoFramePool::getVideoFrameTask, mFetchWeakThis));
+ return true;
}
-bool VideoFramePool::hasPendingRequests() const {
+// static
+void VideoFramePool::getVideoFrameTaskThunk(
+ scoped_refptr<::base::SequencedTaskRunner> taskRunner,
+ std::optional<::base::WeakPtr<VideoFramePool>> weakPool) {
ALOGV("%s()", __func__);
- ALOG_ASSERT(mClientTaskRunner->RunsTasksInCurrentSequence());
+ ALOG_ASSERT(weakPool);
- return mNumPendingRequests > 0;
+ taskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&VideoFramePool::getVideoFrameTask, *weakPool));
}
-void VideoFramePool::getVideoFrameTask(GetVideoFrameCB cb) {
+void VideoFramePool::getVideoFrameTask() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mFetchTaskRunner->RunsTasksInCurrentSequence());
- std::unique_ptr<VideoFrame> frame = nullptr;
+ // Variables used to exponential backoff retry when buffer fetching times out.
+ constexpr size_t kFetchRetryDelayInit = 64; // Initial delay: 64us
+ constexpr size_t kFetchRetryDelayMax = 16384; // Max delay: 16ms (1 frame at 60fps)
+ static size_t sNumRetries = 0;
+ static size_t sDelay = kFetchRetryDelayInit;
- size_t numRetries = 0;
- while (numRetries < kAllocateBufferMaxRetries) {
- std::shared_ptr<C2GraphicBlock> block;
- c2_status_t err = mBlockPool->fetchGraphicBlock(mSize.width(), mSize.height(),
- static_cast<uint32_t>(mPixelFormat),
- mMemoryUsage, &block);
-
- if (err == C2_OK) {
- frame = VideoFrame::Create(std::move(block));
- break;
- } else if (err != C2_TIMED_OUT && err != C2_BLOCKING) {
- ALOGE("Failed to fetch block, err=%d, retry %zu times", err, numRetries);
- break;
+ std::shared_ptr<C2GraphicBlock> block;
+ c2_status_t err = mBlockPool->fetchGraphicBlock(mSize.width(), mSize.height(),
+ static_cast<uint32_t>(mPixelFormat),
+ mMemoryUsage, &block);
+ if (err == C2_TIMED_OUT || err == C2_BLOCKING) {
+ if (setNotifyBlockAvailableCb(*mBlockPool,
+ ::base::BindOnce(&VideoFramePool::getVideoFrameTaskThunk,
+ mFetchTaskRunner, mFetchWeakThis))) {
+ ALOGV("%s(): fetchGraphicBlock() timeout, waiting for block available.", __func__);
} else {
- ++numRetries;
- ALOGD("fetchGraphicBlock() timeout. retry %zu times", numRetries);
- usleep(kFetchRetryDelayUs);
+ ALOGV("%s(): fetchGraphicBlock() timeout, waiting %zuus (%zu retry)", __func__, sDelay,
+ sNumRetries + 1);
+ mFetchTaskRunner->PostDelayedTask(
+ FROM_HERE, ::base::BindOnce(&VideoFramePool::getVideoFrameTask, mFetchWeakThis),
+ ::base::TimeDelta::FromMicroseconds(sDelay));
+
+ sDelay = std::min(sDelay * 2, kFetchRetryDelayMax); // Exponential backoff
+ sNumRetries++;
}
+
+ return;
}
- if (numRetries == kAllocateBufferMaxRetries) {
- ALOGE("Timeout to fetch block, retry %zu times", numRetries);
+
+ // Reset to the default value.
+ sNumRetries = 0;
+ sDelay = kFetchRetryDelayInit;
+
+ std::optional<FrameWithBlockId> frameWithBlockId;
+ if (err == C2_OK) {
+ ALOG_ASSERT(block != nullptr);
+ std::optional<uint32_t> bufferId = getBufferIdFromGraphicBlock(*mBlockPool, *block);
+ std::unique_ptr<VideoFrame> frame = VideoFrame::Create(std::move(block));
+ // Only pass the frame + id pair if both have successfully been obtained.
+ // Otherwise exit the loop so a nullopt is passed to the client.
+ if (bufferId && frame) {
+ frameWithBlockId = std::make_pair(std::move(frame), *bufferId);
+ } else {
+ ALOGE("%s(): Failed to generate VideoFrame or get the buffer id.", __func__);
+ }
+ } else {
+ ALOGE("%s(): Failed to fetch block, err=%d", __func__, err);
}
mClientTaskRunner->PostTask(
FROM_HERE, ::base::BindOnce(&VideoFramePool::onVideoFrameReady, mClientWeakThis,
- std::move(cb), std::move(frame)));
+ std::move(frameWithBlockId)));
}
-void VideoFramePool::onVideoFrameReady(GetVideoFrameCB cb, std::unique_ptr<VideoFrame> frame) {
+void VideoFramePool::onVideoFrameReady(std::optional<FrameWithBlockId> frameWithBlockId) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mClientTaskRunner->RunsTasksInCurrentSequence());
- --mNumPendingRequests;
-
- if (!frame) {
+ if (!frameWithBlockId) {
ALOGE("Failed to get GraphicBlock, abandoning all pending requests.");
mClientWeakThisFactory.InvalidateWeakPtrs();
mClientWeakThis = mClientWeakThisFactory.GetWeakPtr();
-
- mNumPendingRequests = 0;
}
- std::move(cb).Run(std::move(frame));
+ ALOG_ASSERT(mOutputCb);
+ std::move(mOutputCb).Run(std::move(frameWithBlockId));
}
} // namespace android
diff --git a/components/include/v4l2_codec2/components/BitstreamBuffer.h b/components/include/v4l2_codec2/components/BitstreamBuffer.h
new file mode 100644
index 0000000..ec8a917
--- /dev/null
+++ b/components/include/v4l2_codec2/components/BitstreamBuffer.h
@@ -0,0 +1,30 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_BITSTREAMBUFFER_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_BITSTREAMBUFFER_H
+
+#include <stdint.h>
+
+#include <base/files/scoped_file.h>
+
+namespace android {
+
+// The BitstreamBuffer class can be used to store encoded video data.
+// Note: The BitstreamBuffer does not take ownership of the data. The file descriptor is not
+// duplicated and the caller is responsible for keeping the data alive.
+struct BitstreamBuffer {
+ BitstreamBuffer(const int32_t id, int dmabuf_fd, const size_t offset, const size_t size)
+ : id(id), dmabuf_fd(dmabuf_fd), offset(offset), size(size) {}
+ ~BitstreamBuffer() = default;
+
+ const int32_t id;
+ int dmabuf_fd;
+ const size_t offset;
+ const size_t size;
+};
+
+} // namespace android
+
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_BITSTREAMBUFFER_H
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h b/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
index 46c565e..b57f6c1 100644
--- a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
+++ b/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
@@ -12,7 +12,7 @@
#include <util/C2InterfaceHelper.h>
#include <size.h>
-#include <v4l2_codec2/components/VideoTypes.h>
+#include <v4l2_codec2/common/VideoTypes.h>
namespace android {
diff --git a/components/include/v4l2_codec2/components/V4L2Decoder.h b/components/include/v4l2_codec2/components/V4L2Decoder.h
index 9cf0532..bdddc7f 100644
--- a/components/include/v4l2_codec2/components/V4L2Decoder.h
+++ b/components/include/v4l2_codec2/components/V4L2Decoder.h
@@ -11,15 +11,14 @@
#include <optional>
#include <base/callback.h>
-#include <base/files/scoped_file.h>
#include <base/memory/weak_ptr.h>
#include <rect.h>
#include <size.h>
+#include <v4l2_codec2/common/VideoTypes.h>
#include <v4l2_codec2/components/VideoDecoder.h>
#include <v4l2_codec2/components/VideoFrame.h>
#include <v4l2_codec2/components/VideoFramePool.h>
-#include <v4l2_codec2/components/VideoTypes.h>
#include <v4l2_device.h>
namespace android {
@@ -63,13 +62,11 @@
void pumpDecodeRequest();
void serviceDeviceTask(bool event);
- void sendOutputBuffer(media::V4L2ReadableBufferRef buffer);
bool dequeueResolutionChangeEvent();
bool changeResolution();
void tryFetchVideoFrame();
- void onVideoFrameReady(media::V4L2WritableBufferRef outputBuffer,
- std::unique_ptr<VideoFrame> block);
+ void onVideoFrameReady(std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId);
std::optional<size_t> getNumOutputBuffers();
std::optional<struct v4l2_format> getFormatInfo();
@@ -98,6 +95,11 @@
std::map<size_t, std::unique_ptr<VideoFrame>> mFrameAtDevice;
+ // Block IDs can be arbitrarily large, but we only have a limited number of
+ // buffers. This maintains an association between a block ID and a specific
+ // V4L2 buffer index.
+ std::map<size_t, size_t> mBlockIdToV4L2Id;
+
State mState = State::Idle;
scoped_refptr<::base::SequencedTaskRunner> mTaskRunner;
diff --git a/components/include/v4l2_codec2/components/V4L2EncodeComponent.h b/components/include/v4l2_codec2/components/V4L2EncodeComponent.h
new file mode 100644
index 0000000..4a61e05
--- /dev/null
+++ b/components/include/v4l2_codec2/components/V4L2EncodeComponent.h
@@ -0,0 +1,280 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_COMPONENT_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_COMPONENT_H
+
+#include <atomic>
+#include <memory>
+#include <optional>
+
+#include <C2Component.h>
+#include <C2ComponentFactory.h>
+#include <C2Config.h>
+#include <C2Enum.h>
+#include <C2Param.h>
+#include <C2ParamDef.h>
+#include <SimpleC2Interface.h>
+#include <base/memory/scoped_refptr.h>
+#include <base/single_thread_task_runner.h>
+#include <base/synchronization/waitable_event.h>
+#include <base/threading/thread.h>
+#include <util/C2InterfaceHelper.h>
+
+#include <size.h>
+#include <v4l2_codec2/common/FormatConverter.h>
+#include <v4l2_codec2/components/V4L2EncodeInterface.h>
+#include <video_frame_layout.h>
+
+namespace media {
+class V4L2Device;
+class V4L2ReadableBuffer;
+class V4L2Queue;
+} // namespace media
+
+namespace android {
+
+struct VideoFramePlane;
+
+class V4L2EncodeComponent : public C2Component,
+ public std::enable_shared_from_this<V4L2EncodeComponent> {
+public:
+ // Create a new instance of the V4L2EncodeComponent.
+ static std::shared_ptr<C2Component> create(C2String name, c2_node_id_t id,
+ std::shared_ptr<C2ReflectorHelper> helper,
+ C2ComponentFactory::ComponentDeleter deleter);
+
+ virtual ~V4L2EncodeComponent() override;
+
+ // Implementation of the C2Component interface.
+ c2_status_t start() override;
+ c2_status_t stop() override;
+ c2_status_t reset() override;
+ c2_status_t release() override;
+ c2_status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) override;
+ c2_status_t drain_nb(drain_mode_t mode) override;
+ c2_status_t flush_sm(flush_mode_t mode,
+ std::list<std::unique_ptr<C2Work>>* const flushedWork) override;
+ c2_status_t announce_nb(const std::vector<C2WorkOutline>& items) override;
+ c2_status_t setListener_vb(const std::shared_ptr<Listener>& listener,
+ c2_blocking_t mayBlock) override;
+ std::shared_ptr<C2ComponentInterface> intf() override;
+
+private:
+ class InputFrame {
+ public:
+ // Create an input frame from a C2GraphicBlock.
+ static std::unique_ptr<InputFrame> Create(const C2ConstGraphicBlock& block);
+ ~InputFrame() = default;
+
+ const std::vector<int>& getFDs() const { return mFds; }
+
+ private:
+ InputFrame(std::vector<int> fds) : mFds(std::move(fds)) {}
+ const std::vector<int> mFds;
+ };
+
+ // Possible component states.
+ enum class ComponentState {
+ UNLOADED, // Initial state of component.
+ LOADED, // The component is stopped, ready to start running.
+ RUNNING, // The component is currently running.
+ ERROR, // An error occurred.
+ };
+
+ // Possible encoder states.
+ enum class EncoderState {
+ UNINITIALIZED, // Not initialized yet or initialization failed.
+ WAITING_FOR_INPUT, // Waiting for work to be queued.
+ WAITING_FOR_INPUT_BUFFERS, // Waiting for V4L2 input queue buffers.
+ ENCODING, // Queuing input buffers.
+ DRAINING, // Flushing encoder.
+        ERROR,                      // Encoder encountered an error.
+ };
+
+ V4L2EncodeComponent(C2String name, c2_node_id_t id,
+ std::shared_ptr<V4L2EncodeInterface> interface);
+
+ V4L2EncodeComponent(const V4L2EncodeComponent&) = delete;
+ V4L2EncodeComponent& operator=(const V4L2EncodeComponent&) = delete;
+
+ // Initialize the encoder on the encoder thread.
+ void startTask(bool* success, ::base::WaitableEvent* done);
+ // Destroy the encoder on the encoder thread.
+ void stopTask(::base::WaitableEvent* done);
+ // Queue a new encode work item on the encoder thread.
+ void queueTask(std::unique_ptr<C2Work> work);
+ // Drain all currently scheduled work on the encoder thread. The encoder will process all
+ // scheduled work and mark the last item as EOS, before processing any new work.
+ void drainTask(drain_mode_t drainMode);
+ // Called on the encoder thread when a drain is completed.
+ void onDrainDone(bool done);
+ // Flush all currently scheduled work on the encoder thread. The encoder will abort all
+ // scheduled work items, work that can be immediately aborted will be placed in |flushedWork|.
+ void flushTask(::base::WaitableEvent* done,
+ std::list<std::unique_ptr<C2Work>>* const flushedWork);
+ // Set the component listener on the encoder thread.
+ void setListenerTask(const std::shared_ptr<Listener>& listener, ::base::WaitableEvent* done);
+
+ // Initialize the V4L2 device for encoding with the requested configuration.
+ bool initializeEncoder();
+ // Configure input format on the V4L2 device.
+ bool configureInputFormat(media::VideoPixelFormat inputFormat);
+ // Configure output format on the V4L2 device.
+ bool configureOutputFormat(media::VideoCodecProfile outputProfile);
+ // Configure required and optional controls on the V4L2 device.
+ bool configureDevice(media::VideoCodecProfile outputProfile,
+ std::optional<const uint8_t> outputH264Level);
+ // Update the |mBitrate| and |mFramerate| currently configured on the V4L2 device, to match the
+ // values requested by the codec 2.0 framework.
+ bool updateEncodingParameters();
+
+ // Schedule the next encode operation on the V4L2 device.
+ void scheduleNextEncodeTask();
+ // Encode the specified |block| with corresponding |index| and |timestamp|.
+ bool encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp);
+ // Drain the encoder.
+ void drain();
+ // Flush the encoder.
+ void flush();
+
+ // Fetch a new output buffer from the output block pool.
+ std::shared_ptr<C2LinearBlock> fetchOutputBlock();
+
+ // Called on the encoder thread when the encoder is done using an input buffer.
+ void onInputBufferDone(uint64_t index);
+ // Called on the encoder thread when an output buffer is ready.
+ void onOutputBufferDone(uint32_t payloadSize, bool keyFrame, int64_t timestamp,
+ std::shared_ptr<C2LinearBlock> outputBlock);
+
+ // Helper function to find a work item in the output work queue by index.
+ C2Work* getWorkByIndex(uint64_t index);
+ // Helper function to find a work item in the output work queue by timestamp.
+ C2Work* getWorkByTimestamp(int64_t timestamp);
+ // Helper function to determine if the specified |work| item is finished.
+ bool isWorkDone(const C2Work& work) const;
+ // Notify the listener the specified |work| item is finished.
+ void reportWork(std::unique_ptr<C2Work> work);
+
+ // Attempt to start the V4L2 device poller.
+ bool startDevicePoll();
+ // Attempt to stop the V4L2 device poller.
+ bool stopDevicePoll();
+ // Called by the V4L2 device poller on the |mEncoderTaskRunner| whenever an error occurred.
+ void onPollError();
+ // Service I/O on the V4L2 device, called by the V4L2 device poller on the |mEncoderTaskRunner|.
+ void serviceDeviceTask(bool event);
+
+ // Enqueue an input buffer to be encoded on the device input queue. Returns whether the
+ // operation was successful.
+ bool enqueueInputBuffer(std::unique_ptr<InputFrame> frame, media::VideoPixelFormat format,
+ const std::vector<VideoFramePlane>& planes, int64_t index,
+ int64_t timestamp);
+ // Enqueue an output buffer to store the encoded bitstream on the device output queue. Returns
+ // whether the operation was successful.
+ bool enqueueOutputBuffer();
+ // Dequeue an input buffer the V4L2 device has finished encoding on the device input queue.
+ // Returns whether a buffer could be dequeued.
+ bool dequeueInputBuffer();
+ // Dequeue an output buffer containing the encoded bitstream from the device output queue. The
+ // bitstream is copied into another buffer that is sent to the client, after which the output
+ // buffer is returned to the queue. Returns whether the operation was successful.
+ bool dequeueOutputBuffer();
+
+ // Create input buffers on the V4L2 device input queue.
+ bool createInputBuffers();
+ // Create output buffers on the V4L2 device output queue.
+ bool createOutputBuffers();
+ // Destroy the input buffers on the V4L2 device input queue.
+ void destroyInputBuffers();
+ // Destroy the output buffers on the V4L2 device output queue.
+ void destroyOutputBuffers();
+
+ // Notify the client an error occurred and switch to the error state.
+ void reportError(c2_status_t error);
+
+ // Change the state of the component.
+ void setComponentState(ComponentState state);
+ // Change the state of the encoder, only called on the encoder thread.
+ void setEncoderState(EncoderState state);
+ // Get the specified component |state| as string.
+ static const char* componentStateToString(ComponentState state);
+ // Get the specified encoder |state| as string.
+ static const char* encoderStateToString(EncoderState state);
+
+ // The component's registered name.
+ const C2String mName;
+ // The component's id, provided by the C2 framework upon initialization.
+ const c2_node_id_t mId = 0;
+ // The component's interface implementation.
+ const std::shared_ptr<V4L2EncodeInterface> mInterface;
+
+ // Mutex used by the component to synchronize start/stop/reset/release calls, as the codec 2.0
+ // API can be accessed from any thread.
+ std::mutex mComponentLock;
+
+ // The component's listener to be notified when events occur, only accessed on encoder thread.
+ std::shared_ptr<Listener> mListener;
+
+ // The V4L2 device used to interact with the driver, only accessed on encoder thread.
+ scoped_refptr<media::V4L2Device> mDevice;
+ scoped_refptr<media::V4L2Queue> mInputQueue;
+ scoped_refptr<media::V4L2Queue> mOutputQueue;
+
+ // The video stream's visible size.
+ media::Size mVisibleSize;
+ // The video stream's coded size.
+ media::Size mInputCodedSize;
+ // The input layout configured on the V4L2 device.
+ std::optional<media::VideoFrameLayout> mInputLayout;
+ // An input format convertor will be used if the device doesn't support the video's format.
+ std::unique_ptr<FormatConverter> mInputFormatConverter;
+ // Required output buffer byte size.
+ uint32_t mOutputBufferSize = 0;
+
+ // The bitrate currently configured on the v4l2 device.
+ uint32_t mBitrate = 0;
+ // The framerate currently configured on the v4l2 device.
+ uint32_t mFramerate = 0;
+
+ // How often we want to request the V4L2 device to create a key frame.
+ uint32_t mKeyFramePeriod = 0;
+ // Key frame counter, a key frame will be requested each time it reaches zero.
+ uint32_t mKeyFrameCounter = 0;
+
+ // Whether we extracted and submitted CSD (codec-specific data, e.g. H.264 SPS) to the framework.
+ bool mCSDSubmitted = false;
+
+ // The queue of encode work items to be processed.
+ std::queue<std::unique_ptr<C2Work>> mInputWorkQueue;
+ // The queue of encode work items currently being processed.
+ std::deque<std::unique_ptr<C2Work>> mOutputWorkQueue;
+
+ // List of work item indices and frames associated with each buffer in the device input queue.
+ std::vector<std::pair<int64_t, std::unique_ptr<InputFrame>>> mInputBuffersMap;
+
+    // Vector of output blocks associated with each buffer in the device output queue, indexed by
+    // buffer index. Keeping the C2LinearBlock buffers alive here avoids duplicating their fds.
+ std::vector<std::shared_ptr<C2LinearBlock>> mOutputBuffersMap;
+ // The output block pool.
+ std::shared_ptr<C2BlockPool> mOutputBlockPool;
+
+ // The component state, accessible from any thread as C2Component interface is not thread-safe.
+ std::atomic<ComponentState> mComponentState;
+ // The current state of the encoder, only accessed on the encoder thread.
+ EncoderState mEncoderState = EncoderState::UNINITIALIZED;
+
+ // The encoder thread on which all interaction with the V4L2 device is performed.
+ ::base::Thread mEncoderThread{"V4L2EncodeComponentThread"};
+ // The task runner on the encoder thread.
+ scoped_refptr<::base::SequencedTaskRunner> mEncoderTaskRunner;
+
+ // The WeakPtrFactory used to get weak pointers of this.
+ ::base::WeakPtr<V4L2EncodeComponent> mWeakThis;
+ ::base::WeakPtrFactory<V4L2EncodeComponent> mWeakThisFactory{this};
+};
+
+} // namespace android
+
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_COMPONENT_H
diff --git a/include/C2EncoderInterface.h b/components/include/v4l2_codec2/components/V4L2EncodeInterface.h
similarity index 81%
rename from include/C2EncoderInterface.h
rename to components/include/v4l2_codec2/components/V4L2EncodeInterface.h
index b4f42a9..f480d25 100644
--- a/include/C2EncoderInterface.h
+++ b/components/include/v4l2_codec2/components/V4L2EncodeInterface.h
@@ -2,28 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_C2_ENCODER_INTERFACE_H
-#define ANDROID_C2_ENCODER_INTERFACE_H
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_INTERFACE_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_INTERFACE_H
+#include <optional>
#include <vector>
-#include <base/optional.h>
#include <C2.h>
#include <C2Buffer.h>
#include <C2Config.h>
#include <util/C2InterfaceHelper.h>
#include <size.h>
-#include <video_codecs.h>
#include <v4l2_codec2/common/EncodeHelpers.h>
+#include <video_codecs.h>
+
+namespace media {
+class V4L2Device;
+};
namespace android {
-class C2EncoderInterface : public C2InterfaceHelper {
+// Codec 2.0 interface describing the V4L2EncodeComponent. This interface is used by the codec 2.0
+// framework to query the component's capabilities and request configuration changes.
+class V4L2EncodeInterface : public C2InterfaceHelper {
public:
- C2EncoderInterface(const std::shared_ptr<C2ReflectorHelper>& helper);
+ V4L2EncodeInterface(const C2String& name, std::shared_ptr<C2ReflectorHelper> helper);
- // Interfaces for the C2EncoderInterface
+ // Interfaces for the V4L2EncodeInterface
// Note: these getters are not thread-safe. For dynamic parameters, component should use
// formal query API for C2ComponentInterface instead.
c2_status_t status() const { return mInitStatus; }
@@ -37,10 +43,7 @@
uint32_t getKeyFramePeriod() const;
protected:
- void Initialize(const C2String& name, const std::vector<VideoEncodeProfile>& supportedProfiles);
-
- virtual base::Optional<media::VideoCodec> getCodecFromComponentName(
- const std::string& name) const = 0;
+ void Initialize(const C2String& name);
// Configurable parameter setters.
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::output>& info,
@@ -57,6 +60,8 @@
// The input format kind; should be C2FormatVideo.
std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ // The memory usage flag of input buffer; should be BufferUsage::VIDEO_ENCODER.
+ std::shared_ptr<C2StreamUsageTuning::input> mInputMemoryUsage;
// The output format kind; should be C2FormatCompressed.
std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
// The MIME type of input port; should be MEDIA_MIMETYPE_VIDEO_RAW.
@@ -98,4 +103,4 @@
} // namespace android
-#endif // ANDROID_C2_ENCODER_INTERFACE_H
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_INTERFACE_H
diff --git a/components/include/v4l2_codec2/components/VideoDecoder.h b/components/include/v4l2_codec2/components/VideoDecoder.h
index 41517b3..c737c65 100644
--- a/components/include/v4l2_codec2/components/VideoDecoder.h
+++ b/components/include/v4l2_codec2/components/VideoDecoder.h
@@ -9,11 +9,11 @@
#include <memory>
#include <base/callback.h>
-#include <base/files/scoped_file.h>
+#include <v4l2_codec2/common/VideoTypes.h>
+#include <v4l2_codec2/components/BitstreamBuffer.h>
#include <v4l2_codec2/components/VideoFrame.h>
#include <v4l2_codec2/components/VideoFramePool.h>
-#include <v4l2_codec2/components/VideoTypes.h>
namespace android {
@@ -26,18 +26,6 @@
};
static const char* DecodeStatusToString(DecodeStatus status);
- struct BitstreamBuffer {
- BitstreamBuffer(const int32_t id, base::ScopedFD dmabuf_fd, const size_t offset,
- const size_t size)
- : id(id), dmabuf_fd(std::move(dmabuf_fd)), offset(offset), size(size) {}
- ~BitstreamBuffer() = default;
-
- const int32_t id;
- base::ScopedFD dmabuf_fd;
- const size_t offset;
- const size_t size;
- };
-
using GetPoolCB =
base::RepeatingCallback<void(std::unique_ptr<VideoFramePool>*, const media::Size& size,
HalPixelFormat pixelFormat, size_t numOutputBuffers)>;
diff --git a/components/include/v4l2_codec2/components/VideoFrame.h b/components/include/v4l2_codec2/components/VideoFrame.h
index f666f4d..395a52b 100644
--- a/components/include/v4l2_codec2/components/VideoFrame.h
+++ b/components/include/v4l2_codec2/components/VideoFrame.h
@@ -9,7 +9,6 @@
#include <vector>
#include <C2Buffer.h>
-#include <base/files/scoped_file.h>
#include <rect.h>
@@ -23,7 +22,7 @@
~VideoFrame();
// Return the file descriptors of the corresponding buffer.
- const std::vector<::base::ScopedFD>& getFDs() const;
+ const std::vector<int>& getFDs() const;
// Getter and setter of the visible rectangle.
void setVisibleRect(const media::Rect& visibleRect);
@@ -37,10 +36,10 @@
C2ConstGraphicBlock getGraphicBlock();
private:
- VideoFrame(std::shared_ptr<C2GraphicBlock> block, std::vector<::base::ScopedFD> fds);
+ VideoFrame(std::shared_ptr<C2GraphicBlock> block, std::vector<int> fds);
std::shared_ptr<C2GraphicBlock> mGraphicBlock;
- std::vector<::base::ScopedFD> mFds;
+ std::vector<int> mFds;
media::Rect mVisibleRect;
int32_t mBitstreamId = -1;
};
diff --git a/components/include/v4l2_codec2/components/VideoFramePool.h b/components/include/v4l2_codec2/components/VideoFramePool.h
index 079e640..71bfe27 100644
--- a/components/include/v4l2_codec2/components/VideoFramePool.h
+++ b/components/include/v4l2_codec2/components/VideoFramePool.h
@@ -5,7 +5,9 @@
#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_POOL_H
#define ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_POOL_H
+#include <atomic>
#include <memory>
+#include <optional>
#include <queue>
#include <C2Buffer.h>
@@ -15,8 +17,8 @@
#include <base/threading/thread.h>
#include <size.h>
+#include <v4l2_codec2/common/VideoTypes.h>
#include <v4l2_codec2/components/VideoFrame.h>
-#include <v4l2_codec2/components/VideoTypes.h>
namespace android {
@@ -25,20 +27,20 @@
// C2BlockPool::fetchGraphicBlock() times out.
class VideoFramePool {
public:
- using GetVideoFrameCB = base::OnceCallback<void(std::unique_ptr<VideoFrame>)>;
+ using FrameWithBlockId = std::pair<std::unique_ptr<VideoFrame>, uint32_t>;
+ using GetVideoFrameCB = ::base::OnceCallback<void(std::optional<FrameWithBlockId>)>;
static std::unique_ptr<VideoFramePool> Create(
- std::shared_ptr<C2BlockPool> blockPool, const media::Size& size,
- HalPixelFormat pixelFormat, bool isSecure,
+ std::shared_ptr<C2BlockPool> blockPool, const size_t numBuffers,
+ const media::Size& size, HalPixelFormat pixelFormat, bool isSecure,
scoped_refptr<::base::SequencedTaskRunner> taskRunner);
~VideoFramePool();
// Get a VideoFrame instance, which will be passed via |cb|.
- // If any error occurs, then pass nullptr.
- void getVideoFrame(GetVideoFrameCB cb);
-
- // Return true if any callback of getting VideoFrame instance is pending.
- bool hasPendingRequests() const;
+ // If any error occurs, then nullptr will be passed via |cb|.
+ // Return false if the previous callback has not been called, and |cb| will
+ // be dropped directly.
+ bool getVideoFrame(GetVideoFrameCB cb);
private:
// |blockPool| is the C2BlockPool that we fetch graphic blocks from.
@@ -52,15 +54,30 @@
bool initialize();
void destroyTask();
- void getVideoFrameTask(GetVideoFrameCB cb);
- void onVideoFrameReady(GetVideoFrameCB cb, std::unique_ptr<VideoFrame> frame);
+ static void getVideoFrameTaskThunk(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
+ std::optional<::base::WeakPtr<VideoFramePool>> weakPool);
+ void getVideoFrameTask();
+ void onVideoFrameReady(std::optional<FrameWithBlockId> frameWithBlockId);
+
+ // Extracts buffer ID from graphic block.
+ // |block| is the graphic block allocated by |blockPool|.
+ static std::optional<uint32_t> getBufferIdFromGraphicBlock(const C2BlockPool& blockPool,
+ const C2Block2D& block);
+
+ // Ask |blockPool| to allocate the specified number of buffers.
+ // |bufferCount| is the number of requested buffers.
+ static c2_status_t requestNewBufferSet(C2BlockPool& blockPool, int32_t bufferCount);
+
+ // Ask |blockPool| to notify when a block is available via |cb|.
+ // Return true if |blockPool| supports notifying buffer available.
+ static bool setNotifyBlockAvailableCb(C2BlockPool& blockPool, ::base::OnceClosure cb);
std::shared_ptr<C2BlockPool> mBlockPool;
const media::Size mSize;
const HalPixelFormat mPixelFormat;
const C2MemoryUsage mMemoryUsage;
- size_t mNumPendingRequests = 0;
+ GetVideoFrameCB mOutputCb;
scoped_refptr<::base::SequencedTaskRunner> mClientTaskRunner;
::base::Thread mFetchThread{"VideoFramePoolFetchThread"};
diff --git a/include/C2VDAAdaptor.h b/include/C2VDAAdaptor.h
deleted file mode 100644
index 659564c..0000000
--- a/include/C2VDAAdaptor.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_C2_VDA_ADAPTOR_H
-#define ANDROID_C2_VDA_ADAPTOR_H
-
-#include <VideoDecodeAcceleratorAdaptor.h>
-
-#include <video_decode_accelerator.h>
-
-#include <base/macros.h>
-
-namespace android {
-
-// This class translates adaptor API to media::VideoDecodeAccelerator API to make communication
-// between Codec 2.0 VDA component and VDA.
-class C2VDAAdaptor : public VideoDecodeAcceleratorAdaptor,
- public media::VideoDecodeAccelerator::Client {
-public:
- C2VDAAdaptor();
- ~C2VDAAdaptor() override;
-
- // Implementation of the VideoDecodeAcceleratorAdaptor interface.
- Result initialize(media::VideoCodecProfile profile, bool secureMode,
- VideoDecodeAcceleratorAdaptor::Client* client) override;
- void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t bytesUsed) override;
- void assignPictureBuffers(uint32_t numOutputBuffers, const media::Size& size) override;
- void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> handleFds,
- const std::vector<VideoFramePlane>& planes) override;
- void reusePictureBuffer(int32_t pictureBufferId) override;
- void flush() override;
- void reset() override;
- void destroy() override;
-
- static media::VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
- InputCodec inputCodec);
-
- // Implementation of the media::VideoDecodeAccelerator::Client interface.
- void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
- media::VideoPixelFormat output_format,
- const media::Size& dimensions) override;
- void DismissPictureBuffer(int32_t picture_buffer_id) override;
- void PictureReady(const media::Picture& picture) override;
- void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
- void NotifyFlushDone() override;
- void NotifyResetDone() override;
- void NotifyError(media::VideoDecodeAccelerator::Error error) override;
-
-private:
- std::unique_ptr<media::VideoDecodeAccelerator> mVDA;
- VideoDecodeAcceleratorAdaptor::Client* mClient;
-
- // The number of allocated output buffers. This is obtained from assignPictureBuffers call from
- // client, and used to check validity of picture id in importBufferForPicture and
- // reusePictureBuffer.
- uint32_t mNumOutputBuffers;
- // The picture size for creating picture buffers. This is obtained while VDA calls
- // ProvidePictureBuffers.
- media::Size mPictureSize;
-
- DISALLOW_COPY_AND_ASSIGN(C2VDAAdaptor);
-};
-
-} // namespace android
-
-#endif // ANDROID_C2_VDA_ADAPTOR_H
diff --git a/include/C2VDAAdaptorProxy.h b/include/C2VDAAdaptorProxy.h
deleted file mode 100644
index 9deca3b..0000000
--- a/include/C2VDAAdaptorProxy.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_C2_VDA_ADAPTOR_PROXY_H
-#define ANDROID_C2_VDA_ADAPTOR_PROXY_H
-
-#include <memory>
-
-#include <VideoDecodeAcceleratorAdaptor.h>
-
-#include <video_decode_accelerator.h>
-
-#include <arc/Future.h>
-#include <mojo/public/cpp/bindings/binding.h>
-
-#include <components/arc/mojom/video.mojom.h>
-#include <components/arc/mojom/video_decode_accelerator.mojom.h>
-
-namespace arc {
-class MojoProcessSupport;
-} // namespace arc
-
-namespace android {
-namespace arc {
-class C2VDAAdaptorProxy : public VideoDecodeAcceleratorAdaptor,
- public ::arc::mojom::VideoDecodeClient {
-public:
- C2VDAAdaptorProxy();
- explicit C2VDAAdaptorProxy(::arc::MojoProcessSupport* MojomProcessSupport);
- ~C2VDAAdaptorProxy() override;
-
- // Establishes ipc channel for video acceleration. Returns true if channel
- // connected successfully.
- // This must be called before all other methods.
- bool establishChannel();
-
- // Implementation of the VideoDecodeAcceleratorAdaptor interface.
- Result initialize(media::VideoCodecProfile profile, bool secureMode,
- VideoDecodeAcceleratorAdaptor::Client* client) override;
- void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t size) override;
- void assignPictureBuffers(uint32_t numOutputBuffers, const media::Size& size) override;
- void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> handleFds,
- const std::vector<VideoFramePlane>& planes) override;
- void reusePictureBuffer(int32_t pictureBufferId) override;
- void flush() override;
- void reset() override;
- void destroy() override;
-
- // ::arc::mojom::VideoDecodeClient implementations.
- void ProvidePictureBuffersDeprecated(::arc::mojom::PictureBufferFormatPtr format) override;
- void ProvidePictureBuffers(::arc::mojom::PictureBufferFormatPtr format,
- const gfx::Rect& visible_rect) override;
- void PictureReady(::arc::mojom::PicturePtr picture) override;
- void NotifyEndOfBitstreamBuffer(int32_t bitstream_id) override;
- void NotifyError(::arc::mojom::VideoDecodeAccelerator::Result error) override;
-
- // The following functions are called as callbacks.
- void NotifyResetDone(::arc::mojom::VideoDecodeAccelerator::Result result);
- void NotifyFlushDone(::arc::mojom::VideoDecodeAccelerator::Result result);
-
- static media::VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
- InputCodec inputCodec);
-
-private:
- void onConnectionError(const std::string& pipeName);
- void establishChannelOnMojoThread(std::shared_ptr<::arc::Future<bool>> future);
- void onVersionReady(std::shared_ptr<::arc::Future<bool>> future, uint32_t version);
-
- // Closes ipc channel for video acceleration.
- // This must be called before deleting this object.
- void closeChannelOnMojoThread();
-
- // mojo thread corresponding part of C2VDAAdaptorProxy implementations.
- void initializeOnMojoThread(const media::VideoCodecProfile profile, const bool mSecureMode,
- const ::arc::mojom::VideoDecodeAccelerator::InitializeCallback& cb);
- void decodeOnMojoThread(int32_t bitstreamId, int ashmemFd, off_t offset, uint32_t bytesUsed);
- void assignPictureBuffersOnMojoThread(uint32_t numOutputBuffers, const media::Size& size);
-
- void importBufferForPictureOnMojoThread(int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> handleFds,
- const std::vector<VideoFramePlane>& planes);
- void reusePictureBufferOnMojoThread(int32_t pictureBufferId);
- void flushOnMojoThread();
- void resetOnMojoThread();
-
- VideoDecodeAcceleratorAdaptor::Client* mClient;
-
- // Task runner for mojom functions.
- const scoped_refptr<::base::SingleThreadTaskRunner> mMojoTaskRunner;
-
- // |mVDAPtr| and |mBinding| should only be called on |mMojoTaskRunner| after bound.
- ::arc::mojom::VideoDecodeAcceleratorPtr mVDAPtr;
- mojo::Binding<::arc::mojom::VideoDecodeClient> mBinding;
-
- // Used to cancel the wait on arc::Future.
- sp<::arc::CancellationRelay> mRelay;
-
- DISALLOW_COPY_AND_ASSIGN(C2VDAAdaptorProxy);
-};
-} // namespace arc
-} // namespace android
-
-#endif // ANDROID_C2_VDA_ADAPTOR_PROXY_H
diff --git a/include/C2VDACommon.h b/include/C2VDACommon.h
deleted file mode 100644
index 2807af0..0000000
--- a/include/C2VDACommon.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_C2_VDA_COMMON_H
-#define ANDROID_C2_VDA_COMMON_H
-
-#include <inttypes.h>
-
-namespace android {
-enum class HalPixelFormat : uint32_t {
- UNKNOWN = 0x0,
- // The pixel formats defined in Android but are used among C2VDAComponent.
- YCbCr_420_888 = 0x23,
- YV12 = 0x32315659,
- NV12 = 0x3231564e,
-};
-
-enum class InputCodec {
- H264,
- VP8,
- VP9,
-};
-} // namespace android
-#endif // ANDROID_C2_VDA_COMMON_H
diff --git a/include/C2VDAComponent.h b/include/C2VDAComponent.h
deleted file mode 100644
index 7d87ee0..0000000
--- a/include/C2VDAComponent.h
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_C2_VDA_COMPONENT_H
-#define ANDROID_C2_VDA_COMPONENT_H
-
-#include <atomic>
-#include <deque>
-#include <map>
-#include <mutex>
-#include <queue>
-#include <unordered_map>
-
-#include <C2Component.h>
-#include <C2Config.h>
-#include <C2Enum.h>
-#include <C2Param.h>
-#include <C2ParamDef.h>
-#include <SimpleC2Interface.h>
-#include <util/C2InterfaceHelper.h>
-
-#include <base/macros.h>
-#include <base/memory/ref_counted.h>
-#include <base/single_thread_task_runner.h>
-#include <base/synchronization/waitable_event.h>
-#include <base/threading/thread.h>
-
-#include <VideoDecodeAcceleratorAdaptor.h>
-#include <rect.h>
-#include <size.h>
-#include <video_codecs.h>
-#include <video_decode_accelerator.h>
-#include <v4l2_codec2/common/Common.h>
-
-namespace android {
-
-class C2VDAComponent : public C2Component,
- public VideoDecodeAcceleratorAdaptor::Client,
- public std::enable_shared_from_this<C2VDAComponent> {
-public:
- class IntfImpl : public C2InterfaceHelper {
- public:
- IntfImpl(C2String name, const std::shared_ptr<C2ReflectorHelper>& helper);
-
- // interfaces for C2VDAComponent
- c2_status_t status() const { return mInitStatus; }
- media::VideoCodecProfile getCodecProfile() const { return mCodecProfile; }
- C2BlockPool::local_id_t getBlockPoolId() const { return mOutputBlockPoolIds->m.values[0]; }
- InputCodec getInputCodec() const { return mInputCodec; }
-
- private:
- // Configurable parameter setters.
- static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input>& info);
-
- static C2R SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::output>& videoSize);
-
- template <typename T>
- static C2R DefaultColorAspectsSetter(bool mayBlock, C2P<T>& def);
-
- static C2R MergedColorAspectsSetter(bool mayBlock,
- C2P<C2StreamColorAspectsInfo::output>& merged,
- const C2P<C2StreamColorAspectsTuning::output>& def,
- const C2P<C2StreamColorAspectsInfo::input>& coded);
-
- // The input format kind; should be C2FormatCompressed.
- std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
- // The memory usage flag of input buffer; should be BufferUsage::VIDEO_DECODER.
- std::shared_ptr<C2StreamUsageTuning::input> mInputMemoryUsage;
- // The output format kind; should be C2FormatVideo.
- std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
- // The MIME type of input port.
- std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
- // The MIME type of output port; should be MEDIA_MIMETYPE_VIDEO_RAW.
- std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
- // The input codec profile and level. For now configuring this parameter is useless since
- // the component always uses fixed codec profile to initialize accelerator. It is only used
- // for the client to query supported profile and level values.
- // TODO: use configured profile/level to initialize accelerator.
- std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
- // Decoded video size for output.
- std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
- // Maximum size of one input buffer.
- std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
- // The suggested usage of input buffer allocator ID.
- std::shared_ptr<C2PortAllocatorsTuning::input> mInputAllocatorIds;
- // The suggested usage of output buffer allocator ID.
- std::shared_ptr<C2PortAllocatorsTuning::output> mOutputAllocatorIds;
- // The suggested usage of output buffer allocator ID with surface.
- std::shared_ptr<C2PortSurfaceAllocatorTuning::output> mOutputSurfaceAllocatorId;
- // Compnent uses this ID to fetch corresponding output block pool from platform.
- std::shared_ptr<C2PortBlockPoolsTuning::output> mOutputBlockPoolIds;
- // The color aspects parsed from input bitstream. This parameter should be configured by
- // component while decoding.
- std::shared_ptr<C2StreamColorAspectsInfo::input> mCodedColorAspects;
- // The default color aspects specified by requested output format. This parameter should be
- // configured by client.
- std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
- // The combined color aspects by |mCodedColorAspects| and |mDefaultColorAspects|, and the
- // former has higher priority. This parameter is used for component to provide color aspects
- // as C2Info in decoded output buffers.
- std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
-
- c2_status_t mInitStatus;
- media::VideoCodecProfile mCodecProfile;
- InputCodec mInputCodec;
- };
-
- C2VDAComponent(C2String name, c2_node_id_t id,
- const std::shared_ptr<C2ReflectorHelper>& helper);
- virtual ~C2VDAComponent() override;
-
- // Implementation of C2Component interface
- virtual c2_status_t setListener_vb(const std::shared_ptr<Listener>& listener,
- c2_blocking_t mayBlock) override;
- virtual c2_status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) override;
- virtual c2_status_t announce_nb(const std::vector<C2WorkOutline>& items) override;
- virtual c2_status_t flush_sm(flush_mode_t mode,
- std::list<std::unique_ptr<C2Work>>* const flushedWork) override;
- virtual c2_status_t drain_nb(drain_mode_t mode) override;
- virtual c2_status_t start() override;
- virtual c2_status_t stop() override;
- virtual c2_status_t reset() override;
- virtual c2_status_t release() override;
- virtual std::shared_ptr<C2ComponentInterface> intf() override;
-
- // Implementation of VideDecodeAcceleratorAdaptor::Client interface
- virtual void providePictureBuffers(uint32_t minNumBuffers,
- const media::Size& codedSize) override;
- virtual void dismissPictureBuffer(int32_t pictureBufferId) override;
- virtual void pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
- const media::Rect& cropRect) override;
- virtual void notifyEndOfBitstreamBuffer(int32_t bitstreamId) override;
- virtual void notifyFlushDone() override;
- virtual void notifyResetDone() override;
- virtual void notifyError(VideoDecodeAcceleratorAdaptor::Result error) override;
-
-private:
- // The state machine enumeration on parent thread.
- enum class State : int32_t {
- // The initial state of component. State will change to LOADED after the component is
- // created.
- UNLOADED,
- // The component is stopped. State will change to RUNNING when start() is called by
- // framework.
- LOADED,
- // The component is running, State will change to LOADED when stop() or reset() is called by
- // framework.
- RUNNING,
- // The component is in error state.
- ERROR,
- };
- // The state machine enumeration on component thread.
- enum class ComponentState : int32_t {
- // This is the initial state until VDA initialization returns successfully.
- UNINITIALIZED,
- // VDA initialization returns successfully. VDA is ready to make progress.
- STARTED,
- // onDrain() is called. VDA is draining. Component will hold on queueing works until
- // onDrainDone().
- DRAINING,
- // onFlush() is called. VDA is flushing. State will change to STARTED after onFlushDone().
- FLUSHING,
- // onStop() is called. VDA is shutting down. State will change to UNINITIALIZED after
- // onStopDone().
- STOPPING,
- };
-
- // This constant is used to tell apart from drain_mode_t enumerations in C2Component.h, which
- // means no drain request.
- // Note: this value must be different than all enumerations in drain_mode_t.
- static constexpr uint32_t NO_DRAIN = ~0u;
-
- // Internal struct for work queue.
- struct WorkEntry {
- std::unique_ptr<C2Work> mWork;
- uint32_t mDrainMode = NO_DRAIN;
- };
-
- // Internal struct to keep the information of a specific graphic block.
- struct GraphicBlockInfo {
- enum class State {
- OWNED_BY_COMPONENT, // Owned by this component.
- OWNED_BY_ACCELERATOR, // Owned by video decode accelerator.
- OWNED_BY_CLIENT, // Owned by client.
- };
-
- // The ID of this block used for accelerator.
- int32_t mBlockId = -1;
- // The ID of this block used in block pool. It indicates slot index for bufferqueue-backed
- // block pool, and buffer ID of BufferPoolData for bufferpool block pool.
- uint32_t mPoolId = 0;
- State mState = State::OWNED_BY_COMPONENT;
- // Graphic block buffer allocated from allocator. The graphic block should be owned until
- // it is passed to client.
- std::shared_ptr<C2GraphicBlock> mGraphicBlock;
- // HAL pixel format used while importing to VDA.
- HalPixelFormat mPixelFormat;
- // The dmabuf fds dupped from graphic block for importing to VDA.
- std::vector<::base::ScopedFD> mHandles;
- // VideoFramePlane information for importing to VDA.
- std::vector<VideoFramePlane> mPlanes;
- };
-
- struct VideoFormat {
- HalPixelFormat mPixelFormat = HalPixelFormat::UNKNOWN;
- uint32_t mMinNumBuffers = 0;
- media::Size mCodedSize;
- media::Rect mVisibleRect;
-
- VideoFormat() {}
- VideoFormat(HalPixelFormat pixelFormat, uint32_t minNumBuffers, media::Size codedSize,
- media::Rect visibleRect);
- };
-
- // Internal struct for the information of output buffer returned from the accelerator.
- struct OutputBufferInfo {
- int32_t mBitstreamId;
- int32_t mBlockId;
- };
-
- // These tasks should be run on the component thread |mThread|.
- void onDestroy();
- void onStart(media::VideoCodecProfile profile, ::base::WaitableEvent* done);
- void onQueueWork(std::unique_ptr<C2Work> work);
- void onDequeueWork();
- void onInputBufferDone(int32_t bitstreamId);
- void onOutputBufferDone(int32_t pictureBufferId, int32_t bitstreamId);
- void onDrain(uint32_t drainMode);
- void onDrainDone();
- void onFlush();
- void onStop(::base::WaitableEvent* done);
- void onResetDone();
- void onFlushDone();
- void onStopDone();
- void onOutputFormatChanged(std::unique_ptr<VideoFormat> format);
- void onVisibleRectChanged(const media::Rect& cropRect);
- void onOutputBufferReturned(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId);
- void onSurfaceChanged();
-
- // Send input buffer to accelerator with specified bitstream id.
- void sendInputBufferToAccelerator(const C2ConstLinearBlock& input, int32_t bitstreamId);
- // Send output buffer to accelerator. If |passToAccelerator|, change the ownership to
- // OWNED_BY_ACCELERATOR of this buffer.
- void sendOutputBufferToAccelerator(GraphicBlockInfo* info, bool passToAccelerator);
- // Set crop rectangle infomation to output format.
- void setOutputFormatCrop(const media::Rect& cropRect);
- // Helper function to get the specified GraphicBlockInfo object by its id.
- GraphicBlockInfo* getGraphicBlockById(int32_t blockId);
- // Helper function to get the specified GraphicBlockInfo object by its pool id.
- GraphicBlockInfo* getGraphicBlockByPoolId(uint32_t poolId);
- // Helper function to find the work iterator in |mPendingWorks| by bitstream id.
- std::deque<std::unique_ptr<C2Work>>::iterator findPendingWorkByBitstreamId(int32_t bitstreamId);
- // Helper function to get the specified work in |mPendingWorks| by bitstream id.
- C2Work* getPendingWorkByBitstreamId(int32_t bitstreamId);
- // Try to apply the output format change.
- void tryChangeOutputFormat();
- // Allocate output buffers (graphic blocks) from block allocator.
- c2_status_t allocateBuffersFromBlockAllocator(const media::Size& size, uint32_t pixelFormat);
- // Append allocated buffer (graphic block) to |mGraphicBlocks|.
- void appendOutputBuffer(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId);
- // Append allocated buffer (graphic block) to |mGraphicBlocks| in secure mode.
- void appendSecureOutputBuffer(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId);
- // Parse coded color aspects from bitstream and configs parameter if applicable.
- bool parseCodedColorAspects(const C2ConstLinearBlock& input);
- // Update color aspects for current output buffer.
- c2_status_t updateColorAspects();
- // Dequeue |mPendingBuffersToWork| to put output buffer to corresponding work and report if
- // finished as many as possible. If |dropIfUnavailable|, drop all pending existing frames
- // without blocking.
- c2_status_t sendOutputBufferToWorkIfAny(bool dropIfUnavailable);
- // Update |mUndequeuedBlockIds| FIFO by pushing |blockId|.
- void updateUndequeuedBlockIds(int32_t blockId);
-
- // Specific to VP8/VP9, since for no-show frame cases VDA will not call PictureReady to return
- // output buffer which the corresponding work is waiting for, this function detects these works
- // by comparing timestamps. If there are works with no-show frame, call reportWorkIfFinished()
- // to report to listener if finished.
- void detectNoShowFrameWorksAndReportIfFinished(const C2WorkOrdinalStruct* currOrdinal);
- // Check if the corresponding work is finished by |bitstreamId|. If yes, make onWorkDone call to
- // listener and erase the work from |mPendingWorks|.
- void reportWorkIfFinished(int32_t bitstreamId);
- // Make onWorkDone call to listener for reporting EOS work in |mPendingWorks|.
- c2_status_t reportEOSWork();
- // Abandon all works in |mPendingWorks| and |mAbandonedWorks|.
- void reportAbandonedWorks();
- // Make onError call to listener for reporting errors.
- void reportError(c2_status_t error);
- // Helper function to determine if the work indicates no-show output frame.
- bool isNoShowFrameWork(const C2Work* work, const C2WorkOrdinalStruct* currOrdinal) const;
- // Helper function to determine if the work is finished.
- bool isWorkDone(const C2Work* work) const;
-
- // Start dequeue thread, return true on success. If |resetBuffersInClient|, reset the counter
- // |mBuffersInClient| on start.
- bool startDequeueThread(const media::Size& size, uint32_t pixelFormat,
- std::shared_ptr<C2BlockPool> blockPool, bool resetBuffersInClient);
- // Stop dequeue thread.
- void stopDequeueThread();
- // The rountine task running on dequeue thread.
- void dequeueThreadLoop(const media::Size& size, uint32_t pixelFormat,
- std::shared_ptr<C2BlockPool> blockPool);
-
- // The pointer of component interface implementation.
- std::shared_ptr<IntfImpl> mIntfImpl;
- // The pointer of component interface.
- const std::shared_ptr<C2ComponentInterface> mIntf;
- // The pointer of component listener.
- std::shared_ptr<Listener> mListener;
-
- // The main component thread.
- ::base::Thread mThread;
- // The task runner on component thread.
- scoped_refptr<::base::SingleThreadTaskRunner> mTaskRunner;
-
- // The dequeue buffer loop thread.
- ::base::Thread mDequeueThread;
- // The stop signal for dequeue loop which should be atomic (toggled by main thread).
- std::atomic<bool> mDequeueLoopStop;
- // The count of buffers owned by client which should be atomic.
- std::atomic<uint32_t> mBuffersInClient;
-
- // The following members should be utilized on component thread |mThread|.
-
- // The initialization result retrieved from VDA.
- VideoDecodeAcceleratorAdaptor::Result mVDAInitResult;
- // The pointer of VideoDecodeAcceleratorAdaptor.
- std::unique_ptr<VideoDecodeAcceleratorAdaptor> mVDAAdaptor;
- // The done event pointer of stop procedure. It should be restored in onStop() and signaled in
- // onStopDone().
- ::base::WaitableEvent* mStopDoneEvent;
- // The state machine on component thread.
- ComponentState mComponentState;
- // The indicator of draining with EOS. This should be always set along with component going to
- // DRAINING state, and will be unset either after reportEOSWork() (EOS is outputted), or
- // reportAbandonedWorks() (drain is cancelled and works are abandoned).
- bool mPendingOutputEOS;
- // The vector of storing allocated output graphic block information.
- std::vector<GraphicBlockInfo> mGraphicBlocks;
- // The work queue. Works are queued along with drain mode from component API queue_nb and
- // dequeued by the decode process of component.
- std::queue<WorkEntry> mQueue;
- // Store all pending works. The dequeued works are placed here until they are finished and then
- // sent out by onWorkDone call to listener.
- // TODO: maybe use priority_queue instead.
- std::deque<std::unique_ptr<C2Work>> mPendingWorks;
- // Store all abandoned works. When component gets flushed/stopped, remaining works in queue are
- // dumped here and sent out by onWorkDone call to listener after flush/stop is finished.
- std::vector<std::unique_ptr<C2Work>> mAbandonedWorks;
- // Store the visible rect provided from VDA. If this is changed, component should issue a
- // visible size change event.
- media::Rect mRequestedVisibleRect;
- // The current output format.
- VideoFormat mOutputFormat;
- // The pending output format. We need to wait until all buffers are returned back to apply the
- // format change.
- std::unique_ptr<VideoFormat> mPendingOutputFormat;
- // The color aspects parameter for current decoded output buffers.
- std::shared_ptr<C2StreamColorAspectsInfo::output> mCurrentColorAspects;
- // The flag of pending color aspects change. This should be set once we have parsed color
- // aspects from bitstream by parseCodedColorAspects(), at the same time recorded input frame
- // index into |mPendingColorAspectsChangeFrameIndex|.
- // When this flag is true and the corresponding frame index is not less than
- // |mPendingColorAspectsChangeFrameIndex| for the output buffer in onOutputBufferDone(), update
- // |mCurrentColorAspects| from component interface and reset the flag.
- bool mPendingColorAspectsChange;
- // The record of frame index to update color aspects. Details as above.
- uint64_t mPendingColorAspectsChangeFrameIndex;
- // The record of bitstream and block ID of pending output buffers returned from accelerator.
- std::deque<OutputBufferInfo> mPendingBuffersToWork;
- // A FIFO queue to record the block IDs which are currently undequequed for display. The size
- // of this queue will be equal to the minimum number of undequeued buffers.
- std::deque<int32_t> mUndequeuedBlockIds;
- // The error state indicator which sets to true when an error is occured.
- bool mHasError = false;
-
- // The indicator of whether component is in secure mode.
- bool mSecureMode;
-
- // The following members should be utilized on parent thread.
-
- // The input codec profile which is configured in component interface.
- media::VideoCodecProfile mCodecProfile;
- // The state machine on parent thread which should be atomic.
- std::atomic<State> mState;
- // The mutex lock to synchronize start/stop/reset/release calls.
- std::mutex mStartStopLock;
-
- // The WeakPtrFactory for getting weak pointer of this.
- ::base::WeakPtrFactory<C2VDAComponent> mWeakThisFactory;
-
- DISALLOW_COPY_AND_ASSIGN(C2VDAComponent);
-};
-
-} // namespace android
-
-#endif // ANDROID_C2_VDA_COMPONENT_H
diff --git a/include/VideoDecodeAcceleratorAdaptor.h b/include/VideoDecodeAcceleratorAdaptor.h
deleted file mode 100644
index f5792a0..0000000
--- a/include/VideoDecodeAcceleratorAdaptor.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
-#define ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
-
-#include <vector>
-
-#include <base/files/scoped_file.h>
-
-#include <rect.h>
-#include <size.h>
-#include <video_codecs.h>
-#include <video_pixel_format.h>
-#include <v4l2_codec2/common/Common.h>
-
-namespace android {
-
-// Video decoder accelerator adaptor interface.
-// The adaptor plays the role of providing unified adaptor API functions and client callback to
-// codec component side.
-// The adaptor API and client callback are modeled after media::VideoDecodeAccelerator which is
-// ported from Chrome and are 1:1 mapped with its API functions.
-class VideoDecodeAcceleratorAdaptor {
-public:
- enum Result {
- SUCCESS = 0,
- ILLEGAL_STATE = 1,
- INVALID_ARGUMENT = 2,
- UNREADABLE_INPUT = 3,
- PLATFORM_FAILURE = 4,
- INSUFFICIENT_RESOURCES = 5,
- };
-
- // The adaptor client interface. This interface should be implemented in the component side.
- class Client {
- public:
- virtual ~Client() {}
-
- // Callback to tell client how many and what size of buffers to provide.
- virtual void providePictureBuffers(uint32_t minNumBuffers,
- const media::Size& codedSize) = 0;
-
- // Callback to dismiss picture buffer that was assigned earlier.
- virtual void dismissPictureBuffer(int32_t pictureBufferId) = 0;
-
- // Callback to deliver decoded pictures ready to be displayed.
- virtual void pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
- const media::Rect& cropRect) = 0;
-
- // Callback to notify that decoder has decoded the end of the bitstream buffer with
- // specified ID.
- virtual void notifyEndOfBitstreamBuffer(int32_t bitstreamId) = 0;
-
- // Flush completion callback.
- virtual void notifyFlushDone() = 0;
-
- // Reset completion callback.
- virtual void notifyResetDone() = 0;
-
- // Callback to notify about errors. Note that errors in initialize() will not be reported
- // here, instead of by its returned value.
- virtual void notifyError(Result error) = 0;
- };
-
- // Initializes the video decoder with specific profile. This call is synchronous and returns
- // SUCCESS iff initialization is successful.
- virtual Result initialize(media::VideoCodecProfile profile, bool secureMode,
- Client* client) = 0;
-
- // Decodes given buffer handle with bitstream ID.
- virtual void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t bytesUsed) = 0;
-
- // Assigns a specified number of picture buffer set to the video decoder.
- virtual void assignPictureBuffers(uint32_t numOutputBuffers, const media::Size& size) = 0;
-
- // Imports planes as backing memory for picture buffer with specified ID.
- virtual void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
- std::vector<::base::ScopedFD> handleFds,
- const std::vector<VideoFramePlane>& planes) = 0;
-
- // Sends picture buffer to be reused by the decoder by its piture ID.
- virtual void reusePictureBuffer(int32_t pictureBufferId) = 0;
-
- // Flushes the decoder.
- virtual void flush() = 0;
-
- // Resets the decoder.
- virtual void reset() = 0;
-
- // Destroys the decoder.
- virtual void destroy() = 0;
-
- virtual ~VideoDecodeAcceleratorAdaptor() {}
-};
-
-} // namespace android
-
-#endif // ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
diff --git a/plugin_store/Android.bp b/plugin_store/Android.bp
index 3d3d040..73dccaf 100644
--- a/plugin_store/Android.bp
+++ b/plugin_store/Android.bp
@@ -8,6 +8,7 @@
srcs: [
"C2VdaBqBlockPool.cpp",
+ "C2VdaPooledBlockPool.cpp",
"V4L2PluginStore.cpp",
"VendorAllocatorLoader.cpp",
],
@@ -20,11 +21,13 @@
],
shared_libs: [
"android.hardware.graphics.bufferqueue@2.0",
+ "libchrome",
"libcutils",
"libhardware",
"libhidlbase",
"libnativewindow",
"liblog",
+ "libstagefright_bufferpool@1.0",
"libstagefright_bufferqueue_helper",
"libstagefright_foundation",
"libui",
@@ -33,5 +36,7 @@
cflags: [
"-Werror",
"-Wall",
+ "-Wno-unused-parameter", // needed for libchrome/base codes
+ "-Wthread-safety",
],
}
diff --git a/plugin_store/C2VdaBqBlockPool.cpp b/plugin_store/C2VdaBqBlockPool.cpp
index 94dce41..9abc698 100644
--- a/plugin_store/C2VdaBqBlockPool.cpp
+++ b/plugin_store/C2VdaBqBlockPool.cpp
@@ -11,9 +11,13 @@
#include <chrono>
#include <mutex>
+#include <thread>
#include <C2AllocatorGralloc.h>
#include <C2BlockInternal.h>
+#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
+#include <android/hardware/graphics/bufferqueue/2.0/IProducerListener.h>
+#include <base/callback.h>
#include <log/log.h>
#include <system/window.h>
#include <types.h>
@@ -21,6 +25,18 @@
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
+namespace android {
+namespace {
+
+// The wait time for acquire fence in milliseconds.
+constexpr int kFenceWaitTimeMs = 10;
+// The timeout limit of acquiring lock of timed_mutex in milliseconds.
+constexpr std::chrono::milliseconds kTimedMutexTimeoutMs = std::chrono::milliseconds(500);
+
+} // namespace
+
+using namespace std::chrono_literals;
+
using ::android::C2AndroidMemoryUsage;
using ::android::Fence;
using ::android::GraphicBuffer;
@@ -28,29 +44,23 @@
using ::android::status_t;
using ::android::BufferQueueDefs::BUFFER_NEEDS_REALLOCATION;
using ::android::BufferQueueDefs::NUM_BUFFER_SLOTS;
+using ::android::BufferQueueDefs::RELEASE_ALL_BUFFERS;
using ::android::hardware::hidl_handle;
using ::android::hardware::Return;
using HBuffer = ::android::hardware::graphics::common::V1_2::HardwareBuffer;
using HStatus = ::android::hardware::graphics::bufferqueue::V2_0::Status;
+using HGraphicBufferProducer =
+ ::android::hardware::graphics::bufferqueue::V2_0::IGraphicBufferProducer;
+using HProducerListener = ::android::hardware::graphics::bufferqueue::V2_0::IProducerListener;
+using HConnectionType = hardware::graphics::bufferqueue::V2_0::ConnectionType;
+using HQueueBufferOutput =
+ ::android::hardware::graphics::bufferqueue::V2_0::IGraphicBufferProducer::QueueBufferOutput;
+
using ::android::hardware::graphics::bufferqueue::V2_0::utils::b2h;
using ::android::hardware::graphics::bufferqueue::V2_0::utils::h2b;
using ::android::hardware::graphics::bufferqueue::V2_0::utils::HFenceWrapper;
-namespace {
-
-// The wait time for acquire fence in milliseconds.
-constexpr int kFenceWaitTimeMs = 10;
-// The timeout delay range for dequeuing spare buffer delay time in microseconds.
-constexpr int kDequeueSpareMinDelayUs = 500;
-constexpr int kDequeueSpareMaxDelayUs = 16 * 1000;
-// The timeout limit of acquiring lock of timed_mutex in milliseconds.
-constexpr std::chrono::milliseconds kTimedMutexTimeoutMs = std::chrono::milliseconds(500);
-// The max retry times for fetchSpareBufferSlot timeout.
-constexpr int32_t kFetchSpareBufferMaxRetries = 10;
-
-} // namespace
-
static c2_status_t asC2Error(int32_t err) {
switch (err) {
case android::NO_ERROR:
@@ -69,6 +79,302 @@
return C2_CORRUPTED;
}
+class H2BGraphicBufferProducer {
+public:
+ explicit H2BGraphicBufferProducer(sp<HGraphicBufferProducer> base) : mBase(base) {}
+ ~H2BGraphicBufferProducer() = default;
+
+ status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) {
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ Return<void> transResult = mBase->requestBuffer(
+ slot, [&converted, &status, buf](HStatus hStatus, HBuffer const& hBuffer,
+ uint32_t generationNumber) {
+ converted = h2b(hStatus, &status) && h2b(hBuffer, buf);
+ if (*buf) {
+ (*buf)->setGenerationNumber(generationNumber);
+ }
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult =
+ mBase->setMaxDequeuedBufferCount(static_cast<int32_t>(maxDequeuedBuffers));
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t dequeueBuffer(uint32_t width, uint32_t height, uint32_t pixelFormat,
+ C2AndroidMemoryUsage androidUsage, int* slot, sp<Fence>* fence) {
+ using Input = HGraphicBufferProducer::DequeueBufferInput;
+ using Output = HGraphicBufferProducer::DequeueBufferOutput;
+ Input input{width, height, pixelFormat, androidUsage.asGrallocUsage()};
+
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ Return<void> transResult = mBase->dequeueBuffer(
+ input, [&converted, &status, &slot, &fence](HStatus hStatus, int32_t hSlot,
+ Output const& hOutput) {
+ converted = h2b(hStatus, &status);
+ if (!converted || status != android::NO_ERROR) {
+ return;
+ }
+
+ *slot = hSlot;
+ if (hOutput.bufferNeedsReallocation) {
+ status = BUFFER_NEEDS_REALLOCATION;
+ }
+ converted = h2b(hOutput.fence, fence);
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION &&
+ status != android::TIMED_OUT) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t detachBuffer(int slot) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult = mBase->detachBuffer(static_cast<int32_t>(slot));
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t attachBuffer(const sp<GraphicBuffer>& buffer, int* outSlot) {
+ HBuffer hBuffer;
+ uint32_t hGenerationNumber;
+ if (!b2h(buffer, &hBuffer, &hGenerationNumber)) {
+ ALOGE("%s: invalid input buffer.", __func__);
+ return BAD_VALUE;
+ }
+
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ Return<void> transResult = mBase->attachBuffer(
+ hBuffer, hGenerationNumber,
+ [&converted, &status, outSlot](HStatus hStatus, int32_t hSlot,
+ bool releaseAllBuffers) {
+ converted = h2b(hStatus, &status);
+ *outSlot = static_cast<int>(hSlot);
+ if (converted && releaseAllBuffers && status == android::NO_ERROR) {
+ status = RELEASE_ALL_BUFFERS;
+ }
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t cancelBuffer(int slot, const sp<Fence>& fence) {
+ HFenceWrapper hFenceWrapper;
+ if (!b2h(fence, &hFenceWrapper)) {
+ ALOGE("%s(): corrupted input fence.", __func__);
+ return UNKNOWN_ERROR;
+ }
+
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult =
+ mBase->cancelBuffer(static_cast<int32_t>(slot), hFenceWrapper.getHandle());
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGE("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ int query(int what, int* value) {
+ int result = 0;
+ Return<void> transResult =
+ mBase->query(static_cast<int32_t>(what), [&result, value](int32_t r, int32_t v) {
+ result = static_cast<int>(r);
+ *value = static_cast<int>(v);
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ return result;
+ }
+
+ status_t allowAllocation(bool allow) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult = mBase->allowAllocation(allow);
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ if (status != android::NO_ERROR) {
+ ALOGW("%s() failed: %d", __func__, status);
+ }
+ return status;
+ }
+
+ status_t getUniqueId(uint64_t* outId) const {
+ Return<uint64_t> transResult = mBase->getUniqueId();
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+
+ *outId = static_cast<uint64_t>(transResult);
+ return android::NO_ERROR;
+ }
+
+ // android::IProducerListener cannot be depended by vendor library, so we use HProducerListener
+ // directly.
+ status_t connect(sp<HProducerListener> const& hListener, int32_t api,
+ bool producerControlledByApp) {
+ bool converted = false;
+ status_t status = UNKNOWN_ERROR;
+ // hack(b/146409777): We pass self-defined api, so we don't use b2h() here.
+ Return<void> transResult = mBase->connect(
+ hListener, static_cast<HConnectionType>(api), producerControlledByApp,
+ [&converted, &status](HStatus hStatus, HQueueBufferOutput const& /* hOutput */) {
+ converted = h2b(hStatus, &status);
+ });
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!converted) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ return status;
+ }
+
+ status_t setDequeueTimeout(nsecs_t timeout) {
+ status_t status = UNKNOWN_ERROR;
+ Return<HStatus> transResult = mBase->setDequeueTimeout(static_cast<int64_t>(timeout));
+
+ if (!transResult.isOk()) {
+ ALOGE("%s(): transaction failed: %s", __func__, transResult.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ if (!h2b(static_cast<HStatus>(transResult), &status)) {
+ ALOGE("%s(): corrupted transaction.", __func__);
+ return FAILED_TRANSACTION;
+ }
+ return status;
+ }
+
+private:
+ const sp<HGraphicBufferProducer> mBase;
+};
+
+// This class is used to notify the listener when a certain event happens.
+class EventNotifier : public virtual android::RefBase {
+public:
+ class Listener {
+ public:
+ virtual ~Listener() = default;
+
+ // Called by EventNotifier when a certain event happens.
+ virtual void onEventNotified() = 0;
+ };
+
+ explicit EventNotifier(const std::shared_ptr<Listener>& listener) : mListener(listener) {}
+ virtual ~EventNotifier() = default;
+
+protected:
+ void notify() {
+ ALOGV("%s()", __func__);
+ std::shared_ptr<Listener> listener = mListener.lock();
+ if (listener) {
+ listener->onEventNotified();
+ }
+ }
+
+ std::weak_ptr<Listener> mListener;
+};
+
+// Notifies the listener when the connected IGBP releases buffers.
+class BufferReleasedNotifier : public EventNotifier, public HProducerListener {
+public:
+ using EventNotifier::EventNotifier;
+ ~BufferReleasedNotifier() override = default;
+
+ // HProducerListener implementation
+ Return<void> onBuffersReleased(uint32_t count) override {
+ ALOGV("%s(%u)", __func__, count);
+ if (count > 0) {
+ notify();
+ }
+ return {};
+ }
+};
+
/**
* BlockPoolData implementation for C2VdaBqBlockPool. The life cycle of this object should be as
* long as its accompanied C2GraphicBlock.
@@ -119,19 +425,18 @@
}
// static
-c2_status_t C2VdaBqBlockPool::getPoolIdFromGraphicBlock(
- const std::shared_ptr<C2GraphicBlock>& block, uint32_t* poolId) {
+std::optional<uint32_t> C2VdaBqBlockPool::getBufferIdFromGraphicBlock(const C2Block2D& block) {
uint32_t width, height, format, stride, igbp_slot, generation;
uint64_t usage, igbp_id;
- android::_UnwrapNativeCodec2GrallocMetadata(block->handle(), &width, &height, &format, &usage,
+ android::_UnwrapNativeCodec2GrallocMetadata(block.handle(), &width, &height, &format, &usage,
&stride, &generation, &igbp_id, &igbp_slot);
ALOGV("Unwrap Metadata: igbp[%" PRIu64 ", %u] (%u*%u, fmt %#x, usage %" PRIx64 ", stride %u)",
igbp_id, igbp_slot, width, height, format, usage, stride);
- *poolId = igbp_slot;
- return C2_OK;
+ return igbp_slot;
}
-class C2VdaBqBlockPool::Impl : public std::enable_shared_from_this<C2VdaBqBlockPool::Impl> {
+class C2VdaBqBlockPool::Impl : public std::enable_shared_from_this<C2VdaBqBlockPool::Impl>,
+ public EventNotifier::Listener {
public:
using HGraphicBufferProducer = C2VdaBqBlockPool::HGraphicBufferProducer;
@@ -139,6 +444,9 @@
// TODO: should we detach buffers on producer if any on destructor?
~Impl() = default;
+ // EventNotifier::Listener implementation.
+ void onEventNotified() override;
+
c2_status_t fetchGraphicBlock(uint32_t width, uint32_t height, uint32_t format,
C2MemoryUsage usage,
std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
@@ -148,27 +456,11 @@
c2_status_t updateGraphicBlock(bool willCancel, uint32_t oldSlot, uint32_t* newSlot,
std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
c2_status_t getMinBuffersForDisplay(size_t* bufferCount);
+ bool setNotifyBlockAvailableCb(::base::OnceClosure cb);
private:
friend struct C2VdaBqBlockPoolData;
- // The exponential rate control calculator with factor of 2. Per increase() call will double the
- // value until it reaches maximum. reset() will set value to the minimum.
- class ExpRateControlCalculator {
- public:
- ExpRateControlCalculator(int min, int max) : kMinValue(min), kMaxValue(max), mValue(min) {}
- ExpRateControlCalculator() = delete;
-
- void reset() { mValue = kMinValue; }
- void increase() { mValue = std::min(kMaxValue, mValue << 1); }
- int value() const { return mValue; }
-
- private:
- const int kMinValue;
- const int kMaxValue;
- int mValue;
- };
-
// Requested buffer formats.
struct BufferFormat {
BufferFormat(uint32_t width, uint32_t height, uint32_t pixelFormat,
@@ -185,32 +477,19 @@
// For C2VdaBqBlockPoolData to detach corresponding slot buffer from BufferQueue.
void detachBuffer(uint64_t producerId, int32_t slotId);
- // Fetches a spare slot index by dequeueing and requesting one extra buffer from producer. The
- // spare buffer slot guarantees at least one buffer to be dequeued in producer, so as to prevent
- // the invalid operation for producer of the attempt to dequeue buffers exceeded the maximal
- // dequeued buffer count.
- // This function should be called after the last requested buffer is fetched in
- // fetchGraphicBlock(), or in the beginning of switchProducer(). Block pool should store the
- // slot index into |mSpareSlot| and cancel the buffer immediately.
- // The generation number and usage of the spare buffer will be recorded in |generation| and
- // |usage|, which will be useful later in switchProducer().
- c2_status_t fetchSpareBufferSlot(HGraphicBufferProducer* const producer, uint32_t width,
- uint32_t height, uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage, uint32_t* generation,
- uint64_t* usage);
-
- // Helper function to call dequeue buffer to producer.
- c2_status_t dequeueBuffer(HGraphicBufferProducer* const producer, uint32_t width,
- uint32_t height, uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage, int32_t& status, int32_t& slot,
- sp<Fence>& fence);
+ // Queries the generation and usage flags from the given producer by dequeuing and requesting a
+ // buffer (the buffer is then detached and freed).
+ c2_status_t queryGenerationAndUsage(H2BGraphicBufferProducer* const producer, uint32_t width,
+ uint32_t height, uint32_t pixelFormat,
+ C2AndroidMemoryUsage androidUsage, uint32_t* generation,
+ uint64_t* usage);
// Switches producer and transfers allocated buffers from old producer to the new one.
- bool switchProducer(HGraphicBufferProducer* const newProducer, uint64_t newProducerId);
+ bool switchProducer(H2BGraphicBufferProducer* const newProducer, uint64_t newProducerId);
const std::shared_ptr<C2Allocator> mAllocator;
- sp<HGraphicBufferProducer> mProducer;
+ std::unique_ptr<H2BGraphicBufferProducer> mProducer;
uint64_t mProducerId;
C2BufferQueueBlockPool::OnRenderCallback mRenderCallback;
@@ -232,14 +511,10 @@
std::map<int32_t, std::shared_ptr<C2GraphicAllocation>> mSlotAllocations;
// Number of buffers requested on requestNewBufferSet() call.
size_t mBuffersRequested;
- // The slot index of spare buffer.
- int32_t mSpareSlot;
// Currently requested buffer formats.
BufferFormat mBufferFormat;
// The map recorded the slot indices from old producer to new producer.
std::map<int32_t, int32_t> mProducerChangeSlotMap;
- // The rate control calculator for the delay of dequeueing spare buffer.
- ExpRateControlCalculator mSpareDequeueDelayUs;
// The counter for representing the buffer count in client. Only used in producer switching
// case. It will be reset in switchProducer(), and accumulated in updateGraphicBlock() routine.
uint32_t mBuffersInClient = 0u;
@@ -247,18 +522,27 @@
// Toggle off when requestNewBufferSet() is called. We forcedly detach all slots to make sure
// all slots are available, except the ones owned by client.
bool mProducerSwitched = false;
+
+ // Listener for buffer release events.
+ sp<EventNotifier> mFetchBufferNotifier;
+
+ std::mutex mBufferReleaseMutex;
+ // Set to true when the buffer release event is triggered after dequeueing
+ // buffer from IGBP times out.
+ bool mBufferReleasedAfterTimedOut GUARDED_BY(mBufferReleaseMutex) = false;
+ // The callback to notify the caller the buffer is available.
+ ::base::OnceClosure mNotifyBlockAvailableCb GUARDED_BY(mBufferReleaseMutex);
};
C2VdaBqBlockPool::Impl::Impl(const std::shared_ptr<C2Allocator>& allocator)
: mAllocator(allocator),
mAllocateBuffersLock(mConfigureProducerAndAllocateBuffersMutex, std::defer_lock),
- mBuffersRequested(0u),
- mSpareSlot(-1),
- mSpareDequeueDelayUs(kDequeueSpareMinDelayUs, kDequeueSpareMaxDelayUs) {}
+ mBuffersRequested(0u) {}
c2_status_t C2VdaBqBlockPool::Impl::fetchGraphicBlock(
uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
+ ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mMutex);
if (!mProducer) {
@@ -280,41 +564,46 @@
return C2_BAD_STATE;
}
- sp<Fence> fence = new Fence();
C2AndroidMemoryUsage androidUsage = usage;
- int32_t status;
uint32_t pixelFormat = format;
int32_t slot;
-
- c2_status_t err = dequeueBuffer(mProducer.get(), width, height, pixelFormat, androidUsage,
- status, slot, fence);
- if (err != C2_OK) {
- return err;
+ sp<Fence> fence = new Fence();
+ status_t status =
+ mProducer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
+ // The C2VdaBqBlockPool does not fully own the bufferqueue. After buffers are dequeued here,
+ // they are passed into the codec2 framework, processed, and eventually queued into the
+ // bufferqueue. The C2VdaBqBlockPool cannot determine exactly when a buffer gets queued.
+ // However, if every buffer is being processed by the codec2 framework, then dequeueBuffer()
+ // will return INVALID_OPERATION because of an attempt to dequeue too many buffers.
+ // The C2VdaBqBlockPool cannot prevent this from happening, so just map it to TIMED_OUT
+ // and let the C2VdaBqBlockPool's caller's timeout retry logic handle the failure.
+ if (status == android::INVALID_OPERATION) {
+ status = android::TIMED_OUT;
+ }
+ if (status == android::TIMED_OUT) {
+ std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
+ mBufferReleasedAfterTimedOut = false;
+ }
+ if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION) {
+ return asC2Error(status);
}
// Wait for acquire fence if we get one.
- HFenceWrapper hFenceWrapper{};
- if (!b2h(fence, &hFenceWrapper)) {
- ALOGE("Invalid fence received from dequeueBuffer.");
- return C2_BAD_VALUE;
- }
if (fence) {
status_t fenceStatus = fence->wait(kFenceWaitTimeMs);
if (fenceStatus != android::NO_ERROR) {
- Return<HStatus> cancelTransStatus =
- mProducer->cancelBuffer(slot, hFenceWrapper.getHandle());
- if (!cancelTransStatus.isOk()) {
- ALOGE("cancelBuffer transaction error: %s",
- cancelTransStatus.description().c_str());
+ if (mProducer->cancelBuffer(slot, fence) != android::NO_ERROR) {
return C2_CORRUPTED;
}
+
if (fenceStatus == -ETIME) { // fence wait timed out
- ALOGV("buffer fence wait timed out, wait for retry...");
+ ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
return C2_TIMED_OUT;
}
ALOGE("buffer fence wait error: %d", fenceStatus);
return asC2Error(fenceStatus);
}
+
if (mRenderCallback) {
nsecs_t signalTime = fence->getSignalTime();
if (signalTime >= 0 && signalTime < INT64_MAX) {
@@ -327,35 +616,13 @@
auto iter = mSlotAllocations.find(slot);
if (iter == mSlotAllocations.end()) {
- if (slot == mSpareSlot) {
- // The dequeued slot is the spare buffer, we don't use this buffer for decoding and must
- // cancel it after the delay time. Other working buffers may be available and pushed to
- // free buffer queue in producer during the delay.
- ALOGV("dequeued spare slot, cancel it after a wait time delay (%d)...",
- mSpareDequeueDelayUs.value());
- ::usleep(mSpareDequeueDelayUs.value()); // wait for retry
- // Double the delay time if spare buffer still be dequeued the next time. This could
- // prevent block pool keeps aggressively dequeueing spare buffer while other buffers are
- // not available yet.
- mSpareDequeueDelayUs.increase();
- Return<HStatus> cancelTransStatus =
- mProducer->cancelBuffer(slot, hFenceWrapper.getHandle());
- if (!cancelTransStatus.isOk()) {
- ALOGE("cancelBuffer transaction error: %s",
- cancelTransStatus.description().c_str());
- return C2_CORRUPTED;
- }
- return C2_TIMED_OUT;
- }
if (mSlotAllocations.size() >= mBuffersRequested) {
// The dequeued slot has a pre-allocated buffer whose size and format is as same as
// currently requested (but was not dequeued during allocation cycle). Just detach it to
// free this slot. And try dequeueBuffer again.
ALOGD("dequeued a new slot index but already allocated enough buffers. Detach it.");
- Return<HStatus> detachTransStatus = mProducer->detachBuffer(slot);
- if (!detachTransStatus.isOk()) {
- ALOGE("detachBuffer transaction error: %s",
- detachTransStatus.description().c_str());
+
+ if (mProducer->detachBuffer(slot) != android::NO_ERROR) {
return C2_CORRUPTED;
}
return C2_TIMED_OUT;
@@ -370,31 +637,9 @@
// Call requestBuffer to allocate buffer for the slot and obtain the reference.
sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
- uint32_t generation;
- Return<void> transStatus = mProducer->requestBuffer(
- slot, [&status, &slotBuffer, &generation](HStatus hStatus, HBuffer const& hBuffer,
- uint32_t generationNumber) {
- if (h2b(hStatus, &status) && h2b(hBuffer, &slotBuffer) && slotBuffer) {
- generation = generationNumber;
- slotBuffer->setGenerationNumber(generationNumber);
- } else {
- status = android::BAD_VALUE;
- }
- });
-
- // Check requestBuffer transaction status
- if (!transStatus.isOk()) {
- ALOGE("requestBuffer transaction error: %s", transStatus.description().c_str());
- return C2_CORRUPTED;
- }
- // Check requestBuffer return flag
+ status = mProducer->requestBuffer(slot, &slotBuffer);
if (status != android::NO_ERROR) {
- ALOGE("requestBuffer failed: %d", status);
- Return<HStatus> cancelTransStatus =
- mProducer->cancelBuffer(slot, hFenceWrapper.getHandle());
- if (!cancelTransStatus.isOk()) {
- ALOGE("cancelBuffer transaction error: %s",
- cancelTransStatus.description().c_str());
+ if (mProducer->cancelBuffer(slot, fence) != android::NO_ERROR) {
return C2_CORRUPTED;
}
return asC2Error(status);
@@ -420,35 +665,10 @@
mSlotAllocations[slot] = std::move(alloc);
if (mSlotAllocations.size() == mBuffersRequested) {
- // Allocate one spare buffer after allocating enough buffers requested by client.
- uint32_t generation;
- uint64_t usage;
-
- err = C2_TIMED_OUT;
- for (int32_t retriesLeft = kFetchSpareBufferMaxRetries;
- err == C2_TIMED_OUT && retriesLeft >= 0; retriesLeft--) {
- err = fetchSpareBufferSlot(mProducer.get(), width, height, pixelFormat,
- androidUsage, &generation, &usage);
- }
- if (err != C2_OK) {
- ALOGE("fetchSpareBufferSlot failed after %d retries: %d",
- kFetchSpareBufferMaxRetries, err);
- return err;
- }
-
// Already allocated enough buffers, set allowAllocation to false to restrict the
// eligible slots to allocated ones for future dequeue.
- Return<HStatus> transStatus = mProducer->allowAllocation(false);
- if (!transStatus.isOk()) {
- ALOGE("allowAllocation(false) transaction error: %s",
- transStatus.description().c_str());
- return C2_CORRUPTED;
- }
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
+ status = mProducer->allowAllocation(false);
if (status != android::NO_ERROR) {
- ALOGE("allowAllocation(false) failed");
return asC2Error(status);
}
// Store buffer formats for future usage.
@@ -456,53 +676,54 @@
ALOG_ASSERT(mAllocateBuffersLock.owns_lock());
mAllocateBuffersLock.unlock();
}
- } else if (mSlotAllocations.size() < mBuffersRequested) {
- ALOGE("failed to allocate enough buffers");
- return C2_NO_MEMORY;
}
- // Reset spare dequeue delay time once we have dequeued a working buffer.
- mSpareDequeueDelayUs.reset();
-
auto poolData = std::make_shared<C2VdaBqBlockPoolData>(mProducerId, slot, shared_from_this());
*block = _C2BlockFactory::CreateGraphicBlock(mSlotAllocations[slot], std::move(poolData));
return C2_OK;
}
-c2_status_t C2VdaBqBlockPool::Impl::fetchSpareBufferSlot(HGraphicBufferProducer* const producer,
- uint32_t width, uint32_t height,
- uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage,
- uint32_t* generation, uint64_t* usage) {
- ALOGV("fetchSpareBufferSlot");
+void C2VdaBqBlockPool::Impl::onEventNotified() {
+ ALOGV("%s()", __func__);
+ ::base::OnceClosure outputCb;
+ {
+ std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
+
+ mBufferReleasedAfterTimedOut = true;
+ if (mNotifyBlockAvailableCb) {
+ outputCb = std::move(mNotifyBlockAvailableCb);
+ }
+ }
+
+ // Calling the callback outside the lock to avoid the deadlock.
+ if (outputCb) {
+ std::move(outputCb).Run();
+ }
+}
+
+c2_status_t C2VdaBqBlockPool::Impl::queryGenerationAndUsage(
+ H2BGraphicBufferProducer* const producer, uint32_t width, uint32_t height,
+ uint32_t pixelFormat, C2AndroidMemoryUsage androidUsage, uint32_t* generation,
+ uint64_t* usage) {
+ ALOGV("queryGenerationAndUsage");
sp<Fence> fence = new Fence();
int32_t status;
int32_t slot;
- c2_status_t err =
- dequeueBuffer(producer, width, height, pixelFormat, androidUsage, status, slot, fence);
- if (err != C2_OK) {
- return err;
+ status = producer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
+ if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION) {
+ return asC2Error(status);
}
// Wait for acquire fence if we get one.
- HFenceWrapper hFenceWrapper{};
- if (!b2h(fence, &hFenceWrapper)) {
- ALOGE("Invalid fence received from dequeueBuffer.");
- return C2_BAD_VALUE;
- }
if (fence) {
status_t fenceStatus = fence->wait(kFenceWaitTimeMs);
if (fenceStatus != android::NO_ERROR) {
- Return<HStatus> cancelTransStatus =
- producer->cancelBuffer(slot, hFenceWrapper.getHandle());
- if (!cancelTransStatus.isOk()) {
- ALOGE("cancelBuffer transaction error: %s",
- cancelTransStatus.description().c_str());
+ if (producer->cancelBuffer(slot, fence) != android::NO_ERROR) {
return C2_CORRUPTED;
}
if (fenceStatus == -ETIME) { // fence wait timed out
- ALOGV("buffer fence wait timed out, wait for retry...");
+ ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
return C2_TIMED_OUT;
}
ALOGE("buffer fence wait error: %d", fenceStatus);
@@ -510,87 +731,25 @@
}
}
- if (status != BUFFER_NEEDS_REALLOCATION) {
- ALOGD("dequeued a new slot index without BUFFER_NEEDS_REALLOCATION flag.");
- }
-
// Call requestBuffer to allocate buffer for the slot and obtain the reference.
// Get generation number here.
sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
- Return<void> transStatus = producer->requestBuffer(
- slot, [&status, &slotBuffer, &generation](HStatus hStatus, HBuffer const& hBuffer,
- uint32_t generationNumber) {
- if (h2b(hStatus, &status) && h2b(hBuffer, &slotBuffer) && slotBuffer) {
- *generation = generationNumber;
- slotBuffer->setGenerationNumber(generationNumber);
- } else {
- status = android::BAD_VALUE;
- }
- });
+ status = producer->requestBuffer(slot, &slotBuffer);
- // Check requestBuffer transaction status.
- if (!transStatus.isOk()) {
- ALOGE("requestBuffer transaction error: %s", transStatus.description().c_str());
- return C2_CORRUPTED;
- }
-
- // Get generation number and usage from the slot buffer.
- *usage = slotBuffer->getUsage();
- ALOGV("Obtained from spare buffer: generation = %u, usage = %" PRIu64 "", *generation, *usage);
-
- // Cancel this buffer anyway.
- Return<HStatus> cancelTransStatus = producer->cancelBuffer(slot, hFenceWrapper.getHandle());
- if (!cancelTransStatus.isOk()) {
- ALOGE("cancelBuffer transaction error: %s", cancelTransStatus.description().c_str());
+ // Detach and delete the temporary buffer.
+ if (producer->detachBuffer(slot) != android::NO_ERROR) {
return C2_CORRUPTED;
}
// Check requestBuffer return flag.
if (status != android::NO_ERROR) {
- ALOGE("requestBuffer failed: %d", status);
return asC2Error(status);
}
- mSpareSlot = slot;
- mSpareDequeueDelayUs.reset();
- ALOGV("Spare slot index = %d", mSpareSlot);
- return C2_OK;
-}
-
-c2_status_t C2VdaBqBlockPool::Impl::dequeueBuffer(HGraphicBufferProducer* const producer,
- uint32_t width, uint32_t height,
- uint32_t pixelFormat,
- C2AndroidMemoryUsage androidUsage,
- int32_t& status, int32_t& slot,
- sp<Fence>& fence) {
- using Input = HGraphicBufferProducer::DequeueBufferInput;
- using Output = HGraphicBufferProducer::DequeueBufferOutput;
- bool needRealloc = false;
- Return<void> transStatus = producer->dequeueBuffer(
- Input{width, height, pixelFormat, androidUsage.asGrallocUsage()},
- [&status, &slot, &needRealloc, &fence](HStatus hStatus, int32_t hSlot,
- Output const& hOutput) {
- slot = hSlot;
- if (!h2b(hStatus, &status) || !h2b(hOutput.fence, &fence)) {
- status = android::BAD_VALUE;
- } else {
- needRealloc = hOutput.bufferNeedsReallocation;
- if (needRealloc) {
- status = BUFFER_NEEDS_REALLOCATION;
- }
- }
- });
-
- // Check dequeueBuffer transaction status
- if (!transStatus.isOk()) {
- ALOGE("dequeueBuffer transaction error: %s", transStatus.description().c_str());
- return C2_CORRUPTED;
- }
- // Check dequeueBuffer return flag
- if (status != android::NO_ERROR && status != BUFFER_NEEDS_REALLOCATION) {
- ALOGE("dequeueBuffer failed: %d", status);
- return asC2Error(status);
- }
+ // Get generation number and usage from the slot buffer.
+ *usage = slotBuffer->getUsage();
+ *generation = slotBuffer->getGenerationNumber();
+ ALOGV("Obtained from temp buffer: generation = %u, usage = %" PRIu64 "", *generation, *usage);
return C2_OK;
}
@@ -630,18 +789,12 @@
// Skip detaching the buffer which is owned by client now.
continue;
}
- Return<HStatus> transStatus = mProducer->detachBuffer(slot);
- if (!transStatus.isOk()) {
- ALOGE("detachBuffer trans error: %s", transStatus.description().c_str());
- return C2_CORRUPTED;
- }
- int32_t status;
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
+ status_t status = mProducer->detachBuffer(slot);
if (status == android::NO_INIT) {
// No more active buffer slot. Break the loop now.
break;
+ } else if (status != android::NO_ERROR) {
+ return C2_CORRUPTED;
}
}
mProducerSwitched = false;
@@ -653,19 +806,8 @@
// The remained slot indices in |mSlotAllocations| now are still dequeued (un-available).
// maxDequeuedBufferCount should be set to "new requested buffer count" + "still dequeued buffer
// count" to make sure it has enough available slots to request buffer from.
- // Moreover, one extra buffer count is added for fetching spare buffer slot index.
- Return<HStatus> transStatus =
- mProducer->setMaxDequeuedBufferCount(bufferCount + mSlotAllocations.size() + 1);
- if (!transStatus.isOk()) {
- ALOGE("setMaxDequeuedBufferCount trans error: %s", transStatus.description().c_str());
- return C2_CORRUPTED;
- }
- int32_t status;
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
+ status_t status = mProducer->setMaxDequeuedBufferCount(bufferCount + mSlotAllocations.size());
if (status != android::NO_ERROR) {
- ALOGE("setMaxDequeuedBufferCount failed");
return asC2Error(status);
}
@@ -674,18 +816,9 @@
mSlotAllocations.clear();
mProducerChangeSlotMap.clear();
mBuffersRequested = static_cast<size_t>(bufferCount);
- mSpareSlot = -1;
- Return<HStatus> transStatus2 = mProducer->allowAllocation(true);
- if (!transStatus2.isOk()) {
- ALOGE("allowAllocation(true) transaction error: %s", transStatus2.description().c_str());
- return C2_CORRUPTED;
- }
- if (!h2b(static_cast<HStatus>(transStatus2), &status)) {
- status = android::BAD_VALUE;
- }
+ status = mProducer->allowAllocation(true);
if (status != android::NO_ERROR) {
- ALOGE("allowAllocation(true) failed");
return asC2Error(status);
}
return C2_OK;
@@ -707,18 +840,16 @@
}
std::lock_guard<std::mutex> lock(mMutex);
+ auto newProducer = std::make_unique<H2BGraphicBufferProducer>(producer);
uint64_t producerId;
- Return<uint64_t> transStatus = producer->getUniqueId();
- if (!transStatus.isOk()) {
- ALOGE("getUniqueId transaction error: %s", transStatus.description().c_str());
+ if (newProducer->getUniqueId(&producerId) != android::NO_ERROR) {
return;
}
- producerId = static_cast<uint64_t>(transStatus);
if (mProducer && mProducerId != producerId) {
ALOGI("Producer (Surface) is going to switch... ( %" PRIu64 " -> %" PRIu64 " )",
mProducerId, producerId);
- if (!switchProducer(producer.get(), producerId)) {
+ if (!switchProducer(newProducer.get(), producerId)) {
mProducerChangeSlotMap.clear();
return;
}
@@ -726,13 +857,25 @@
mSlotAllocations.clear();
}
+ if (newProducer->setDequeueTimeout(0) != android::NO_ERROR) {
+ ALOGE("%s(): failed to setDequeueTimeout(0)", __func__);
+ return;
+ }
+
+ // hack(b/146409777): Try to connect ARC-specific listener first.
+ sp<BufferReleasedNotifier> listener = new BufferReleasedNotifier(shared_from_this());
+ if (newProducer->connect(listener, 'ARC\0', false) == android::NO_ERROR) {
+ ALOGI("connected to ARC-specific IGBP listener.");
+ mFetchBufferNotifier = listener;
+ }
+
// HGraphicBufferProducer could (and should) be replaced if the client has set a new generation
// number to producer. The old HGraphicBufferProducer will be disconnected and deprecated then.
- mProducer = producer;
+ mProducer = std::move(newProducer);
mProducerId = producerId;
}
-bool C2VdaBqBlockPool::Impl::switchProducer(HGraphicBufferProducer* const newProducer,
+bool C2VdaBqBlockPool::Impl::switchProducer(H2BGraphicBufferProducer* const newProducer,
uint64_t newProducerId) {
if (mAllocator->getId() == android::V4L2AllocatorId::SECURE_GRAPHIC) {
// TODO(johnylin): support this when we meet the use case in the future.
@@ -742,26 +885,15 @@
// Set maxDequeuedBufferCount to new producer.
// Just like requestNewBufferSet(), maxDequeuedBufferCount should be set to "requested buffer
- // count" + "buffer count in client" + 1 (spare buffer) to make sure it has enough available
- // slots to request buffer from.
+ // count" + "buffer count in client" to make sure it has enough available slots to request
+ // buffers from.
// "Requested buffer count" could be obtained by the size of |mSlotAllocations|. However, it is
// not able to know "buffer count in client" in blockpool's aspect. The alternative solution is
// to set the worse case first, which is equal to the size of |mSlotAllocations|. And in the end
// of updateGraphicBlock() routine, we could get the arbitrary "buffer count in client" by
// counting the calls of updateGraphicBlock(willCancel=true). Then we set maxDequeuedBufferCount
// again to the correct value.
- Return<HStatus> transStatus =
- newProducer->setMaxDequeuedBufferCount(mSlotAllocations.size() * 2 + 1);
- if (!transStatus.isOk()) {
- ALOGE("setMaxDequeuedBufferCount trans error: %s", transStatus.description().c_str());
- return false;
- }
- int32_t status;
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
- if (status != android::NO_ERROR) {
- ALOGE("setMaxDequeuedBufferCount failed");
+ if (newProducer->setMaxDequeuedBufferCount(mSlotAllocations.size() * 2) != android::NO_ERROR) {
return false;
}
@@ -769,29 +901,19 @@
mBuffersInClient = 0;
// Set allowAllocation to new producer.
- Return<HStatus> transStatus2 = newProducer->allowAllocation(true);
- if (!transStatus2.isOk()) {
- ALOGE("allowAllocation(true) transaction error: %s", transStatus2.description().c_str());
- return false;
- }
- if (!h2b(static_cast<HStatus>(transStatus2), &status)) {
- status = android::BAD_VALUE;
- }
- if (status != android::NO_ERROR) {
- ALOGE("allowAllocation(true) failed");
+ if (newProducer->allowAllocation(true) != android::NO_ERROR) {
return false;
}
- // Fetch spare buffer slot from new producer first, this step also allows us to obtain the
- // generation number and usage of new producer. While attaching buffers, generation number and
- // usage must be aligned to the producer.
+ // Get a buffer from the new producer to get the generation number and usage of new producer.
+ // While attaching buffers, generation number and usage must be aligned to the producer.
uint32_t newGeneration;
uint64_t newUsage;
- c2_status_t err = fetchSpareBufferSlot(newProducer, mBufferFormat.mWidth, mBufferFormat.mHeight,
- mBufferFormat.mPixelFormat, mBufferFormat.mUsage,
- &newGeneration, &newUsage);
+ c2_status_t err = queryGenerationAndUsage(newProducer, mBufferFormat.mWidth,
+ mBufferFormat.mHeight, mBufferFormat.mPixelFormat,
+ mBufferFormat.mUsage, &newGeneration, &newUsage);
if (err != C2_OK) {
- ALOGE("fetchSpareBufferSlot failed: %d", err);
+ ALOGE("queryGenerationAndUsage failed: %d", err);
return false;
}
@@ -809,7 +931,7 @@
native_handle_t* grallocHandle =
android::UnwrapNativeCodec2GrallocHandle(iter->second->handle());
- // Update generation number and usage from newly-allocated spare buffer.
+ // Update generation number and usage.
sp<GraphicBuffer> graphicBuffer =
new GraphicBuffer(grallocHandle, GraphicBuffer::CLONE_HANDLE, width, height, format,
1, newUsage, stride);
@@ -820,37 +942,9 @@
graphicBuffer->setGenerationNumber(newGeneration);
native_handle_delete(grallocHandle);
- // Convert GraphicBuffer into HBuffer.
- HBuffer hBuffer{};
- uint32_t hGenerationNumber{};
- if (!b2h(graphicBuffer, &hBuffer, &hGenerationNumber)) {
- ALOGE("Failed to convert GraphicBuffer to HBuffer");
+ if (newProducer->attachBuffer(graphicBuffer, &slot) != android::NO_ERROR) {
return false;
}
-
- // Attach HBuffer to new producer and get the attached slot index.
- bool converted{};
- Return<void> transStatus = newProducer->attachBuffer(
- hBuffer, hGenerationNumber,
- [&converted, &status, &slot](HStatus hStatus, int32_t hSlot, bool releaseAll) {
- converted = h2b(hStatus, &status);
- if (!converted) {
- status = android::BAD_VALUE;
- }
- slot = hSlot;
- if (converted && releaseAll && status == android::OK) {
- status = android::INVALID_OPERATION;
- }
- });
- if (!transStatus.isOk()) {
- ALOGE("attachBuffer trans error: %s", transStatus.description().c_str());
- return false;
- }
- if (status != android::NO_ERROR) {
- ALOGE("attachBuffer failed: %d", status);
- return false;
- }
-
// Convert back to C2GraphicAllocation wrapping new producer id, generation number, usage
// and slot index.
ALOGV("buffer wraps { producer id: %" PRIu64 ", slot: %d }", newProducerId, slot);
@@ -876,29 +970,14 @@
}
// Set allowAllocation to false so producer could not allocate new buffers.
- Return<HStatus> transStatus4 = newProducer->allowAllocation(false);
- if (!transStatus4.isOk()) {
- ALOGE("allowAllocation(false) transaction error: %s", transStatus4.description().c_str());
- return false;
- }
- if (!h2b(static_cast<HStatus>(transStatus4), &status)) {
- status = android::BAD_VALUE;
- }
- if (status != android::NO_ERROR) {
+ if (newProducer->allowAllocation(false) != android::NO_ERROR) {
ALOGE("allowAllocation(false) failed");
return false;
}
// Try to detach all buffers from old producer.
for (const auto& slotAllocation : mSlotAllocations) {
- Return<HStatus> transStatus = mProducer->detachBuffer(slotAllocation.first);
- if (!transStatus.isOk()) {
- ALOGE("detachBuffer trans error: %s", transStatus.description().c_str());
- return false;
- }
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
+ status_t status = mProducer->detachBuffer(slotAllocation.first);
if (status != android::NO_ERROR) {
ALOGW("detachBuffer slot=%d from old producer failed: %d", slotAllocation.first,
status);
@@ -930,10 +1009,9 @@
mProducerChangeSlotMap.erase(it);
if (willCancel) {
+ sp<Fence> fence = new Fence();
// The old C2GraphicBlock might be owned by client. Cancel this slot.
- Return<HStatus> transStatus = mProducer->cancelBuffer(slot, hidl_handle{});
- if (!transStatus.isOk()) {
- ALOGE("cancelBuffer transaction error: %s", transStatus.description().c_str());
+ if (mProducer->cancelBuffer(slot, fence) != android::NO_ERROR) {
return C2_CORRUPTED;
}
// Client might try to attach the old buffer to the current producer on client's end,
@@ -950,21 +1028,11 @@
if (mProducerChangeSlotMap.empty()) {
// The updateGraphicBlock() routine is about to finish.
// Set the correct maxDequeuedBufferCount to producer, which is "requested buffer count" +
- // "buffer count in client" + 1 (spare buffer).
+ // "buffer count in client".
ALOGV("Requested buffer count: %zu, buffer count in client: %u", mSlotAllocations.size(),
mBuffersInClient);
- Return<HStatus> transStatus = mProducer->setMaxDequeuedBufferCount(mSlotAllocations.size() +
- mBuffersInClient + 1);
- if (!transStatus.isOk()) {
- ALOGE("setMaxDequeuedBufferCount trans error: %s", transStatus.description().c_str());
- return C2_CORRUPTED;
- }
- int32_t status;
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
- if (status != android::NO_ERROR) {
- ALOGE("setMaxDequeuedBufferCount failed: %d", status);
+ if (mProducer->setMaxDequeuedBufferCount(mSlotAllocations.size() + mBuffersInClient) !=
+ android::NO_ERROR) {
return C2_CORRUPTED;
}
mProducerSwitched = true;
@@ -981,16 +1049,7 @@
}
int32_t status, value;
- Return<void> transStatus = mProducer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
- [&status, &value](int32_t tStatus, int32_t tValue) {
- status = tStatus;
- value = tValue;
- });
- if (!transStatus.isOk()) {
- ALOGE("query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS) trans error: %s",
- transStatus.description().c_str());
- return C2_CORRUPTED;
- }
+ status = mProducer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &value);
if (status != android::NO_ERROR) {
ALOGE("query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS) failed: %d", status);
return asC2Error(status);
@@ -1007,17 +1066,7 @@
ALOGV("detachBuffer: producer id = %" PRIu64 ", slot = %d", producerId, slotId);
std::lock_guard<std::mutex> lock(mMutex);
if (producerId == mProducerId && mProducer) {
- Return<HStatus> transStatus = mProducer->detachBuffer(slotId);
- if (!transStatus.isOk()) {
- ALOGE("detachBuffer trans error: %s", transStatus.description().c_str());
- return;
- }
- int32_t status;
- if (!h2b(static_cast<HStatus>(transStatus), &status)) {
- status = android::BAD_VALUE;
- }
- if (status != android::NO_ERROR) {
- ALOGD("detachBuffer failed: %d", status);
+ if (mProducer->detachBuffer(slotId) != android::NO_ERROR) {
return;
}
@@ -1030,6 +1079,32 @@
}
}
+bool C2VdaBqBlockPool::Impl::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
+ ALOGV("%s()", __func__);
+ if (mFetchBufferNotifier == nullptr) {
+ return false;
+ }
+
+ ::base::OnceClosure outputCb;
+ {
+ std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
+
+ // If there is any buffer released after dequeueBuffer() timed out, then we could notify the
+ // caller directly.
+ if (mBufferReleasedAfterTimedOut) {
+ outputCb = std::move(cb);
+ } else {
+ mNotifyBlockAvailableCb = std::move(cb);
+ }
+ }
+
+ // Calling the callback outside the lock to avoid the deadlock.
+ if (outputCb) {
+ std::move(outputCb).Run();
+ }
+ return true;
+}
+
C2VdaBqBlockPool::C2VdaBqBlockPool(const std::shared_ptr<C2Allocator>& allocator,
const local_id_t localId)
: C2BufferQueueBlockPool(allocator, localId), mLocalId(localId), mImpl(new Impl(allocator)) {}
@@ -1079,6 +1154,13 @@
return C2_NO_INIT;
}
+bool C2VdaBqBlockPool::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
+ if (mImpl) {
+ return mImpl->setNotifyBlockAvailableCb(std::move(cb));
+ }
+ return false;
+}
+
C2VdaBqBlockPoolData::C2VdaBqBlockPoolData(uint64_t producerId, int32_t slotId,
const std::shared_ptr<C2VdaBqBlockPool::Impl>& pool)
: mProducerId(producerId), mSlotId(slotId), mPool(pool) {}
@@ -1089,3 +1171,5 @@
}
mPool->detachBuffer(mProducerId, mSlotId);
}
+
+} // namespace android
diff --git a/plugin_store/C2VdaPooledBlockPool.cpp b/plugin_store/C2VdaPooledBlockPool.cpp
new file mode 100644
index 0000000..08fdfa0
--- /dev/null
+++ b/plugin_store/C2VdaPooledBlockPool.cpp
@@ -0,0 +1,109 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VdaPooledBlockPool"
+
+#include <v4l2_codec2/plugin_store/C2VdaPooledBlockPool.h>
+
+#include <time.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2BlockInternal.h>
+#include <bufferpool/BufferPoolTypes.h>
+#include <log/log.h>
+
+namespace android {
+namespace {
+// The wait time for another try to fetch a buffer from bufferpool.
+const int64_t kFetchRetryDelayUs = 10 * 1000;
+
+int64_t GetNowUs() {
+ struct timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = 0;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ int64_t nsecs = static_cast<int64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec;
+ return nsecs / 1000ll;
+}
+} // namespace
+
+using android::hardware::media::bufferpool::BufferPoolData;
+
+// static
+std::optional<uint32_t> C2VdaPooledBlockPool::getBufferIdFromGraphicBlock(const C2Block2D& block) {
+ std::shared_ptr<_C2BlockPoolData> blockPoolData =
+ _C2BlockFactory::GetGraphicBlockPoolData(block);
+ if (blockPoolData->getType() != _C2BlockPoolData::TYPE_BUFFERPOOL) {
+ ALOGE("Obtained C2GraphicBlock is not bufferpool-backed.");
+ return std::nullopt;
+ }
+ std::shared_ptr<BufferPoolData> bpData;
+ if (!_C2BlockFactory::GetBufferPoolData(blockPoolData, &bpData) || !bpData) {
+ ALOGE("BufferPoolData unavailable in block.");
+ return std::nullopt;
+ }
+ return bpData->mId;
+}
+
+// Tries to fetch a buffer from bufferpool. When the size of |mBufferIds| is smaller than
+// |mBufferCount|, pass the obtained buffer to caller and record its ID in BufferPoolData to
+// |mBufferIds|. When the size of |mBufferIds| is equal to |mBufferCount|, pass the obtained
+// buffer only if its ID is included in |mBufferIds|. Otherwise, discard the buffer and
+// return C2_TIMED_OUT.
+c2_status_t C2VdaPooledBlockPool::fetchGraphicBlock(uint32_t width, uint32_t height,
+ uint32_t format, C2MemoryUsage usage,
+ std::shared_ptr<C2GraphicBlock>* block) {
+ ALOG_ASSERT(block != nullptr);
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ if (mNextFetchTimeUs != 0) {
+ int delayUs = GetNowUs() - mNextFetchTimeUs;
+ if (delayUs > 0) {
+ ::usleep(delayUs);
+ }
+ mNextFetchTimeUs = 0;
+ }
+
+ std::shared_ptr<C2GraphicBlock> fetchBlock;
+ c2_status_t err =
+ C2PooledBlockPool::fetchGraphicBlock(width, height, format, usage, &fetchBlock);
+ if (err != C2_OK) {
+ ALOGE("Failed at C2PooledBlockPool::fetchGraphicBlock: %d", err);
+ return err;
+ }
+
+ std::optional<uint32_t> bufferId = getBufferIdFromGraphicBlock(*fetchBlock);
+ if (!bufferId) {
+ ALOGE("Failed to getBufferIdFromGraphicBlock");
+ return C2_CORRUPTED;
+ }
+
+ if (mBufferIds.size() < mBufferCount) {
+ mBufferIds.insert(*bufferId);
+ }
+
+ if (mBufferIds.find(*bufferId) != mBufferIds.end()) {
+ ALOGV("Returned buffer id = %u", *bufferId);
+ *block = std::move(fetchBlock);
+ return C2_OK;
+ }
+ ALOGV("No buffer could be recycled now, wait for another try...");
+ mNextFetchTimeUs = GetNowUs() + kFetchRetryDelayUs;
+ return C2_TIMED_OUT;
+}
+
+c2_status_t C2VdaPooledBlockPool::requestNewBufferSet(int32_t bufferCount) {
+ if (bufferCount <= 0) {
+ ALOGE("Invalid requested buffer count = %d", bufferCount);
+ return C2_BAD_VALUE;
+ }
+
+ std::lock_guard<std::mutex> lock(mMutex);
+ mBufferIds.clear();
+ mBufferCount = bufferCount;
+ return C2_OK;
+}
+
+} // namespace android
diff --git a/plugin_store/V4L2PluginStore.cpp b/plugin_store/V4L2PluginStore.cpp
index 86dd018..4475e2f 100644
--- a/plugin_store/V4L2PluginStore.cpp
+++ b/plugin_store/V4L2PluginStore.cpp
@@ -16,6 +16,7 @@
#include <log/log.h>
#include <v4l2_codec2/plugin_store/C2VdaBqBlockPool.h>
+#include <v4l2_codec2/plugin_store/C2VdaPooledBlockPool.h>
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
#include <v4l2_codec2/plugin_store/VendorAllocatorLoader.h>
@@ -67,7 +68,7 @@
switch (allocatorId) {
case V4L2AllocatorId::V4L2_BUFFERPOOL:
- return new C2PooledBlockPool(allocator, poolId);
+ return new C2VdaPooledBlockPool(allocator, poolId);
case V4L2AllocatorId::V4L2_BUFFERQUEUE:
return new C2VdaBqBlockPool(allocator, poolId);
diff --git a/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h b/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h
index 161e176..fd524d2 100644
--- a/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h
+++ b/plugin_store/include/v4l2_codec2/plugin_store/C2VdaBqBlockPool.h
@@ -7,10 +7,14 @@
#include <functional>
#include <map>
+#include <optional>
#include <C2BqBufferPriv.h>
#include <C2Buffer.h>
#include <C2PlatformSupport.h>
+#include <base/callback_forward.h>
+
+namespace android {
/**
* Marks the BlockPoolData in |sharedBlock| as shared. The destructor of BlockPoolData would not
@@ -40,10 +44,11 @@
* \note C2VdaBqBlockPool-specific function
*
* \param block the graphic block allocated by bufferqueue block pool.
- * \param poolId raw pointer where slot index in bufferqueue is stored.
+ *
+ * Return the buffer's slot index in bufferqueue if extraction is successful.
+ * Otherwise return std::nullopt.
*/
- static c2_status_t getPoolIdFromGraphicBlock(const std::shared_ptr<C2GraphicBlock>& block,
- uint32_t* poolId);
+ static std::optional<uint32_t> getBufferIdFromGraphicBlock(const C2Block2D& block);
/**
* It's a trick here. Return C2PlatformAllocatorStore::BUFFERQUEUE instead of the ID of backing
@@ -127,6 +132,18 @@
*/
c2_status_t getMinBuffersForDisplay(size_t* bufferCount);
+ /**
+ * Set the callback that will be triggered when there is block available.
+ *
+ * \note C2VdaBqBlockPool-specific function
+ *
+ * \param cb the callback function that will be triggered when there is block available.
+ *
+ * Return false if we don't support to notify the caller when a buffer is available.
+ *
+ */
+ bool setNotifyBlockAvailableCb(base::OnceClosure cb);
+
private:
friend struct C2VdaBqBlockPoolData;
class Impl;
@@ -135,4 +152,5 @@
std::shared_ptr<Impl> mImpl;
};
+} // namespace android
#endif // ANDROID_V4L2_CODEC2_PLUGIN_STORE_C2_VDA_BQ_BLOCK_POOL_H
diff --git a/plugin_store/include/v4l2_codec2/plugin_store/C2VdaPooledBlockPool.h b/plugin_store/include/v4l2_codec2/plugin_store/C2VdaPooledBlockPool.h
new file mode 100644
index 0000000..5603046
--- /dev/null
+++ b/plugin_store/include/v4l2_codec2/plugin_store/C2VdaPooledBlockPool.h
@@ -0,0 +1,56 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_PLUGIN_STORE_C2_VDA_POOLED_BLOCK_POOL_H
+#define ANDROID_V4L2_CODEC2_PLUGIN_STORE_C2_VDA_POOLED_BLOCK_POOL_H
+
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <set>
+
+#include <C2Buffer.h>
+#include <C2BufferPriv.h>
+#include <C2PlatformSupport.h>
+#include <android-base/thread_annotations.h>
+
+namespace android {
+
+class C2VdaPooledBlockPool : public C2PooledBlockPool {
+public:
+ using C2PooledBlockPool::C2PooledBlockPool;
+ ~C2VdaPooledBlockPool() override = default;
+
+ // Extracts the buffer ID from BufferPoolData of the graphic block.
+ // |block| is the graphic block allocated by bufferpool block pool.
+ static std::optional<uint32_t> getBufferIdFromGraphicBlock(const C2Block2D& block);
+
+ // Allocate the specified number of buffers.
+ // |bufferCount| is the number of requested buffers.
+ c2_status_t requestNewBufferSet(int32_t bufferCount);
+
+ // Return C2_OK and store a buffer in |block| if a buffer is successfully fetched.
+ // Return C2_TIMED_OUT if the pool already allocated |mBufferCount| buffers but they are all in
+ // use.
+ // Return C2_NO_MEMORY if the pool fails to allocate a new buffer.
+ c2_status_t fetchGraphicBlock(uint32_t width, uint32_t height, uint32_t format,
+ C2MemoryUsage usage,
+ std::shared_ptr<C2GraphicBlock>* block /* nonnull */) override;
+
+private:
+ // Function mutex to lock at the start of each API function call for protecting the
+ // synchronization of all member variables.
+ std::mutex mMutex;
+
+ // The ids of all allocated buffers.
+ std::set<uint32_t> mBufferIds GUARDED_BY(mMutex);
+ // The maximum count of allocated buffers.
+ size_t mBufferCount GUARDED_BY(mMutex){0};
+ // The timestamp for the next fetchGraphicBlock() call.
+ // Set when the previous fetchGraphicBlock() call timed out.
+ int64_t mNextFetchTimeUs GUARDED_BY(mMutex){0};
+};
+
+} // namespace android
+#endif // ANDROID_V4L2_CODEC2_PLUGIN_STORE_C2_VDA_POOLED_BLOCK_POOL_H
diff --git a/tests/c2_comp_intf/Android.bp b/tests/c2_comp_intf/Android.bp
new file mode 100644
index 0000000..bc7ae83
--- /dev/null
+++ b/tests/c2_comp_intf/Android.bp
@@ -0,0 +1,36 @@
+cc_test {
+ name: "C2VEACompIntf_test",
+ vendor: true,
+
+ srcs: [
+ "C2CompIntfTest.cpp",
+ "C2VEACompIntf_test.cpp",
+ ],
+
+ shared_libs: [
+ "libchrome",
+ "libcodec2",
+ "libcodec2_vndk",
+ "libcutils",
+ "liblog",
+ "libui",
+ "libutils",
+ "libv4l2_codec2_accel",
+ "libv4l2_codec2_components",
+ ],
+ include_dirs: [
+ "external/v4l2_codec2/accel",
+ "external/v4l2_codec2/common/include",
+ "external/v4l2_codec2/components/include",
+ "frameworks/av/media/codec2/components/base/include",
+ "frameworks/av/media/codec2/core/include",
+ "frameworks/av/media/codec2/vndk/include",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+ ldflags: ["-Wl,-Bsymbolic"],
+ clang: true,
+}
diff --git a/tests/c2_comp_intf/Android.mk b/tests/c2_comp_intf/Android.mk
deleted file mode 100644
index b91a598..0000000
--- a/tests/c2_comp_intf/Android.mk
+++ /dev/null
@@ -1,120 +0,0 @@
-# Build the unit tests.
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_MODULE := C2VDACompIntf_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- C2CompIntfTest.cpp \
- C2VDACompIntf_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libchrome \
- libcodec2 \
- libcodec2_vndk \
- libcutils \
- liblog \
- libutils \
- libv4l2_codec2 \
- libv4l2_codec2_accel \
-
-LOCAL_C_INCLUDES += \
- $(TOP)/external/v4l2_codec2/accel \
- $(TOP)/external/v4l2_codec2/common/include \
- $(TOP)/external/v4l2_codec2/include \
- $(TOP)/frameworks/av/media/codec2/components/base/include \
- $(TOP)/frameworks/av/media/codec2/core/include \
- $(TOP)/frameworks/av/media/codec2/vndk/include \
- $(TOP)/vendor/google_arc/libs/codec2/vdastore/include \
-
-LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
-
-LOCAL_LDFLAGS := -Wl,-Bsymbolic
-
-include $(BUILD_NATIVE_TEST)
-
-
-include $(CLEAR_VARS)
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_MODULE := C2VEACompIntf_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- C2CompIntfTest.cpp \
- C2VEACompIntf_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libchrome \
- libcodec2 \
- libcodec2_vndk \
- libcutils \
- liblog \
- libui \
- libutils \
- libv4l2_codec2 \
- libv4l2_codec2_accel \
-
-LOCAL_C_INCLUDES += \
- $(TOP)/external/v4l2_codec2/accel \
- $(TOP)/external/v4l2_codec2/common/include \
- $(TOP)/external/v4l2_codec2/include \
- $(TOP)/frameworks/av/media/codec2/components/base/include \
- $(TOP)/frameworks/av/media/codec2/core/include \
- $(TOP)/frameworks/av/media/codec2/vndk/include \
-
-LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
-
-LOCAL_LDFLAGS := -Wl,-Bsymbolic
-
-include $(BUILD_NATIVE_TEST)
-
-
-include $(CLEAR_VARS)
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_MODULE := C2VDAComponent_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- C2VDAComponent_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- android.hardware.media.bufferpool@2.0 \
- libchrome \
- libcodec2 \
- libcodec2_vndk \
- libcutils \
- libdatasource \
- liblog \
- libmedia \
- libstagefright \
- libstagefright_foundation \
- libutils \
- libv4l2_codec2 \
- libv4l2_codec2_accel \
-
-LOCAL_C_INCLUDES += \
- $(TOP)/external/v4l2_codec2/accel \
- $(TOP)/external/v4l2_codec2/common/include \
- $(TOP)/external/v4l2_codec2/include \
- $(TOP)/frameworks/av/media/codec2/components/base/include \
- $(TOP)/frameworks/av/media/codec2/core/include \
- $(TOP)/frameworks/av/media/codec2/vndk/include \
- $(TOP)/frameworks/av/media/libstagefright/include \
- $(TOP)/vendor/google_arc/libs/codec2/vdastore/include \
-
-# -Wno-unused-parameter is needed for libchrome/base codes
-LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter
-LOCAL_CLANG := true
-
-LOCAL_LDFLAGS := -Wl,-Bsymbolic
-
-include $(BUILD_NATIVE_TEST)
diff --git a/tests/c2_comp_intf/C2VDACompIntf_test.cpp b/tests/c2_comp_intf/C2VDACompIntf_test.cpp
deleted file mode 100644
index e7ec859..0000000
--- a/tests/c2_comp_intf/C2VDACompIntf_test.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2VDACompIntf_test"
-
-#include <C2CompIntfTest.h>
-
-#include <C2VDAAllocatorStore.h>
-#include <C2VDAComponent.h>
-
-#include <C2PlatformSupport.h>
-
-#include <gtest/gtest.h>
-#include <utils/Log.h>
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <limits>
-
-namespace android {
-
-const C2String testCompName = "c2.vda.avc.decoder";
-const c2_node_id_t testCompNodeId = 12345;
-
-const char* MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
-const char* MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
-
-const size_t kMaxInputBufferSize = 1048576;
-const C2Allocator::id_t kInputAllocators[] = {C2PlatformAllocatorStore::BLOB};
-const C2Allocator::id_t kOutputAllocators[] = {C2VDAAllocatorStore::V4L2_BUFFERPOOL};
-const C2Allocator::id_t kSurfaceAllocator = C2VDAAllocatorStore::V4L2_BUFFERQUEUE;
-const C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_GRAPHIC;
-
-class C2VDACompIntfTest : public C2CompIntfTest {
-protected:
- C2VDACompIntfTest() {
- mReflector = std::make_shared<C2ReflectorHelper>();
- mIntf = std::shared_ptr<C2ComponentInterface>(new SimpleInterface<C2VDAComponent::IntfImpl>(
- testCompName.c_str(), testCompNodeId,
- std::make_shared<C2VDAComponent::IntfImpl>(testCompName, mReflector)));
- }
- ~C2VDACompIntfTest() override {}
-};
-
-#define TRACED_FAILURE(func) \
- do { \
- SCOPED_TRACE(#func); \
- func; \
- if (::testing::Test::HasFatalFailure()) return; \
- } while (false)
-
-TEST_F(C2VDACompIntfTest, CreateInstance) {
- auto name = mIntf->getName();
- auto id = mIntf->getId();
- printf("name = %s\n", name.c_str());
- printf("node_id = %u\n", id);
- EXPECT_STREQ(name.c_str(), testCompName.c_str());
- EXPECT_EQ(id, testCompNodeId);
-}
-
-TEST_F(C2VDACompIntfTest, TestInputFormat) {
- C2StreamBufferTypeSetting::input expected(0u, C2BufferData::LINEAR);
- C2StreamBufferTypeSetting::input invalid(0u, C2BufferData::GRAPHIC);
- TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
-}
-
-TEST_F(C2VDACompIntfTest, TestOutputFormat) {
- C2StreamBufferTypeSetting::output expected(0u, C2BufferData::GRAPHIC);
- C2StreamBufferTypeSetting::output invalid(0u, C2BufferData::LINEAR);
- TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
-}
-
-TEST_F(C2VDACompIntfTest, TestInputPortMime) {
- std::shared_ptr<C2PortMediaTypeSetting::input> expected(
- AllocSharedString<C2PortMediaTypeSetting::input>(MEDIA_MIMETYPE_VIDEO_AVC));
- std::shared_ptr<C2PortMediaTypeSetting::input> invalid(
- AllocSharedString<C2PortMediaTypeSetting::input>(MEDIA_MIMETYPE_VIDEO_RAW));
- TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
-}
-
-TEST_F(C2VDACompIntfTest, TestOutputPortMime) {
- std::shared_ptr<C2PortMediaTypeSetting::output> expected(
- AllocSharedString<C2PortMediaTypeSetting::output>(MEDIA_MIMETYPE_VIDEO_RAW));
- std::shared_ptr<C2PortMediaTypeSetting::output> invalid(
- AllocSharedString<C2PortMediaTypeSetting::output>(MEDIA_MIMETYPE_VIDEO_AVC));
- TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
-}
-
-TEST_F(C2VDACompIntfTest, TestProfileLevel) {
- // Iterate all possible profile and level combination
- TRACED_FAILURE(testWritableProfileLevelParam<C2StreamProfileLevelInfo::input>());
-}
-
-TEST_F(C2VDACompIntfTest, TestVideoSize) {
- C2StreamPictureSizeInfo::output videoSize;
- videoSize.setStream(0); // only support single stream
- std::vector<C2FieldSupportedValuesQuery> widthC2FSV = {
- {C2ParamField(&videoSize, &C2StreamPictureSizeInfo::width),
- C2FieldSupportedValuesQuery::CURRENT},
- };
- ASSERT_EQ(C2_OK, mIntf->querySupportedValues_vb(widthC2FSV, C2_DONT_BLOCK));
- std::vector<C2FieldSupportedValuesQuery> heightC2FSV = {
- {C2ParamField(&videoSize, &C2StreamPictureSizeInfo::height),
- C2FieldSupportedValuesQuery::CURRENT},
- };
- ASSERT_EQ(C2_OK, mIntf->querySupportedValues_vb(heightC2FSV, C2_DONT_BLOCK));
- ASSERT_EQ(1u, widthC2FSV.size());
- ASSERT_EQ(C2_OK, widthC2FSV[0].status);
- ASSERT_EQ(C2FieldSupportedValues::RANGE, widthC2FSV[0].values.type);
- auto& widthFSVRange = widthC2FSV[0].values.range;
- int32_t widthMin = widthFSVRange.min.i32;
- int32_t widthMax = widthFSVRange.max.i32;
- int32_t widthStep = widthFSVRange.step.i32;
-
- ASSERT_EQ(1u, heightC2FSV.size());
- ASSERT_EQ(C2_OK, heightC2FSV[0].status);
- ASSERT_EQ(C2FieldSupportedValues::RANGE, heightC2FSV[0].values.type);
- auto& heightFSVRange = heightC2FSV[0].values.range;
- int32_t heightMin = heightFSVRange.min.i32;
- int32_t heightMax = heightFSVRange.max.i32;
- int32_t heightStep = heightFSVRange.step.i32;
-
- // test updating valid and invalid values
- TRACED_FAILURE(testWritableVideoSizeParam<C2StreamPictureSizeInfo::output>(
- widthMin, widthMax, widthStep, heightMin, heightMax, heightStep));
-}
-
-TEST_F(C2VDACompIntfTest, TestMaxInputSize) {
- // If output video size <= 1080p, max input size = kMaxInputBufferSize.
- C2StreamPictureSizeInfo::output videoSize(0u, 320, 240);
- std::vector<C2Param*> params{&videoSize};
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
- EXPECT_EQ(0u, failures.size());
-
- C2StreamMaxBufferSizeInfo::input expected(0u, kMaxInputBufferSize);
- C2StreamMaxBufferSizeInfo::input invalid(0u, kMaxInputBufferSize * 4);
- TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
-
- // If output video size > 1080p, max input size = kMaxInputBufferSize * 4.
- videoSize.width = 3840;
- videoSize.height = 2160;
- failures.clear();
- ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
- EXPECT_EQ(0u, failures.size());
-
- expected.value = kMaxInputBufferSize * 4;
- invalid.value = kMaxInputBufferSize;
- TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
-}
-
-TEST_F(C2VDACompIntfTest, TestInputAllocatorIds) {
- std::shared_ptr<C2PortAllocatorsTuning::input> expected(
- C2PortAllocatorsTuning::input::AllocShared(kInputAllocators));
- std::shared_ptr<C2PortAllocatorsTuning::input> invalid(
- C2PortAllocatorsTuning::input::AllocShared(kOutputAllocators));
- TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
-}
-
-TEST_F(C2VDACompIntfTest, TestOutputAllocatorIds) {
- std::shared_ptr<C2PortAllocatorsTuning::output> expected(
- C2PortAllocatorsTuning::output::AllocShared(kOutputAllocators));
- std::shared_ptr<C2PortAllocatorsTuning::output> invalid(
- C2PortAllocatorsTuning::output::AllocShared(kInputAllocators));
- TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
-}
-
-TEST_F(C2VDACompIntfTest, TestSurfaceAllocatorId) {
- C2PortSurfaceAllocatorTuning::output expected(kSurfaceAllocator);
- C2PortSurfaceAllocatorTuning::output invalid(kOutputAllocators[0]);
- TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
-}
-
-TEST_F(C2VDACompIntfTest, TestOutputBlockPoolIds) {
- std::vector<std::unique_ptr<C2Param>> heapParams;
- C2Param::Index index = C2PortBlockPoolsTuning::output::PARAM_TYPE;
-
- // Query the param and check the default value.
- ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
- ASSERT_EQ(1u, heapParams.size());
- C2BlockPool::local_id_t value = ((C2PortBlockPoolsTuning*)heapParams[0].get())->m.values[0];
- ASSERT_EQ(kDefaultOutputBlockPool, value);
-
- // Configure the param.
- C2BlockPool::local_id_t configBlockPools[] = {C2BlockPool::PLATFORM_START + 1};
- std::shared_ptr<C2PortBlockPoolsTuning::output> newParam(
- C2PortBlockPoolsTuning::output::AllocShared(configBlockPools));
-
- std::vector<C2Param*> params{newParam.get()};
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
- EXPECT_EQ(0u, failures.size());
-
- // Query the param again and check the value is as configured
- heapParams.clear();
- ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
- ASSERT_EQ(1u, heapParams.size());
- value = ((C2PortBlockPoolsTuning*)heapParams[0].get())->m.values[0];
- ASSERT_EQ(configBlockPools[0], value);
-}
-
-TEST_F(C2VDACompIntfTest, TestColorAspects) {
- // Combined color aspects takes values from both coded color aspects first. If unspecified, take
- // default color aspects alternatively.
- C2StreamColorAspectsTuning::output defaultAspects(
- 0u, C2Color::RANGE_FULL, C2Color::PRIMARIES_BT709, C2Color::TRANSFER_LINEAR,
- C2Color::MATRIX_BT709);
- C2StreamColorAspectsInfo::input codedAspects1(
- 0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED, C2Color::TRANSFER_SRGB,
- C2Color::MATRIX_UNSPECIFIED);
- C2StreamColorAspectsInfo::input codedAspects2(
- 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_BT2020,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_BT601);
-
- C2StreamColorAspectsInfo::output expectedMergedAspects1(
- 0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_BT709, C2Color::TRANSFER_SRGB,
- C2Color::MATRIX_BT709);
- C2StreamColorAspectsInfo::output expectedMergedAspects2(
- 0u, C2Color::RANGE_FULL, C2Color::PRIMARIES_BT2020, C2Color::TRANSFER_LINEAR,
- C2Color::MATRIX_BT601);
-
- // Test: default + coded 1 --> expected merged 1
- std::vector<C2Param*> params1{&defaultAspects, &codedAspects1};
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(C2_OK, mIntf->config_vb(params1, C2_DONT_BLOCK, &failures));
- EXPECT_EQ(0u, failures.size());
-
- C2StreamColorAspectsInfo::output mergedAspects;
- std::vector<C2Param*> stackParams{&mergedAspects};
- ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
- EXPECT_EQ(mergedAspects, expectedMergedAspects1);
-
- // Test: default + coded 2 --> expected merged 2
- std::vector<C2Param*> params2{&codedAspects2};
- failures.clear();
- ASSERT_EQ(C2_OK, mIntf->config_vb(params2, C2_DONT_BLOCK, &failures));
- EXPECT_EQ(0u, failures.size());
-
- ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
- EXPECT_EQ(mergedAspects, expectedMergedAspects2);
-}
-
-TEST_F(C2VDACompIntfTest, TestUnsupportedParam) {
- C2ComponentTimeStretchTuning unsupportedParam;
- std::vector<C2Param*> stackParams{&unsupportedParam};
- ASSERT_EQ(C2_BAD_INDEX, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
- EXPECT_EQ(0u, unsupportedParam.size()); // invalidated
-}
-
-TEST_F(C2VDACompIntfTest, ParamReflector) {
- dumpParamDescriptions();
-}
-} // namespace android
diff --git a/tests/c2_comp_intf/C2VDAComponent_test.cpp b/tests/c2_comp_intf/C2VDAComponent_test.cpp
deleted file mode 100644
index 9370b23..0000000
--- a/tests/c2_comp_intf/C2VDAComponent_test.cpp
+++ /dev/null
@@ -1,805 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2VDAComponent_test"
-
-#include <C2VDAAllocatorStore.h>
-#include <C2VDAComponent.h>
-
-#include <C2Buffer.h>
-#include <C2BufferPriv.h>
-#include <C2Component.h>
-#include <C2PlatformSupport.h>
-#include <C2Work.h>
-#include <SimpleC2Interface.h>
-
-#include <base/files/file.h>
-#include <base/files/file_path.h>
-#include <base/md5.h>
-#include <base/strings/string_piece.h>
-#include <base/strings/string_split.h>
-
-#include <datasource/DataSourceFactory.h>
-#include <gtest/gtest.h>
-#include <media/DataSource.h>
-#include <media/IMediaHTTPService.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaExtractorFactory.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <utils/Log.h>
-
-#include <fcntl.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <algorithm>
-#include <chrono>
-#include <thread>
-
-using namespace std::chrono_literals;
-
-namespace {
-
-const int kMD5StringLength = 32;
-
-// Read in golden MD5s for the sanity play-through check of this video
-void readGoldenMD5s(const std::string& videoFile, std::vector<std::string>* md5Strings) {
- base::FilePath filepath(videoFile + ".md5");
- std::string allMD5s;
- base::ReadFileToString(filepath, &allMD5s);
- *md5Strings = base::SplitString(allMD5s, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
- // Check these are legitimate MD5s.
- for (const std::string& md5String : *md5Strings) {
- // Ignore the empty string added by SplitString. Ignore comments.
- if (!md5String.length() || md5String.at(0) == '#') {
- continue;
- }
- if (static_cast<int>(md5String.length()) != kMD5StringLength) {
- fprintf(stderr, "MD5 length error: %s\n", md5String.c_str());
- }
- if (std::count_if(md5String.begin(), md5String.end(), isxdigit) != kMD5StringLength) {
- fprintf(stderr, "MD5 includes non-hex char: %s\n", md5String.c_str());
- }
- }
- if (md5Strings->empty()) {
- fprintf(stderr, "MD5 checksum file (%s) missing or empty.\n",
- filepath.MaybeAsASCII().c_str());
- }
-}
-
-// Get file path name of recording raw YUV
-base::FilePath getRecordOutputPath(const std::string& videoFile, int width, int height) {
- base::FilePath filepath(videoFile);
- filepath = filepath.RemoveExtension();
- std::string suffix = "_output_" + std::to_string(width) + "x" + std::to_string(height) + ".yuv";
- return base::FilePath(filepath.value() + suffix);
-}
-} // namespace
-
-namespace android {
-
-// Input video data parameters. This could be overwritten by user argument [-i].
-// The syntax of each column is:
-// filename:componentName:width:height:numFrames:numFragments
-// - |filename| is the file path to mp4 (h264) or webm (VP8/9) video.
-// - |componentName| specifies the name of decoder component.
-// - |width| and |height| are for video size (in pixels).
-// - |numFrames| is the number of picture frames.
-// - |numFragments| is the NALU (h264) or frame (VP8/9) count by MediaExtractor.
-const char* gTestVideoData = "bear.mp4:c2.vda.avc.decoder:640:360:82:84";
-//const char* gTestVideoData = "bear-vp8.webm:c2.vda.vp8.decoder:640:360:82:82";
-//const char* gTestVideoData = "bear-vp9.webm:c2.vda.vp9.decoder:320:240:82:82";
-
-// Record decoded output frames as raw YUV format.
-// The recorded file will be named as "<video_name>_output_<width>x<height>.yuv" under the same
-// folder of input video file.
-bool gRecordOutputYUV = false;
-
-const std::string kH264DecoderName = "c2.vda.avc.decoder";
-const std::string kVP8DecoderName = "c2.vda.vp8.decoder";
-const std::string kVP9DecoderName = "c2.vda.vp9.decoder";
-
-// Magic constants for indicating the timing of flush being called.
-enum FlushPoint : int { END_OF_STREAM_FLUSH = -3, MID_STREAM_FLUSH = -2, NO_FLUSH = -1 };
-
-struct TestVideoFile {
- enum class CodecType { UNKNOWN, H264, VP8, VP9 };
-
- std::string mFilename;
- std::string mComponentName;
- CodecType mCodec = CodecType::UNKNOWN;
- int mWidth = -1;
- int mHeight = -1;
- int mNumFrames = -1;
- int mNumFragments = -1;
- sp<IMediaSource> mData;
-};
-
-class C2VDALinearBuffer : public C2Buffer {
-public:
- explicit C2VDALinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
- : C2Buffer({block->share(block->offset(), block->size(), C2Fence())}) {}
-};
-
-class Listener;
-
-class C2VDAComponentTest : public ::testing::Test {
-public:
- void onWorkDone(std::weak_ptr<C2Component> component,
- std::list<std::unique_ptr<C2Work>> workItems);
- void onTripped(std::weak_ptr<C2Component> component,
- std::vector<std::shared_ptr<C2SettingResult>> settingResult);
- void onError(std::weak_ptr<C2Component> component, uint32_t errorCode);
-
-protected:
- C2VDAComponentTest();
- void SetUp() override;
-
- void parseTestVideoData(const char* testVideoData);
-
-protected:
- using ULock = std::unique_lock<std::mutex>;
-
- enum {
- kWorkCount = 16,
- };
-
- std::shared_ptr<Listener> mListener;
-
- // The array of output video frame counters which will be counted in listenerThread. The array
- // length equals to iteration time of stream play.
- std::vector<int> mOutputFrameCounts;
- // The array of work counters returned from component which will be counted in listenerThread.
- // The array length equals to iteration time of stream play.
- std::vector<int> mFinishedWorkCounts;
- // The array of output frame MD5Sum which will be computed in listenerThread. The array length
- // equals to iteration time of stream play.
- std::vector<std::string> mMD5Strings;
-
- // Mutex for |mWorkQueue| among main and listenerThread.
- std::mutex mQueueLock;
- std::condition_variable mQueueCondition;
- std::list<std::unique_ptr<C2Work>> mWorkQueue;
-
- // Mutex for |mProcessedWork| among main and listenerThread.
- std::mutex mProcessedLock;
- std::condition_variable mProcessedCondition;
- std::list<std::unique_ptr<C2Work>> mProcessedWork;
-
- // Mutex for |mFlushDone| among main and listenerThread.
- std::mutex mFlushDoneLock;
- std::condition_variable mFlushDoneCondition;
- bool mFlushDone;
-
- std::unique_ptr<TestVideoFile> mTestVideoFile;
-};
-
-class Listener : public C2Component::Listener {
-public:
- explicit Listener(C2VDAComponentTest* thiz) : mThis(thiz) {}
- virtual ~Listener() = default;
-
- virtual void onWorkDone_nb(std::weak_ptr<C2Component> component,
- std::list<std::unique_ptr<C2Work>> workItems) override {
- mThis->onWorkDone(component, std::move(workItems));
- }
-
- virtual void onTripped_nb(
- std::weak_ptr<C2Component> component,
- std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
- mThis->onTripped(component, settingResult);
- }
-
- virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
- mThis->onError(component, errorCode);
- }
-
-private:
- C2VDAComponentTest* const mThis;
-};
-
-C2VDAComponentTest::C2VDAComponentTest() : mListener(new Listener(this)) {}
-
-void C2VDAComponentTest::onWorkDone(std::weak_ptr<C2Component> component,
- std::list<std::unique_ptr<C2Work>> workItems) {
- (void)component;
- ULock l(mProcessedLock);
- for (auto& item : workItems) {
- mProcessedWork.emplace_back(std::move(item));
- }
- mProcessedCondition.notify_all();
-}
-
-void C2VDAComponentTest::onTripped(std::weak_ptr<C2Component> component,
- std::vector<std::shared_ptr<C2SettingResult>> settingResult) {
- (void)component;
- (void)settingResult;
- // no-ops
-}
-
-void C2VDAComponentTest::onError(std::weak_ptr<C2Component> component, uint32_t errorCode) {
- (void)component;
- // fail the test
- FAIL() << "Get error code from component: " << errorCode;
-}
-
-void C2VDAComponentTest::SetUp() {
- parseTestVideoData(gTestVideoData);
-
- mWorkQueue.clear();
- for (int i = 0; i < kWorkCount; ++i) {
- mWorkQueue.emplace_back(new C2Work);
- }
- mProcessedWork.clear();
- mFlushDone = false;
-}
-
-static bool getMediaSourceFromFile(const std::string& filename,
- const TestVideoFile::CodecType codec, sp<IMediaSource>* source) {
- source->clear();
-
- sp<DataSource> dataSource = DataSourceFactory::getInstance()->CreateFromURI(
- nullptr /* httpService */, filename.c_str());
-
- if (dataSource == nullptr) {
- fprintf(stderr, "Unable to create data source.\n");
- return false;
- }
-
- sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
- if (extractor == nullptr) {
- fprintf(stderr, "could not create extractor.\n");
- return false;
- }
-
- std::string expectedMime;
- if (codec == TestVideoFile::CodecType::H264) {
- expectedMime = "video/avc";
- } else if (codec == TestVideoFile::CodecType::VP8) {
- expectedMime = "video/x-vnd.on2.vp8";
- } else if (codec == TestVideoFile::CodecType::VP9) {
- expectedMime = "video/x-vnd.on2.vp9";
- } else {
- fprintf(stderr, "unsupported codec type.\n");
- return false;
- }
-
- for (size_t i = 0, numTracks = extractor->countTracks(); i < numTracks; ++i) {
- sp<MetaData> meta =
- extractor->getTrackMetaData(i, MediaExtractor::kIncludeExtensiveMetaData);
- if (meta == nullptr) {
- continue;
- }
- const char* mime;
- meta->findCString(kKeyMIMEType, &mime);
- if (!strcasecmp(mime, expectedMime.c_str())) {
- *source = extractor->getTrack(i);
- if (*source == nullptr) {
- fprintf(stderr, "It's NULL track for track %zu.\n", i);
- return false;
- }
- return true;
- }
- }
- fprintf(stderr, "No track found.\n");
- return false;
-}
-
-void C2VDAComponentTest::parseTestVideoData(const char* testVideoData) {
- ALOGV("videoDataStr: %s", testVideoData);
- mTestVideoFile = std::make_unique<TestVideoFile>();
-
- auto splitString = [](const std::string& input, const char delim) {
- std::vector<std::string> splits;
- auto beg = input.begin();
- while (beg != input.end()) {
- auto pos = std::find(beg, input.end(), delim);
- splits.emplace_back(beg, pos);
- beg = pos != input.end() ? pos + 1 : pos;
- }
- return splits;
- };
- auto tokens = splitString(testVideoData, ':');
- ASSERT_EQ(tokens.size(), 6u);
- mTestVideoFile->mFilename = tokens[0];
- ASSERT_GT(mTestVideoFile->mFilename.length(), 0u);
-
- mTestVideoFile->mComponentName = tokens[1];
- if (mTestVideoFile->mComponentName == kH264DecoderName) {
- mTestVideoFile->mCodec = TestVideoFile::CodecType::H264;
- } else if (mTestVideoFile->mComponentName == kVP8DecoderName) {
- mTestVideoFile->mCodec = TestVideoFile::CodecType::VP8;
- } else if (mTestVideoFile->mComponentName == kVP9DecoderName) {
- mTestVideoFile->mCodec = TestVideoFile::CodecType::VP9;
- }
- ASSERT_NE(mTestVideoFile->mCodec, TestVideoFile::CodecType::UNKNOWN);
-
- mTestVideoFile->mWidth = std::stoi(tokens[2]);
- mTestVideoFile->mHeight = std::stoi(tokens[3]);
- mTestVideoFile->mNumFrames = std::stoi(tokens[4]);
- mTestVideoFile->mNumFragments = std::stoi(tokens[5]);
-
- ALOGV("mTestVideoFile: %s, %s, %d, %d, %d, %d", mTestVideoFile->mFilename.c_str(),
- mTestVideoFile->mComponentName.c_str(), mTestVideoFile->mWidth, mTestVideoFile->mHeight,
- mTestVideoFile->mNumFrames, mTestVideoFile->mNumFragments);
-}
-
-static void getFrameStringPieces(const C2GraphicView& constGraphicView,
- std::vector<::base::StringPiece>* framePieces) {
- const uint8_t* const* constData = constGraphicView.data();
- ASSERT_NE(constData, nullptr);
- const C2PlanarLayout& layout = constGraphicView.layout();
- ASSERT_EQ(layout.type, C2PlanarLayout::TYPE_YUV) << "Only support YUV plane format";
-
- framePieces->clear();
- framePieces->push_back(
- ::base::StringPiece(reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_Y]),
- constGraphicView.width() * constGraphicView.height()));
- if (layout.planes[C2PlanarLayout::PLANE_U].colInc == 2) { // semi-planar mode
- framePieces->push_back(::base::StringPiece(
- reinterpret_cast<const char*>(std::min(constData[C2PlanarLayout::PLANE_U],
- constData[C2PlanarLayout::PLANE_V])),
- constGraphicView.width() * constGraphicView.height() / 2));
- } else {
- framePieces->push_back(::base::StringPiece(
- reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_U]),
- constGraphicView.width() * constGraphicView.height() / 4));
- framePieces->push_back(::base::StringPiece(
- reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_V]),
- constGraphicView.width() * constGraphicView.height() / 4));
- }
-}
-
-// Test parameters:
-// - Flush after work index. If this value is not negative, test will signal flush to component
-// after queueing the work frame index equals to this value in the first iteration. Negative
-// values may be magic constants, please refer to FlushPoint enum.
-// - Number of play through. This value specifies the iteration time for playing entire video. If
-// |mFlushAfterWorkIndex| is not negative, the first iteration will perform flush, then repeat
-// times as this value for playing entire video.
-// - Sanity check. If this is true, decoded content sanity check is enabled. Test will compute the
-// MD5Sum for output frame data for a play-though iteration (not flushed), and compare to golden
-// MD5Sums which should be stored in the file |video_filename|.md5
-// - Use dummy EOS work. If this is true, test will queue a dummy work with end-of-stream flag in
-// the end of all input works. On the contrary, test will call drain_nb() to component.
-class C2VDAComponentParamTest
- : public C2VDAComponentTest,
- public ::testing::WithParamInterface<std::tuple<int, uint32_t, bool, bool>> {
-protected:
- int mFlushAfterWorkIndex;
- uint32_t mNumberOfPlaythrough;
- bool mSanityCheck;
- bool mUseDummyEOSWork;
-};
-
-TEST_P(C2VDAComponentParamTest, SimpleDecodeTest) {
- mFlushAfterWorkIndex = std::get<0>(GetParam());
- if (mFlushAfterWorkIndex == FlushPoint::MID_STREAM_FLUSH) {
- mFlushAfterWorkIndex = mTestVideoFile->mNumFragments / 2;
- } else if (mFlushAfterWorkIndex == FlushPoint::END_OF_STREAM_FLUSH) {
- mFlushAfterWorkIndex = mTestVideoFile->mNumFragments - 1;
- }
- ASSERT_LT(mFlushAfterWorkIndex, mTestVideoFile->mNumFragments);
- mNumberOfPlaythrough = std::get<1>(GetParam());
-
- if (mFlushAfterWorkIndex >= 0) {
- mNumberOfPlaythrough++; // add the first iteration for perform mid-stream flushing.
- }
-
- mSanityCheck = std::get<2>(GetParam());
- mUseDummyEOSWork = std::get<3>(GetParam());
-
- // Reset counters and determine the expected answers for all iterations.
- mOutputFrameCounts.resize(mNumberOfPlaythrough, 0);
- mFinishedWorkCounts.resize(mNumberOfPlaythrough, 0);
- mMD5Strings.resize(mNumberOfPlaythrough);
- std::vector<int> expectedOutputFrameCounts(mNumberOfPlaythrough, mTestVideoFile->mNumFrames);
- auto expectedWorkCount = mTestVideoFile->mNumFragments;
- if (mUseDummyEOSWork) {
- expectedWorkCount += 1; // plus one dummy EOS work
- }
- std::vector<int> expectedFinishedWorkCounts(mNumberOfPlaythrough, expectedWorkCount);
- if (mFlushAfterWorkIndex >= 0) {
- // First iteration performs the mid-stream flushing.
- expectedOutputFrameCounts[0] = mFlushAfterWorkIndex + 1;
- expectedFinishedWorkCounts[0] = mFlushAfterWorkIndex + 1;
- }
-
- std::shared_ptr<C2Component> component(std::make_shared<C2VDAComponent>(
- mTestVideoFile->mComponentName, 0, std::make_shared<C2ReflectorHelper>()));
-
- // Get input allocator & block pool.
- std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
- std::shared_ptr<C2Allocator> inputAllocator;
- std::shared_ptr<C2BlockPool> inputBlockPool;
-
- CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &inputAllocator), C2_OK);
- inputBlockPool = std::make_shared<C2BasicLinearBlockPool>(inputAllocator);
-
- // Setup output block pool (bufferpool-backed).
- std::vector<std::unique_ptr<C2Param>> params;
- ASSERT_EQ(component->intf()->query_vb({}, {C2PortAllocatorsTuning::output::PARAM_TYPE},
- C2_DONT_BLOCK, ¶ms),
- C2_OK);
- ASSERT_EQ(params.size(), 1u);
- C2PortAllocatorsTuning::output* outputAllocators =
- C2PortAllocatorsTuning::output::From(params[0].get());
- C2Allocator::id_t outputAllocatorId = outputAllocators->m.values[0];
- ALOGV("output allocator ID = %u", outputAllocatorId);
-
- // Check bufferpool-backed block pool is used.
- ASSERT_EQ(outputAllocatorId, C2VDAAllocatorStore::V4L2_BUFFERPOOL);
-
- std::shared_ptr<C2BlockPool> outputBlockPool;
- ASSERT_EQ(CreateCodec2BlockPool(outputAllocatorId, component, &outputBlockPool), C2_OK);
- C2BlockPool::local_id_t outputPoolId = outputBlockPool->getLocalId();
- ALOGV("output block pool ID = %" PRIu64 "", outputPoolId);
-
- std::unique_ptr<C2PortBlockPoolsTuning::output> poolIdsTuning =
- C2PortBlockPoolsTuning::output::AllocUnique({outputPoolId});
-
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(component->intf()->config_vb({poolIdsTuning.get()}, C2_MAY_BLOCK, &failures), C2_OK);
-
- // Set listener and start.
- ASSERT_EQ(component->setListener_vb(mListener, C2_DONT_BLOCK), C2_OK);
- ASSERT_EQ(component->start(), C2_OK);
-
- std::atomic_bool running(true);
- std::thread listenerThread([this, &running]() {
- uint32_t iteration = 0;
- ::base::MD5Context md5Ctx;
- ::base::MD5Init(&md5Ctx);
- ::base::File recordFile;
- if (gRecordOutputYUV) {
- auto recordFilePath = getRecordOutputPath(
- mTestVideoFile->mFilename, mTestVideoFile->mWidth, mTestVideoFile->mHeight);
- fprintf(stdout, "record output file: %s\n", recordFilePath.value().c_str());
- recordFile = ::base::File(recordFilePath,
- ::base::File::FLAG_OPEN_ALWAYS | ::base::File::FLAG_WRITE);
- ASSERT_TRUE(recordFile.IsValid());
- }
- while (running) {
- std::unique_ptr<C2Work> work;
- {
- ULock l(mProcessedLock);
- if (mProcessedWork.empty()) {
- mProcessedCondition.wait_for(l, 100ms);
- if (mProcessedWork.empty()) {
- continue;
- }
- }
- work = std::move(mProcessedWork.front());
- mProcessedWork.pop_front();
- }
- mFinishedWorkCounts[iteration]++;
- ALOGV("Output: frame index: %llu result: %d flags: 0x%x buffers: %zu",
- work->input.ordinal.frameIndex.peekull(), work->result,
- work->worklets.front()->output.flags,
- work->worklets.front()->output.buffers.size());
-
- // Don't check output buffer and flags for flushed works.
- bool flushed = work->result == C2_NOT_FOUND;
-
- ASSERT_EQ(work->worklets.size(), 1u);
- if (!flushed && work->worklets.front()->output.buffers.size() == 1u) {
- std::shared_ptr<C2Buffer> output = work->worklets.front()->output.buffers[0];
- C2ConstGraphicBlock graphicBlock = output->data().graphicBlocks().front();
-
- // check graphic buffer size (coded size) is not less than given video size.
- ASSERT_LE(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.width()));
- ASSERT_LE(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.height()));
-
- // check visible rect equals to given video size.
- ASSERT_EQ(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.crop().width));
- ASSERT_EQ(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.crop().height));
- ASSERT_EQ(0u, graphicBlock.crop().left);
- ASSERT_EQ(0u, graphicBlock.crop().top);
-
- // Intended behavior for Intel libva driver (crbug.com/148546):
- // The 5ms latency is laid here to make sure surface content is finished processed
- // processed by libva.
- std::this_thread::sleep_for(std::chrono::milliseconds(5));
-
- const C2GraphicView& constGraphicView = graphicBlock.map().get();
- ASSERT_EQ(C2_OK, constGraphicView.error());
- std::vector<::base::StringPiece> framePieces;
- getFrameStringPieces(constGraphicView, &framePieces);
- ASSERT_FALSE(framePieces.empty());
- if (mSanityCheck) {
- for (const auto& piece : framePieces) {
- ::base::MD5Update(&md5Ctx, piece);
- }
- }
- if (gRecordOutputYUV) {
- for (const auto& piece : framePieces) {
- ASSERT_EQ(static_cast<int>(piece.length()),
- recordFile.WriteAtCurrentPos(piece.data(), piece.length()))
- << "Failed to write file for yuv recording...";
- }
- }
-
- work->worklets.front()->output.buffers.clear();
- mOutputFrameCounts[iteration]++;
- }
-
- bool iteration_end = !flushed && (work->worklets.front()->output.flags &
- C2FrameData::FLAG_END_OF_STREAM);
-
- // input buffer should be reset in component side.
- ASSERT_EQ(work->input.buffers.size(), 1u);
- ASSERT_TRUE(work->input.buffers.front() == nullptr);
- work->worklets.clear();
- work->workletsProcessed = 0;
-
- if (iteration == 0 && work->input.ordinal.frameIndex.peeku() ==
- static_cast<uint64_t>(mFlushAfterWorkIndex)) {
- ULock l(mFlushDoneLock);
- mFlushDone = true;
- mFlushDoneCondition.notify_all();
- iteration_end = true;
- }
-
- ULock l(mQueueLock);
- mWorkQueue.emplace_back(std::move(work));
- mQueueCondition.notify_all();
-
- if (iteration_end) {
- // record md5sum
- ::base::MD5Digest digest;
- ::base::MD5Final(&digest, &md5Ctx);
- mMD5Strings[iteration] = ::base::MD5DigestToBase16(digest);
- ::base::MD5Init(&md5Ctx);
-
- iteration++;
- if (iteration == mNumberOfPlaythrough) {
- running.store(false); // stop the thread
- }
- }
- }
- });
-
- for (uint32_t iteration = 0; iteration < mNumberOfPlaythrough; ++iteration) {
- ASSERT_TRUE(getMediaSourceFromFile(mTestVideoFile->mFilename, mTestVideoFile->mCodec,
- &mTestVideoFile->mData));
-
- std::deque<sp<ABuffer>> csds;
- if (mTestVideoFile->mCodec == TestVideoFile::CodecType::H264) {
- // Get csd buffers for h264.
- sp<AMessage> format;
- (void)convertMetaDataToMessage(mTestVideoFile->mData->getFormat(), &format);
- csds.resize(2);
- format->findBuffer("csd-0", &csds[0]);
- format->findBuffer("csd-1", &csds[1]);
- ASSERT_TRUE(csds[0] != nullptr && csds[1] != nullptr);
- }
-
- ASSERT_EQ(mTestVideoFile->mData->start(), OK);
-
- int numWorks = 0;
- while (true) {
- size_t size = 0u;
- void* data = nullptr;
- int64_t timestamp = 0u;
- MediaBufferBase* buffer = nullptr;
- sp<ABuffer> csd;
- C2FrameData::flags_t inputFlag = static_cast<C2FrameData::flags_t>(0);
- bool queueDummyEOSWork = false;
- if (!csds.empty()) {
- csd = std::move(csds.front());
- csds.pop_front();
- size = csd->size();
- data = csd->data();
- inputFlag = C2FrameData::FLAG_CODEC_CONFIG;
- } else {
- if (mTestVideoFile->mData->read(&buffer) != OK) {
- ASSERT_TRUE(buffer == nullptr);
- if (mUseDummyEOSWork) {
- ALOGV("Meet end of stream. Put a dummy EOS work.");
- queueDummyEOSWork = true;
- } else {
- ALOGV("Meet end of stream. Now drain the component.");
- ASSERT_EQ(component->drain_nb(C2Component::DRAIN_COMPONENT_WITH_EOS),
- C2_OK);
- break;
- }
- // TODO(johnylin): add test with drain with DRAIN_COMPONENT_NO_EOS when we know
- // the actual use case of it.
- } else {
- MetaDataBase& meta = buffer->meta_data();
- ASSERT_TRUE(meta.findInt64(kKeyTime, ×tamp));
- size = buffer->size();
- data = buffer->data();
- }
- }
-
- std::unique_ptr<C2Work> work;
- while (!work) {
- ULock l(mQueueLock);
- if (!mWorkQueue.empty()) {
- work = std::move(mWorkQueue.front());
- mWorkQueue.pop_front();
- } else {
- mQueueCondition.wait_for(l, 100ms);
- }
- }
-
- work->input.flags = inputFlag;
- work->input.ordinal.frameIndex = static_cast<uint64_t>(numWorks);
- work->input.buffers.clear();
-
- std::shared_ptr<C2LinearBlock> block;
- if (queueDummyEOSWork) {
- // Create the dummy EOS work with no input buffer inside.
- work->input.flags = static_cast<C2FrameData::flags_t>(
- work->input.flags | C2FrameData::FLAG_END_OF_STREAM);
- work->input.ordinal.timestamp = 0; // timestamp is invalid for dummy EOS work
- ALOGV("Input: (Dummy EOS) id: %llu", work->input.ordinal.frameIndex.peekull());
- } else {
- work->input.ordinal.timestamp = static_cast<uint64_t>(timestamp);
-
- // Allocate an input buffer with data size.
- inputBlockPool->fetchLinearBlock(
- size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
- C2WriteView view = block->map().get();
- ASSERT_EQ(view.error(), C2_OK);
- memcpy(view.base(), data, size);
- work->input.buffers.emplace_back(new C2VDALinearBuffer(std::move(block)));
- ALOGV("Input: bitstream id: %llu timestamp: %llu size: %zu",
- work->input.ordinal.frameIndex.peekull(),
- work->input.ordinal.timestamp.peekull(), size);
- }
-
- work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
-
- std::list<std::unique_ptr<C2Work>> items;
- items.push_back(std::move(work));
-
- // Queue the work.
- ASSERT_EQ(component->queue_nb(&items), C2_OK);
- numWorks++;
-
- if (buffer) {
- buffer->release();
- }
-
- if (iteration == 0 && numWorks == mFlushAfterWorkIndex + 1) {
- // Perform flush.
- // Note: C2VDAComponent does not return work via |flushedWork|.
- ASSERT_EQ(component->flush_sm(C2Component::FLUSH_COMPONENT,
- nullptr /* flushedWork */),
- C2_OK);
- break;
- }
-
- if (queueDummyEOSWork) {
- break;
- }
- }
-
- if (iteration == 0 && mFlushAfterWorkIndex >= 0) {
- // Wait here until client get all flushed works.
- while (true) {
- ULock l(mFlushDoneLock);
- if (mFlushDone) {
- break;
- }
- mFlushDoneCondition.wait_for(l, 100ms);
- }
- ALOGV("Got flush done signal");
- EXPECT_EQ(numWorks, mFlushAfterWorkIndex + 1);
- } else {
- EXPECT_EQ(numWorks, expectedWorkCount);
- }
- ASSERT_EQ(mTestVideoFile->mData->stop(), OK);
- }
-
- listenerThread.join();
- ASSERT_EQ(running, false);
- ASSERT_EQ(component->stop(), C2_OK);
-
- // Finally check the decoding want as expected.
- for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
- if (mFlushAfterWorkIndex >= 0 && i == 0) {
- EXPECT_LE(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
- } else {
- EXPECT_EQ(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
- }
- EXPECT_EQ(mFinishedWorkCounts[i], expectedFinishedWorkCounts[i]) << "At iteration: " << i;
- }
-
- if (mSanityCheck) {
- std::vector<std::string> goldenMD5s;
- readGoldenMD5s(mTestVideoFile->mFilename, &goldenMD5s);
- for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
- if (mFlushAfterWorkIndex >= 0 && i == 0) {
- continue; // do not compare the iteration with flushing
- }
- bool matched = std::find(goldenMD5s.begin(), goldenMD5s.end(), mMD5Strings[i]) !=
- goldenMD5s.end();
- EXPECT_TRUE(matched) << "Unknown MD5: " << mMD5Strings[i] << " at iter: " << i;
- }
- }
-}
-
-// Play input video once, end by draining.
-INSTANTIATE_TEST_CASE_P(SinglePlaythroughTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
- 1u, false, false)));
-// Play input video once, end by dummy EOS work.
-INSTANTIATE_TEST_CASE_P(DummyEOSWorkTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
- 1u, false, true)));
-
-// Play 5 times of input video, and check sanity by MD5Sum.
-INSTANTIATE_TEST_CASE_P(MultiplePlaythroughSanityTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
- 5u, true, false)));
-
-// Test mid-stream flush then play once entirely.
-INSTANTIATE_TEST_CASE_P(FlushPlaythroughTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(40, 1u, true, false)));
-
-// Test mid-stream flush then stop.
-INSTANTIATE_TEST_CASE_P(FlushStopTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(
- static_cast<int>(FlushPoint::MID_STREAM_FLUSH), 0u, false, false)));
-
-// Test early flush (after a few works) then stop.
-INSTANTIATE_TEST_CASE_P(EarlyFlushStopTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(0, 0u, false, false),
- std::make_tuple(1, 0u, false, false),
- std::make_tuple(2, 0u, false, false),
- std::make_tuple(3, 0u, false, false)));
-
-// Test end-of-stream flush then stop.
-INSTANTIATE_TEST_CASE_P(
- EndOfStreamFlushStopTest, C2VDAComponentParamTest,
- ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::END_OF_STREAM_FLUSH), 0u,
- false, false)));
-
-} // namespace android
-
-static void usage(const char* me) {
- fprintf(stderr, "usage: %s [-i test_video_data] [-r(ecord YUV)] [gtest options]\n", me);
-}
-
-int main(int argc, char** argv) {
- ::testing::InitGoogleTest(&argc, argv);
-
- int res;
- while ((res = getopt(argc, argv, "i:r")) >= 0) {
- switch (res) {
- case 'i': {
- android::gTestVideoData = optarg;
- break;
- }
- case 'r': {
- android::gRecordOutputYUV = true;
- break;
- }
- default: {
- usage(argv[0]);
- exit(1);
- break;
- }
- }
- }
-
- return RUN_ALL_TESTS();
-}
diff --git a/tests/c2_comp_intf/C2VEACompIntf_test.cpp b/tests/c2_comp_intf/C2VEACompIntf_test.cpp
index db0eba9..4a07b94 100644
--- a/tests/c2_comp_intf/C2VEACompIntf_test.cpp
+++ b/tests/c2_comp_intf/C2VEACompIntf_test.cpp
@@ -16,8 +16,8 @@
#include <gtest/gtest.h>
#include <utils/Log.h>
-#include <C2EncoderInterface.h>
-#include <v4l2_device.h>
+#include <v4l2_codec2/components/V4L2EncodeComponent.h>
+#include <v4l2_codec2/components/V4L2EncodeInterface.h>
namespace android {
@@ -31,42 +31,13 @@
constexpr C2Allocator::id_t kOutputAllocators[] = {C2PlatformAllocatorStore::BLOB};
constexpr C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_LINEAR;
-class C2V4L2EncoderInterface: public C2EncoderInterface {
-public:
- C2V4L2EncoderInterface(const C2String& name, const std::shared_ptr<C2ReflectorHelper>& helper,
- media::V4L2Device* device) :
- C2EncoderInterface(helper) {
- ALOGV("%s(%s)", __func__, name.c_str());
- ALOG_ASSERT(device);
-
- std::vector<VideoEncodeProfile> supportedProfiles;
- for (const auto& supportedProfile : device->GetSupportedEncodeProfiles()) {
- supportedProfiles.emplace_back(
- VideoEncodeProfile { .mProfile = supportedProfile.profile, .mMaxResolution =
- supportedProfile.max_resolution, .mMaxFramerateNumerator =
- supportedProfile.max_framerate_numerator, .mMaxFramerateDenominator =
- supportedProfile.max_framerate_denominator });
- }
-
- Initialize(name, supportedProfiles);
- mInitStatus = C2_OK;
- }
-
- base::Optional<media::VideoCodec> getCodecFromComponentName(const std::string& /*name*/) const {
- return media::VideoCodec::kCodecH264;
- }
-};
-
class C2VEACompIntfTest: public C2CompIntfTest {
protected:
C2VEACompIntfTest() {
mReflector = std::make_shared<C2ReflectorHelper>();
- scoped_refptr<media::V4L2Device> device = media::V4L2Device::Create();
- auto componentInterface = std::make_shared<C2V4L2EncoderInterface>(
- testCompName, mReflector, device.get());
- mIntf = std::shared_ptr<C2ComponentInterface>(
- new SimpleInterface<C2V4L2EncoderInterface>(
- testCompName, testCompNodeId, componentInterface));
+ auto componentInterface = std::make_shared<V4L2EncodeInterface>(testCompName, mReflector);
+ mIntf = std::shared_ptr<C2ComponentInterface>(new SimpleInterface<V4L2EncodeInterface>(
+ testCompName, testCompNodeId, componentInterface));
}
~C2VEACompIntfTest() override {
}
diff --git a/tests/c2_e2e_test/AndroidManifest.xml b/tests/c2_e2e_test/AndroidManifest.xml
index 3898348..e167cb5 100644
--- a/tests/c2_e2e_test/AndroidManifest.xml
+++ b/tests/c2_e2e_test/AndroidManifest.xml
@@ -19,7 +19,8 @@
android:allowBackup="false"
android:label="@string/app_name">
<activity android:name=".E2eTestActivity"
- android:launchMode="singleTop">
+ android:launchMode="singleTop"
+ android:theme="@android:style/Theme.NoTitleBar.Fullscreen">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
diff --git a/tests/c2_e2e_test/jni/mediacodec_decoder.cpp b/tests/c2_e2e_test/jni/mediacodec_decoder.cpp
index c263eec..1b835c0 100644
--- a/tests/c2_e2e_test/jni/mediacodec_decoder.cpp
+++ b/tests/c2_e2e_test/jni/mediacodec_decoder.cpp
@@ -327,7 +327,7 @@
// output buffers from |codec_|.
uint64_t timestamp_us = 0;
- ALOGD("queueInputBuffer(index=%zu, offset=0, size=%zu, time=%" PRIu64 ", flags=%u) #%d", index,
+ ALOGV("queueInputBuffer(index=%zu, offset=0, size=%zu, time=%" PRIu64 ", flags=%u) #%d", index,
fragment->data.size(), timestamp_us, input_flag, input_fragment_index_);
media_status_t status = AMediaCodec_queueInputBuffer(
codec_, index, 0 /* offset */, fragment->data.size(), timestamp_us, input_flag);
@@ -344,7 +344,7 @@
// robustness.
uint64_t timestamp_us = 0;
- ALOGD("queueInputBuffer(index=%zu) EOS", index);
+ ALOGV("queueInputBuffer(index=%zu) EOS", index);
media_status_t status =
AMediaCodec_queueInputBuffer(codec_, index, 0 /* offset */, 0 /* size */, timestamp_us,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
@@ -368,7 +368,7 @@
}
received_outputs_++;
- ALOGD("ReceiveOutputBuffer(index=%zu, size=%d, flags=%u) #%d", index, info.size, info.flags,
+ ALOGV("ReceiveOutputBuffer(index=%zu, size=%d, flags=%u) #%d", index, info.size, info.flags,
received_outputs_);
// Do not callback for dummy EOS output (info.size == 0)
diff --git a/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java b/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java
index e423fc2..140ff82 100644
--- a/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java
+++ b/tests/c2_e2e_test/src/org/chromium/c2/test/E2eTestActivity.java
@@ -29,6 +29,8 @@
private SurfaceView mSurfaceView;
private Size mSize;
+ private boolean mSurfaceCreated = false;
+ private boolean mCanStartTest = false;
private Size mExpectedSize;
private CountDownLatch mLatch;
@@ -44,6 +46,8 @@
mSurfaceView = (SurfaceView) findViewById(R.id.surface);
mSurfaceView.getHolder().addCallback(this);
+
+ mCanStartTest = !getIntent().getBooleanExtra("delay-start", false);
}
@Override
@@ -55,6 +59,14 @@
@Override
public void surfaceCreated(SurfaceHolder holder) {
+ mSurfaceCreated = true;
+ maybeStartTest();
+ }
+
+ private void maybeStartTest() {
+ if (!mSurfaceCreated || !mCanStartTest) {
+ return;
+ }
boolean encode = getIntent().getBooleanExtra("do-encode", false);
String[] testArgs =
getIntent().getStringArrayExtra("test-args") != null
@@ -71,7 +83,7 @@
encode,
testArgs,
testArgs.length,
- holder.getSurface(),
+ mSurfaceView.getHolder().getSurface(),
logFile);
Log.i(TAG, "Test returned result code " + res);
@@ -95,6 +107,12 @@
@Override
public void onNewIntent(Intent intent) {
+ if (intent.getAction().equals("org.chromium.c2.test.START_TEST")) {
+ mCanStartTest = true;
+ maybeStartTest();
+ return;
+ }
+
synchronized (this) {
if (mDecoderPtr != 0) {
stopDecoderLoop(mDecoderPtr);