Don't continue with an invalid iterator
am: cf6899a77b

Change-Id: Id986adef64834545a26affe4d68d5bd80fd76359
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..b043d46
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,33 @@
+---
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: true
+BinPackArguments: true
+BinPackParameters: true
+CommentPragmas: NOLINT:.*
+ContinuationIndentWidth: 8
+DerivePointerAlignment: false
+IndentWidth: 4
+PointerAlignment: Left
+TabWidth: 4
+
+# Deviations from the above file:
+# "Don't indent the section label"
+AccessModifierOffset: -4
+# "Each line of text in your code should be at most 100 columns long."
+ColumnLimit: 100
+# "Constructor initializer lists can be all on one line or with subsequent
+# lines indented eight spaces.". clang-format does not support having the colon
+# on the same line as the constructor function name, so this is the best
+# approximation of that rule, which makes all entries in the list (except the
+# first one) have an eight space indentation.
+ConstructorInitializerIndentWidth: 6
+# There is nothing in go/droidcppstyle about case labels, but there seems to be
+# more code that does not indent the case labels in frameworks/base.
+IndentCaseLabels: false
+# There have been some bugs in which subsequent formatting operations introduce
+# weird comment jumps.
+ReflowComments: false
+# Android does support C++11 now.
+Standard: Cpp11
diff --git a/Android.mk b/Android.mk
index 3208f8c..32656ff 100644
--- a/Android.mk
+++ b/Android.mk
@@ -1,4 +1,70 @@
+# Build only if both hardware/google/av and device/google/cheets2/codec2 are
+# visible; otherwise, don't build any target under this repository.
+ifneq (,$(findstring hardware/google/av,$(PRODUCT_SOONG_NAMESPACES)))
+ifneq (,$(findstring device/google/cheets2/codec2,$(PRODUCT_SOONG_NAMESPACES)))
+
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
 
-include $(LOCAL_PATH)/vda/Android.mk
+LOCAL_SRC_FILES:= \
+        C2VDAComponent.cpp \
+        C2VDAAdaptor.cpp   \
+
+LOCAL_C_INCLUDES += \
+        $(TOP)/device/google/cheets2/codec2/vdastore/include \
+        $(TOP)/external/libchrome \
+        $(TOP)/external/gtest/include \
+        $(TOP)/external/v4l2_codec2/include \
+        $(TOP)/external/v4l2_codec2/vda \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/hardware/google/av/codec2/include \
+        $(TOP)/hardware/google/av/codec2/vndk/include \
+        $(TOP)/hardware/google/av/media/codecs/base/include \
+
+LOCAL_MODULE:= libv4l2_codec2
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libbinder \
+                          libchrome \
+                          liblog \
+                          libmedia \
+                          libstagefright \
+                          libstagefright_bufferqueue_helper \
+                          libstagefright_ccodec_ext \
+                          libstagefright_codec2 \
+                          libstagefright_codec2_vndk \
+                          libstagefright_simple_c2component \
+                          libstagefright_foundation \
+                          libutils \
+                          libv4l2_codec2_vda \
+                          libvda_c2_pixelformat \
+                          libvda_c2componentstore \
+
+# -Wno-unused-parameter is needed for libchrome/base code
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter -std=c++14
+LOCAL_CFLAGS += -Wno-unused-lambda-capture -Wno-unknown-warning-option
+LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+# Build C2VDAAdaptorProxy only for the ARC++ case.
+ifneq (,$(findstring cheets_,$(TARGET_PRODUCT)))
+LOCAL_CFLAGS += -DV4L2_CODEC2_ARC
+LOCAL_SRC_FILES += \
+                   C2VDAAdaptorProxy.cpp \
+
+LOCAL_SRC_FILES := $(filter-out C2VDAAdaptor.cpp, $(LOCAL_SRC_FILES))
+LOCAL_SHARED_LIBRARIES += libarcbridge \
+                          libarcbridgeservice \
+                          libcodec2_arcva_factory \
+                          libmojo \
+
+endif # ifneq (,$(findstring cheets_,$(TARGET_PRODUCT)))
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
+endif  #ifneq (,$(findstring device/google/cheets2/codec2,$(PRODUCT_SOONG_NAMESPACES)))
+endif  #ifneq (,$(findstring hardware/google/av,$(PRODUCT_SOONG_NAMESPACES)))
diff --git a/C2VDAAdaptor.cpp b/C2VDAAdaptor.cpp
new file mode 100644
index 0000000..2e837a9
--- /dev/null
+++ b/C2VDAAdaptor.cpp
@@ -0,0 +1,194 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAAdaptor"
+
+#include <C2VDAAdaptor.h>
+
+#include <bitstream_buffer.h>
+#include <native_pixmap_handle.h>
+#include <v4l2_device.h>
+#include <v4l2_slice_video_decode_accelerator.h>
+#include <video_pixel_format.h>
+#include <videodev2_custom.h>
+
+#include <utils/Log.h>
+
+namespace android {
+
+C2VDAAdaptor::C2VDAAdaptor() : mNumOutputBuffers(0u) {}
+
+C2VDAAdaptor::~C2VDAAdaptor() {
+    if (mVDA) {
+        destroy();
+    }
+}
+
+VideoDecodeAcceleratorAdaptor::Result C2VDAAdaptor::initialize(
+        media::VideoCodecProfile profile, bool secureMode,
+        VideoDecodeAcceleratorAdaptor::Client* client) {
+    // TODO: use secureMode here, or ignore?
+    if (mVDA) {
+        ALOGE("Re-initialize() is not allowed");
+        return ILLEGAL_STATE;
+    }
+
+    media::VideoDecodeAccelerator::Config config;
+    config.profile = profile;
+    config.output_mode = media::VideoDecodeAccelerator::Config::OutputMode::IMPORT;
+
+    // TODO(johnylin): may need to implement a factory to create the VDA if there are multiple
+    // VDA implementations in the future.
+    scoped_refptr<media::V4L2Device> device = new media::V4L2Device();
+    std::unique_ptr<media::VideoDecodeAccelerator> vda(
+            new media::V4L2SliceVideoDecodeAccelerator(device));
+    if (!vda->Initialize(config, this)) {
+        ALOGE("Failed to initialize VDA");
+        return PLATFORM_FAILURE;
+    }
+
+    mVDA = std::move(vda);
+    mClient = client;
+
+    return SUCCESS;
+}
+
+void C2VDAAdaptor::decode(int32_t bitstreamId, int ashmemFd, off_t offset, uint32_t bytesUsed) {
+    CHECK(mVDA);
+    mVDA->Decode(media::BitstreamBuffer(bitstreamId, base::SharedMemoryHandle(ashmemFd, true),
+                                        bytesUsed, offset));
+}
+
+void C2VDAAdaptor::assignPictureBuffers(uint32_t numOutputBuffers) {
+    CHECK(mVDA);
+    std::vector<media::PictureBuffer> buffers;
+    for (uint32_t id = 0; id < numOutputBuffers; ++id) {
+        buffers.push_back(media::PictureBuffer(static_cast<int32_t>(id), mPictureSize));
+    }
+    mVDA->AssignPictureBuffers(buffers);
+    mNumOutputBuffers = numOutputBuffers;
+}
+
+void C2VDAAdaptor::importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
+                                          int dmabufFd,
+                                          const std::vector<VideoFramePlane>& planes) {
+    CHECK(mVDA);
+    CHECK_LT(pictureBufferId, static_cast<int32_t>(mNumOutputBuffers));
+
+    media::VideoPixelFormat pixelFormat;
+    switch (format) {
+        case HalPixelFormat::YV12:
+            pixelFormat = media::PIXEL_FORMAT_YV12;
+            break;
+        case HalPixelFormat::NV12:
+            pixelFormat = media::PIXEL_FORMAT_NV12;
+            break;
+        default:
+            LOG_ALWAYS_FATAL("Unsupported format: 0x%x", format);
+            return;
+    }
+
+    media::NativePixmapHandle handle;
+    handle.fds.emplace_back(base::FileDescriptor(dmabufFd, true));
+    for (const auto& plane : planes) {
+        handle.planes.emplace_back(plane.mStride, plane.mOffset, 0, 0);
+    }
+    mVDA->ImportBufferForPicture(pictureBufferId, pixelFormat, handle);
+}
+
+void C2VDAAdaptor::reusePictureBuffer(int32_t pictureBufferId) {
+    CHECK(mVDA);
+    CHECK_LT(pictureBufferId, static_cast<int32_t>(mNumOutputBuffers));
+
+    mVDA->ReusePictureBuffer(pictureBufferId);
+}
+
+void C2VDAAdaptor::flush() {
+    CHECK(mVDA);
+    mVDA->Flush();
+}
+
+void C2VDAAdaptor::reset() {
+    CHECK(mVDA);
+    mVDA->Reset();
+}
+
+void C2VDAAdaptor::destroy() {
+    mVDA.reset(nullptr);
+    mNumOutputBuffers = 0u;
+    mPictureSize = media::Size();
+}
+
+//static
+media::VideoDecodeAccelerator::SupportedProfiles C2VDAAdaptor::GetSupportedProfiles(
+        uint32_t inputFormatFourcc) {
+    media::VideoDecodeAccelerator::SupportedProfiles supportedProfiles;
+    auto allProfiles = media::V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
+    bool isSliceBased = (inputFormatFourcc == V4L2_PIX_FMT_H264_SLICE) ||
+                        (inputFormatFourcc == V4L2_PIX_FMT_VP8_FRAME) ||
+                        (inputFormatFourcc == V4L2_PIX_FMT_VP9_FRAME);
+    for (const auto& profile : allProfiles) {
+        if (inputFormatFourcc ==
+            media::V4L2Device::VideoCodecProfileToV4L2PixFmt(profile.profile, isSliceBased)) {
+            supportedProfiles.push_back(profile);
+        }
+    }
+    return supportedProfiles;
+}
+
+void C2VDAAdaptor::ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+                                         media::VideoPixelFormat output_format,
+                                         const media::Size& dimensions) {
+    // Per change ag/3262504, output_format from VDA is no longer used; the component side always
+    // allocates graphic buffers for the flexible YUV format.
+    (void)output_format;
+
+    mClient->providePictureBuffers(requested_num_of_buffers, dimensions);
+    mPictureSize = dimensions;
+}
+
+void C2VDAAdaptor::DismissPictureBuffer(int32_t picture_buffer_id) {
+    mClient->dismissPictureBuffer(picture_buffer_id);
+}
+
+void C2VDAAdaptor::PictureReady(const media::Picture& picture) {
+    mClient->pictureReady(picture.picture_buffer_id(), picture.bitstream_buffer_id(),
+                          picture.visible_rect());
+}
+
+void C2VDAAdaptor::NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) {
+    mClient->notifyEndOfBitstreamBuffer(bitstream_buffer_id);
+}
+
+void C2VDAAdaptor::NotifyFlushDone() {
+    mClient->notifyFlushDone();
+}
+
+void C2VDAAdaptor::NotifyResetDone() {
+    mClient->notifyResetDone();
+}
+
+static VideoDecodeAcceleratorAdaptor::Result convertErrorCode(
+        media::VideoDecodeAccelerator::Error error) {
+    switch (error) {
+    case media::VideoDecodeAccelerator::ILLEGAL_STATE:
+        return VideoDecodeAcceleratorAdaptor::ILLEGAL_STATE;
+    case media::VideoDecodeAccelerator::INVALID_ARGUMENT:
+        return VideoDecodeAcceleratorAdaptor::INVALID_ARGUMENT;
+    case media::VideoDecodeAccelerator::UNREADABLE_INPUT:
+        return VideoDecodeAcceleratorAdaptor::UNREADABLE_INPUT;
+    case media::VideoDecodeAccelerator::PLATFORM_FAILURE:
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    default:
+        ALOGE("Unknown error code: %d", static_cast<int>(error));
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+}
+
+void C2VDAAdaptor::NotifyError(media::VideoDecodeAccelerator::Error error) {
+    mClient->notifyError(convertErrorCode(error));
+}
+
+}  // namespace android
diff --git a/C2VDAAdaptorProxy.cpp b/C2VDAAdaptorProxy.cpp
new file mode 100644
index 0000000..5b1558b
--- /dev/null
+++ b/C2VDAAdaptorProxy.cpp
@@ -0,0 +1,326 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAAdaptorProxy"
+
+#include <C2ArcVideoAcceleratorFactory.h>
+#include <C2VDAAdaptorProxy.h>
+
+#include <videodev2_custom.h>
+
+#include <arc/MojoProcessSupport.h>
+#include <arc/MojoThread.h>
+#include <base/bind.h>
+#include <binder/IServiceManager.h>
+#include <mojo/edk/embedder/embedder.h>
+#include <mojo/public/cpp/system/handle.h>
+#include <utils/Log.h>
+
+namespace mojo {
+template <>
+struct TypeConverter<::arc::VideoFramePlane, android::VideoFramePlane> {
+    static ::arc::VideoFramePlane Convert(const android::VideoFramePlane& plane) {
+        return ::arc::VideoFramePlane{static_cast<int32_t>(plane.mOffset),
+                                      static_cast<int32_t>(plane.mStride)};
+    }
+};
+}  // namespace mojo
+
+namespace android {
+namespace arc {
+C2VDAAdaptorProxy::C2VDAAdaptorProxy()
+      : C2VDAAdaptorProxy(::arc::MojoProcessSupport::getLeakyInstance()) {}
+
+C2VDAAdaptorProxy::C2VDAAdaptorProxy(::arc::MojoProcessSupport* mojoProcessSupport)
+      : mClient(nullptr),
+        mMojoTaskRunner(mojoProcessSupport->mojo_thread().getTaskRunner()),
+        mBinding(this),
+        mRelay(new ::arc::CancellationRelay()) {}
+
+C2VDAAdaptorProxy::~C2VDAAdaptorProxy() {}
+
+void C2VDAAdaptorProxy::onConnectionError(const std::string& pipeName) {
+    ALOGE("onConnectionError (%s)", pipeName.c_str());
+    mRelay->cancel();
+    NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
+}
+
+bool C2VDAAdaptorProxy::establishChannel() {
+    ALOGV("establishChannel");
+    auto future = ::arc::Future<bool>::make_shared(mRelay);
+    mMojoTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAAdaptorProxy::establishChannelOnMojoThread,
+                                         base::Unretained(this), future));
+    return future->wait() && future->get();
+}
+
+void C2VDAAdaptorProxy::establishChannelOnMojoThread(std::shared_ptr<::arc::Future<bool>> future) {
+    auto& factory = ::android::GetC2ArcVideoAcceleratorFactory();
+
+    if (!factory.createVideoDecodeAccelerator(mojo::MakeRequest(&mVDAPtr))) {
+        future->set(false);
+        return;
+    }
+    mVDAPtr.set_connection_error_handler(base::Bind(&C2VDAAdaptorProxy::onConnectionError,
+                                                    base::Unretained(this),
+                                                    std::string("mVDAPtr (vda pipe)")));
+    mVDAPtr.QueryVersion(base::Bind(&C2VDAAdaptorProxy::onVersionReady, base::Unretained(this),
+                                    std::move(future)));
+}
+
+void C2VDAAdaptorProxy::onVersionReady(std::shared_ptr<::arc::Future<bool>> future, uint32_t version) {
+    ALOGI("VideoDecodeAccelerator ready (version=%d)", version);
+
+    future->set(true);
+}
+
+void C2VDAAdaptorProxy::ProvidePictureBuffers(::arc::mojom::PictureBufferFormatPtr format) {
+    ALOGV("ProvidePictureBuffers");
+    mClient->providePictureBuffers(
+            format->min_num_buffers,
+            media::Size(format->coded_size.width(), format->coded_size.height()));
+}
+void C2VDAAdaptorProxy::PictureReady(::arc::mojom::PicturePtr picture) {
+    ALOGV("PictureReady");
+    const auto& rect = picture->crop_rect;
+    mClient->pictureReady(picture->picture_buffer_id, picture->bitstream_id,
+                          media::Rect(rect.x(), rect.y(), rect.right(), rect.bottom()));
+}
+
+static VideoDecodeAcceleratorAdaptor::Result convertErrorCode(
+        ::arc::mojom::VideoDecodeAccelerator::Result error) {
+    switch (error) {
+    case ::arc::mojom::VideoDecodeAccelerator::Result::ILLEGAL_STATE:
+        return VideoDecodeAcceleratorAdaptor::ILLEGAL_STATE;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::INVALID_ARGUMENT:
+        return VideoDecodeAcceleratorAdaptor::INVALID_ARGUMENT;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::UNREADABLE_INPUT:
+        return VideoDecodeAcceleratorAdaptor::UNREADABLE_INPUT;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE:
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::INSUFFICIENT_RESOURCES:
+        return VideoDecodeAcceleratorAdaptor::INSUFFICIENT_RESOURCES;
+
+    default:
+        ALOGE("Unknown error code: %d", static_cast<int>(error));
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+}
+
+void C2VDAAdaptorProxy::NotifyError(::arc::mojom::VideoDecodeAccelerator::Result error) {
+    ALOGE("NotifyError %d", static_cast<int>(error));
+    mClient->notifyError(convertErrorCode(error));
+}
+
+void C2VDAAdaptorProxy::NotifyEndOfBitstreamBuffer(int32_t bitstream_id) {
+    ALOGV("NotifyEndOfBitstreamBuffer");
+    mClient->notifyEndOfBitstreamBuffer(bitstream_id);
+}
+
+void C2VDAAdaptorProxy::NotifyResetDone(::arc::mojom::VideoDecodeAccelerator::Result result) {
+    ALOGV("NotifyResetDone");
+    if (result != ::arc::mojom::VideoDecodeAccelerator::Result::SUCCESS) {
+        ALOGE("Reset is done incorrectly.");
+        NotifyError(result);
+        return;
+    }
+    mClient->notifyResetDone();
+}
+
+void C2VDAAdaptorProxy::NotifyFlushDone(::arc::mojom::VideoDecodeAccelerator::Result result) {
+    ALOGV("NotifyFlushDone");
+    if (result == ::arc::mojom::VideoDecodeAccelerator::Result::CANCELLED) {
+        // Flush is cancelled by a succeeding Reset(). A client expects this behavior.
+        ALOGE("Flush is canceled.");
+        return;
+    }
+    if (result != ::arc::mojom::VideoDecodeAccelerator::Result::SUCCESS) {
+        ALOGE("Flush is done incorrectly.");
+        NotifyError(result);
+        return;
+    }
+    mClient->notifyFlushDone();
+}
+
+//static
+media::VideoDecodeAccelerator::SupportedProfiles C2VDAAdaptorProxy::GetSupportedProfiles(
+        uint32_t inputFormatFourcc) {
+    media::VideoDecodeAccelerator::SupportedProfiles profiles(1);
+    profiles[0].min_resolution = media::Size(16, 16);
+    profiles[0].max_resolution = media::Size(4096, 4096);
+    switch (inputFormatFourcc) {
+    case V4L2_PIX_FMT_H264:
+    case V4L2_PIX_FMT_H264_SLICE:
+        profiles[0].profile = media::H264PROFILE_MAIN;
+        break;
+    case V4L2_PIX_FMT_VP8:
+    case V4L2_PIX_FMT_VP8_FRAME:
+        profiles[0].profile = media::VP8PROFILE_ANY;
+        break;
+    case V4L2_PIX_FMT_VP9:
+    case V4L2_PIX_FMT_VP9_FRAME:
+        profiles[0].profile = media::VP9PROFILE_PROFILE0;
+        break;
+    default:
+        ALOGE("Unknown formatfourcc: %d", inputFormatFourcc);
+        return {};
+    }
+    return profiles;
+}
+
+VideoDecodeAcceleratorAdaptor::Result C2VDAAdaptorProxy::initialize(
+        media::VideoCodecProfile profile, bool secureMode,
+        VideoDecodeAcceleratorAdaptor::Client* client) {
+    ALOGV("initialize(profile=%d, secureMode=%d)", static_cast<int>(profile),
+          static_cast<int>(secureMode));
+    DCHECK(client);
+    DCHECK(!mClient);
+    mClient = client;
+
+    if (!establishChannel()) {
+        ALOGE("establishChannel failed");
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+
+    auto future = ::arc::Future<::arc::mojom::VideoDecodeAccelerator::Result>::make_shared(mRelay);
+    mMojoTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAAdaptorProxy::initializeOnMojoThread,
+                                                    base::Unretained(this), profile, secureMode,
+                                                    ::arc::FutureCallback(future)));
+
+    if (!future->wait()) {
+        ALOGE("Connection lost");
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+    return static_cast<VideoDecodeAcceleratorAdaptor::Result>(future->get());
+}
+
+void C2VDAAdaptorProxy::initializeOnMojoThread(
+        const media::VideoCodecProfile profile, const bool secureMode,
+        const ::arc::mojom::VideoDecodeAccelerator::InitializeCallback& cb) {
+    // base::Unretained is safe because we own |mBinding|.
+    auto client = mBinding.CreateInterfacePtrAndBind();
+    mBinding.set_connection_error_handler(base::Bind(&C2VDAAdaptorProxy::onConnectionError,
+                                                     base::Unretained(this),
+                                                     std::string("mBinding (client pipe)")));
+
+    ::arc::mojom::VideoDecodeAcceleratorConfigPtr arcConfig =
+            ::arc::mojom::VideoDecodeAcceleratorConfig::New();
+    arcConfig->secure_mode = secureMode;
+    arcConfig->profile = static_cast<::arc::mojom::VideoCodecProfile>(profile);
+    mVDAPtr->Initialize(std::move(arcConfig), std::move(client), cb);
+}
+
+void C2VDAAdaptorProxy::decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t size) {
+    ALOGV("decode");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE, base::Bind(&C2VDAAdaptorProxy::decodeOnMojoThread, base::Unretained(this),
+                                  bitstreamId, handleFd, offset, size));
+}
+
+void C2VDAAdaptorProxy::decodeOnMojoThread(int32_t bitstreamId, int handleFd, off_t offset,
+                                           uint32_t size) {
+    MojoHandle wrappedHandle;
+    MojoResult wrapResult = mojo::edk::CreatePlatformHandleWrapper(
+            mojo::edk::ScopedPlatformHandle(mojo::edk::PlatformHandle(handleFd)), &wrappedHandle);
+    if (wrapResult != MOJO_RESULT_OK) {
+        ALOGE("failed to wrap handle: %d", static_cast<int>(wrapResult));
+        NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
+        return;
+    }
+    auto bufferPtr = ::arc::mojom::BitstreamBuffer::New();
+    bufferPtr->bitstream_id = bitstreamId;
+    bufferPtr->handle_fd = mojo::ScopedHandle(mojo::Handle(wrappedHandle));
+    bufferPtr->offset = offset;
+    bufferPtr->bytes_used = size;
+    mVDAPtr->Decode(std::move(bufferPtr));
+}
+
+void C2VDAAdaptorProxy::assignPictureBuffers(uint32_t numOutputBuffers) {
+    ALOGV("assignPictureBuffers: %d", numOutputBuffers);
+    mMojoTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAAdaptorProxy::assignPictureBuffersOnMojoThread,
+                                         base::Unretained(this), numOutputBuffers));
+}
+
+void C2VDAAdaptorProxy::assignPictureBuffersOnMojoThread(uint32_t numOutputBuffers) {
+    mVDAPtr->AssignPictureBuffers(numOutputBuffers);
+}
+
+void C2VDAAdaptorProxy::importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
+                                               int handleFd,
+                                               const std::vector<VideoFramePlane>& planes) {
+    ALOGV("importBufferForPicture");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE,
+            base::Bind(&C2VDAAdaptorProxy::importBufferForPictureOnMojoThread,
+                       base::Unretained(this), pictureBufferId, format, handleFd, planes));
+}
+
+void C2VDAAdaptorProxy::importBufferForPictureOnMojoThread(
+        int32_t pictureBufferId, HalPixelFormat format, int handleFd,
+        const std::vector<VideoFramePlane>& planes) {
+    MojoHandle wrappedHandle;
+    MojoResult wrapResult = mojo::edk::CreatePlatformHandleWrapper(
+            mojo::edk::ScopedPlatformHandle(mojo::edk::PlatformHandle(handleFd)), &wrappedHandle);
+    if (wrapResult != MOJO_RESULT_OK) {
+        ALOGE("failed to wrap handle: %d", static_cast<int>(wrapResult));
+        NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
+        return;
+    }
+
+    mVDAPtr->ImportBufferForPicture(pictureBufferId,
+                                    static_cast<::arc::mojom::HalPixelFormat>(format),
+                                    mojo::ScopedHandle(mojo::Handle(wrappedHandle)),
+                                    mojo::ConvertTo<std::vector<::arc::VideoFramePlane>>(planes));
+}
+
+void C2VDAAdaptorProxy::reusePictureBuffer(int32_t pictureBufferId) {
+    ALOGV("reusePictureBuffer: %d", pictureBufferId);
+    mMojoTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAAdaptorProxy::reusePictureBufferOnMojoThread,
+                                         base::Unretained(this), pictureBufferId));
+}
+
+void C2VDAAdaptorProxy::reusePictureBufferOnMojoThread(int32_t pictureBufferId) {
+    mVDAPtr->ReusePictureBuffer(pictureBufferId);
+}
+
+void C2VDAAdaptorProxy::flush() {
+    ALOGV("flush");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE, base::Bind(&C2VDAAdaptorProxy::flushOnMojoThread, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::flushOnMojoThread() {
+    mVDAPtr->Flush(base::Bind(&C2VDAAdaptorProxy::NotifyFlushDone, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::reset() {
+    ALOGV("reset");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE, base::Bind(&C2VDAAdaptorProxy::resetOnMojoThread, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::resetOnMojoThread() {
+    mVDAPtr->Reset(base::Bind(&C2VDAAdaptorProxy::NotifyResetDone, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::destroy() {
+    ALOGV("destroy");
+    ::arc::Future<void> future;
+    ::arc::PostTaskAndSetFutureWithResult(
+            mMojoTaskRunner.get(), FROM_HERE,
+            base::Bind(&C2VDAAdaptorProxy::closeChannelOnMojoThread, base::Unretained(this)),
+            &future);
+    future.get();
+}
+
+void C2VDAAdaptorProxy::closeChannelOnMojoThread() {
+    if (mBinding.is_bound()) mBinding.Close();
+    mVDAPtr.reset();
+}
+
+}  // namespace arc
+}  // namespace android
diff --git a/C2VDAComponent.cpp b/C2VDAComponent.cpp
new file mode 100644
index 0000000..325bf6b
--- /dev/null
+++ b/C2VDAComponent.cpp
@@ -0,0 +1,1358 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAComponent"
+
+#ifdef V4L2_CODEC2_ARC
+#include <C2VDAAdaptorProxy.h>
+#else
+#include <C2VDAAdaptor.h>
+#endif
+
+#define __C2_GENERATE_GLOBAL_VARS__
+#include <C2VDAAllocatorStore.h>
+#include <C2VDAComponent.h>
+#include <C2VDAPixelFormat.h>
+#include <C2VDASupport.h>  // to getParamReflector from vda store
+#include <C2VdaBqBlockPool.h>
+#include <C2VdaPooledBlockPool.h>
+
+#include <videodev2_custom.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2ComponentFactory.h>
+#include <C2PlatformSupport.h>
+
+#include <base/bind.h>
+#include <base/bind_helpers.h>
+
+#include <media/stagefright/MediaDefs.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <inttypes.h>
+#include <string.h>
+#include <algorithm>
+#include <string>
+
+#define UNUSED(expr)  \
+    do {              \
+        (void)(expr); \
+    } while (0)
+
+namespace android {
+
+namespace {
+
+// Mask against 30 bits to avoid (undefined) wraparound on signed integer.
+// Converts a C2Work frame index into the 30-bit bitstream id handed to VDA.
+int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
+    return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
+}
+
+// Use basic graphic block pool/allocator as default.
+const C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_GRAPHIC;
+
+const C2String kH264DecoderName = "c2.vda.avc.decoder";
+const C2String kVP8DecoderName = "c2.vda.vp8.decoder";
+const C2String kVP9DecoderName = "c2.vda.vp9.decoder";
+const C2String kH264SecureDecoderName = "c2.vda.avc.decoder.secure";
+const C2String kVP8SecureDecoderName = "c2.vda.vp8.decoder.secure";
+const C2String kVP9SecureDecoderName = "c2.vda.vp9.decoder.secure";
+
+const uint32_t kDpbOutputBufferExtraCount = 3;  // Use the same number as ACodec.
+const int kDequeueRetryDelayUs = 10000;  // Wait time of dequeue buffer retry in microseconds.
+const int32_t kAllocateBufferMaxRetries = 10;  // Max retry time for fetchGraphicBlock timeout.
+}  // namespace
+
+// Builds the component interface: maps the component |name| to an input MIME
+// type and V4L2 fourcc, queries the VDA for supported profiles of that codec,
+// and registers all Codec2 parameters (stream formats, media types, picture
+// size, allocator ids, block pool ids). On any failure mInitStatus is set to
+// C2_BAD_VALUE and construction stops early.
+C2VDAComponent::IntfImpl::IntfImpl(C2String name, const std::shared_ptr<C2ReflectorHelper>& helper)
+      : C2InterfaceHelper(helper), mInitStatus(C2_OK) {
+    setDerivedInstance(this);
+
+    // TODO(johnylin): use factory function to determine whether V4L2 stream or slice API is.
+    uint32_t inputFormatFourcc;
+    char inputMime[128];
+    if (name == kH264DecoderName || name == kH264SecureDecoderName) {
+        strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_AVC);
+        inputFormatFourcc = V4L2_PIX_FMT_H264_SLICE;
+    } else if (name == kVP8DecoderName || name == kVP8SecureDecoderName) {
+        strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_VP8);
+        inputFormatFourcc = V4L2_PIX_FMT_VP8_FRAME;
+    } else if (name == kVP9DecoderName || name == kVP9SecureDecoderName) {
+        strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_VP9);
+        inputFormatFourcc = V4L2_PIX_FMT_VP9_FRAME;
+    } else {
+        ALOGE("Invalid component name: %s", name.c_str());
+        mInitStatus = C2_BAD_VALUE;
+        return;
+    }
+    // Get supported profiles from VDA.
+    // TODO: re-think the suitable method of getting supported profiles for both pure Android and
+    //       ARC++.
+    media::VideoDecodeAccelerator::SupportedProfiles supportedProfiles;
+#ifdef V4L2_CODEC2_ARC
+    supportedProfiles = arc::C2VDAAdaptorProxy::GetSupportedProfiles(inputFormatFourcc);
+#else
+    supportedProfiles = C2VDAAdaptor::GetSupportedProfiles(inputFormatFourcc);
+#endif
+    if (supportedProfiles.empty()) {
+        ALOGE("No supported profile from input format: %u", inputFormatFourcc);
+        mInitStatus = C2_BAD_VALUE;
+        return;
+    }
+
+    // Use the first supported profile and its resolution limits for the
+    // parameter ranges below.
+    mCodecProfile = supportedProfiles[0].profile;
+
+    auto minSize = supportedProfiles[0].min_resolution;
+    auto maxSize = supportedProfiles[0].max_resolution;
+
+    addParameter(
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                    .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2FormatCompressed))
+                    .build());
+
+    addParameter(DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                         .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2FormatVideo))
+                         .build());
+
+    addParameter(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                    .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(inputMime))
+                    .build());
+
+    addParameter(DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                         .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+                                 MEDIA_MIMETYPE_VIDEO_RAW))
+                         .build());
+
+    struct LocalSetter {
+        static C2R SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::output>& videoSize) {
+            (void)mayBlock;
+            // TODO: maybe apply block limit?
+            return videoSize.F(videoSize.v.width)
+                    .validatePossible(videoSize.v.width)
+                    .plus(videoSize.F(videoSize.v.height).validatePossible(videoSize.v.height));
+        }
+    };
+
+    addParameter(DefineParam(mSize, C2_PARAMKEY_STREAM_PICTURE_SIZE)
+                         .withDefault(new C2StreamPictureSizeInfo::output(0u, 176, 144))
+                         .withFields({
+                                 C2F(mSize, width).inRange(minSize.width(), maxSize.width(), 16),
+                                 C2F(mSize, height).inRange(minSize.height(), maxSize.height(), 16),
+                         })
+                         .withSetter(LocalSetter::SizeSetter)
+                         .build());
+
+    // Secure components use protected allocators for both input and output.
+    bool secureMode = name.find(".secure") != std::string::npos;
+    C2Allocator::id_t inputAllocators[] = {secureMode ? C2VDAAllocatorStore::SECURE_LINEAR
+                                                      : C2PlatformAllocatorStore::ION};
+
+    C2Allocator::id_t outputAllocators[] = {C2VDAAllocatorStore::V4L2_BUFFERPOOL};
+
+    C2Allocator::id_t surfaceAllocator = secureMode ? C2VDAAllocatorStore::SECURE_GRAPHIC
+                                                    : C2VDAAllocatorStore::V4L2_BUFFERQUEUE;
+
+    addParameter(
+            DefineParam(mInputAllocatorIds, C2_PARAMKEY_INPUT_ALLOCATORS)
+                    .withConstValue(C2PortAllocatorsTuning::input::AllocShared(inputAllocators))
+                    .build());
+
+    addParameter(
+            DefineParam(mOutputAllocatorIds, C2_PARAMKEY_OUTPUT_ALLOCATORS)
+                    .withConstValue(C2PortAllocatorsTuning::output::AllocShared(outputAllocators))
+                    .build());
+
+    addParameter(DefineParam(mOutputSurfaceAllocatorId, C2_PARAMKEY_OUTPUT_SURFACE_ALLOCATOR)
+                         .withConstValue(new C2PortSurfaceAllocatorTuning::output(surfaceAllocator))
+                         .build());
+
+    C2BlockPool::local_id_t outputBlockPools[] = {kDefaultOutputBlockPool};
+
+    addParameter(
+            DefineParam(mOutputBlockPoolIds, C2_PARAMKEY_OUTPUT_BLOCK_POOLS)
+                    .withDefault(C2PortBlockPoolsTuning::output::AllocShared(outputBlockPools))
+                    .withFields({C2F(mOutputBlockPoolIds, m.values[0]).any(),
+                                 C2F(mOutputBlockPoolIds, m.values).inRange(0, 1)})
+                    .withSetter(Setter<C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
+                    .build());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+#define EXPECT_STATE_OR_RETURN_ON_ERROR(x)                    \
+    do {                                                      \
+        if (mComponentState == ComponentState::ERROR) return; \
+        CHECK_EQ(mComponentState, ComponentState::x);         \
+    } while (0)
+
+#define EXPECT_RUNNING_OR_RETURN_ON_ERROR()                       \
+    do {                                                          \
+        if (mComponentState == ComponentState::ERROR) return;     \
+        CHECK_NE(mComponentState, ComponentState::UNINITIALIZED); \
+    } while (0)
+
+// Simple value holder describing an output video format reported by the VDA.
+C2VDAComponent::VideoFormat::VideoFormat(HalPixelFormat pixelFormat, uint32_t minNumBuffers,
+                                         media::Size codedSize, media::Rect visibleRect)
+      : mPixelFormat(pixelFormat),
+        mMinNumBuffers(minNumBuffers),
+        mCodedSize(codedSize),
+        mVisibleRect(visibleRect) {}
+
+// Constructs the component: builds the interface, detects secure mode from the
+// name, and starts the component thread. If interface init or thread start
+// fails, the constructor returns early and mState stays UNLOADED.
+C2VDAComponent::C2VDAComponent(C2String name, c2_node_id_t id,
+                               const std::shared_ptr<C2ReflectorHelper>& helper)
+      : mIntfImpl(std::make_shared<IntfImpl>(name, helper)),
+        mIntf(std::make_shared<SimpleInterface<IntfImpl>>(name.c_str(), id, mIntfImpl)),
+        mThread("C2VDAComponentThread"),
+        mDequeueThread("C2VDAComponentDequeueThread"),
+        mVDAInitResult(VideoDecodeAcceleratorAdaptor::Result::ILLEGAL_STATE),
+        mComponentState(ComponentState::UNINITIALIZED),
+        mPendingOutputEOS(false),
+        mLastOutputTimestamp(-1),
+        mCodecProfile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+        mState(State::UNLOADED),
+        mWeakThisFactory(this) {
+    // TODO(johnylin): the client may need to know if init is failed.
+    if (mIntfImpl->status() != C2_OK) {
+        ALOGE("Component interface init failed (err code = %d)", mIntfImpl->status());
+        return;
+    }
+
+    mSecureMode = name.find(".secure") != std::string::npos;
+    if (!mThread.Start()) {
+        ALOGE("Component thread failed to start.");
+        return;
+    }
+    mTaskRunner = mThread.task_runner();
+    mState.store(State::LOADED);
+}
+
+// Destruction is only legal from LOADED state. Posts onDestroy() to the
+// component thread and then joins the thread, so teardown completes before the
+// destructor returns.
+C2VDAComponent::~C2VDAComponent() {
+    CHECK_EQ(mState.load(), State::LOADED);
+
+    if (mThread.IsRunning()) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              ::base::Bind(&C2VDAComponent::onDestroy, ::base::Unretained(this)));
+        mThread.Stop();  // Joins the thread; the posted task runs first.
+    }
+}
+
+// Component-thread teardown: destroys the VDA adaptor (if any) and stops the
+// dequeue thread.
+void C2VDAComponent::onDestroy() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDestroy");
+    if (mVDAAdaptor.get()) {
+        mVDAAdaptor->destroy();
+        mVDAAdaptor.reset(nullptr);
+    }
+    stopDequeueThread();
+}
+
+// Component-thread start handler: creates the platform-appropriate VDA adaptor
+// (ARC proxy or direct) and initializes it with |profile|. Signals |done| when
+// finished so the caller (blocked on the event) can read mVDAInitResult.
+void C2VDAComponent::onStart(media::VideoCodecProfile profile, ::base::WaitableEvent* done) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onStart");
+    CHECK_EQ(mComponentState, ComponentState::UNINITIALIZED);
+
+#ifdef V4L2_CODEC2_ARC
+    mVDAAdaptor.reset(new arc::C2VDAAdaptorProxy());
+#else
+    mVDAAdaptor.reset(new C2VDAAdaptor());
+#endif
+
+    mVDAInitResult = mVDAAdaptor->initialize(profile, mSecureMode, this);
+    if (mVDAInitResult == VideoDecodeAcceleratorAdaptor::Result::SUCCESS) {
+        mComponentState = ComponentState::STARTED;
+    }
+
+    done->Signal();
+}
+
+// Component-thread queue handler: records the work (tagging it with a drain
+// mode if it carries the EOS flag) and kicks off dequeueing.
+void C2VDAComponent::onQueueWork(std::unique_ptr<C2Work> work) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onQueueWork: flags=0x%x, index=%llu, timestamp=%llu", work->input.flags,
+          work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    uint32_t drainMode = NO_DRAIN;
+    if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+        drainMode = DRAIN_COMPONENT_WITH_EOS;
+    }
+    mQueue.push({std::move(work), drainMode});
+    // TODO(johnylin): set a maximum size of mQueue and check if mQueue is already full.
+
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onDequeueWork, ::base::Unretained(this)));
+}
+
+// Component-thread dequeue handler: pops one work from mQueue, sends its input
+// buffer (if any) to the VDA, initializes the worklet output, starts draining
+// if the work carries a drain mode, and moves the work to mPendingWorks. If
+// more works remain it re-posts itself, processing one work per task.
+void C2VDAComponent::onDequeueWork() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDequeueWork");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+    if (mQueue.empty()) {
+        return;
+    }
+    if (mComponentState == ComponentState::DRAINING ||
+        mComponentState == ComponentState::FLUSHING) {
+        ALOGV("Temporarily stop dequeueing works since component is draining/flushing.");
+        return;
+    }
+    if (mComponentState != ComponentState::STARTED) {
+        ALOGE("Work queue should be empty if the component is not in STARTED state.");
+        return;
+    }
+
+    // Dequeue a work from mQueue.
+    std::unique_ptr<C2Work> work(std::move(mQueue.front().mWork));
+    auto drainMode = mQueue.front().mDrainMode;
+    mQueue.pop();
+
+    CHECK_LE(work->input.buffers.size(), 1u);
+    if (work->input.buffers.empty()) {
+        // Client may queue an EOS work with no input buffer, otherwise every work must have one
+        // input buffer.
+        CHECK(drainMode != NO_DRAIN);
+    } else {
+        // If input.buffers is not empty, the buffer should have meaningful content inside.
+        C2ConstLinearBlock linearBlock = work->input.buffers.front()->data().linearBlocks().front();
+        CHECK_GT(linearBlock.size(), 0u);
+        // Send input buffer to VDA for decode.
+        // Use frameIndex as bitstreamId.
+        int32_t bitstreamId = frameIndexToBitstreamId(work->input.ordinal.frameIndex);
+        sendInputBufferToAccelerator(linearBlock, bitstreamId);
+    }
+
+    CHECK_EQ(work->worklets.size(), 1u);
+    work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+
+    if (drainMode != NO_DRAIN) {
+        // This work marks a drain point: ask VDA to flush and remember whether
+        // an EOS work must be reported when the drain finishes.
+        mVDAAdaptor->flush();
+        mComponentState = ComponentState::DRAINING;
+        mPendingOutputEOS = drainMode == DRAIN_COMPONENT_WITH_EOS;
+    }
+
+    // Put work to mPendingWorks.
+    mPendingWorks.emplace_back(std::move(work));
+
+    if (!mQueue.empty()) {
+        mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onDequeueWork,
+                                                      ::base::Unretained(this)));
+    }
+}
+
+// VDA callback (on component thread): the input buffer for |bitstreamId| has
+// been consumed. Releases the input buffer of the matching pending work and
+// reports the work if it is now complete.
+void C2VDAComponent::onInputBufferDone(int32_t bitstreamId) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onInputBufferDone: bitstream id=%d", bitstreamId);
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    C2Work* work = getPendingWorkByBitstreamId(bitstreamId);
+    if (!work) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+
+    // When the work is done, the input buffer shall be reset by component.
+    work->input.buffers.front().reset();
+
+    reportFinishedWorkIfAny();
+}
+
+// Called (on component thread) when the client returns an output graphic
+// block. Obsolete blocks (after stop or a resolution change) are simply
+// dropped; otherwise the block is handed back to the VDA, or used to continue
+// a pending output-format change.
+void C2VDAComponent::onOutputBufferReturned(std::shared_ptr<C2GraphicBlock> block,
+                                            uint32_t poolId) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onOutputBufferReturned: pool id=%u", poolId);
+    if (mComponentState == ComponentState::UNINITIALIZED) {
+        // Output buffer is returned from client after component is stopped. Just let the buffer be
+        // released.
+        return;
+    }
+
+    if (block->width() != static_cast<uint32_t>(mOutputFormat.mCodedSize.width()) ||
+        block->height() != static_cast<uint32_t>(mOutputFormat.mCodedSize.height())) {
+        // Output buffer is returned after we changed output resolution. Just let the buffer be
+        // released.
+        ALOGV("Discard obsolete graphic block: pool id=%u", poolId);
+        return;
+    }
+
+    GraphicBlockInfo* info = getGraphicBlockByPoolId(poolId);
+    if (!info) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_CLIENT);
+    info->mGraphicBlock = std::move(block);
+    info->mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
+
+    if (mPendingOutputFormat) {
+        tryChangeOutputFormat();
+    } else {
+        sendOutputBufferToAccelerator(info);
+    }
+}
+
+// VDA callback (on component thread): a picture for |bitstreamId| was decoded
+// into |pictureBufferId|. Transfers the block to the client by attaching a
+// shared C2Buffer to the matching pending work, then reports finished works.
+void C2VDAComponent::onOutputBufferDone(int32_t pictureBufferId, int32_t bitstreamId) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onOutputBufferDone: picture id=%d, bitstream id=%d", pictureBufferId, bitstreamId);
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    C2Work* work = getPendingWorkByBitstreamId(bitstreamId);
+    if (!work) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    GraphicBlockInfo* info = getGraphicBlockById(pictureBufferId);
+    if (!info) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_ACCELERATOR);
+    // Output buffer will be passed to client soon along with mListener->onWorkDone_nb().
+    info->mState = GraphicBlockInfo::State::OWNED_BY_CLIENT;
+    mBuffersInClient++;
+
+    // Attach output buffer to the work corresponded to bitstreamId.
+    C2ConstGraphicBlock constBlock = info->mGraphicBlock->share(
+            C2Rect(mOutputFormat.mVisibleRect.width(), mOutputFormat.mVisibleRect.height()),
+            C2Fence());
+    MarkBlockPoolDataAsShared(constBlock);
+    work->worklets.front()->output.buffers.emplace_back(
+            C2Buffer::CreateGraphicBuffer(std::move(constBlock)));
+    info->mGraphicBlock.reset();
+
+    // Sanity-check that output timestamps are monotonically non-decreasing.
+    // TODO: this does not work for timestamps as they can wrap around
+    int64_t currentTimestamp = ::base::checked_cast<int64_t>(work->input.ordinal.timestamp.peek());
+    CHECK_GE(currentTimestamp, mLastOutputTimestamp);
+    mLastOutputTimestamp = currentTimestamp;
+
+    reportFinishedWorkIfAny();
+}
+
+// Component-thread drain handler. If works are still queued, the drain is
+// deferred by tagging the last queued work; if only pending (in-VDA) works
+// remain, the VDA flush is signaled immediately; otherwise the drain is a
+// no-op.
+void C2VDAComponent::onDrain(uint32_t drainMode) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDrain: mode = %u", drainMode);
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    if (!mQueue.empty()) {
+        // Mark last queued work as "drain-till-here" by setting drainMode. Do not change drainMode
+        // if last work already has one.
+        if (mQueue.back().mDrainMode == NO_DRAIN) {
+            mQueue.back().mDrainMode = drainMode;
+        }
+    } else if (!mPendingWorks.empty()) {
+        // Neglect drain request if component is not in STARTED mode. Otherwise, enters DRAINING
+        // mode and signal VDA flush immediately.
+        if (mComponentState == ComponentState::STARTED) {
+            mVDAAdaptor->flush();
+            mComponentState = ComponentState::DRAINING;
+            mPendingOutputEOS = drainMode == DRAIN_COMPONENT_WITH_EOS;
+        } else {
+            ALOGV("Neglect drain. Component in state: %d", mComponentState);
+        }
+    } else {
+        // Do nothing.
+        ALOGV("No buffers in VDA, drain takes no effect.");
+    }
+}
+
+// VDA callback (on component thread): the flush requested for draining has
+// completed. Reports the EOS work if one is pending and restarts work
+// dequeueing, which was paused while draining.
+void C2VDAComponent::onDrainDone() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDrainDone");
+    if (mComponentState == ComponentState::DRAINING) {
+        mComponentState = ComponentState::STARTED;
+    } else if (mComponentState == ComponentState::STOPPING) {
+        // The client signaled stop right before VDA notified drain done. Let the stop process
+        // continue.
+        return;
+    } else if (mComponentState != ComponentState::FLUSHING) {
+        // It is reasonable to get onDrainDone in FLUSHING, which means flush is already signaled
+        // and component should still expect onFlushDone callback from VDA.
+        ALOGE("Unexpected state while onDrainDone(). State=%d", mComponentState);
+        reportError(C2_BAD_STATE);
+        return;
+    }
+
+    if (mPendingOutputEOS) {
+        // Return EOS work.
+        reportEOSWork();
+    }
+    // mPendingWorks must be empty after draining is finished.
+    CHECK(mPendingWorks.empty());
+
+    // Last stream is finished. Reset the timestamp record.
+    mLastOutputTimestamp = -1;
+
+    // Work dequeueing was stopped while component draining. Restart it.
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onDequeueWork, ::base::Unretained(this)));
+}
+
+// Component-thread flush handler: resets the VDA, abandons all queued works,
+// and enters FLUSHING state. Duplicate requests while flushing/stopping are
+// ignored.
+void C2VDAComponent::onFlush() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onFlush");
+    if (mComponentState == ComponentState::FLUSHING ||
+        mComponentState == ComponentState::STOPPING) {
+        return;  // Ignore other flush request when component is flushing or stopping.
+    }
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    mVDAAdaptor->reset();
+    // Pop all works in mQueue and put into mAbandonedWorks.
+    while (!mQueue.empty()) {
+        mAbandonedWorks.emplace_back(std::move(mQueue.front().mWork));
+        mQueue.pop();
+    }
+    mComponentState = ComponentState::FLUSHING;
+}
+
+// Component-thread stop handler: triggers a VDA reset (unless one is already
+// in flight from onFlush()), abandons queued works, and stores |done| to be
+// signaled from onStopDone() once the reset completes.
+void C2VDAComponent::onStop(::base::WaitableEvent* done) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onStop");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    // Do not request VDA reset again before the previous one is done. If reset is already sent by
+    // onFlush(), just regard the following NotifyResetDone callback as for stopping.
+    if (mComponentState != ComponentState::FLUSHING) {
+        mVDAAdaptor->reset();
+    }
+
+    // Pop all works in mQueue and put into mAbandonedWorks.
+    while (!mQueue.empty()) {
+        mAbandonedWorks.emplace_back(std::move(mQueue.front().mWork));
+        mQueue.pop();
+    }
+
+    mStopDoneEvent = done;  // restore done event which should be signaled in onStopDone().
+    mComponentState = ComponentState::STOPPING;
+}
+
+// VDA callback (on component thread): a reset completed. Dispatches to the
+// flush or stop completion path depending on why the reset was requested; a
+// reset done in any other state is treated as corruption.
+void C2VDAComponent::onResetDone() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    if (mComponentState == ComponentState::ERROR) {
+        return;
+    }
+    if (mComponentState == ComponentState::FLUSHING) {
+        onFlushDone();
+    } else if (mComponentState == ComponentState::STOPPING) {
+        onStopDone();
+    } else {
+        reportError(C2_CORRUPTED);
+    }
+}
+
+// Completes a flush: reports abandoned works, resets the timestamp record,
+// returns to STARTED, and restarts work dequeueing.
+void C2VDAComponent::onFlushDone() {
+    ALOGV("onFlushDone");
+    reportAbandonedWorks();
+    // Reset the timestamp record.
+    mLastOutputTimestamp = -1;
+    mComponentState = ComponentState::STARTED;
+
+    // Work dequeueing was stopped while component flushing. Restart it.
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onDequeueWork, ::base::Unretained(this)));
+}
+
+// Completes a stop: abandons remaining works, releases the VDA adaptor and all
+// graphic blocks, stops the dequeue thread, signals the event stored by
+// onStop(), and returns to UNINITIALIZED.
+void C2VDAComponent::onStopDone() {
+    ALOGV("onStopDone");
+    CHECK(mStopDoneEvent);
+
+    // TODO(johnylin): At this moment, there may be C2Buffer still owned by client, do we need to
+    // do something for them?
+    reportAbandonedWorks();
+    mPendingOutputFormat.reset();
+    mLastOutputTimestamp = -1;
+    if (mVDAAdaptor.get()) {
+        mVDAAdaptor->destroy();
+        mVDAAdaptor.reset(nullptr);
+    }
+
+    mGraphicBlocks.clear();
+
+    stopDequeueThread();
+
+    mStopDoneEvent->Signal();
+    mStopDoneEvent = nullptr;
+    mComponentState = ComponentState::UNINITIALIZED;
+}
+
+// Sets the client listener. Only accepted in LOADED state (|mayBlock| is
+// unused); returns C2_BAD_STATE otherwise.
+c2_status_t C2VDAComponent::setListener_vb(const std::shared_ptr<C2Component::Listener>& listener,
+                                           c2_blocking_t mayBlock) {
+    UNUSED(mayBlock);
+    // TODO(johnylin): API says this method must be supported in all states, however I'm quite not
+    //                 sure what is the use case.
+    if (mState.load() != State::LOADED) {
+        return C2_BAD_STATE;
+    }
+    mListener = listener;
+    return C2_OK;
+}
+
+// Sends one input bitstream buffer to the VDA for decode. The buffer fd is
+// dup()ed so the VDA side gets its own descriptor; a failed dup is reported as
+// C2_CORRUPTED.
+// NOTE(review): presumably decode() takes ownership of |dupFd| -- confirm in
+// the adaptor, otherwise the fd leaks.
+void C2VDAComponent::sendInputBufferToAccelerator(const C2ConstLinearBlock& input,
+                                                  int32_t bitstreamId) {
+    ALOGV("sendInputBufferToAccelerator");
+    int dupFd = dup(input.handle()->data[0]);
+    if (dupFd < 0) {
+        ALOGE("Failed to dup(%d) input buffer (bitstreamId=%d), errno=%d", input.handle()->data[0],
+              bitstreamId, errno);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    ALOGV("Decode bitstream ID: %d, offset: %u size: %u", bitstreamId, input.offset(),
+          input.size());
+    mVDAAdaptor->decode(bitstreamId, dupFd, input.offset(), input.size());
+}
+
+// Looks up the pending work whose frame index maps to |bitstreamId|. Returns
+// nullptr (after logging) if no such work exists.
+C2Work* C2VDAComponent::getPendingWorkByBitstreamId(int32_t bitstreamId) {
+    auto workIter = std::find_if(mPendingWorks.begin(), mPendingWorks.end(),
+                                 [bitstreamId](const std::unique_ptr<C2Work>& w) {
+                                     return frameIndexToBitstreamId(w->input.ordinal.frameIndex) ==
+                                            bitstreamId;
+                                 });
+
+    if (workIter == mPendingWorks.end()) {
+        ALOGE("Can't find pending work by bitstream ID: %d", bitstreamId);
+        return nullptr;
+    }
+    return workIter->get();
+}
+
+// Returns the graphic block info for |blockId|, or nullptr if out of range.
+// Indexing by id relies on mBlockId being assigned as the vector index (see
+// appendOutputBuffer).
+C2VDAComponent::GraphicBlockInfo* C2VDAComponent::getGraphicBlockById(int32_t blockId) {
+    if (blockId < 0 || blockId >= static_cast<int32_t>(mGraphicBlocks.size())) {
+        ALOGE("getGraphicBlockById failed: id=%d", blockId);
+        return nullptr;
+    }
+    return &mGraphicBlocks[blockId];
+}
+
+// Returns the graphic block info whose pool id matches |poolId|, or nullptr
+// (after logging) if none is found.
+C2VDAComponent::GraphicBlockInfo* C2VDAComponent::getGraphicBlockByPoolId(uint32_t poolId) {
+    auto blockIter = std::find_if(mGraphicBlocks.begin(), mGraphicBlocks.end(),
+                                  [poolId](const GraphicBlockInfo& gb) {
+                                      return gb.mPoolId == poolId;
+                                  });
+
+    if (blockIter == mGraphicBlocks.end()) {
+        ALOGE("getGraphicBlockByPoolId failed: poolId=%u", poolId);
+        return nullptr;
+    }
+    return &(*blockIter);
+}
+
+// VDA callback (on component thread): the output format changed (e.g. new
+// resolution). Reclaims accelerator-owned blocks, records the pending format,
+// and attempts the switch immediately.
+void C2VDAComponent::onOutputFormatChanged(std::unique_ptr<VideoFormat> format) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onOutputFormatChanged");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    ALOGV("New output format(pixel_format=0x%x, min_num_buffers=%u, coded_size=%s, crop_rect=%s)",
+          static_cast<uint32_t>(format->mPixelFormat), format->mMinNumBuffers,
+          format->mCodedSize.ToString().c_str(), format->mVisibleRect.ToString().c_str());
+
+    // On a format change the accelerator implicitly gives its buffers back.
+    for (auto& info : mGraphicBlocks) {
+        if (info.mState == GraphicBlockInfo::State::OWNED_BY_ACCELERATOR)
+            info.mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
+    }
+
+    CHECK(!mPendingOutputFormat);
+    mPendingOutputFormat = std::move(format);
+    tryChangeOutputFormat();
+}
+
+// Applies mPendingOutputFormat: adopts the new pixel format / buffer count /
+// coded size, updates the crop, reallocates output buffers from the block
+// pool, and hands all new buffers to the accelerator.
+void C2VDAComponent::tryChangeOutputFormat() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("tryChangeOutputFormat");
+    CHECK(mPendingOutputFormat);
+
+    // At this point, all output buffers should not be owned by accelerator. The component is not
+    // able to know when a client will release all owned output buffers by now. But it is ok to
+    // leave them to client since component won't own those buffers anymore.
+    // TODO(johnylin): we may also set a parameter for component to keep dequeueing buffers and
+    //                 change format only after the component owns most buffers. This may prevent
+    //                 too many buffers are still on client's hand while component starts to
+    //                 allocate more buffers. However, it leads latency on output format change.
+    for (const auto& info : mGraphicBlocks) {
+        CHECK(info.mState != GraphicBlockInfo::State::OWNED_BY_ACCELERATOR);
+    }
+
+    // Only YCbCr_420_888 output is supported here.
+    CHECK_EQ(mPendingOutputFormat->mPixelFormat, HalPixelFormat::YCbCr_420_888);
+
+    mOutputFormat.mPixelFormat = mPendingOutputFormat->mPixelFormat;
+    mOutputFormat.mMinNumBuffers = mPendingOutputFormat->mMinNumBuffers;
+    mOutputFormat.mCodedSize = mPendingOutputFormat->mCodedSize;
+
+    setOutputFormatCrop(mPendingOutputFormat->mVisibleRect);
+
+    c2_status_t err = allocateBuffersFromBlockAllocator(
+            mPendingOutputFormat->mCodedSize,
+            static_cast<uint32_t>(mPendingOutputFormat->mPixelFormat));
+    if (err != C2_OK) {
+        reportError(err);
+        return;
+    }
+
+    for (auto& info : mGraphicBlocks) {
+        sendOutputBufferToAccelerator(&info);
+    }
+    mPendingOutputFormat.reset();
+}
+
+// Allocates the set of output graphic buffers for the current format: stops
+// the dequeue thread, tells the VDA the buffer count, obtains the client-
+// configured block pool, requests a fresh buffer set from it (bufferqueue- or
+// bufferpool-backed), fetches each block (retrying on timeout), registers each
+// block with its pool id, and finally restarts the dequeue thread. Any failure
+// is reported via reportError() and returned to the caller.
+c2_status_t C2VDAComponent::allocateBuffersFromBlockAllocator(const media::Size& size,
+                                                              uint32_t pixelFormat) {
+    ALOGV("allocateBuffersFromBlockAllocator(%s, 0x%x)", size.ToString().c_str(), pixelFormat);
+
+    stopDequeueThread();
+
+    // Extra buffers on top of the VDA minimum, matching ACodec's headroom.
+    size_t bufferCount = mOutputFormat.mMinNumBuffers + kDpbOutputBufferExtraCount;
+
+    // Allocate the output buffers.
+    mVDAAdaptor->assignPictureBuffers(bufferCount);
+
+    // Get block pool ID configured from the client.
+    std::shared_ptr<C2BlockPool> blockPool;
+    auto poolId = mIntfImpl->getBlockPoolId();
+    ALOGI("Using C2BlockPool ID = %" PRIu64 " for allocating output buffers", poolId);
+    auto err = GetCodec2BlockPool(poolId, shared_from_this(), &blockPool);
+    if (err != C2_OK) {
+        ALOGE("Graphic block allocator is invalid");
+        reportError(err);
+        return err;
+    }
+
+    mGraphicBlocks.clear();
+
+    bool useBufferQueue = blockPool->getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE;
+    if (useBufferQueue) {
+        ALOGV("Bufferqueue-backed block pool is used.");
+        // Set requested buffer count to C2VdaBqBlockPool.
+        std::shared_ptr<C2VdaBqBlockPool> bqPool =
+                std::static_pointer_cast<C2VdaBqBlockPool>(blockPool);
+        if (bqPool) {
+            err = bqPool->requestNewBufferSet(static_cast<int32_t>(bufferCount));
+            if (err != C2_OK) {
+                ALOGE("failed to request new buffer set to block pool: %d", err);
+                reportError(err);
+                return err;
+            }
+        } else {
+            ALOGE("static_pointer_cast C2VdaBqBlockPool failed...");
+            reportError(C2_CORRUPTED);
+            return C2_CORRUPTED;
+        }
+    } else {
+        ALOGV("Bufferpool-backed block pool is used.");
+        // Set requested buffer count to C2VdaPooledBlockPool.
+        std::shared_ptr<C2VdaPooledBlockPool> bpPool =
+                std::static_pointer_cast<C2VdaPooledBlockPool>(blockPool);
+        if (bpPool) {
+            err = bpPool->requestNewBufferSet(static_cast<int32_t>(bufferCount));
+            if (err != C2_OK) {
+                ALOGE("failed to request new buffer set to block pool: %d", err);
+                reportError(err);
+                return err;
+            }
+        } else {
+            ALOGE("static_pointer_cast C2VdaPooledBlockPool failed...");
+            reportError(C2_CORRUPTED);
+            return C2_CORRUPTED;
+        }
+    }
+
+    for (size_t i = 0; i < bufferCount; ++i) {
+        std::shared_ptr<C2GraphicBlock> block;
+        C2MemoryUsage usage = {
+                mSecureMode ? C2MemoryUsage::READ_PROTECTED : C2MemoryUsage::CPU_READ, 0};
+
+        // Retry fetchGraphicBlock on C2_TIMED_OUT up to kAllocateBufferMaxRetries
+        // times; any other failure (or exhausted retries) aborts the allocation.
+        int32_t retries_left = kAllocateBufferMaxRetries;
+        err = C2_NO_INIT;
+        while (err != C2_OK) {
+            err = blockPool->fetchGraphicBlock(size.width(), size.height(), pixelFormat, usage,
+                                               &block);
+            if (err == C2_TIMED_OUT && retries_left > 0) {
+                ALOGD("allocate buffer timeout, %d retry time(s) left...", retries_left);
+                retries_left--;
+            } else if (err != C2_OK) {
+                mGraphicBlocks.clear();
+                ALOGE("failed to allocate buffer: %d", err);
+                reportError(err);
+                return err;
+            }
+        }
+
+        uint32_t poolId;
+        if (useBufferQueue) {
+            err = C2VdaBqBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
+        } else {  // use bufferpool
+            err = C2VdaPooledBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
+        }
+        if (err != C2_OK) {
+            mGraphicBlocks.clear();
+            ALOGE("failed to getPoolIdFromGraphicBlock: %d", err);
+            reportError(err);
+            return err;
+        }
+        if (mSecureMode) {
+            appendSecureOutputBuffer(std::move(block), poolId);
+        } else {
+            appendOutputBuffer(std::move(block), poolId);
+        }
+    }
+    mOutputFormat.mMinNumBuffers = bufferCount;
+
+    if (!startDequeueThread(size, pixelFormat, std::move(blockPool))) {
+        reportError(C2_CORRUPTED);
+        return C2_CORRUPTED;
+    }
+    return C2_OK;
+}
+
+// Registers a newly-fetched non-secure graphic block as an output buffer: maps the block,
+// derives per-plane offsets/strides and the HAL pixel format, dups the buffer handle fd for
+// later import into the VDA, and appends the resulting GraphicBlockInfo to mGraphicBlocks.
+// On dup() failure, reports C2_CORRUPTED and drops the block.
+void C2VDAComponent::appendOutputBuffer(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId) {
+    GraphicBlockInfo info;
+    info.mBlockId = static_cast<int32_t>(mGraphicBlocks.size());
+    info.mGraphicBlock = std::move(block);
+    info.mPoolId = poolId;
+
+    C2ConstGraphicBlock constBlock = info.mGraphicBlock->share(
+            C2Rect(info.mGraphicBlock->width(), info.mGraphicBlock->height()), C2Fence());
+
+    const C2GraphicView& view = constBlock.map().get();
+    const uint8_t* const* data = view.data();
+    CHECK_NE(data, nullptr);
+    const C2PlanarLayout& layout = view.layout();
+
+    ALOGV("allocate graphic buffer: %p, id: %d, size: %dx%d", info.mGraphicBlock->handle(),
+          info.mBlockId, info.mGraphicBlock->width(), info.mGraphicBlock->height());
+
+    // get offset from data pointers
+    uint32_t offsets[C2PlanarLayout::MAX_NUM_PLANES];
+    auto baseAddress = reinterpret_cast<intptr_t>(data[0]);
+    for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+        auto planeAddress = reinterpret_cast<intptr_t>(data[i]);
+        offsets[i] = static_cast<uint32_t>(planeAddress - baseAddress);
+    }
+
+    bool crcb = false;
+    if (layout.numPlanes == 3 &&
+        offsets[C2PlanarLayout::PLANE_U] > offsets[C2PlanarLayout::PLANE_V]) {
+        // YCrCb format
+        std::swap(offsets[C2PlanarLayout::PLANE_U], offsets[C2PlanarLayout::PLANE_V]);
+        crcb = true;
+    }
+
+    bool semiplanar = false;
+    uint32_t passedNumPlanes = layout.numPlanes;
+    if (layout.planes[C2PlanarLayout::PLANE_U].colInc == 2) {  // chroma_step
+        // Semi-planar format: U and V are interleaved, so pass one plane fewer to VDA.
+        passedNumPlanes--;
+        semiplanar = true;
+    }
+
+    for (uint32_t i = 0; i < passedNumPlanes; ++i) {
+        ALOGV("plane %u: stride: %d, offset: %u", i, layout.planes[i].rowInc, offsets[i]);
+    }
+    info.mPixelFormat = resolveBufferFormat(crcb, semiplanar);
+    ALOGV("HAL pixel format: 0x%x", static_cast<uint32_t>(info.mPixelFormat));
+
+    // dup() the fd so ownership can be transferred to the VDA on import while the block keeps its
+    // own handle.
+    ::base::ScopedFD passedHandle(dup(info.mGraphicBlock->handle()->data[0]));
+    if (!passedHandle.is_valid()) {
+        ALOGE("Failed to dup(%d), errno=%d", info.mGraphicBlock->handle()->data[0], errno);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    std::vector<VideoFramePlane> passedPlanes;
+    for (uint32_t i = 0; i < passedNumPlanes; ++i) {
+        CHECK_GT(layout.planes[i].rowInc, 0);
+        passedPlanes.push_back({offsets[i], static_cast<uint32_t>(layout.planes[i].rowInc)});
+    }
+    info.mHandle = std::move(passedHandle);
+    info.mPlanes = std::move(passedPlanes);
+
+    mGraphicBlocks.push_back(std::move(info));
+}
+
+// Registers a graphic block as a secure (protected) output buffer. Secure buffers are never
+// mapped; only the dup'ed handle fd and the platform pixel format are recorded. Only available
+// on ARC builds (V4L2_CODEC2_ARC); otherwise reports C2_OMITTED.
+void C2VDAComponent::appendSecureOutputBuffer(std::shared_ptr<C2GraphicBlock> block,
+                                              uint32_t poolId) {
+#ifdef V4L2_CODEC2_ARC
+    const C2Handle* const handle = block->handle();
+    const int handleFd = handle->data[0];
+    ::base::ScopedFD passedHandle(dup(handleFd));
+    if (!passedHandle.is_valid()) {
+        ALOGE("Failed to dup(%d), errno=%d", handleFd, errno);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+
+    android::HalPixelFormat pixelFormat = getPlatformPixelFormat();
+    if (pixelFormat == android::HalPixelFormat::UNKNOWN) {
+        ALOGE("Failed to get pixel format on platform.");
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    CHECK(pixelFormat == android::HalPixelFormat::YV12 ||
+          pixelFormat == android::HalPixelFormat::NV12);
+    ALOGV("HAL pixel format: 0x%x", static_cast<uint32_t>(pixelFormat));
+
+    GraphicBlockInfo info;
+    info.mBlockId = static_cast<int32_t>(mGraphicBlocks.size());
+    info.mGraphicBlock = std::move(block);
+    info.mPoolId = poolId;
+    info.mHandle = std::move(passedHandle);
+    info.mPixelFormat = pixelFormat;
+    // In secure mode, since planes are not referred in Chrome side, empty plane is valid.
+    info.mPlanes.clear();
+    mGraphicBlocks.push_back(std::move(info));
+#else
+    ALOGE("appendSecureOutputBuffer() is not supported...");
+    reportError(C2_OMITTED);
+#endif // V4L2_CODEC2_ARC
+}
+
+// Hands an output buffer to the VDA: imports it on first use (while mHandle still owns the
+// dup'ed fd) or marks it for reuse thereafter. Transfers buffer ownership state from
+// OWNED_BY_COMPONENT to OWNED_BY_ACCELERATOR.
+void C2VDAComponent::sendOutputBufferToAccelerator(GraphicBlockInfo* info) {
+    ALOGV("sendOutputBufferToAccelerator index=%d", info->mBlockId);
+    CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_COMPONENT);
+    info->mState = GraphicBlockInfo::State::OWNED_BY_ACCELERATOR;
+
+    // is_valid() is true for the first time the buffer is passed to VDA. In that case, VDA needs to
+    // import the buffer first.
+    if (info->mHandle.is_valid()) {
+        // release() transfers fd ownership to the VDA adaptor, so subsequent calls take the
+        // reuse path below.
+        mVDAAdaptor->importBufferForPicture(info->mBlockId, info->mPixelFormat,
+                                            info->mHandle.release(), info->mPlanes);
+    } else {
+        mVDAAdaptor->reusePictureBuffer(info->mBlockId);
+    }
+}
+
+// Task posted from pictureReady() when the crop rect reported by VDA changes; updates the
+// output format's visible rect. Runs on the component task runner.
+void C2VDAComponent::onVisibleRectChanged(const media::Rect& cropRect) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onVisibleRectChanged");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    // We should make sure there is no pending output format change. That is, the input cropRect is
+    // corresponding to current output format.
+    CHECK(mPendingOutputFormat == nullptr);
+    setOutputFormatCrop(cropRect);
+}
+
+// Stores |cropRect| as the visible rect of the current output format.
+void C2VDAComponent::setOutputFormatCrop(const media::Rect& cropRect) {
+    ALOGV("setOutputFormatCrop(%dx%d)", cropRect.width(), cropRect.height());
+    // This visible rect should be set as crop window for each C2ConstGraphicBlock passed to
+    // framework.
+    mOutputFormat.mVisibleRect = cropRect;
+}
+
+// C2Component API: non-blocking queue of input works. Each work is moved onto the component
+// task runner via onQueueWork; |items| is drained. Fails with C2_BAD_STATE unless RUNNING.
+c2_status_t C2VDAComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
+    if (mState.load() != State::RUNNING) {
+        return C2_BAD_STATE;
+    }
+    while (!items->empty()) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              ::base::Bind(&C2VDAComponent::onQueueWork, ::base::Unretained(this),
+                                           ::base::Passed(&items->front())));
+        items->pop_front();
+    }
+    return C2_OK;
+}
+
+// C2Component API: announcing outlines is part of tunneling, which is unsupported.
+c2_status_t C2VDAComponent::announce_nb(const std::vector<C2WorkOutline>& items) {
+    UNUSED(items);
+    return C2_OMITTED;  // Tunneling is not supported by now
+}
+
+// C2Component API: posts an asynchronous flush to the task runner. |flushedWork| is left
+// untouched; abandoned works are delivered through the onWorkDone_nb() listener instead.
+c2_status_t C2VDAComponent::flush_sm(flush_mode_t mode,
+                                     std::list<std::unique_ptr<C2Work>>* const flushedWork) {
+    if (mode != FLUSH_COMPONENT) {
+        return C2_OMITTED;  // Tunneling is not supported by now
+    }
+    if (mState.load() != State::RUNNING) {
+        return C2_BAD_STATE;
+    }
+    mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onFlush,
+                                                  ::base::Unretained(this)));
+    // Instead of |flushedWork|, abandoned works will be returned via onWorkDone_nb() callback.
+    return C2_OK;
+}
+
+// C2Component API: posts an asynchronous drain (with or without EOS) to the task runner.
+// Chained drain modes (tunneling) are unsupported.
+c2_status_t C2VDAComponent::drain_nb(drain_mode_t mode) {
+    if (mode != DRAIN_COMPONENT_WITH_EOS && mode != DRAIN_COMPONENT_NO_EOS) {
+        return C2_OMITTED;  // Tunneling is not supported by now
+    }
+    if (mState.load() != State::RUNNING) {
+        return C2_BAD_STATE;
+    }
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onDrain, ::base::Unretained(this),
+                                       static_cast<uint32_t>(mode)));
+    return C2_OK;
+}
+
+// C2Component API: synchronously starts the component. Posts onStart to the task runner and
+// blocks on a WaitableEvent until VDA initialization completes; transitions LOADED -> RUNNING
+// on success, or returns C2_CORRUPTED if the VDA failed to initialize.
+c2_status_t C2VDAComponent::start() {
+    // Use mStartStopLock to block other asynchronously start/stop calls.
+    std::lock_guard<std::mutex> lock(mStartStopLock);
+
+    if (mState.load() != State::LOADED) {
+        return C2_BAD_STATE;  // start() is only supported when component is in LOADED state.
+    }
+
+    mCodecProfile = mIntfImpl->getCodecProfile();
+    ALOGI("get parameter: mCodecProfile = %d", static_cast<int>(mCodecProfile));
+
+    ::base::WaitableEvent done(::base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                               ::base::WaitableEvent::InitialState::NOT_SIGNALED);
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onStart, ::base::Unretained(this),
+                                       mCodecProfile, &done));
+    done.Wait();
+    if (mVDAInitResult != VideoDecodeAcceleratorAdaptor::Result::SUCCESS) {
+        ALOGE("Failed to start component due to VDA error: %d", static_cast<int>(mVDAInitResult));
+        return C2_CORRUPTED;
+    }
+    mState.store(State::RUNNING);
+    return C2_OK;
+}
+
+// C2Component API: synchronously stops the component. Posts onStop to the task runner, waits
+// for completion, then transitions to LOADED. Stopping an already-stopped component is a no-op.
+c2_status_t C2VDAComponent::stop() {
+    // Use mStartStopLock to block other asynchronously start/stop calls.
+    std::lock_guard<std::mutex> lock(mStartStopLock);
+
+    auto state = mState.load();
+    if (!(state == State::RUNNING || state == State::ERROR)) {
+        return C2_OK;  // Component is already in stopped state.
+    }
+
+    ::base::WaitableEvent done(::base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                               ::base::WaitableEvent::InitialState::NOT_SIGNALED);
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onStop, ::base::Unretained(this), &done));
+    done.Wait();
+    mState.store(State::LOADED);
+    return C2_OK;
+}
+
+// C2Component API: reset is currently implemented as stop(); see TODOs below.
+c2_status_t C2VDAComponent::reset() {
+    return stop();
+    // TODO(johnylin): reset is different than stop that it could be called in any state.
+    // TODO(johnylin): when reset is called, set ComponentInterface to default values.
+}
+
+// C2Component API: release is currently implemented as reset() (which in turn stops).
+c2_status_t C2VDAComponent::release() {
+    return reset();
+}
+
+// C2Component API: returns the component's interface object.
+std::shared_ptr<C2ComponentInterface> C2VDAComponent::intf() {
+    return mIntf;
+}
+
+// VDA client callback: the accelerator requests |minNumBuffers| output buffers of |codedSize|.
+// Builds a VideoFormat and posts onOutputFormatChanged to the component task runner. May be
+// called from the VDA's thread.
+void C2VDAComponent::providePictureBuffers(uint32_t minNumBuffers, const media::Size& codedSize) {
+    // Always use flexible pixel 420 format YCbCr_420_888 in Android.
+    // Uses coded size for crop rect while it is not available.
+    auto format = std::make_unique<VideoFormat>(HalPixelFormat::YCbCr_420_888, minNumBuffers,
+                                                codedSize, media::Rect(codedSize));
+
+    // Set mRequestedVisibleRect to default.
+    mRequestedVisibleRect = media::Rect();
+
+    mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onOutputFormatChanged,
+                                                  ::base::Unretained(this),
+                                                  ::base::Passed(&format)));
+}
+
+// VDA client callback: buffer dismissal is intentionally ignored by this component.
+void C2VDAComponent::dismissPictureBuffer(int32_t pictureBufferId) {
+    UNUSED(pictureBufferId);
+    // no ops
+}
+
+// VDA client callback: a decoded picture is ready. Posts a visible-rect update first if the
+// crop rect changed, then posts onOutputBufferDone. May be called from the VDA's thread.
+void C2VDAComponent::pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
+                                  const media::Rect& cropRect) {
+    UNUSED(pictureBufferId);
+    UNUSED(bitstreamId);
+
+    if (mRequestedVisibleRect != cropRect) {
+        mRequestedVisibleRect = cropRect;
+        mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onVisibleRectChanged,
+                                                      ::base::Unretained(this), cropRect));
+    }
+
+    mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onOutputBufferDone,
+                                                  ::base::Unretained(this),
+                                                  pictureBufferId, bitstreamId));
+}
+
+// VDA client callback: the input bitstream buffer |bitstreamId| has been consumed.
+void C2VDAComponent::notifyEndOfBitstreamBuffer(int32_t bitstreamId) {
+    mTaskRunner->PostTask(FROM_HERE, ::base::Bind(&C2VDAComponent::onInputBufferDone,
+                                                  ::base::Unretained(this), bitstreamId));
+}
+
+// VDA client callback: the VDA's flush completed; maps to the component's drain-done handling.
+void C2VDAComponent::notifyFlushDone() {
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onDrainDone, ::base::Unretained(this)));
+}
+
+// VDA client callback: the VDA's reset completed.
+void C2VDAComponent::notifyResetDone() {
+    mTaskRunner->PostTask(FROM_HERE,
+                          ::base::Bind(&C2VDAComponent::onResetDone, ::base::Unretained(this)));
+}
+
+// VDA client callback: translates a VDA error code into a c2_status_t and reports it to the
+// listener. The switch covers every enum value (SUCCESS returns early), so |err| is always
+// assigned before use.
+void C2VDAComponent::notifyError(VideoDecodeAcceleratorAdaptor::Result error) {
+    ALOGE("Got notifyError from VDA error=%d", error);
+    c2_status_t err;
+    switch (error) {
+    case VideoDecodeAcceleratorAdaptor::Result::ILLEGAL_STATE:
+        err = C2_BAD_STATE;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::INVALID_ARGUMENT:
+    case VideoDecodeAcceleratorAdaptor::Result::UNREADABLE_INPUT:
+        err = C2_BAD_VALUE;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::PLATFORM_FAILURE:
+        err = C2_CORRUPTED;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::INSUFFICIENT_RESOURCES:
+        err = C2_NO_MEMORY;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::SUCCESS:
+        ALOGE("Shouldn't get SUCCESS err code in NotifyError(). Skip it...");
+        return;
+    }
+    reportError(err);
+}
+
+// Scans mPendingWorks and reports every finished work (per isWorkDone()) to the listener in a
+// single onWorkDone_nb() batch. Runs on the component task runner.
+void C2VDAComponent::reportFinishedWorkIfAny() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    std::list<std::unique_ptr<C2Work>> finishedWorks;
+
+    // Work should be reported as done if both input and output buffer are returned by VDA.
+
+    // Note that not every input buffer has matched output (ex. CSD header for H.264).
+    // However, the timestamp is guaranteed to be monotonic increasing for buffers in display order.
+    // That is, since VDA output is in display order, if we get a returned output with timestamp T,
+    // it implies all works with timestamp <= T are done.
+    // EOS work will not be reported here. reportEOSWork() does it.
+    auto iter = mPendingWorks.begin();
+    while (iter != mPendingWorks.end()) {
+        if (isWorkDone(iter->get())) {
+            iter->get()->result = C2_OK;
+            iter->get()->workletsProcessed = static_cast<uint32_t>(iter->get()->worklets.size());
+            finishedWorks.emplace_back(std::move(*iter));
+            // erase() invalidates |iter|; continue from the iterator it returns rather than
+            // incrementing the stale one.
+            iter = mPendingWorks.erase(iter);
+        } else {
+            ++iter;
+        }
+    }
+
+    if (!finishedWorks.empty()) {
+        mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
+    }
+}
+
+// Returns true when |work| can be reported as finished: its input buffer has been returned by
+// the VDA and its output (if any) has been produced, judged by the monotonic output timestamp.
+// EOS works and the final pending work before output-EOS are excluded (handled by
+// reportEOSWork()).
+bool C2VDAComponent::isWorkDone(const C2Work* work) const {
+    if (work->input.buffers.empty()) {
+        // This is EOS work with no input buffer and should be processed by reportEOSWork().
+        return false;
+    }
+    if (work->input.buffers.front()) {
+        // Input buffer is still owned by VDA.
+        return false;
+    }
+    if (mPendingOutputEOS && mPendingWorks.size() == 1u) {
+        // If mPendingOutputEOS is true, the last returned work should be marked EOS flag and
+        // returned by reportEOSWork() instead.
+        return false;
+    }
+    if (mLastOutputTimestamp < 0) {
+        return false;  // No output buffer is returned yet.
+    }
+    if (work->input.ordinal.timestamp > static_cast<uint64_t>(mLastOutputTimestamp)) {
+        return false;  // Output buffer is not returned by VDA yet.
+    }
+    return true;  // Output buffer is returned, or it has no related output buffer.
+}
+
+// Reports the final (EOS-flagged) work to the listener. Requires that mPendingWorks contains
+// exactly the EOS work; otherwise reports C2_CORRUPTED. Runs on the component task runner.
+void C2VDAComponent::reportEOSWork() {
+    ALOGV("reportEOSWork");
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    // In this moment all works prior to EOS work should be done and returned to listener.
+    if (mPendingWorks.size() != 1u) {  // only EOS work left
+        ALOGE("It shouldn't have remaining works in mPendingWorks except EOS work.");
+        reportError(C2_CORRUPTED);
+        return;
+    }
+
+    mPendingOutputEOS = false;
+
+    std::unique_ptr<C2Work> eosWork(std::move(mPendingWorks.front()));
+    mPendingWorks.pop_front();
+    if (!eosWork->input.buffers.empty()) {
+        eosWork->input.buffers.front().reset();
+    }
+    eosWork->result = C2_OK;
+    eosWork->workletsProcessed = static_cast<uint32_t>(eosWork->worklets.size());
+    eosWork->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
+
+    std::list<std::unique_ptr<C2Work>> finishedWorks;
+    finishedWorks.emplace_back(std::move(eosWork));
+    mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
+}
+
+// Returns all pending and previously-abandoned works to the listener with result C2_NOT_FOUND
+// (used on flush). Input buffers are reset as required by the framework contract, and any
+// pending output-EOS state is cleared. Runs on the component task runner.
+void C2VDAComponent::reportAbandonedWorks() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    std::list<std::unique_ptr<C2Work>> abandonedWorks;
+
+    while (!mPendingWorks.empty()) {
+        std::unique_ptr<C2Work> work(std::move(mPendingWorks.front()));
+        mPendingWorks.pop_front();
+
+        // TODO: correlate the definition of flushed work result to framework.
+        work->result = C2_NOT_FOUND;
+        // When the work is abandoned, buffer in input.buffers shall reset by component.
+        if (!work->input.buffers.empty()) {
+            work->input.buffers.front().reset();
+        }
+        abandonedWorks.emplace_back(std::move(work));
+    }
+
+    for (auto& work : mAbandonedWorks) {
+        // TODO: correlate the definition of flushed work result to framework.
+        work->result = C2_NOT_FOUND;
+        // When the work is abandoned, buffer in input.buffers shall reset by component.
+        if (!work->input.buffers.empty()) {
+            work->input.buffers.front().reset();
+        }
+        abandonedWorks.emplace_back(std::move(work));
+    }
+    mAbandonedWorks.clear();
+
+    // Pending EOS work will be abandoned here due to component flush if any.
+    mPendingOutputEOS = false;
+
+    if (!abandonedWorks.empty()) {
+        mListener->onWorkDone_nb(shared_from_this(), std::move(abandonedWorks));
+    }
+}
+
+// Forwards a fatal component error to the listener.
+void C2VDAComponent::reportError(c2_status_t error) {
+    mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
+}
+
+// Starts the dedicated dequeue thread and posts dequeueThreadLoop on it. Resets the stop flag
+// and the in-client buffer counter first. Returns false if the thread fails to start.
+bool C2VDAComponent::startDequeueThread(const media::Size& size, uint32_t pixelFormat,
+                                        std::shared_ptr<C2BlockPool> blockPool) {
+    CHECK(!mDequeueThread.IsRunning());
+    if (!mDequeueThread.Start()) {
+        ALOGE("failed to start dequeue thread!!");
+        return false;
+    }
+    mDequeueLoopStop.store(false);
+    mBuffersInClient.store(0u);
+    mDequeueThread.task_runner()->PostTask(
+            FROM_HERE, ::base::Bind(&C2VDAComponent::dequeueThreadLoop, ::base::Unretained(this),
+                                    size, pixelFormat, std::move(blockPool)));
+    return true;
+}
+
+// Signals the dequeue loop to exit and joins the dequeue thread if it is running.
+void C2VDAComponent::stopDequeueThread() {
+    if (mDequeueThread.IsRunning()) {
+        mDequeueLoopStop.store(true);
+        mDequeueThread.Stop();
+    }
+}
+
+// Body of the dequeue thread: repeatedly fetches graphic blocks returned by the client from
+// |blockPool| and posts them back to the component task runner via onOutputBufferReturned.
+// Sleeps between retries while no buffers are held by the client; exits on mDequeueLoopStop or
+// on any non-timeout error.
+void C2VDAComponent::dequeueThreadLoop(const media::Size& size, uint32_t pixelFormat,
+                                       std::shared_ptr<C2BlockPool> blockPool) {
+    ALOGV("dequeueThreadLoop starts");
+    DCHECK(mDequeueThread.task_runner()->BelongsToCurrentThread());
+
+    while (!mDequeueLoopStop.load()) {
+        if (mBuffersInClient.load() == 0) {
+            ::usleep(kDequeueRetryDelayUs);  // wait for retry
+            continue;
+        }
+        std::shared_ptr<C2GraphicBlock> block;
+        C2MemoryUsage usage = {
+                mSecureMode ? C2MemoryUsage::READ_PROTECTED : C2MemoryUsage::CPU_READ, 0};
+        auto err = blockPool->fetchGraphicBlock(size.width(), size.height(), pixelFormat, usage,
+                                                &block);
+        if (err == C2_TIMED_OUT) {
+            continue;  // wait for retry
+        }
+        if (err == C2_OK) {
+            uint32_t poolId;
+            // Pool id extraction depends on the allocator backing the pool.
+            if (blockPool->getAllocatorId() == C2PlatformAllocatorStore::BUFFERQUEUE) {
+                err = C2VdaBqBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
+            } else {  // bufferpool
+                err = C2VdaPooledBlockPool::getPoolIdFromGraphicBlock(block, &poolId);
+            }
+
+            if (err != C2_OK) {
+                ALOGE("dequeueThreadLoop got error on getPoolIdFromGraphicBlock: %d", err);
+                break;
+            }
+            mTaskRunner->PostTask(FROM_HERE,
+                                  ::base::Bind(&C2VDAComponent::onOutputBufferReturned,
+                                               ::base::Unretained(this), std::move(block), poolId));
+            mBuffersInClient--;
+        } else {
+            ALOGE("dequeueThreadLoop got error: %d", err);
+            break;
+        }
+    }
+    ALOGV("dequeueThreadLoop terminates");
+}
+
+// Factory producing C2VDAComponent instances (and bare interfaces) for one decoder name.
+// The provided deleters are ignored; ownership is managed by the returned shared_ptrs.
+class C2VDAComponentFactory : public C2ComponentFactory {
+public:
+    C2VDAComponentFactory(C2String decoderName)
+          : mDecoderName(decoderName),
+            mReflector(std::static_pointer_cast<C2ReflectorHelper>(
+                    GetCodec2VDAComponentStore()->getParamReflector())){};
+
+    c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+                                ComponentDeleter deleter) override {
+        UNUSED(deleter);
+        *component = std::shared_ptr<C2Component>(new C2VDAComponent(mDecoderName, id, mReflector));
+        return C2_OK;
+    }
+    c2_status_t createInterface(c2_node_id_t id,
+                                std::shared_ptr<C2ComponentInterface>* const interface,
+                                InterfaceDeleter deleter) override {
+        UNUSED(deleter);
+        *interface =
+                std::shared_ptr<C2ComponentInterface>(new SimpleInterface<C2VDAComponent::IntfImpl>(
+                        mDecoderName.c_str(), id,
+                        std::make_shared<C2VDAComponent::IntfImpl>(mDecoderName, mReflector)));
+        return C2_OK;
+    }
+    ~C2VDAComponentFactory() override = default;
+
+private:
+    const C2String mDecoderName;
+    std::shared_ptr<C2ReflectorHelper> mReflector;
+};
+}  // namespace android
+
+// Shared-library entry point: creates the H.264 decoder factory (secure or non-secure).
+// Caller owns the returned factory and frees it with DestroyC2VDAH264Factory().
+extern "C" ::C2ComponentFactory* CreateC2VDAH264Factory(bool secureMode) {
+    ALOGV("in %s (secureMode=%d)", __func__, secureMode);
+    return secureMode ? new ::android::C2VDAComponentFactory(android::kH264SecureDecoderName)
+                      : new ::android::C2VDAComponentFactory(android::kH264DecoderName);
+}
+
+// Shared-library entry point: destroys a factory created by CreateC2VDAH264Factory().
+extern "C" void DestroyC2VDAH264Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;
+}
+
+// Shared-library entry point: creates the VP8 decoder factory (secure or non-secure).
+extern "C" ::C2ComponentFactory* CreateC2VDAVP8Factory(bool secureMode) {
+    ALOGV("in %s (secureMode=%d)", __func__, secureMode);
+    return secureMode ? new ::android::C2VDAComponentFactory(android::kVP8SecureDecoderName)
+                      : new ::android::C2VDAComponentFactory(android::kVP8DecoderName);
+}
+
+// Shared-library entry point: destroys a factory created by CreateC2VDAVP8Factory().
+extern "C" void DestroyC2VDAVP8Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;
+}
+
+// Shared-library entry point: creates the VP9 decoder factory (secure or non-secure).
+extern "C" ::C2ComponentFactory* CreateC2VDAVP9Factory(bool secureMode) {
+    ALOGV("in %s (secureMode=%d)", __func__, secureMode);
+    return secureMode ? new ::android::C2VDAComponentFactory(android::kVP9SecureDecoderName)
+                      : new ::android::C2VDAComponentFactory(android::kVP9DecoderName);
+}
+
+// Shared-library entry point: destroys a factory created by CreateC2VDAVP9Factory().
+extern "C" void DestroyC2VDAVP9Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;
+}
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000..cbdf0f1
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,5 @@
+[Builtin Hooks]
+clang_format = true
+
+[Builtin Hooks Options]
+clang_format = --commit ${PREUPLOAD_COMMIT} -- style file --extensions h,cpp
diff --git a/cmds/Android.mk b/cmds/Android.mk
new file mode 100644
index 0000000..6cc84ae
--- /dev/null
+++ b/cmds/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        codec2.cpp \
+
+LOCAL_C_INCLUDES += \
+        $(TOP)/external/libchrome \
+        $(TOP)/external/gtest/include \
+        $(TOP)/external/v4l2_codec2/include \
+        $(TOP)/external/v4l2_codec2/vda \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/frameworks/native/include \
+        $(TOP)/hardware/google/av/codec2/include \
+        $(TOP)/hardware/google/av/codec2/vndk/include \
+	$(TOP)/hardware/google/av/media/codecs/base/include \
+
+LOCAL_MODULE := v4l2_codec2_testapp
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libbinder \
+                          libchrome \
+                          libcutils \
+                          libgui \
+                          liblog \
+                          libmedia \
+                          libmediaextractor \
+                          libstagefright \
+                          libstagefright_codec2 \
+                          libstagefright_foundation \
+                          libstagefright_codec2_vndk \
+                          libui \
+                          libutils \
+                          libv4l2_codec2 \
+                          libv4l2_codec2_vda \
+                          android.hardware.media.bufferpool@1.0 \
+
+# -Wno-unused-parameter is needed for libchrome/base codes
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter
+LOCAL_CLANG := true
+
+include $(BUILD_EXECUTABLE)
diff --git a/cmds/codec2.cpp b/cmds/codec2.cpp
new file mode 100644
index 0000000..9458bd3
--- /dev/null
+++ b/cmds/codec2.cpp
@@ -0,0 +1,466 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "codec2"
+
+#include <C2VDAComponent.h>
+
+#include <C2Buffer.h>
+#include <C2BufferPriv.h>
+#include <C2Component.h>
+#include <C2PlatformSupport.h>
+#include <C2Work.h>
+#include <SimpleC2Interface.h>
+
+#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
+#include <gui/GLConsumer.h>
+#include <gui/IProducerListener.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/DataSource.h>
+#include <media/ICrypto.h>
+#include <media/IMediaHTTPService.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractorFactory.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <thread>
+
+using namespace android;
+using namespace std::chrono_literals;
+
+namespace {
+
+// Component names of the decoders this test app can instantiate.
+const std::string kH264DecoderName = "c2.vda.avc.decoder";
+const std::string kVP8DecoderName = "c2.vda.vp8.decoder";
+const std::string kVP9DecoderName = "c2.vda.vp9.decoder";
+
+// Hard-coded dimensions of the test clip; switch the commented pair to match the input file.
+const int kWidth = 416;
+const int kHeight = 240;  // BigBuckBunny.mp4
+//const int kWidth = 560;
+//const int kHeight = 320;  // small.mp4
+
+// Thin C2Buffer wrapper exposing a whole linear block (offset..size) as a single buffer.
+class C2VDALinearBuffer : public C2Buffer {
+public:
+    explicit C2VDALinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
+          : C2Buffer({block->share(block->offset(), block->size(), ::C2Fence())}) {}
+};
+
+class Listener;
+
+// Minimal test player: feeds a media source into the codec2 component and renders decoded
+// frames to a SurfaceFlinger surface. Component callbacks arrive via the Listener below.
+class SimplePlayer {
+public:
+    SimplePlayer();
+    ~SimplePlayer();
+
+    // Callbacks forwarded from Listener (component thread).
+    void onWorkDone(std::weak_ptr<C2Component> component,
+                    std::list<std::unique_ptr<C2Work>> workItems);
+    void onTripped(std::weak_ptr<C2Component> component,
+                   std::vector<std::shared_ptr<C2SettingResult>> settingResult);
+    void onError(std::weak_ptr<C2Component> component, uint32_t errorCode);
+
+    // Decodes and displays |source| until EOS; blocks until playback finishes.
+    status_t play(const sp<IMediaSource>& source);
+
+private:
+    typedef std::unique_lock<std::mutex> ULock;
+
+    enum {
+        kInputBufferCount = 8,
+        kDefaultInputBufferSize = 1024 * 1024,
+    };
+
+    std::shared_ptr<Listener> mListener;
+
+    sp<IProducerListener> mProducerListener;
+
+    // Allocators
+    std::shared_ptr<C2Allocator> mLinearAlloc;
+    std::shared_ptr<C2BlockPool> mLinearBlockPool;
+
+    // Free input works waiting to be queued (guarded by mQueueLock).
+    std::mutex mQueueLock;
+    std::condition_variable mQueueCondition;
+    std::list<std::unique_ptr<C2Work>> mWorkQueue;
+
+    // Works returned by the component, pending display (guarded by mProcessedLock).
+    std::mutex mProcessedLock;
+    std::condition_variable mProcessedCondition;
+    std::list<std::unique_ptr<C2Work>> mProcessedWork;
+
+    sp<Surface> mSurface;
+    sp<SurfaceComposerClient> mComposerClient;
+    sp<SurfaceControl> mControl;
+};
+
+// Adapter forwarding C2Component::Listener callbacks to the owning SimplePlayer. Holds a raw
+// back-pointer; the SimplePlayer must outlive the component it registers this listener on.
+class Listener : public C2Component::Listener {
+public:
+    explicit Listener(SimplePlayer* thiz) : mThis(thiz) {}
+    virtual ~Listener() = default;
+
+    virtual void onWorkDone_nb(std::weak_ptr<C2Component> component,
+                               std::list<std::unique_ptr<C2Work>> workItems) override {
+        mThis->onWorkDone(component, std::move(workItems));
+    }
+
+    virtual void onTripped_nb(
+            std::weak_ptr<C2Component> component,
+            std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
+        mThis->onTripped(component, settingResult);
+    }
+
+    virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
+        mThis->onError(component, errorCode);
+    }
+
+private:
+    SimplePlayer* const mThis;
+};
+
+// Sets up the linear block pool for input buffers and a top-of-stack YV12 display surface
+// connected for CPU rendering. CHECKs on any setup failure (test app: fail fast).
+SimplePlayer::SimplePlayer()
+      : mListener(new Listener(this)),
+        mProducerListener(new DummyProducerListener),
+        mComposerClient(new SurfaceComposerClient) {
+    CHECK_EQ(mComposerClient->initCheck(), OK);
+
+    std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
+    CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &mLinearAlloc), C2_OK);
+
+    mLinearBlockPool = std::make_shared<C2BasicLinearBlockPool>(mLinearAlloc);
+
+    mControl = mComposerClient->createSurface(String8("A Surface"), kWidth, kHeight,
+                                              HAL_PIXEL_FORMAT_YV12);
+
+    CHECK(mControl != nullptr);
+    CHECK(mControl->isValid());
+
+    SurfaceComposerClient::Transaction{}.setLayer(mControl, INT_MAX).show(mControl).apply();
+
+    mSurface = mControl->getSurface();
+    CHECK(mSurface != nullptr);
+    mSurface->connect(NATIVE_WINDOW_API_CPU, mProducerListener);
+}
+
+// Tears down the composer client (and with it the display surface).
+SimplePlayer::~SimplePlayer() {
+    mComposerClient->dispose();
+}
+
+// Component callback: moves finished works onto mProcessedWork and wakes the display thread.
+void SimplePlayer::onWorkDone(std::weak_ptr<C2Component> component,
+                              std::list<std::unique_ptr<C2Work>> workItems) {
+    (void)component;
+    ULock l(mProcessedLock);
+    for (auto& item : workItems) {
+        mProcessedWork.emplace_back(std::move(item));
+    }
+    mProcessedCondition.notify_all();
+}
+
+// Component callback: config trip events are currently ignored by this test app.
+void SimplePlayer::onTripped(std::weak_ptr<C2Component> component,
+                             std::vector<std::shared_ptr<C2SettingResult>> settingResult) {
+    (void)component;
+    (void)settingResult;
+    // TODO
+}
+
+// Component callback: errors are currently ignored by this test app.
+void SimplePlayer::onError(std::weak_ptr<C2Component> component, uint32_t errorCode) {
+    (void)component;
+    (void)errorCode;
+    // TODO
+}
+
+status_t SimplePlayer::play(const sp<IMediaSource>& source) {
+    std::deque<sp<ABuffer>> csds;
+    if (kComponentName == kH264DecoderName) {
+        sp<AMessage> format;
+        (void)convertMetaDataToMessage(source->getFormat(), &format);
+
+        csds.resize(2);
+        format->findBuffer("csd-0", &csds[0]);
+        format->findBuffer("csd-1", &csds[1]);
+    }
+
+    status_t err = source->start();
+
+    if (err != OK) {
+        ALOGE("source returned error %d (0x%08x)", err, err);
+        fprintf(stderr, "source returned error %d (0x%08x)\n", err, err);
+        return err;
+    }
+
+    std::shared_ptr<C2Component> component(std::make_shared<C2VDAComponent>(
+            kComponentName, 0, std::make_shared<C2ReflectorHelper>()));
+
+    component->setListener_vb(mListener, C2_DONT_BLOCK);
+    std::unique_ptr<C2PortBlockPoolsTuning::output> pools =
+            C2PortBlockPoolsTuning::output::AllocUnique(
+                    {static_cast<uint64_t>(C2BlockPool::BASIC_GRAPHIC)});
+    std::vector<std::unique_ptr<C2SettingResult>> result;
+    (void)component->intf()->config_vb({pools.get()}, C2_DONT_BLOCK, &result);
+    component->start();
+
+    mProcessedWork.clear();
+    for (int i = 0; i < kInputBufferCount; ++i) {
+        mWorkQueue.emplace_back(new C2Work);
+    }
+
+    std::atomic_bool running(true);
+    std::thread surfaceThread([this, &running]() {
+        const sp<IGraphicBufferProducer>& igbp = mSurface->getIGraphicBufferProducer();
+        std::vector<std::shared_ptr<C2Buffer>> pendingDisplayBuffers;
+        pendingDisplayBuffers.resize(BufferQueue::NUM_BUFFER_SLOTS);
+        while (running) {
+            std::unique_ptr<C2Work> work;
+            {
+                ULock l(mProcessedLock);
+                if (mProcessedWork.empty()) {
+                    mProcessedCondition.wait_for(l, 100ms);
+                    if (mProcessedWork.empty()) {
+                        continue;
+                    }
+                }
+                work = std::move(mProcessedWork.front());
+                mProcessedWork.pop_front();
+            }
+
+            CHECK_EQ(work->worklets.size(), 1u);
+            if (work->worklets.front()->output.buffers.size() == 1u) {
+                int slot;
+                sp<Fence> fence;
+                std::shared_ptr<C2Buffer> output = work->worklets.front()->output.buffers[0];
+                C2ConstGraphicBlock graphic_block = output->data().graphicBlocks().front();
+
+                sp<GraphicBuffer> buffer(new GraphicBuffer(
+                        graphic_block.handle(), GraphicBuffer::CLONE_HANDLE, graphic_block.width(),
+                        graphic_block.height(), HAL_PIXEL_FORMAT_YCbCr_420_888, 1 /* layerCount */,
+                        GRALLOC_USAGE_SW_READ_OFTEN, graphic_block.width()));
+
+                CHECK_EQ(igbp->attachBuffer(&slot, buffer), OK);
+                ALOGV("attachBuffer slot=%d ts=%lld", slot,
+                      (work->worklets.front()->output.ordinal.timestamp * 1000ll).peekll());
+
+                IGraphicBufferProducer::QueueBufferInput qbi(
+                        (work->worklets.front()->output.ordinal.timestamp * 1000ll).peekll(), false,
+                        HAL_DATASPACE_UNKNOWN, Rect(graphic_block.width(), graphic_block.height()),
+                        NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW, 0, Fence::NO_FENCE, 0);
+                IGraphicBufferProducer::QueueBufferOutput qbo;
+                CHECK_EQ(igbp->queueBuffer(slot, qbi, &qbo), OK);
+
+                // If the slot is reused then we can make sure the previous graphic buffer is
+                // displayed (consumed), so we can return the graphic buffer.
+                pendingDisplayBuffers[slot].swap(output);
+            }
+
+            bool eos = work->worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM;
+            // The input buffer should have been reset on the component side.
+            CHECK_EQ(work->input.buffers.size(), 1u);
+            CHECK(work->input.buffers.front() == nullptr);
+            work->worklets.clear();
+            work->workletsProcessed = 0;
+
+            if (eos) {
+                running.store(false);  // stop the thread
+            }
+
+            ULock l(mQueueLock);
+            mWorkQueue.emplace_back(std::move(work));
+            mQueueCondition.notify_all();
+        }
+    });
+
+    long numFrames = 0;
+
+    for (;;) {
+        size_t size = 0u;
+        void* data = nullptr;
+        int64_t timestamp = 0u;
+        MediaBufferBase* buffer = nullptr;
+        sp<ABuffer> csd;
+        if (!csds.empty()) {
+            csd = std::move(csds.front());
+            csds.pop_front();
+            size = csd->size();
+            data = csd->data();
+        } else {
+            status_t err = source->read(&buffer);
+            if (err != OK) {
+                CHECK(buffer == nullptr);
+
+                if (err == INFO_FORMAT_CHANGED) {
+                    continue;
+                }
+
+                break;
+            }
+            MetaDataBase& meta = buffer->meta_data();
+            CHECK(meta.findInt64(kKeyTime, &timestamp));
+
+            size = buffer->size();
+            data = buffer->data();
+        }
+
+        // Prepare C2Work
+
+        std::unique_ptr<C2Work> work;
+        while (!work) {
+            ULock l(mQueueLock);
+            if (!mWorkQueue.empty()) {
+                work = std::move(mWorkQueue.front());
+                mWorkQueue.pop_front();
+            } else {
+                mQueueCondition.wait_for(l, 100ms);
+            }
+        }
+        work->input.flags = static_cast<C2FrameData::flags_t>(0);
+        work->input.ordinal.timestamp = timestamp;
+        work->input.ordinal.frameIndex = numFrames;
+
+        // Allocate input buffer.
+        std::shared_ptr<C2LinearBlock> block;
+        mLinearBlockPool->fetchLinearBlock(
+                size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+        C2WriteView view = block->map().get();
+        if (view.error() != C2_OK) {
+            fprintf(stderr, "C2LinearBlock::map() failed : %d\n", view.error());
+            break;
+        }
+        memcpy(view.base(), data, size);
+
+        work->input.buffers.clear();
+        work->input.buffers.emplace_back(new C2VDALinearBuffer(std::move(block)));
+        work->worklets.clear();
+        work->worklets.emplace_back(new C2Worklet);
+
+        std::list<std::unique_ptr<C2Work>> items;
+        items.push_back(std::move(work));
+
+        // DO THE DECODING
+        component->queue_nb(&items);
+
+        if (buffer) {
+            buffer->release();
+        }
+        ++numFrames;
+    }
+    component->drain_nb(C2Component::DRAIN_COMPONENT_WITH_EOS);
+
+    surfaceThread.join();
+
+    source->stop();
+    component->stop();
+    printf("finished...\n");
+    return OK;
+}
+
+}  // namespace
+
+static bool getMediaSourceFromFile(const char* filename, sp<IMediaSource>* source) {
+    source->clear();
+
+    sp<DataSource> dataSource =
+            DataSourceFactory::CreateFromURI(nullptr /* httpService */, filename);
+
+    if (dataSource == nullptr) {
+        fprintf(stderr, "Unable to create data source.\n");
+        return false;
+    }
+
+    sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
+    if (extractor == nullptr) {
+        fprintf(stderr, "could not create extractor.\n");
+        return false;
+    }
+
+    std::string expectedMime;
+    if (kComponentName == kH264DecoderName) {
+        expectedMime = "video/avc";
+    } else if (kComponentName == kVP8DecoderName) {
+        expectedMime = "video/x-vnd.on2.vp8";
+    } else if (kComponentName == kVP9DecoderName) {
+        expectedMime = "video/x-vnd.on2.vp9";
+    } else {
+        fprintf(stderr, "unrecognized component name: %s\n", kComponentName.c_str());
+        return false;
+    }
+
+    for (size_t i = 0, numTracks = extractor->countTracks(); i < numTracks; ++i) {
+        sp<MetaData> meta =
+                extractor->getTrackMetaData(i, MediaExtractor::kIncludeExtensiveMetaData);
+        if (meta == nullptr) {
+            continue;
+        }
+        const char* mime;
+        meta->findCString(kKeyMIMEType, &mime);
+        if (!strcasecmp(mime, expectedMime.c_str())) {
+            *source = extractor->getTrack(i);
+            if (*source == nullptr) {
+                fprintf(stderr, "It's nullptr track for track %zu.\n", i);
+                return false;
+            }
+            return true;
+        }
+    }
+    fprintf(stderr, "No track found.\n");
+    return false;
+}
+
+static void usage(const char* me) {
+    fprintf(stderr, "usage: %s [options] [input_filename]...\n", me);
+    fprintf(stderr, "       -h(elp)\n");
+}
+
+int main(int argc, char** argv) {
+    android::ProcessState::self()->startThreadPool();
+
+    int res;
+    while ((res = getopt(argc, argv, "h")) >= 0) {
+        switch (res) {
+        case 'h':
+        default: {
+            usage(argv[0]);
+            exit(1);
+            break;
+        }
+        }
+    }
+
+    argc -= optind;
+    argv += optind;
+
+    if (argc < 1) {
+        fprintf(stderr, "No input file specified\n");
+        return 1;
+    }
+
+    SimplePlayer player;
+
+    for (int k = 0; k < argc; ++k) {
+        sp<IMediaSource> mediaSource;
+        if (!getMediaSourceFromFile(argv[k], &mediaSource)) {
+            fprintf(stderr, "Unable to get media source from file: %s\n", argv[k]);
+            return -1;
+        }
+        if (player.play(mediaSource) != OK) {
+            fprintf(stderr, "Player failed to play media source: %s\n", argv[k]);
+            return -1;
+        }
+    }
+
+    return 0;
+}
diff --git a/include/C2VDAAdaptor.h b/include/C2VDAAdaptor.h
new file mode 100644
index 0000000..3ba85f2
--- /dev/null
+++ b/include/C2VDAAdaptor.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_ADAPTOR_H
+#define ANDROID_C2_VDA_ADAPTOR_H
+
+#include <VideoDecodeAcceleratorAdaptor.h>
+
+#include <video_decode_accelerator.h>
+
+#include <base/macros.h>
+
+namespace android {
+
+// This class translates adaptor API to media::VideoDecodeAccelerator API to make communication
+// between Codec 2.0 VDA component and VDA.
+class C2VDAAdaptor : public VideoDecodeAcceleratorAdaptor,
+                     public media::VideoDecodeAccelerator::Client {
+public:
+    C2VDAAdaptor();
+    ~C2VDAAdaptor() override;
+
+    // Implementation of the VideoDecodeAcceleratorAdaptor interface.
+    Result initialize(media::VideoCodecProfile profile, bool secureMode,
+                      VideoDecodeAcceleratorAdaptor::Client* client) override;
+    void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t bytesUsed) override;
+    void assignPictureBuffers(uint32_t numOutputBuffers) override;
+    void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format, int handleFd,
+                                const std::vector<VideoFramePlane>& planes) override;
+    void reusePictureBuffer(int32_t pictureBufferId) override;
+    void flush() override;
+    void reset() override;
+    void destroy() override;
+
+    static media::VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
+            uint32_t inputFormatFourcc);
+
+    // Implementation of the media::VideoDecodeAccelerator::Client interface.
+    void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+                               media::VideoPixelFormat output_format,
+                               const media::Size& dimensions) override;
+    void DismissPictureBuffer(int32_t picture_buffer_id) override;
+    void PictureReady(const media::Picture& picture) override;
+    void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
+    void NotifyFlushDone() override;
+    void NotifyResetDone() override;
+    void NotifyError(media::VideoDecodeAccelerator::Error error) override;
+
+private:
+    std::unique_ptr<media::VideoDecodeAccelerator> mVDA;
+    VideoDecodeAcceleratorAdaptor::Client* mClient;
+
+    // The number of allocated output buffers. This is obtained from assignPictureBuffers call from
+    // client, and used to check validity of picture id in importBufferForPicture and
+    // reusePictureBuffer.
+    uint32_t mNumOutputBuffers;
+    // The picture size for creating picture buffers. This is obtained while VDA calls
+    // ProvidePictureBuffers.
+    media::Size mPictureSize;
+
+    DISALLOW_COPY_AND_ASSIGN(C2VDAAdaptor);
+};
+
+}  // namespace android
+
+#endif  // ANDROID_C2_VDA_ADAPTOR_H
diff --git a/include/C2VDAAdaptorProxy.h b/include/C2VDAAdaptorProxy.h
new file mode 100644
index 0000000..eab6882
--- /dev/null
+++ b/include/C2VDAAdaptorProxy.h
@@ -0,0 +1,102 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_ADAPTOR_PROXY_H
+#define ANDROID_C2_VDA_ADAPTOR_PROXY_H
+
+#include <memory>
+
+#include <VideoDecodeAcceleratorAdaptor.h>
+
+#include <video_decode_accelerator.h>
+
+#include <arc/Future.h>
+#include <mojo/public/cpp/bindings/binding.h>
+
+#include <components/arc/common/video.mojom.h>
+#include <components/arc/common/video_decode_accelerator.mojom.h>
+
+namespace arc {
+class MojoProcessSupport;
+}  // namespace arc
+
+namespace android {
+namespace arc {
+class C2VDAAdaptorProxy : public VideoDecodeAcceleratorAdaptor,
+                          public ::arc::mojom::VideoDecodeClient {
+public:
+    C2VDAAdaptorProxy();
+    explicit C2VDAAdaptorProxy(::arc::MojoProcessSupport* MojomProcessSupport);
+    ~C2VDAAdaptorProxy() override;
+
+    // Establishes ipc channel for video acceleration. Returns true if channel
+    // connected successfully.
+    // This must be called before all other methods.
+    bool establishChannel();
+
+    // Implementation of the VideoDecodeAcceleratorAdaptor interface.
+    Result initialize(media::VideoCodecProfile profile, bool secureMode,
+                      VideoDecodeAcceleratorAdaptor::Client* client) override;
+    void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t size) override;
+    void assignPictureBuffers(uint32_t numOutputBuffers) override;
+    void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format, int handleFd,
+                                const std::vector<VideoFramePlane>& planes) override;
+    void reusePictureBuffer(int32_t pictureBufferId) override;
+    void flush() override;
+    void reset() override;
+    void destroy() override;
+
+    // ::arc::mojom::VideoDecodeClient implementations.
+    void ProvidePictureBuffers(::arc::mojom::PictureBufferFormatPtr format) override;
+    void PictureReady(::arc::mojom::PicturePtr picture) override;
+    void NotifyEndOfBitstreamBuffer(int32_t bitstream_id) override;
+    void NotifyError(::arc::mojom::VideoDecodeAccelerator::Result error) override;
+
+    // The following functions are called as callbacks.
+    void NotifyResetDone(::arc::mojom::VideoDecodeAccelerator::Result result);
+    void NotifyFlushDone(::arc::mojom::VideoDecodeAccelerator::Result result);
+
+    static media::VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
+            uint32_t inputFormatFourcc);
+
+private:
+    void onConnectionError(const std::string& pipeName);
+    void establishChannelOnMojoThread(std::shared_ptr<::arc::Future<bool>> future);
+    void onVersionReady(std::shared_ptr<::arc::Future<bool>> future, uint32_t version);
+
+    // Closes ipc channel for video acceleration.
+    // This must be called before deleting this object.
+    void closeChannelOnMojoThread();
+
+    // mojo thread corresponding part of C2VDAAdaptorProxy implementations.
+    void initializeOnMojoThread(const media::VideoCodecProfile profile, const bool mSecureMode,
+                                const ::arc::mojom::VideoDecodeAccelerator::InitializeCallback& cb);
+    void decodeOnMojoThread(int32_t bitstreamId, int ashmemFd, off_t offset, uint32_t bytesUsed);
+    void assignPictureBuffersOnMojoThread(uint32_t numOutputBuffers);
+
+    void importBufferForPictureOnMojoThread(int32_t pictureBufferId, HalPixelFormat format,
+                                            int handleFd,
+                                            const std::vector<VideoFramePlane>& planes);
+    void reusePictureBufferOnMojoThread(int32_t pictureBufferId);
+    void flushOnMojoThread();
+    void resetOnMojoThread();
+
+    VideoDecodeAcceleratorAdaptor::Client* mClient;
+
+    // Task runner for mojom functions.
+    const scoped_refptr<base::SingleThreadTaskRunner> mMojoTaskRunner;
+
+    // |mVDAPtr| and |mBinding| should only be called on |mMojoTaskRunner| after bound.
+    ::arc::mojom::VideoDecodeAcceleratorPtr mVDAPtr;
+    mojo::Binding<::arc::mojom::VideoDecodeClient> mBinding;
+
+    // Used to cancel the wait on arc::Future.
+    sp<::arc::CancellationRelay> mRelay;
+
+    DISALLOW_COPY_AND_ASSIGN(C2VDAAdaptorProxy);
+};
+}  // namespace arc
+}  // namespace android
+
+#endif  // ANDROID_C2_VDA_ADAPTOR_PROXY_H
diff --git a/include/C2VDACommon.h b/include/C2VDACommon.h
new file mode 100644
index 0000000..510a96c
--- /dev/null
+++ b/include/C2VDACommon.h
@@ -0,0 +1,19 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_COMMON_H
+#define ANDROID_C2_VDA_COMMON_H
+
+#include <inttypes.h>
+
+namespace android {
+enum class HalPixelFormat : uint32_t {
+    UNKNOWN = 0x0,
+    // The pixel formats defined in Android and used by C2VDAComponent.
+    YCbCr_420_888 = 0x23,
+    YV12 = 0x32315659,
+    NV12 = 0x3231564e,
+};
+} // namespace android
+#endif  // ANDROID_C2_VDA_COMMON_H
diff --git a/include/C2VDAComponent.h b/include/C2VDAComponent.h
new file mode 100644
index 0000000..254b9b0
--- /dev/null
+++ b/include/C2VDAComponent.h
@@ -0,0 +1,320 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_COMPONENT_H
+#define ANDROID_C2_VDA_COMPONENT_H
+
+#include <VideoDecodeAcceleratorAdaptor.h>
+
+#include <rect.h>
+#include <size.h>
+#include <video_codecs.h>
+#include <video_decode_accelerator.h>
+
+#include <C2Component.h>
+#include <C2Config.h>
+#include <C2Enum.h>
+#include <C2Param.h>
+#include <C2ParamDef.h>
+#include <SimpleC2Interface.h>
+#include <util/C2InterfaceHelper.h>
+
+#include <base/macros.h>
+#include <base/memory/ref_counted.h>
+#include <base/single_thread_task_runner.h>
+#include <base/synchronization/waitable_event.h>
+#include <base/threading/thread.h>
+
+#include <atomic>
+#include <deque>
+#include <map>
+#include <mutex>
+#include <queue>
+#include <unordered_map>
+
+namespace android {
+
+class C2VDAComponent : public C2Component,
+                       public VideoDecodeAcceleratorAdaptor::Client,
+                       public std::enable_shared_from_this<C2VDAComponent> {
+public:
+    class IntfImpl : public C2InterfaceHelper {
+    public:
+        IntfImpl(C2String name, const std::shared_ptr<C2ReflectorHelper>& helper);
+
+        // interfaces for C2VDAComponent
+        c2_status_t status() const { return mInitStatus; }
+        media::VideoCodecProfile getCodecProfile() const { return mCodecProfile; }
+        C2BlockPool::local_id_t getBlockPoolId() const { return mOutputBlockPoolIds->m.values[0]; }
+
+    private:
+        // The input format kind; should be C2FormatCompressed.
+        std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+        // The output format kind; should be C2FormatVideo.
+        std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+        // The MIME type of input port.
+        std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+        // The MIME type of output port; should be MEDIA_MIMETYPE_VIDEO_RAW.
+        std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+        // Decoded video size for output.
+        std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
+        // The suggested usage of input buffer allocator ID.
+        std::shared_ptr<C2PortAllocatorsTuning::input> mInputAllocatorIds;
+        // The suggested usage of output buffer allocator ID.
+        std::shared_ptr<C2PortAllocatorsTuning::output> mOutputAllocatorIds;
+        // The suggested usage of output buffer allocator ID with surface.
+        std::shared_ptr<C2PortSurfaceAllocatorTuning::output> mOutputSurfaceAllocatorId;
+        // Component uses this ID to fetch the corresponding output block pool from the platform.
+        std::shared_ptr<C2PortBlockPoolsTuning::output> mOutputBlockPoolIds;
+
+        c2_status_t mInitStatus;
+        media::VideoCodecProfile mCodecProfile;
+    };
+
+    C2VDAComponent(C2String name, c2_node_id_t id,
+                   const std::shared_ptr<C2ReflectorHelper>& helper);
+    virtual ~C2VDAComponent() override;
+
+    // Implementation of C2Component interface
+    virtual c2_status_t setListener_vb(const std::shared_ptr<Listener>& listener,
+                                       c2_blocking_t mayBlock) override;
+    virtual c2_status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) override;
+    virtual c2_status_t announce_nb(const std::vector<C2WorkOutline>& items) override;
+    virtual c2_status_t flush_sm(flush_mode_t mode,
+                                 std::list<std::unique_ptr<C2Work>>* const flushedWork) override;
+    virtual c2_status_t drain_nb(drain_mode_t mode) override;
+    virtual c2_status_t start() override;
+    virtual c2_status_t stop() override;
+    virtual c2_status_t reset() override;
+    virtual c2_status_t release() override;
+    virtual std::shared_ptr<C2ComponentInterface> intf() override;
+
+    // Implementation of VideDecodeAcceleratorAdaptor::Client interface
+    virtual void providePictureBuffers(uint32_t minNumBuffers,
+                                       const media::Size& codedSize) override;
+    virtual void dismissPictureBuffer(int32_t pictureBufferId) override;
+    virtual void pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
+                              const media::Rect& cropRect) override;
+    virtual void notifyEndOfBitstreamBuffer(int32_t bitstreamId) override;
+    virtual void notifyFlushDone() override;
+    virtual void notifyResetDone() override;
+    virtual void notifyError(VideoDecodeAcceleratorAdaptor::Result error) override;
+
+private:
+    // The state machine enumeration on parent thread.
+    enum class State : int32_t {
+        // The initial state of component. State will change to LOADED after the component is
+        // created.
+        UNLOADED,
+        // The component is stopped. State will change to RUNNING when start() is called by
+        // framework.
+        LOADED,
+        // The component is running. State will change to LOADED when stop() or reset() is called by
+        // framework.
+        RUNNING,
+        // The component is in error state.
+        ERROR,
+    };
+    // The state machine enumeration on component thread.
+    enum class ComponentState : int32_t {
+        // This is the initial state until VDA initialization returns successfully.
+        UNINITIALIZED,
+        // VDA initialization returns successfully. VDA is ready to make progress.
+        STARTED,
+        // onDrain() is called. VDA is draining. Component will hold off queueing works until
+        // onDrainDone().
+        DRAINING,
+        // onFlush() is called. VDA is flushing. State will change to STARTED after onFlushDone().
+        FLUSHING,
+        // onStop() is called. VDA is shutting down. State will change to UNINITIALIZED after
+        // onStopDone().
+        STOPPING,
+        // onError() is called.
+        ERROR,
+    };
+
+    // This constant is used to tell apart from drain_mode_t enumerations in C2Component.h, which
+    // means no drain request.
+    // Note: this value must be different than all enumerations in drain_mode_t.
+    static constexpr uint32_t NO_DRAIN = ~0u;
+
+    // Internal struct for work queue.
+    struct WorkEntry {
+        std::unique_ptr<C2Work> mWork;
+        uint32_t mDrainMode = NO_DRAIN;
+    };
+
+    // Internal struct to keep the information of a specific graphic block.
+    struct GraphicBlockInfo {
+        enum class State {
+            OWNED_BY_COMPONENT,    // Owned by this component.
+            OWNED_BY_ACCELERATOR,  // Owned by video decode accelerator.
+            OWNED_BY_CLIENT,       // Owned by client.
+        };
+
+        // The ID of this block used for accelerator.
+        int32_t mBlockId = -1;
+        // The ID of this block used in block pool. It indicates slot index for bufferqueue-backed
+        // block pool, and buffer ID of BufferPoolData for bufferpool block pool.
+        uint32_t mPoolId = 0;
+        State mState = State::OWNED_BY_COMPONENT;
+        // Graphic block buffer allocated from allocator. The graphic block should be owned until
+        // it is passed to client.
+        std::shared_ptr<C2GraphicBlock> mGraphicBlock;
+        // HAL pixel format used while importing to VDA.
+        HalPixelFormat mPixelFormat;
+        // The handle dupped from graphic block for importing to VDA.
+        ::base::ScopedFD mHandle;
+        // VideoFramePlane information for importing to VDA.
+        std::vector<VideoFramePlane> mPlanes;
+    };
+
+    struct VideoFormat {
+        HalPixelFormat mPixelFormat = HalPixelFormat::UNKNOWN;
+        uint32_t mMinNumBuffers = 0;
+        media::Size mCodedSize;
+        media::Rect mVisibleRect;
+
+        VideoFormat() {}
+        VideoFormat(HalPixelFormat pixelFormat, uint32_t minNumBuffers, media::Size codedSize,
+                    media::Rect visibleRect);
+    };
+
+    // These tasks should be run on the component thread |mThread|.
+    void onDestroy();
+    void onStart(media::VideoCodecProfile profile, ::base::WaitableEvent* done);
+    void onQueueWork(std::unique_ptr<C2Work> work);
+    void onDequeueWork();
+    void onInputBufferDone(int32_t bitstreamId);
+    void onOutputBufferDone(int32_t pictureBufferId, int32_t bitstreamId);
+    void onDrain(uint32_t drainMode);
+    void onDrainDone();
+    void onFlush();
+    void onStop(::base::WaitableEvent* done);
+    void onResetDone();
+    void onFlushDone();
+    void onStopDone();
+    void onOutputFormatChanged(std::unique_ptr<VideoFormat> format);
+    void onVisibleRectChanged(const media::Rect& cropRect);
+    void onOutputBufferReturned(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId);
+
+    // Send input buffer to accelerator with specified bitstream id.
+    void sendInputBufferToAccelerator(const C2ConstLinearBlock& input, int32_t bitstreamId);
+    // Send output buffer to accelerator.
+    void sendOutputBufferToAccelerator(GraphicBlockInfo* info);
+    // Set crop rectangle information to output format.
+    void setOutputFormatCrop(const media::Rect& cropRect);
+    // Helper function to get the specified GraphicBlockInfo object by its id.
+    GraphicBlockInfo* getGraphicBlockById(int32_t blockId);
+    // Helper function to get the specified GraphicBlockInfo object by its pool id.
+    GraphicBlockInfo* getGraphicBlockByPoolId(uint32_t poolId);
+    // Helper function to get the specified work in mPendingWorks by bitstream id.
+    C2Work* getPendingWorkByBitstreamId(int32_t bitstreamId);
+    // Try to apply the output format change.
+    void tryChangeOutputFormat();
+    // Allocate output buffers (graphic blocks) from block allocator.
+    c2_status_t allocateBuffersFromBlockAllocator(const media::Size& size, uint32_t pixelFormat);
+    // Append allocated buffer (graphic block) to mGraphicBlocks.
+    void appendOutputBuffer(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId);
+    // Append allocated buffer (graphic block) to mGraphicBlocks in secure mode.
+    void appendSecureOutputBuffer(std::shared_ptr<C2GraphicBlock> block, uint32_t poolId);
+
+    // Check for finished works in mPendingWorks. If any, make onWorkDone call to listener.
+    void reportFinishedWorkIfAny();
+    // Make onWorkDone call to listener for reporting EOS work in mPendingWorks.
+    void reportEOSWork();
+    // Abandon all works in mPendingWorks and mAbandonedWorks.
+    void reportAbandonedWorks();
+    // Make onError call to listener for reporting errors.
+    void reportError(c2_status_t error);
+    // Helper function to determine if the work is finished.
+    bool isWorkDone(const C2Work* work) const;
+
+    // Start dequeue thread, return true on success.
+    bool startDequeueThread(const media::Size& size, uint32_t pixelFormat,
+                            std::shared_ptr<C2BlockPool> blockPool);
+    // Stop dequeue thread.
+    void stopDequeueThread();
+    // The routine task running on dequeue thread.
+    void dequeueThreadLoop(const media::Size& size, uint32_t pixelFormat,
+                           std::shared_ptr<C2BlockPool> blockPool);
+
+    // The pointer of component interface implementation.
+    std::shared_ptr<IntfImpl> mIntfImpl;
+    // The pointer of component interface.
+    const std::shared_ptr<C2ComponentInterface> mIntf;
+    // The pointer of component listener.
+    std::shared_ptr<Listener> mListener;
+
+    // The main component thread.
+    ::base::Thread mThread;
+    // The task runner on component thread.
+    scoped_refptr<::base::SingleThreadTaskRunner> mTaskRunner;
+
+    // The dequeue buffer loop thread.
+    ::base::Thread mDequeueThread;
+    // The stop signal for dequeue loop which should be atomic (toggled by main thread).
+    std::atomic<bool> mDequeueLoopStop;
+    // The count of buffers owned by client which should be atomic.
+    std::atomic<uint32_t> mBuffersInClient;
+
+    // The following members should be utilized on component thread |mThread|.
+
+    // The initialization result retrieved from VDA.
+    VideoDecodeAcceleratorAdaptor::Result mVDAInitResult;
+    // The pointer of VideoDecodeAcceleratorAdaptor.
+    std::unique_ptr<VideoDecodeAcceleratorAdaptor> mVDAAdaptor;
+    // The done event pointer of stop procedure. It should be restored in onStop() and signaled in
+    // onStopDone().
+    ::base::WaitableEvent* mStopDoneEvent;
+    // The state machine on component thread.
+    ComponentState mComponentState;
+    // The indicator of draining with EOS. This should be always set along with component going to
+    // DRAINING state, and will be unset either after reportEOSWork() (EOS is outputted), or
+    // reportAbandonedWorks() (drain is cancelled and works are abandoned).
+    bool mPendingOutputEOS;
+    // The vector of storing allocated output graphic block information.
+    std::vector<GraphicBlockInfo> mGraphicBlocks;
+    // The work queue. Works are queued along with drain mode from component API queue_nb and
+    // dequeued by the decode process of component.
+    std::queue<WorkEntry> mQueue;
+    // Store all pending works. The dequeued works are placed here until they are finished and then
+    // sent out by onWorkDone call to listener.
+    std::deque<std::unique_ptr<C2Work>> mPendingWorks;
+    // Store all abandoned works. When component gets flushed/stopped, remaining works in queue are
+    // dumped here and sent out by onWorkDone call to listener after flush/stop is finished.
+    std::vector<std::unique_ptr<C2Work>> mAbandonedWorks;
+    // Store the visible rect provided from VDA. If this is changed, component should issue a
+    // visible size change event.
+    media::Rect mRequestedVisibleRect;
+    // The current output format.
+    VideoFormat mOutputFormat;
+    // The pending output format. We need to wait until all buffers are returned back to apply the
+    // format change.
+    std::unique_ptr<VideoFormat> mPendingOutputFormat;
+    // Record the timestamp of the last output buffer. This is used to determine if the work is
+    // finished.
+    int64_t mLastOutputTimestamp;
+
+    // The indicator of whether component is in secure mode.
+    bool mSecureMode;
+
+    // The following members should be utilized on parent thread.
+
+    // The input codec profile which is configured in component interface.
+    media::VideoCodecProfile mCodecProfile;
+    // The state machine on parent thread which should be atomic.
+    std::atomic<State> mState;
+    // The mutex lock to synchronize start/stop/reset/release calls.
+    std::mutex mStartStopLock;
+
+    // The WeakPtrFactory for getting weak pointer of this.
+    ::base::WeakPtrFactory<C2VDAComponent> mWeakThisFactory;
+
+    DISALLOW_COPY_AND_ASSIGN(C2VDAComponent);
+};
+
+}  // namespace android
+
+#endif  // ANDROID_C2_VDA_COMPONENT_H
diff --git a/include/VideoDecodeAcceleratorAdaptor.h b/include/VideoDecodeAcceleratorAdaptor.h
new file mode 100644
index 0000000..9118467
--- /dev/null
+++ b/include/VideoDecodeAcceleratorAdaptor.h
@@ -0,0 +1,105 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
+#define ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
+
+#include <C2VDACommon.h>
+
+#include <rect.h>
+#include <size.h>
+#include <video_codecs.h>
+#include <video_pixel_format.h>
+
+#include <vector>
+
+namespace android {
+
+// The offset and stride of a video frame plane.
+struct VideoFramePlane {
+    uint32_t mOffset;
+    uint32_t mStride;
+};
+
+// Video decoder accelerator adaptor interface.
+// The adaptor plays the role of providing unified adaptor API functions and client callback to
+// codec component side.
+// The adaptor API and client callback are modeled after media::VideoDecodeAccelerator which is
+// ported from Chrome and are 1:1 mapped with its API functions.
+class VideoDecodeAcceleratorAdaptor {
+public:
+    enum Result {
+        SUCCESS = 0,
+        ILLEGAL_STATE = 1,
+        INVALID_ARGUMENT = 2,
+        UNREADABLE_INPUT = 3,
+        PLATFORM_FAILURE = 4,
+        INSUFFICIENT_RESOURCES = 5,
+    };
+
+    // The adaptor client interface. This interface should be implemented in the component side.
+    class Client {
+    public:
+        virtual ~Client() {}
+
+        // Callback to tell client how many and what size of buffers to provide.
+        virtual void providePictureBuffers(uint32_t minNumBuffers,
+                                           const media::Size& codedSize) = 0;
+
+        // Callback to dismiss picture buffer that was assigned earlier.
+        virtual void dismissPictureBuffer(int32_t pictureBufferId) = 0;
+
+        // Callback to deliver decoded pictures ready to be displayed.
+        virtual void pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
+                                  const media::Rect& cropRect) = 0;
+
+        // Callback to notify that decoder has decoded the end of the bitstream buffer with
+        // specified ID.
+        virtual void notifyEndOfBitstreamBuffer(int32_t bitstreamId) = 0;
+
+        // Flush completion callback.
+        virtual void notifyFlushDone() = 0;
+
+        // Reset completion callback.
+        virtual void notifyResetDone() = 0;
+
+        // Callback to notify about errors. Note that errors in initialize() are not reported
+        // here; they are reported via its return value instead.
+        virtual void notifyError(Result error) = 0;
+    };
+
+    // Initializes the video decoder with specific profile. This call is synchronous and returns
+    // SUCCESS iff initialization is successful.
+    virtual Result initialize(media::VideoCodecProfile profile, bool secureMode,
+                              Client* client) = 0;
+
+    // Decodes given buffer handle with bitstream ID.
+    virtual void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t bytesUsed) = 0;
+
+    // Assigns a specified number of picture buffer set to the video decoder.
+    virtual void assignPictureBuffers(uint32_t numOutputBuffers) = 0;
+
+    // Imports planes as backing memory for picture buffer with specified ID.
+    virtual void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
+                                        int handleFd,
+                                        const std::vector<VideoFramePlane>& planes) = 0;
+
+    // Sends a picture buffer to be reused by the decoder, identified by its picture ID.
+    virtual void reusePictureBuffer(int32_t pictureBufferId) = 0;
+
+    // Flushes the decoder.
+    virtual void flush() = 0;
+
+    // Resets the decoder.
+    virtual void reset() = 0;
+
+    // Destroys the decoder.
+    virtual void destroy() = 0;
+
+    virtual ~VideoDecodeAcceleratorAdaptor() {}
+};
+
+}  // namespace android
+
+#endif  // ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
diff --git a/tests/Android.mk b/tests/Android.mk
new file mode 100644
index 0000000..3b384b4
--- /dev/null
+++ b/tests/Android.mk
@@ -0,0 +1,79 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := C2VDACompIntf_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  C2VDACompIntf_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libchrome \
+  libcutils \
+  liblog \
+  libstagefright_codec2 \
+  libstagefright_codec2_vndk \
+  libutils \
+  libv4l2_codec2 \
+  libv4l2_codec2_vda \
+
+LOCAL_C_INCLUDES += \
+  $(TOP)/device/google/cheets2/codec2/vdastore/include \
+  $(TOP)/external/v4l2_codec2/include \
+  $(TOP)/external/v4l2_codec2/vda \
+  $(TOP)/hardware/google/av/codec2/include \
+  $(TOP)/hardware/google/av/codec2/vndk/include \
+  $(TOP)/hardware/google/av/media/codecs/base/include \
+
+LOCAL_CFLAGS += -Werror -Wall -std=c++14
+LOCAL_CLANG := true
+
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+include $(BUILD_NATIVE_TEST)
+
+
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := C2VDAComponent_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  C2VDAComponent_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libchrome \
+  libcutils \
+  liblog \
+  libmedia \
+  libmediaextractor \
+  libstagefright \
+  libstagefright_codec2 \
+  libstagefright_codec2_vndk \
+  libstagefright_foundation \
+  libutils \
+  libv4l2_codec2 \
+  libv4l2_codec2_vda \
+  android.hardware.media.bufferpool@1.0 \
+
+LOCAL_C_INCLUDES += \
+  $(TOP)/external/libchrome \
+  $(TOP)/external/v4l2_codec2/include \
+  $(TOP)/external/v4l2_codec2/vda \
+  $(TOP)/frameworks/av/media/libstagefright/include \
+  $(TOP)/hardware/google/av/codec2/include \
+  $(TOP)/hardware/google/av/codec2/vndk/include \
+  $(TOP)/hardware/google/av/media/codecs/base/include \
+
+# -Wno-unused-parameter is needed for libchrome/base codes
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter -std=c++14
+LOCAL_CLANG := true
+
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+include $(BUILD_NATIVE_TEST)
diff --git a/tests/C2VDACompIntf_test.cpp b/tests/C2VDACompIntf_test.cpp
new file mode 100644
index 0000000..f08ee50
--- /dev/null
+++ b/tests/C2VDACompIntf_test.cpp
@@ -0,0 +1,427 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDACompIntf_test"
+
+#include <C2VDAAllocatorStore.h>
+#include <C2VDAComponent.h>
+
+#include <C2PlatformSupport.h>
+
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <limits>
+
+#define UNUSED(expr)  \
+    do {              \
+        (void)(expr); \
+    } while (0)
+
+namespace android {
+
+const C2String testCompName = "c2.vda.avc.decoder";
+const c2_node_id_t testCompNodeId = 12345;
+
+const char* MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
+const char* MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
+
+const C2Allocator::id_t kInputAllocators[] = {C2PlatformAllocatorStore::ION};
+const C2Allocator::id_t kOutputAllocators[] = {C2VDAAllocatorStore::V4L2_BUFFERQUEUE};
+const C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_GRAPHIC;
+
+class C2VDACompIntfTest : public ::testing::Test {
+protected:
+    C2VDACompIntfTest() {
+        mReflector = std::make_shared<C2ReflectorHelper>();
+        mIntf = std::shared_ptr<C2ComponentInterface>(new SimpleInterface<C2VDAComponent::IntfImpl>(
+                testCompName.c_str(), testCompNodeId,
+                std::make_shared<C2VDAComponent::IntfImpl>(testCompName, mReflector)));
+    }
+    ~C2VDACompIntfTest() override {}
+
+    template <typename T>
+    void testReadOnlyParam(const T* expected, T* invalid);
+
+    template <typename T>
+    void checkReadOnlyFailureOnConfig(T* param);
+
+    template <typename T>
+    void testReadOnlyParamOnStack(const T* expected, T* invalid);
+
+    template <typename T>
+    void testReadOnlyParamOnHeap(const T* expected, T* invalid);
+
+    template <typename T>
+    void testWritableParam(T* newParam);
+
+    template <typename T>
+    void testInvalidWritableParam(T* invalidParam);
+
+    template <typename T>
+    void testWritableVideoSizeParam(int32_t widthMin, int32_t widthMax, int32_t widthStep,
+                                    int32_t heightMin, int32_t heightMax, int32_t heightStep);
+
+    std::shared_ptr<C2ComponentInterface> mIntf;
+    std::shared_ptr<C2ReflectorHelper> mReflector;
+};
+
+template <typename T>
+void C2VDACompIntfTest::testReadOnlyParam(const T* expected, T* invalid) {
+    testReadOnlyParamOnStack(expected, invalid);
+    testReadOnlyParamOnHeap(expected, invalid);
+}
+
+template <typename T>
+void C2VDACompIntfTest::checkReadOnlyFailureOnConfig(T* param) {
+    std::vector<C2Param*> params{param};
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+
+    // TODO: do not assert on checking return value since it is not consistent for
+    //       C2InterfaceHelper now. (b/79720928)
+    //   1) if config same value, it returns C2_OK
+    //   2) if config different value, it returns C2_CORRUPTED. But when you config again, it
+    //      returns C2_OK
+    //ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
+    mIntf->config_vb(params, C2_DONT_BLOCK, &failures);
+
+    // TODO: failure is not yet supported for C2InterfaceHelper
+    //ASSERT_EQ(1u, failures.size());
+    //EXPECT_EQ(C2SettingResult::READ_ONLY, failures[0]->failure);
+}
+
+template <typename T>
+void C2VDACompIntfTest::testReadOnlyParamOnStack(const T* expected, T* invalid) {
+    T param;
+    std::vector<C2Param*> stackParams{&param};
+    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
+    EXPECT_EQ(*expected, param);
+
+    checkReadOnlyFailureOnConfig(&param);
+    checkReadOnlyFailureOnConfig(invalid);
+
+    // The param must not change after failed config.
+    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
+    EXPECT_EQ(*expected, param);
+}
+
+template <typename T>
+void C2VDACompIntfTest::testReadOnlyParamOnHeap(const T* expected, T* invalid) {
+    std::vector<std::unique_ptr<C2Param>> heapParams;
+
+    uint32_t index = expected->index();
+
+    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
+    ASSERT_EQ(1u, heapParams.size());
+    EXPECT_EQ(*expected, *heapParams[0]);
+
+    checkReadOnlyFailureOnConfig(heapParams[0].get());
+    checkReadOnlyFailureOnConfig(invalid);
+
+    // The param must not change after failed config.
+    heapParams.clear();
+    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
+    ASSERT_EQ(1u, heapParams.size());
+    EXPECT_EQ(*expected, *heapParams[0]);
+}
+
+template <typename T>
+void C2VDACompIntfTest::testWritableParam(T* newParam) {
+    std::vector<C2Param*> params{newParam};
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+    ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
+    EXPECT_EQ(0u, failures.size());
+
+    // The param must change to newParam
+    // Check like param on stack
+    T param;
+    std::vector<C2Param*> stackParams{&param};
+    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
+    EXPECT_EQ(*newParam, param);
+
+    // Check also like param on heap
+    std::vector<std::unique_ptr<C2Param>> heapParams;
+    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {newParam->index()}, C2_DONT_BLOCK, &heapParams));
+    ASSERT_EQ(1u, heapParams.size());
+    EXPECT_EQ(*newParam, *heapParams[0]);
+}
+
+template <typename T>
+void C2VDACompIntfTest::testInvalidWritableParam(T* invalidParam) {
+    // Get the current parameter info
+    T preParam;
+    std::vector<C2Param*> stackParams{&preParam};
+    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
+
+    // Config invalid value. The failure is expected
+    std::vector<C2Param*> params{invalidParam};
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+    ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
+    EXPECT_EQ(1u, failures.size());
+
+    // The param must not change after the failed config
+    T param;
+    std::vector<C2Param*> stackParams2{&param};
+    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams2, {}, C2_DONT_BLOCK, nullptr));
+    EXPECT_EQ(preParam, param);
+
+    // Check also like param on heap
+    std::vector<std::unique_ptr<C2Param>> heapParams;
+    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {invalidParam->index()}, C2_DONT_BLOCK, &heapParams));
+    ASSERT_EQ(1u, heapParams.size());
+    EXPECT_EQ(preParam, *heapParams[0]);
+}
+
+bool isUnderflowSubstract(int32_t a, int32_t b) {
+    return a < 0 && b > a - std::numeric_limits<int32_t>::min();
+}
+
+bool isOverflowAdd(int32_t a, int32_t b) {
+    return a > 0 && b > std::numeric_limits<int32_t>::max() - a;
+}
+
+template <typename T>
+void C2VDACompIntfTest::testWritableVideoSizeParam(int32_t widthMin, int32_t widthMax,
+                                                   int32_t widthStep, int32_t heightMin,
+                                                   int32_t heightMax, int32_t heightStep) {
+    // Test supported values of video size
+    T valid;
+    for (int32_t h = heightMin; h <= heightMax; h += heightStep) {
+        for (int32_t w = widthMin; w <= widthMax; w += widthStep) {
+            valid.width = w;
+            valid.height = h;
+            {
+                SCOPED_TRACE("testWritableParam");
+                testWritableParam(&valid);
+                if (HasFailure()) {
+                    printf("Failed while config width = %d, height = %d\n", valid.width,
+                           valid.height);
+                }
+                if (HasFatalFailure()) return;
+            }
+        }
+    }
+
+    // TODO: validate possible values in C2InterfaceHelper is not implemented yet.
+    //// Test invalid values video size
+    //T invalid;
+    //// Width or height is smaller than min values
+    //if (!isUnderflowSubstract(widthMin, widthStep)) {
+    //    invalid.width = widthMin - widthStep;
+    //    invalid.height = heightMin;
+    //    testInvalidWritableParam(&invalid);
+    //}
+    //if (!isUnderflowSubstract(heightMin, heightStep)) {
+    //    invalid.width = widthMin;
+    //    invalid.height = heightMin - heightStep;
+    //    testInvalidWritableParam(&invalid);
+    //}
+
+    //// Width or height is bigger than max values
+    //if (!isOverflowAdd(widthMax, widthStep)) {
+    //    invalid.width = widthMax + widthStep;
+    //    invalid.height = heightMax;
+    //    testInvalidWritableParam(&invalid);
+    //}
+    //if (!isOverflowAdd(heightMax, heightStep)) {
+    //    invalid.width = widthMax;
+    //    invalid.height = heightMax + heightStep;
+    //    testInvalidWritableParam(&invalid);
+    //}
+
+    //// Invalid width/height within the range
+    //if (widthStep != 1) {
+    //    invalid.width = widthMin + 1;
+    //    invalid.height = heightMin;
+    //    testInvalidWritableParam(&invalid);
+    //}
+    //if (heightStep != 1) {
+    //    invalid.width = widthMin;
+    //    invalid.height = heightMin + 1;
+    //    testInvalidWritableParam(&invalid);
+    //}
+}
+
+#define TRACED_FAILURE(func)                            \
+    do {                                                \
+        SCOPED_TRACE(#func);                            \
+        func;                                           \
+        if (::testing::Test::HasFatalFailure()) return; \
+    } while (false)
+
+TEST_F(C2VDACompIntfTest, CreateInstance) {
+    auto name = mIntf->getName();
+    auto id = mIntf->getId();
+    printf("name = %s\n", name.c_str());
+    printf("node_id = %u\n", id);
+    EXPECT_STREQ(name.c_str(), testCompName.c_str());
+    EXPECT_EQ(id, testCompNodeId);
+}
+
+TEST_F(C2VDACompIntfTest, TestInputFormat) {
+    C2StreamBufferTypeSetting::input expected(0u, C2FormatCompressed);
+    expected.setStream(0);  // only support single stream
+    C2StreamBufferTypeSetting::input invalid(0u, C2FormatVideo);
+    invalid.setStream(0);  // only support single stream
+    TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputFormat) {
+    C2StreamBufferTypeSetting::output expected(0u, C2FormatVideo);
+    expected.setStream(0);  // only support single stream
+    C2StreamBufferTypeSetting::output invalid(0u, C2FormatCompressed);
+    invalid.setStream(0);  // only support single stream
+    TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
+}
+
+TEST_F(C2VDACompIntfTest, TestInputPortMime) {
+    std::shared_ptr<C2PortMediaTypeSetting::input> expected(
+            AllocSharedString<C2PortMediaTypeSetting::input>(MEDIA_MIMETYPE_VIDEO_AVC));
+    std::shared_ptr<C2PortMediaTypeSetting::input> invalid(
+            AllocSharedString<C2PortMediaTypeSetting::input>(MEDIA_MIMETYPE_VIDEO_RAW));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputPortMime) {
+    std::shared_ptr<C2PortMediaTypeSetting::output> expected(
+            AllocSharedString<C2PortMediaTypeSetting::output>(MEDIA_MIMETYPE_VIDEO_RAW));
+    std::shared_ptr<C2PortMediaTypeSetting::output> invalid(
+            AllocSharedString<C2PortMediaTypeSetting::output>(MEDIA_MIMETYPE_VIDEO_AVC));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
+TEST_F(C2VDACompIntfTest, TestVideoSize) {
+    C2StreamPictureSizeInfo::output videoSize;
+    videoSize.setStream(0);  // only support single stream
+    std::vector<C2FieldSupportedValuesQuery> widthC2FSV = {
+            {C2ParamField(&videoSize, &C2StreamPictureSizeInfo::width),
+             C2FieldSupportedValuesQuery::CURRENT},
+    };
+    ASSERT_EQ(C2_OK, mIntf->querySupportedValues_vb(widthC2FSV, C2_DONT_BLOCK));
+    std::vector<C2FieldSupportedValuesQuery> heightC2FSV = {
+            {C2ParamField(&videoSize, &C2StreamPictureSizeInfo::height),
+             C2FieldSupportedValuesQuery::CURRENT},
+    };
+    ASSERT_EQ(C2_OK, mIntf->querySupportedValues_vb(heightC2FSV, C2_DONT_BLOCK));
+    ASSERT_EQ(1u, widthC2FSV.size());
+    ASSERT_EQ(C2_OK, widthC2FSV[0].status);
+    ASSERT_EQ(C2FieldSupportedValues::RANGE, widthC2FSV[0].values.type);
+    auto& widthFSVRange = widthC2FSV[0].values.range;
+    int32_t widthMin = widthFSVRange.min.i32;
+    int32_t widthMax = widthFSVRange.max.i32;
+    int32_t widthStep = widthFSVRange.step.i32;
+
+    ASSERT_EQ(1u, heightC2FSV.size());
+    ASSERT_EQ(C2_OK, heightC2FSV[0].status);
+    ASSERT_EQ(C2FieldSupportedValues::RANGE, heightC2FSV[0].values.type);
+    auto& heightFSVRange = heightC2FSV[0].values.range;
+    int32_t heightMin = heightFSVRange.min.i32;
+    int32_t heightMax = heightFSVRange.max.i32;
+    int32_t heightStep = heightFSVRange.step.i32;
+
+    // test updating valid and invalid values
+    TRACED_FAILURE(testWritableVideoSizeParam<C2StreamPictureSizeInfo::output>(
+            widthMin, widthMax, widthStep, heightMin, heightMax, heightStep));
+}
+
+TEST_F(C2VDACompIntfTest, TestInputAllocatorIds) {
+    std::shared_ptr<C2PortAllocatorsTuning::input> expected(
+            C2PortAllocatorsTuning::input::AllocShared(kInputAllocators));
+    std::shared_ptr<C2PortAllocatorsTuning::input> invalid(
+            C2PortAllocatorsTuning::input::AllocShared(kOutputAllocators));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputAllocatorIds) {
+    std::shared_ptr<C2PortAllocatorsTuning::output> expected(
+            C2PortAllocatorsTuning::output::AllocShared(kOutputAllocators));
+    std::shared_ptr<C2PortAllocatorsTuning::output> invalid(
+            C2PortAllocatorsTuning::output::AllocShared(kInputAllocators));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputBlockPoolIds) {
+    std::vector<std::unique_ptr<C2Param>> heapParams;
+    C2Param::Index index = C2PortBlockPoolsTuning::output::PARAM_TYPE;
+
+    // Query the param and check the default value.
+    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
+    ASSERT_EQ(1u, heapParams.size());
+    C2BlockPool::local_id_t value = ((C2PortBlockPoolsTuning*)heapParams[0].get())->m.values[0];
+    ASSERT_EQ(kDefaultOutputBlockPool, value);
+
+    // Configure the param.
+    C2BlockPool::local_id_t configBlockPools[] = {C2BlockPool::PLATFORM_START + 1};
+    std::shared_ptr<C2PortBlockPoolsTuning::output> newParam(
+            C2PortBlockPoolsTuning::output::AllocShared(configBlockPools));
+
+    std::vector<C2Param*> params{newParam.get()};
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+    ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
+    EXPECT_EQ(0u, failures.size());
+
+    // Query the param again and check the value is as configured
+    heapParams.clear();
+    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
+    ASSERT_EQ(1u, heapParams.size());
+    value = ((C2PortBlockPoolsTuning*)heapParams[0].get())->m.values[0];
+    ASSERT_EQ(configBlockPools[0], value);
+}
+
+TEST_F(C2VDACompIntfTest, TestUnsupportedParam) {
+    C2ComponentTemporalInfo unsupportedParam;
+    std::vector<C2Param*> stackParams{&unsupportedParam};
+    ASSERT_EQ(C2_BAD_INDEX, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
+    EXPECT_EQ(0u, unsupportedParam.size());  // invalidated
+}
+
+void dumpType(const C2FieldDescriptor::type_t type) {
+    switch (type) {
+    case C2FieldDescriptor::INT32:
+        printf("int32_t");
+        break;
+    case C2FieldDescriptor::UINT32:
+        printf("uint32_t");
+        break;
+    case C2FieldDescriptor::INT64:
+        printf("int64_t");
+        break;
+    case C2FieldDescriptor::UINT64:
+        printf("uint64_t");
+        break;
+    case C2FieldDescriptor::FLOAT:
+        printf("float");
+        break;
+    default:
+        printf("<flex>");
+        break;
+    }
+}
+
+void dumpStruct(const C2StructDescriptor& sd) {
+    printf("  struct: { ");
+    for (const C2FieldDescriptor& f : sd) {
+        printf("%s:", f.name().c_str());
+        dumpType(f.type());
+        printf(", ");
+    }
+    printf("}\n");
+}
+
+TEST_F(C2VDACompIntfTest, ParamReflector) {
+    std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+
+    ASSERT_EQ(mIntf->querySupportedParams_nb(&params), C2_OK);
+    for (const auto& paramDesc : params) {
+        printf("name: %s\n", paramDesc->name().c_str());
+        printf("  required: %s\n", paramDesc->isRequired() ? "yes" : "no");
+        printf("  type: %x\n", paramDesc->index().type());
+        std::unique_ptr<C2StructDescriptor> desc{mReflector->describe(paramDesc->index().type())};
+        if (desc.get()) dumpStruct(*desc);
+    }
+}
+}  // namespace android
diff --git a/tests/C2VDAComponent_test.cpp b/tests/C2VDAComponent_test.cpp
new file mode 100644
index 0000000..5e597c8
--- /dev/null
+++ b/tests/C2VDAComponent_test.cpp
@@ -0,0 +1,783 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAComponent_test"
+
+#include <C2VDAComponent.h>
+
+#include <C2Buffer.h>
+#include <C2BufferPriv.h>
+#include <C2Component.h>
+#include <C2PlatformSupport.h>
+#include <C2Work.h>
+#include <SimpleC2Interface.h>
+
+#include <base/files/file.h>
+#include <base/files/file_path.h>
+#include <base/md5.h>
+#include <base/strings/string_piece.h>
+#include <base/strings/string_split.h>
+
+#include <gtest/gtest.h>
+#include <media/DataSource.h>
+#include <media/IMediaHTTPService.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractorFactory.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <utils/Log.h>
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <algorithm>
+#include <chrono>
+#include <thread>
+
+using namespace std::chrono_literals;
+
+namespace {
+
+const int kMD5StringLength = 32;
+
+// Read in golden MD5s for the sanity play-through check of this video
+void readGoldenMD5s(const std::string& videoFile, std::vector<std::string>* md5Strings) {
+    base::FilePath filepath(videoFile + ".md5");
+    std::string allMD5s;
+    base::ReadFileToString(filepath, &allMD5s);
+    *md5Strings = base::SplitString(allMD5s, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+    // Check these are legitimate MD5s.
+    for (const std::string& md5String : *md5Strings) {
+        // Ignore the empty string added by SplitString. Ignore comments.
+        if (!md5String.length() || md5String.at(0) == '#') {
+            continue;
+        }
+        if (static_cast<int>(md5String.length()) != kMD5StringLength) {
+            fprintf(stderr, "MD5 length error: %s\n", md5String.c_str());
+        }
+        if (std::count_if(md5String.begin(), md5String.end(), isxdigit) != kMD5StringLength) {
+            fprintf(stderr, "MD5 includes non-hex char: %s\n", md5String.c_str());
+        }
+    }
+    if (md5Strings->empty()) {
+        fprintf(stderr, "MD5 checksum file (%s) missing or empty.\n",
+                filepath.MaybeAsASCII().c_str());
+    }
+}
+
+// Get file path name of recording raw YUV
+base::FilePath getRecordOutputPath(const std::string& videoFile, int width, int height) {
+    base::FilePath filepath(videoFile);
+    filepath = filepath.RemoveExtension();
+    std::string suffix = "_output_" + std::to_string(width) + "x" + std::to_string(height) + ".yuv";
+    return base::FilePath(filepath.value() + suffix);
+}
+}  // namespace
+
+namespace android {
+
+// Input video data parameters. This could be overwritten by user argument [-i].
+// The syntax of each column is:
+//  filename:componentName:width:height:numFrames:numFragments
+// - |filename| is the file path to mp4 (h264) or webm (VP8/9) video.
+// - |componentName| specifies the name of decoder component.
+// - |width| and |height| are for video size (in pixels).
+// - |numFrames| is the number of picture frames.
+// - |numFragments| is the NALU (h264) or frame (VP8/9) count by MediaExtractor.
+const char* gTestVideoData = "bear.mp4:c2.vda.avc.decoder:640:360:82:84";
+//const char* gTestVideoData = "bear-vp8.webm:c2.vda.vp8.decoder:640:360:82:82";
+//const char* gTestVideoData = "bear-vp9.webm:c2.vda.vp9.decoder:320:240:82:82";
+
+// Record decoded output frames as raw YUV format.
+// The recorded file will be named as "<video_name>_output_<width>x<height>.yuv" under the same
+// folder of input video file.
+bool gRecordOutputYUV = false;
+
+const std::string kH264DecoderName = "c2.vda.avc.decoder";
+const std::string kVP8DecoderName = "c2.vda.vp8.decoder";
+const std::string kVP9DecoderName = "c2.vda.vp9.decoder";
+
+// Magic constants for indicating the timing of flush being called.
+enum FlushPoint : int { END_OF_STREAM_FLUSH = -3, MID_STREAM_FLUSH = -2, NO_FLUSH = -1 };
+
+struct TestVideoFile {
+    enum class CodecType { UNKNOWN, H264, VP8, VP9 };
+
+    std::string mFilename;
+    std::string mComponentName;
+    CodecType mCodec = CodecType::UNKNOWN;
+    int mWidth = -1;
+    int mHeight = -1;
+    int mNumFrames = -1;
+    int mNumFragments = -1;
+    sp<IMediaSource> mData;
+};
+
+class C2VDALinearBuffer : public C2Buffer {
+public:
+    explicit C2VDALinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
+          : C2Buffer({block->share(block->offset(), block->size(), C2Fence())}) {}
+};
+
+class C2VDADummyLinearBuffer : public C2Buffer {
+public:
+    explicit C2VDADummyLinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
+          : C2Buffer({block->share(0, 0, C2Fence())}) {}
+};
+
+class Listener;
+
+class C2VDAComponentTest : public ::testing::Test {
+public:
+    void onWorkDone(std::weak_ptr<C2Component> component,
+                    std::list<std::unique_ptr<C2Work>> workItems);
+    void onTripped(std::weak_ptr<C2Component> component,
+                   std::vector<std::shared_ptr<C2SettingResult>> settingResult);
+    void onError(std::weak_ptr<C2Component> component, uint32_t errorCode);
+
+protected:
+    C2VDAComponentTest();
+    void SetUp() override;
+
+    void parseTestVideoData(const char* testVideoData);
+
+protected:
+    using ULock = std::unique_lock<std::mutex>;
+
+    enum {
+        kWorkCount = 16,
+    };
+
+    std::shared_ptr<Listener> mListener;
+
+    // Allocators
+    std::shared_ptr<C2Allocator> mLinearAlloc;
+    std::shared_ptr<C2BlockPool> mLinearBlockPool;
+
+    // The array of output video frame counters which will be counted in listenerThread. The array
+    // length equals the number of stream play iterations.
+    std::vector<int> mOutputFrameCounts;
+    // The array of work counters returned from component which will be counted in listenerThread.
+    // The array length equals the number of stream play iterations.
+    std::vector<int> mFinishedWorkCounts;
+    // The array of output frame MD5Sum which will be computed in listenerThread. The array length
+    // equals the number of stream play iterations.
+    std::vector<std::string> mMD5Strings;
+
+    // Mutex for |mWorkQueue| among main and listenerThread.
+    std::mutex mQueueLock;
+    std::condition_variable mQueueCondition;
+    std::list<std::unique_ptr<C2Work>> mWorkQueue;
+
+    // Mutex for |mProcessedWork| among main and listenerThread.
+    std::mutex mProcessedLock;
+    std::condition_variable mProcessedCondition;
+    std::list<std::unique_ptr<C2Work>> mProcessedWork;
+
+    // Mutex for |mFlushDone| among main and listenerThread.
+    std::mutex mFlushDoneLock;
+    std::condition_variable mFlushDoneCondition;
+    bool mFlushDone;
+
+    std::unique_ptr<TestVideoFile> mTestVideoFile;
+};
+
+class Listener : public C2Component::Listener {
+public:
+    explicit Listener(C2VDAComponentTest* thiz) : mThis(thiz) {}
+    virtual ~Listener() = default;
+
+    virtual void onWorkDone_nb(std::weak_ptr<C2Component> component,
+                               std::list<std::unique_ptr<C2Work>> workItems) override {
+        mThis->onWorkDone(component, std::move(workItems));
+    }
+
+    virtual void onTripped_nb(
+            std::weak_ptr<C2Component> component,
+            std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
+        mThis->onTripped(component, settingResult);
+    }
+
+    virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
+        mThis->onError(component, errorCode);
+    }
+
+private:
+    C2VDAComponentTest* const mThis;
+};
+
+C2VDAComponentTest::C2VDAComponentTest() : mListener(new Listener(this)) {
+    std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
+    CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &mLinearAlloc), C2_OK);
+
+    mLinearBlockPool = std::make_shared<C2BasicLinearBlockPool>(mLinearAlloc);
+}
+
+void C2VDAComponentTest::onWorkDone(std::weak_ptr<C2Component> component,
+                                    std::list<std::unique_ptr<C2Work>> workItems) {
+    (void)component;
+    ULock l(mProcessedLock);
+    for (auto& item : workItems) {
+        mProcessedWork.emplace_back(std::move(item));
+    }
+    mProcessedCondition.notify_all();
+}
+
+void C2VDAComponentTest::onTripped(std::weak_ptr<C2Component> component,
+                                   std::vector<std::shared_ptr<C2SettingResult>> settingResult) {
+    (void)component;
+    (void)settingResult;
+    // no-ops
+}
+
+void C2VDAComponentTest::onError(std::weak_ptr<C2Component> component, uint32_t errorCode) {
+    (void)component;
+    // fail the test
+    FAIL() << "Get error code from component: " << errorCode;
+}
+
+void C2VDAComponentTest::SetUp() {
+    parseTestVideoData(gTestVideoData);
+
+    mWorkQueue.clear();
+    for (int i = 0; i < kWorkCount; ++i) {
+        mWorkQueue.emplace_back(new C2Work);
+    }
+    mProcessedWork.clear();
+    mFlushDone = false;
+}
+
+static bool getMediaSourceFromFile(const std::string& filename,
+                                   const TestVideoFile::CodecType codec, sp<IMediaSource>* source) {
+    source->clear();
+
+    sp<DataSource> dataSource =
+            DataSourceFactory::CreateFromURI(nullptr /* httpService */, filename.c_str());
+
+    if (dataSource == nullptr) {
+        fprintf(stderr, "Unable to create data source.\n");
+        return false;
+    }
+
+    sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
+    if (extractor == nullptr) {
+        fprintf(stderr, "could not create extractor.\n");
+        return false;
+    }
+
+    std::string expectedMime;
+    if (codec == TestVideoFile::CodecType::H264) {
+        expectedMime = "video/avc";
+    } else if (codec == TestVideoFile::CodecType::VP8) {
+        expectedMime = "video/x-vnd.on2.vp8";
+    } else if (codec == TestVideoFile::CodecType::VP9) {
+        expectedMime = "video/x-vnd.on2.vp9";
+    } else {
+        fprintf(stderr, "unsupported codec type.\n");
+        return false;
+    }
+
+    for (size_t i = 0, numTracks = extractor->countTracks(); i < numTracks; ++i) {
+        sp<MetaData> meta =
+                extractor->getTrackMetaData(i, MediaExtractor::kIncludeExtensiveMetaData);
+        if (meta == nullptr) {
+            continue;
+        }
+        const char* mime;
+        meta->findCString(kKeyMIMEType, &mime);
+        if (!strcasecmp(mime, expectedMime.c_str())) {
+            *source = extractor->getTrack(i);
+            if (*source == nullptr) {
+                fprintf(stderr, "It's NULL track for track %zu.\n", i);
+                return false;
+            }
+            return true;
+        }
+    }
+    fprintf(stderr, "No track found.\n");
+    return false;
+}
+
+void C2VDAComponentTest::parseTestVideoData(const char* testVideoData) {
+    ALOGV("videoDataStr: %s", testVideoData);
+    mTestVideoFile = std::make_unique<TestVideoFile>();
+
+    auto splitString = [](const std::string& input, const char delim) {
+        std::vector<std::string> splits;
+        auto beg = input.begin();
+        while (beg != input.end()) {
+            auto pos = std::find(beg, input.end(), delim);
+            splits.emplace_back(beg, pos);
+            beg = pos != input.end() ? pos + 1 : pos;
+        }
+        return splits;
+    };
+    auto tokens = splitString(testVideoData, ':');
+    ASSERT_EQ(tokens.size(), 6u);
+    mTestVideoFile->mFilename = tokens[0];
+    ASSERT_GT(mTestVideoFile->mFilename.length(), 0u);
+
+    mTestVideoFile->mComponentName = tokens[1];
+    if (mTestVideoFile->mComponentName == kH264DecoderName) {
+        mTestVideoFile->mCodec = TestVideoFile::CodecType::H264;
+    } else if (mTestVideoFile->mComponentName == kVP8DecoderName) {
+        mTestVideoFile->mCodec = TestVideoFile::CodecType::VP8;
+    } else if (mTestVideoFile->mComponentName == kVP9DecoderName) {
+        mTestVideoFile->mCodec = TestVideoFile::CodecType::VP9;
+    }
+    ASSERT_NE(mTestVideoFile->mCodec, TestVideoFile::CodecType::UNKNOWN);
+
+    mTestVideoFile->mWidth = std::stoi(tokens[2]);
+    mTestVideoFile->mHeight = std::stoi(tokens[3]);
+    mTestVideoFile->mNumFrames = std::stoi(tokens[4]);
+    mTestVideoFile->mNumFragments = std::stoi(tokens[5]);
+
+    ALOGV("mTestVideoFile: %s, %s, %d, %d, %d, %d", mTestVideoFile->mFilename.c_str(),
+          mTestVideoFile->mComponentName.c_str(), mTestVideoFile->mWidth, mTestVideoFile->mHeight,
+          mTestVideoFile->mNumFrames, mTestVideoFile->mNumFragments);
+}
+
+static void getFrameStringPieces(const C2GraphicView& constGraphicView,
+                                 std::vector<::base::StringPiece>* framePieces) {
+    const uint8_t* const* constData = constGraphicView.data();
+    ASSERT_NE(constData, nullptr);
+    const C2PlanarLayout& layout = constGraphicView.layout();
+    ASSERT_EQ(layout.type, C2PlanarLayout::TYPE_YUV) << "Only support YUV plane format";
+
+    framePieces->clear();
+    framePieces->push_back(
+            ::base::StringPiece(reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_Y]),
+                                constGraphicView.width() * constGraphicView.height()));
+    if (layout.planes[C2PlanarLayout::PLANE_U].colInc == 2) {  // semi-planar mode
+        framePieces->push_back(::base::StringPiece(
+                reinterpret_cast<const char*>(std::min(constData[C2PlanarLayout::PLANE_U],
+                                                       constData[C2PlanarLayout::PLANE_V])),
+                constGraphicView.width() * constGraphicView.height() / 2));
+    } else {
+        framePieces->push_back(::base::StringPiece(
+                reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_U]),
+                constGraphicView.width() * constGraphicView.height() / 4));
+        framePieces->push_back(::base::StringPiece(
+                reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_V]),
+                constGraphicView.width() * constGraphicView.height() / 4));
+    }
+}
+
+// Test parameters:
+// - Flush after work index. If this value is not negative, the test will signal flush to the
+//   component after queueing the work whose frame index equals this value, in the first
+//   iteration. Negative values may be magic constants; please refer to the FlushPoint enum.
+// - Number of play through. This value specifies the iteration time for playing entire video. If
+//   |mFlushAfterWorkIndex| is not negative, the first iteration will perform flush, then repeat
+//   times as this value for playing entire video.
+// - Sanity check. If this is true, decoded content sanity check is enabled. Test will compute the
+//   MD5Sum for output frame data for a play-through iteration (not flushed), and compare to golden
+//   MD5Sums which should be stored in the file |video_filename|.md5
+// - Use dummy EOS work. If this is true, test will queue a dummy work with end-of-stream flag in
+//   the end of all input works. On the contrary, test will call drain_nb() to component.
+class C2VDAComponentParamTest
+      : public C2VDAComponentTest,
+        public ::testing::WithParamInterface<std::tuple<int, uint32_t, bool, bool>> {
+protected:
+    int mFlushAfterWorkIndex;
+    uint32_t mNumberOfPlaythrough;
+    bool mSanityCheck;
+    bool mUseDummyEOSWork;
+};
+
+TEST_P(C2VDAComponentParamTest, SimpleDecodeTest) {
+    mFlushAfterWorkIndex = std::get<0>(GetParam());
+    if (mFlushAfterWorkIndex == FlushPoint::MID_STREAM_FLUSH) {
+        mFlushAfterWorkIndex = mTestVideoFile->mNumFragments / 2;
+    } else if (mFlushAfterWorkIndex == FlushPoint::END_OF_STREAM_FLUSH) {
+        mFlushAfterWorkIndex = mTestVideoFile->mNumFragments - 1;
+    }
+    ASSERT_LT(mFlushAfterWorkIndex, mTestVideoFile->mNumFragments);
+    mNumberOfPlaythrough = std::get<1>(GetParam());
+
+    if (mFlushAfterWorkIndex >= 0) {
+        mNumberOfPlaythrough++;  // add the first iteration to perform mid-stream flushing.
+    }
+
+    mSanityCheck = std::get<2>(GetParam());
+    mUseDummyEOSWork = std::get<3>(GetParam());
+
+    // Reset counters and determine the expected answers for all iterations.
+    mOutputFrameCounts.resize(mNumberOfPlaythrough, 0);
+    mFinishedWorkCounts.resize(mNumberOfPlaythrough, 0);
+    mMD5Strings.resize(mNumberOfPlaythrough);
+    std::vector<int> expectedOutputFrameCounts(mNumberOfPlaythrough, mTestVideoFile->mNumFrames);
+    auto expectedWorkCount = mTestVideoFile->mNumFragments;
+    if (mUseDummyEOSWork) {
+        expectedWorkCount += 1;  // plus one dummy EOS work
+    }
+    std::vector<int> expectedFinishedWorkCounts(mNumberOfPlaythrough, expectedWorkCount);
+    if (mFlushAfterWorkIndex >= 0) {
+        // First iteration performs the mid-stream flushing.
+        expectedOutputFrameCounts[0] = mFlushAfterWorkIndex + 1;
+        expectedFinishedWorkCounts[0] = mFlushAfterWorkIndex + 1;
+    }
+
+    std::shared_ptr<C2Component> component(std::make_shared<C2VDAComponent>(
+            mTestVideoFile->mComponentName, 0, std::make_shared<C2ReflectorHelper>()));
+
+    ASSERT_EQ(component->setListener_vb(mListener, C2_DONT_BLOCK), C2_OK);
+    ASSERT_EQ(component->start(), C2_OK);
+
+    std::atomic_bool running(true);
+    std::thread listenerThread([this, &running]() {
+        uint32_t iteration = 0;
+        ::base::MD5Context md5Ctx;
+        ::base::MD5Init(&md5Ctx);
+        ::base::File recordFile;
+        if (gRecordOutputYUV) {
+            auto recordFilePath = getRecordOutputPath(
+                    mTestVideoFile->mFilename, mTestVideoFile->mWidth, mTestVideoFile->mHeight);
+            fprintf(stdout, "record output file: %s\n", recordFilePath.value().c_str());
+            recordFile = ::base::File(recordFilePath,
+                                      ::base::File::FLAG_OPEN_ALWAYS | ::base::File::FLAG_WRITE);
+            ASSERT_TRUE(recordFile.IsValid());
+        }
+        while (running) {
+            std::unique_ptr<C2Work> work;
+            {
+                ULock l(mProcessedLock);
+                if (mProcessedWork.empty()) {
+                    mProcessedCondition.wait_for(l, 100ms);
+                    if (mProcessedWork.empty()) {
+                        continue;
+                    }
+                }
+                work = std::move(mProcessedWork.front());
+                mProcessedWork.pop_front();
+            }
+            mFinishedWorkCounts[iteration]++;
+            ALOGV("Output: frame index: %llu result: %d flags: 0x%x buffers: %zu",
+                  work->input.ordinal.frameIndex.peekull(), work->result,
+                  work->worklets.front()->output.flags,
+                  work->worklets.front()->output.buffers.size());
+
+            ASSERT_EQ(work->worklets.size(), 1u);
+            if (work->worklets.front()->output.buffers.size() == 1u) {
+                std::shared_ptr<C2Buffer> output = work->worklets.front()->output.buffers[0];
+                C2ConstGraphicBlock graphicBlock = output->data().graphicBlocks().front();
+
+                // check graphic buffer size (coded size) is not less than given video size.
+                ASSERT_LE(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.width()));
+                ASSERT_LE(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.height()));
+
+                // check visible rect equals to given video size.
+                ASSERT_EQ(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.crop().width));
+                ASSERT_EQ(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.crop().height));
+                ASSERT_EQ(0u, graphicBlock.crop().left);
+                ASSERT_EQ(0u, graphicBlock.crop().top);
+
+                // Intended behavior for Intel libva driver (crbug.com/148546):
+                // The 5ms latency is laid here to make sure surface content is finished being
+                // processed by libva.
+                std::this_thread::sleep_for(std::chrono::milliseconds(5));
+
+                const C2GraphicView& constGraphicView = graphicBlock.map().get();
+                ASSERT_EQ(C2_OK, constGraphicView.error());
+                std::vector<::base::StringPiece> framePieces;
+                getFrameStringPieces(constGraphicView, &framePieces);
+                ASSERT_FALSE(framePieces.empty());
+                if (mSanityCheck) {
+                    for (const auto& piece : framePieces) {
+                        ::base::MD5Update(&md5Ctx, piece);
+                    }
+                }
+                if (gRecordOutputYUV) {
+                    for (const auto& piece : framePieces) {
+                        ASSERT_EQ(static_cast<int>(piece.length()),
+                                  recordFile.WriteAtCurrentPos(piece.data(), piece.length()))
+                                << "Failed to write file for yuv recording...";
+                    }
+                }
+
+                work->worklets.front()->output.buffers.clear();
+                mOutputFrameCounts[iteration]++;
+            }
+
+            bool iteration_end =
+                    work->worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM;
+
+            // input buffer should be reset in component side.
+            ASSERT_EQ(work->input.buffers.size(), 1u);
+            ASSERT_TRUE(work->input.buffers.front() == nullptr);
+            work->worklets.clear();
+            work->workletsProcessed = 0;
+
+            if (iteration == 0 && work->input.ordinal.frameIndex.peeku() ==
+                                          static_cast<uint64_t>(mFlushAfterWorkIndex)) {
+                ULock l(mFlushDoneLock);
+                mFlushDone = true;
+                mFlushDoneCondition.notify_all();
+                iteration_end = true;
+            }
+
+            ULock l(mQueueLock);
+            mWorkQueue.emplace_back(std::move(work));
+            mQueueCondition.notify_all();
+
+            if (iteration_end) {
+                // record md5sum
+                ::base::MD5Digest digest;
+                ::base::MD5Final(&digest, &md5Ctx);
+                mMD5Strings[iteration] = ::base::MD5DigestToBase16(digest);
+                ::base::MD5Init(&md5Ctx);
+
+                iteration++;
+                if (iteration == mNumberOfPlaythrough) {
+                    running.store(false);  // stop the thread
+                }
+            }
+        }
+    });
+
+    for (uint32_t iteration = 0; iteration < mNumberOfPlaythrough; ++iteration) {
+        ASSERT_TRUE(getMediaSourceFromFile(mTestVideoFile->mFilename, mTestVideoFile->mCodec,
+                                           &mTestVideoFile->mData));
+
+        std::deque<sp<ABuffer>> csds;
+        if (mTestVideoFile->mCodec == TestVideoFile::CodecType::H264) {
+            // Get csd buffers for h264.
+            sp<AMessage> format;
+            (void)convertMetaDataToMessage(mTestVideoFile->mData->getFormat(), &format);
+            csds.resize(2);
+            format->findBuffer("csd-0", &csds[0]);
+            format->findBuffer("csd-1", &csds[1]);
+            ASSERT_TRUE(csds[0] != nullptr && csds[1] != nullptr);
+        }
+
+        ASSERT_EQ(mTestVideoFile->mData->start(), OK);
+
+        int numWorks = 0;
+        while (true) {
+            size_t size = 0u;
+            void* data = nullptr;
+            int64_t timestamp = 0u;
+            MediaBufferBase* buffer = nullptr;
+            sp<ABuffer> csd;
+            bool queueDummyEOSWork = false;
+            if (!csds.empty()) {
+                csd = std::move(csds.front());
+                csds.pop_front();
+                size = csd->size();
+                data = csd->data();
+            } else {
+                if (mTestVideoFile->mData->read(&buffer) != OK) {
+                    ASSERT_TRUE(buffer == nullptr);
+                    if (mUseDummyEOSWork) {
+                        ALOGV("Meet end of stream. Put a dummy EOS work.");
+                        queueDummyEOSWork = true;
+                    } else {
+                        ALOGV("Meet end of stream. Now drain the component.");
+                        ASSERT_EQ(component->drain_nb(C2Component::DRAIN_COMPONENT_WITH_EOS),
+                                  C2_OK);
+                        break;
+                    }
+                    // TODO(johnylin): add test with drain with DRAIN_COMPONENT_NO_EOS when we know
+                    //                 the actual use case of it.
+                } else {
+                    MetaDataBase& meta = buffer->meta_data();
+                    ASSERT_TRUE(meta.findInt64(kKeyTime, &timestamp));
+                    size = buffer->size();
+                    data = buffer->data();
+                }
+            }
+
+            std::unique_ptr<C2Work> work;
+            while (!work) {
+                ULock l(mQueueLock);
+                if (!mWorkQueue.empty()) {
+                    work = std::move(mWorkQueue.front());
+                    mWorkQueue.pop_front();
+                } else {
+                    mQueueCondition.wait_for(l, 100ms);
+                }
+            }
+
+            work->input.ordinal.frameIndex = static_cast<uint64_t>(numWorks);
+            work->input.buffers.clear();
+
+            std::shared_ptr<C2LinearBlock> block;
+            if (queueDummyEOSWork) {
+                work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
+                work->input.ordinal.timestamp = 0;  // timestamp is invalid for dummy EOS work
+
+                // Create a dummy input buffer by allocating minimal size of buffer from block pool.
+                mLinearBlockPool->fetchLinearBlock(
+                        1, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+                work->input.buffers.emplace_back(new C2VDADummyLinearBuffer(std::move(block)));
+                ALOGV("Input: (Dummy EOS) id: %llu", work->input.ordinal.frameIndex.peekull());
+            } else {
+                work->input.flags = static_cast<C2FrameData::flags_t>(0);
+                work->input.ordinal.timestamp = static_cast<uint64_t>(timestamp);
+
+                // Allocate an input buffer with data size.
+                mLinearBlockPool->fetchLinearBlock(
+                        size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+                C2WriteView view = block->map().get();
+                ASSERT_EQ(view.error(), C2_OK);
+                memcpy(view.base(), data, size);
+                work->input.buffers.emplace_back(new C2VDALinearBuffer(std::move(block)));
+                ALOGV("Input: bitstream id: %llu timestamp: %llu size: %zu",
+                      work->input.ordinal.frameIndex.peekull(),
+                      work->input.ordinal.timestamp.peekull(), size);
+            }
+
+            work->worklets.clear();
+            work->worklets.emplace_back(new C2Worklet);
+
+            std::list<std::unique_ptr<C2Work>> items;
+            items.push_back(std::move(work));
+
+            // Queue the work.
+            ASSERT_EQ(component->queue_nb(&items), C2_OK);
+            numWorks++;
+
+            if (buffer) {
+                buffer->release();
+            }
+
+            if (iteration == 0 && numWorks == mFlushAfterWorkIndex + 1) {
+                // Perform flush.
+                // Note: C2VDAComponent does not return work via |flushedWork|.
+                ASSERT_EQ(component->flush_sm(C2Component::FLUSH_COMPONENT,
+                                              nullptr /* flushedWork */),
+                          C2_OK);
+                break;
+            }
+
+            if (queueDummyEOSWork) {
+                break;
+            }
+        }
+
+        if (iteration == 0 && mFlushAfterWorkIndex >= 0) {
+            // Wait here until client get all flushed works.
+            while (true) {
+                ULock l(mFlushDoneLock);
+                if (mFlushDone) {
+                    break;
+                }
+                mFlushDoneCondition.wait_for(l, 100ms);
+            }
+            ALOGV("Got flush done signal");
+            EXPECT_EQ(numWorks, mFlushAfterWorkIndex + 1);
+        } else {
+            EXPECT_EQ(numWorks, expectedWorkCount);
+        }
+        ASSERT_EQ(mTestVideoFile->mData->stop(), OK);
+    }
+
+    listenerThread.join();
+    ASSERT_EQ(running, false);
+    ASSERT_EQ(component->stop(), C2_OK);
+
+    // Finally check the decoding went as expected.
+    for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
+        if (mFlushAfterWorkIndex >= 0 && i == 0) {
+            EXPECT_LE(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
+        } else {
+            EXPECT_EQ(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
+        }
+        EXPECT_EQ(mFinishedWorkCounts[i], expectedFinishedWorkCounts[i]) << "At iteration: " << i;
+    }
+
+    if (mSanityCheck) {
+        std::vector<std::string> goldenMD5s;
+        readGoldenMD5s(mTestVideoFile->mFilename, &goldenMD5s);
+        for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
+            if (mFlushAfterWorkIndex >= 0 && i == 0) {
+                continue;  // do not compare the iteration with flushing
+            }
+            bool matched = std::find(goldenMD5s.begin(), goldenMD5s.end(), mMD5Strings[i]) !=
+                           goldenMD5s.end();
+            EXPECT_TRUE(matched) << "Unknown MD5: " << mMD5Strings[i] << " at iter: " << i;
+        }
+    }
+}
+
+// Play input video once, end by draining.
+INSTANTIATE_TEST_CASE_P(SinglePlaythroughTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
+                                                          1u, false, false)));
+// Play input video once, end by dummy EOS work.
+INSTANTIATE_TEST_CASE_P(DummyEOSWorkTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
+                                                          1u, false, true)));
+
+// Play 5 times of input video, and check sanity by MD5Sum.
+INSTANTIATE_TEST_CASE_P(MultiplePlaythroughSanityTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
+                                                          5u, true, false)));
+
+// Test mid-stream flush then play once entirely.
+INSTANTIATE_TEST_CASE_P(FlushPlaythroughTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(40, 1u, true, false)));
+
+// Test mid-stream flush then stop.
+INSTANTIATE_TEST_CASE_P(FlushStopTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(
+                                static_cast<int>(FlushPoint::MID_STREAM_FLUSH), 0u, false, false)));
+
+// Test early flush (after a few works) then stop.
+INSTANTIATE_TEST_CASE_P(EarlyFlushStopTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(0, 0u, false, false),
+                                          std::make_tuple(1, 0u, false, false),
+                                          std::make_tuple(2, 0u, false, false),
+                                          std::make_tuple(3, 0u, false, false)));
+
+// Test end-of-stream flush then stop.
+INSTANTIATE_TEST_CASE_P(
+        EndOfStreamFlushStopTest, C2VDAComponentParamTest,
+        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::END_OF_STREAM_FLUSH), 0u,
+                                          false, false)));
+
+}  // namespace android
+
+static void usage(const char* me) {
+    fprintf(stderr, "usage: %s [-i test_video_data] [-r(ecord YUV)] [gtest options]\n", me);
+}
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+
+    int res;
+    while ((res = getopt(argc, argv, "i:r")) >= 0) {
+        switch (res) {
+        case 'i': {
+            android::gTestVideoData = optarg;
+            break;
+        }
+        case 'r': {
+            android::gRecordOutputYUV = true;
+            break;
+        }
+        default: {
+            usage(argv[0]);
+            exit(1);
+            break;
+        }
+        }
+    }
+
+    return RUN_ALL_TESTS();
+}
diff --git a/tests/data/bear-vp8.webm b/tests/data/bear-vp8.webm
new file mode 100644
index 0000000..02ae36c
--- /dev/null
+++ b/tests/data/bear-vp8.webm
Binary files differ
diff --git a/tests/data/bear-vp8.webm.md5 b/tests/data/bear-vp8.webm.md5
new file mode 100644
index 0000000..25d983c
--- /dev/null
+++ b/tests/data/bear-vp8.webm.md5
@@ -0,0 +1,5 @@
+# gTestVideoData = "bear-vp8.webm:c2.vda.vp8.decoder:640:360:82:82"
+# ARM - Mali
+056a2484b34bc78637b37b36481027c6
+# Intel
+fdc9d348b06a77e65a8aa0ccc120c6f9
diff --git a/tests/data/bear-vp9.webm b/tests/data/bear-vp9.webm
new file mode 100644
index 0000000..4f497ae
--- /dev/null
+++ b/tests/data/bear-vp9.webm
Binary files differ
diff --git a/tests/data/bear-vp9.webm.md5 b/tests/data/bear-vp9.webm.md5
new file mode 100644
index 0000000..99810d5
--- /dev/null
+++ b/tests/data/bear-vp9.webm.md5
@@ -0,0 +1,5 @@
+# gTestVideoData = "bear-vp9.webm:c2.vda.vp9.decoder:320:240:82:82"
+# ARM - Mali
+7228c16473724e4dff2fc55edcf94683
+# Intel
+058213ed7a7e119838564001b7ee8004
diff --git a/tests/data/bear.mp4 b/tests/data/bear.mp4
new file mode 100644
index 0000000..f1d30fb
--- /dev/null
+++ b/tests/data/bear.mp4
Binary files differ
diff --git a/tests/data/bear.mp4.md5 b/tests/data/bear.mp4.md5
new file mode 100644
index 0000000..d8f8c2d
--- /dev/null
+++ b/tests/data/bear.mp4.md5
@@ -0,0 +1,5 @@
+# gTestVideoData = "bear.mp4:c2.vda.avc.decoder:640:360:82:84"
+# ARM - Mali
+a3ea733a472e222608d690e91e6c88cc
+# Intel
+431076e337c24fe71a50ae07c64fdf3c
diff --git a/vda/.clang-format b/vda/.clang-format
new file mode 100644
index 0000000..151d19a
--- /dev/null
+++ b/vda/.clang-format
@@ -0,0 +1,4 @@
+# The code in this directory is ported from the Chromium project.
+# Therefore, it is obviously based on the Chromium coding style
+# and should not be formatted with the Android coding style.
+BasedOnStyle: None
diff --git a/vda/Android.bp b/vda/Android.bp
new file mode 100644
index 0000000..5f84535
--- /dev/null
+++ b/vda/Android.bp
@@ -0,0 +1,53 @@
+cc_library_shared {
+    name: "libv4l2_codec2_vda",
+    srcs: [
+        "bit_reader.cc",
+        "bit_reader_core.cc",
+        "bitstream_buffer.cc",
+        "h264_bit_reader.cc",
+        "h264_decoder.cc",
+        "h264_dpb.cc",
+        "h264_parser.cc",
+        "native_pixmap_handle.cc",
+        "picture.cc",
+        "ranges.cc",
+        "shared_memory_region.cc",
+        "v4l2_device.cc",
+        "v4l2_slice_video_decode_accelerator.cc",
+        "v4l2_video_decode_accelerator.cc",
+        "video_codecs.cc",
+        "video_decode_accelerator.cc",
+        "vp8_bool_decoder.cc",
+        "vp8_decoder.cc",
+        "vp8_parser.cc",
+        "vp8_picture.cc",
+        "vp9_bool_decoder.cc",
+        "vp9_compressed_header_parser.cc",
+        "vp9_decoder.cc",
+        "vp9_parser.cc",
+        "vp9_picture.cc",
+        "vp9_raw_bits_reader.cc",
+        "vp9_uncompressed_header_parser.cc",
+    ],
+
+    shared_libs: ["libchrome"],
+    // -Wno-unused-parameter is needed for libchrome/base codes
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wno-unused-parameter",
+    ],
+    clang: true,
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+
+    ldflags: [
+        "-Wl",
+        "-Bsymbolic",
+    ],
+    export_include_dirs: ["."],
+}
diff --git a/vda/Android.mk b/vda/Android.mk
deleted file mode 100644
index 08a88ea..0000000
--- a/vda/Android.mk
+++ /dev/null
@@ -1,48 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_CPP_EXTENSION:= .cc
-LOCAL_SRC_FILES:= \
-        bit_reader.cc       \
-        bit_reader_core.cc  \
-        bitstream_buffer.cc \
-        h264_bit_reader.cc  \
-        h264_decoder.cc     \
-        h264_dpb.cc         \
-        h264_parser.cc      \
-        picture.cc          \
-        ranges.cc           \
-        shared_memory_region.cc \
-        v4l2_device.cc      \
-        v4l2_slice_video_decode_accelerator.cc \
-        video_codecs.cc     \
-        video_decode_accelerator.cc \
-        vp8_bool_decoder.cc \
-        vp8_decoder.cc      \
-        vp8_parser.cc       \
-        vp8_picture.cc      \
-        vp9_bool_decoder.cc \
-        vp9_compressed_header_parser.cc \
-        vp9_decoder.cc      \
-        vp9_parser.cc       \
-        vp9_picture.cc      \
-        vp9_raw_bits_reader.cc \
-        vp9_uncompressed_header_parser.cc \
-
-# gtest/include is for included file from libchrome/base/gtest_prod_util.h
-LOCAL_C_INCLUDES += \
-        $(TOP)/external/libchrome \
-        $(TOP)/external/gtest/include \
-
-LOCAL_MODULE:= libv4l2_codec2_vda
-
-LOCAL_SHARED_LIBRARIES := libchrome \
-
-# -Wno-unused-parameter is needed for libchrome/base codes
-LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter
-LOCAL_CLANG := true
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
-
-LOCAL_LDFLAGS := -Wl,-Bsymbolic
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/vda/accelerated_video_decoder.h b/vda/accelerated_video_decoder.h
index fe1c711..238e34d 100644
--- a/vda/accelerated_video_decoder.h
+++ b/vda/accelerated_video_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 77118c9
 
 #ifndef ACCELERATED_VIDEO_DECODER_H_
 #define ACCELERATED_VIDEO_DECODER_H_
diff --git a/vda/bit_reader.cc b/vda/bit_reader.cc
index 953d144..95e7634 100644
--- a/vda/bit_reader.cc
+++ b/vda/bit_reader.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "bit_reader.h"
 
@@ -15,7 +16,7 @@
   DCHECK_GE(size, 0);
 }
 
-BitReader::~BitReader() {}
+BitReader::~BitReader() = default;
 
 bool BitReader::ReadString(int num_bits, std::string* str) {
   DCHECK_EQ(num_bits % 8, 0);
diff --git a/vda/bit_reader.h b/vda/bit_reader.h
index 2b3fad0..dfc2b0b 100644
--- a/vda/bit_reader.h
+++ b/vda/bit_reader.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 43ddd7a
 
 #ifndef BIT_READER_H_
 #define BIT_READER_H_
@@ -14,8 +15,7 @@
 
 namespace media {
 
-class BitReader
-    : NON_EXPORTED_BASE(private BitReaderCore::ByteStreamProvider)  {
+class BitReader : private BitReaderCore::ByteStreamProvider {
  public:
   // Initialize the reader to start reading at |data|, |size| being size
   // of |data| in bytes.
diff --git a/vda/bit_reader_core.cc b/vda/bit_reader_core.cc
index 220ea03..92b3211 100644
--- a/vda/bit_reader_core.cc
+++ b/vda/bit_reader_core.cc
@@ -1,6 +1,7 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "bit_reader_core.h"
 
@@ -14,11 +15,9 @@
 
 namespace media {
 
-BitReaderCore::ByteStreamProvider::ByteStreamProvider() {
-}
+BitReaderCore::ByteStreamProvider::ByteStreamProvider() = default;
 
-BitReaderCore::ByteStreamProvider::~ByteStreamProvider() {
-}
+BitReaderCore::ByteStreamProvider::~ByteStreamProvider() = default;
 
 BitReaderCore::BitReaderCore(ByteStreamProvider* byte_stream_provider)
     : byte_stream_provider_(byte_stream_provider),
@@ -29,8 +28,7 @@
       reg_next_(0) {
 }
 
-BitReaderCore::~BitReaderCore() {
-}
+BitReaderCore::~BitReaderCore() = default;
 
 bool BitReaderCore::ReadFlag(bool* flag) {
   if (nbits_ == 0 && !Refill(1))
diff --git a/vda/bit_reader_core.h b/vda/bit_reader_core.h
index 9e73018..62a21e2 100644
--- a/vda/bit_reader_core.h
+++ b/vda/bit_reader_core.h
@@ -1,6 +1,7 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
 
 #ifndef BIT_READER_CORE_H_
 #define BIT_READER_CORE_H_
diff --git a/vda/bitstream_buffer.cc b/vda/bitstream_buffer.cc
index 4f71755..36b8d06 100644
--- a/vda/bitstream_buffer.cc
+++ b/vda/bitstream_buffer.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "bitstream_buffer.h"
 
@@ -22,6 +23,6 @@
 
 BitstreamBuffer::BitstreamBuffer(const BitstreamBuffer& other) = default;
 
-BitstreamBuffer::~BitstreamBuffer() {}
+BitstreamBuffer::~BitstreamBuffer() = default;
 
 }  // namespace media
diff --git a/vda/bitstream_buffer.h b/vda/bitstream_buffer.h
index 88555a2..3a267a0 100644
--- a/vda/bitstream_buffer.h
+++ b/vda/bitstream_buffer.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 39a7f93
 
 #ifndef MEDIA_BASE_BITSTREAM_BUFFER_H_
 #define MEDIA_BASE_BITSTREAM_BUFFER_H_
diff --git a/vda/h264_bit_reader.cc b/vda/h264_bit_reader.cc
index 7c536b3..6713655 100644
--- a/vda/h264_bit_reader.cc
+++ b/vda/h264_bit_reader.cc
@@ -1,6 +1,7 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "base/logging.h"
 #include "h264_bit_reader.h"
@@ -15,7 +16,7 @@
       prev_two_bytes_(0),
       emulation_prevention_bytes_(0) {}
 
-H264BitReader::~H264BitReader() {}
+H264BitReader::~H264BitReader() = default;
 
 bool H264BitReader::Initialize(const uint8_t* data, off_t size) {
   DCHECK(data);
diff --git a/vda/h264_bit_reader.h b/vda/h264_bit_reader.h
index 156b524..aa162ce 100644
--- a/vda/h264_bit_reader.h
+++ b/vda/h264_bit_reader.h
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 //
 // This file contains an implementation of an H264 Annex-B video stream parser.
+// Note: ported from Chromium commit head: 77be7ae
 
 #ifndef H264_BIT_READER_H_
 #define H264_BIT_READER_H_
diff --git a/vda/h264_decoder.cc b/vda/h264_decoder.cc
index 3964059..abaaac5 100644
--- a/vda/h264_decoder.cc
+++ b/vda/h264_decoder.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: c3bd64c
 
 #include <algorithm>
 #include <limits>
@@ -16,22 +17,22 @@
 
 namespace media {
 
-H264Decoder::H264Accelerator::H264Accelerator() {}
+H264Decoder::H264Accelerator::H264Accelerator() = default;
 
-H264Decoder::H264Accelerator::~H264Accelerator() {}
+H264Decoder::H264Accelerator::~H264Accelerator() = default;
 
 H264Decoder::H264Decoder(H264Accelerator* accelerator)
-    : max_frame_num_(0),
+    : state_(kNeedStreamMetadata),
+      max_frame_num_(0),
       max_pic_num_(0),
       max_long_term_frame_idx_(0),
       max_num_reorder_frames_(0),
       accelerator_(accelerator) {
   DCHECK(accelerator_);
   Reset();
-  state_ = kNeedStreamMetadata;
 }
 
-H264Decoder::~H264Decoder() {}
+H264Decoder::~H264Decoder() = default;
 
 void H264Decoder::Reset() {
   curr_pic_ = nullptr;
@@ -177,6 +178,8 @@
            sizeof(curr_pic_->ref_pic_marking));
   }
 
+  curr_pic_->visible_rect = visible_rect_;
+
   return true;
 }
 
@@ -1107,10 +1110,24 @@
   if (max_dpb_mbs == 0)
     return false;
 
-  size_t max_dpb_size = std::min(max_dpb_mbs / (width_mb * height_mb),
-                                 static_cast<int>(H264DPB::kDPBMaxSize));
-  if (max_dpb_size == 0) {
-    DVLOG(1) << "Invalid DPB Size";
+  // MaxDpbFrames from level limits per spec.
+  size_t max_dpb_frames = std::min(max_dpb_mbs / (width_mb * height_mb),
+                                   static_cast<int>(H264DPB::kDPBMaxSize));
+  DVLOG(1) << "MaxDpbFrames: " << max_dpb_frames
+           << ", max_num_ref_frames: " << sps->max_num_ref_frames
+           << ", max_dec_frame_buffering: " << sps->max_dec_frame_buffering;
+
+  // Set DPB size to at least the level limit, or what the stream requires.
+  size_t max_dpb_size =
+      std::max(static_cast<int>(max_dpb_frames),
+               std::max(sps->max_num_ref_frames, sps->max_dec_frame_buffering));
+  // Some non-conforming streams specify more frames are needed than the current
+  // level limit. Allow this, but only up to the maximum number of reference
+  // frames allowed per spec.
+  DVLOG_IF(1, max_dpb_size > max_dpb_frames)
+      << "Invalid stream, DPB size > MaxDpbFrames";
+  if (max_dpb_size == 0 || max_dpb_size > H264DPB::kDPBMaxSize) {
+    DVLOG(1) << "Invalid DPB size: " << max_dpb_size;
     return false;
   }
 
@@ -1124,6 +1141,12 @@
     dpb_.set_max_num_pics(max_dpb_size);
   }
 
+  Rect new_visible_rect = sps->GetVisibleRect().value_or(Rect());
+  if (visible_rect_ != new_visible_rect) {
+    DVLOG(2) << "New visible rect: " << new_visible_rect.ToString();
+    visible_rect_ = new_visible_rect;
+  }
+
   if (!UpdateMaxNumReorderFrames(sps))
     return false;
   DVLOG(1) << "max_num_reorder_frames: " << max_num_reorder_frames_;
@@ -1320,7 +1343,7 @@
         if (state_ != kDecoding)
           break;
 
-      // else fallthrough
+        // else fallthrough
       case H264NALU::kIDRSlice: {
         // TODO(posciak): the IDR may require an SPS that we don't have
         // available. For now we'd fail if that happens, but ideally we'd like
diff --git a/vda/h264_decoder.h b/vda/h264_decoder.h
index 27a4c10..82ab98f 100644
--- a/vda/h264_decoder.h
+++ b/vda/h264_decoder.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 77be7ae
 
 #ifndef H264_DECODER_H_
 #define H264_DECODER_H_
@@ -16,6 +17,7 @@
 #include "accelerated_video_decoder.h"
 #include "h264_dpb.h"
 #include "h264_parser.h"
+#include "rect.h"
 #include "size.h"
 
 namespace media {
@@ -266,6 +268,8 @@
 
   // Output picture size.
   Size pic_size_;
+  // Output visible cropping rect.
+  Rect visible_rect_;
 
   // PicOrderCount of the previously outputted frame.
   int last_output_poc_;
diff --git a/vda/h264_dpb.cc b/vda/h264_dpb.cc
index 0e1b411..af0b5e0 100644
--- a/vda/h264_dpb.cc
+++ b/vda/h264_dpb.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include <string.h>
 
@@ -44,14 +45,14 @@
   memset(&ref_pic_marking, 0, sizeof(ref_pic_marking));
 }
 
-H264Picture::~H264Picture() {}
+H264Picture::~H264Picture() = default;
 
 V4L2H264Picture* H264Picture::AsV4L2H264Picture() {
   return nullptr;
 }
 
 H264DPB::H264DPB() : max_num_pics_(0) {}
-H264DPB::~H264DPB() {}
+H264DPB::~H264DPB() = default;
 
 void H264DPB::Clear() {
   pics_.clear();
diff --git a/vda/h264_dpb.h b/vda/h264_dpb.h
index 6be9f21..3da284e 100644
--- a/vda/h264_dpb.h
+++ b/vda/h264_dpb.h
@@ -4,6 +4,7 @@
 //
 // This file contains an implementation of an H.264 Decoded Picture Buffer
 // used in H264 decoders.
+// Note: ported from Chromium commit head: 70340ce
 
 #ifndef H264_DPB_H_
 #define H264_DPB_H_
@@ -15,6 +16,7 @@
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "h264_parser.h"
+#include "rect.h"
 
 namespace media {
 
@@ -22,7 +24,7 @@
 
 // A picture (a frame or a field) in the H.264 spec sense.
 // See spec at http://www.itu.int/rec/T-REC-H.264
-class H264Picture : public base::RefCounted<H264Picture> {
+class H264Picture : public base::RefCountedThreadSafe<H264Picture> {
  public:
   using Vector = std::vector<scoped_refptr<H264Picture>>;
 
@@ -82,8 +84,12 @@
   // Position in DPB (i.e. index in DPB).
   int dpb_position;
 
+  // The visible size of picture. This could be either parsed from SPS, or set
+  // to Rect(0, 0) for indicating invalid values or not available.
+  Rect visible_rect;
+
  protected:
-  friend class base::RefCounted<H264Picture>;
+  friend class base::RefCountedThreadSafe<H264Picture>;
   virtual ~H264Picture();
 
  private:
diff --git a/vda/h264_parser.cc b/vda/h264_parser.cc
index 0f37924..94b1e10 100644
--- a/vda/h264_parser.cc
+++ b/vda/h264_parser.cc
@@ -1,8 +1,10 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "h264_parser.h"
+#include "subsample_entry.h"
 
 #include <limits>
 #include <memory>
@@ -64,6 +66,60 @@
               map_unit * (pic_height_in_map_units_minus1 + 1));
 }
 
+// Also based on section 7.4.2.1.1.
+base::Optional<Rect> H264SPS::GetVisibleRect() const {
+  base::Optional<Size> coded_size = GetCodedSize();
+  if (!coded_size)
+    return base::nullopt;
+
+  if (!frame_cropping_flag)
+    return Rect(coded_size.value());
+
+  int crop_unit_x;
+  int crop_unit_y;
+  if (chroma_array_type == 0) {
+    crop_unit_x = 1;
+    crop_unit_y = frame_mbs_only_flag ? 1 : 2;
+  } else {
+    // Section 6.2.
+    // |chroma_format_idc| may be:
+    //   1 => 4:2:0
+    //   2 => 4:2:2
+    //   3 => 4:4:4
+    // Everything else has |chroma_array_type| == 0.
+    int sub_width_c = chroma_format_idc > 2 ? 1 : 2;
+    int sub_height_c = chroma_format_idc > 1 ? 1 : 2;
+    crop_unit_x = sub_width_c;
+    crop_unit_y = sub_height_c * (frame_mbs_only_flag ? 1 : 2);
+  }
+
+  // Verify that the values are not too large before multiplying.
+  if (coded_size->width() / crop_unit_x < frame_crop_left_offset ||
+      coded_size->width() / crop_unit_x < frame_crop_right_offset ||
+      coded_size->height() / crop_unit_y < frame_crop_top_offset ||
+      coded_size->height() / crop_unit_y < frame_crop_bottom_offset) {
+    DVLOG(1) << "Frame cropping exceeds coded size.";
+    return base::nullopt;
+  }
+  int crop_left = crop_unit_x * frame_crop_left_offset;
+  int crop_right = crop_unit_x * frame_crop_right_offset;
+  int crop_top = crop_unit_y * frame_crop_top_offset;
+  int crop_bottom = crop_unit_y * frame_crop_bottom_offset;
+
+  // Verify that the values are sane. Note that some decoders also require that
+  // crops are smaller than a macroblock and/or that crops must be adjacent to
+  // at least one corner of the coded frame.
+  if (coded_size->width() - crop_left <= crop_right ||
+      coded_size->height() - crop_top <= crop_bottom) {
+    DVLOG(1) << "Frame cropping excludes entire frame.";
+    return base::nullopt;
+  }
+
+  return Rect(crop_left, crop_top,
+              coded_size->width() - crop_left - crop_right,
+              coded_size->height() - crop_top - crop_bottom);
+}
+
 H264PPS::H264PPS() {
   memset(this, 0, sizeof(*this));
 }
@@ -134,12 +190,10 @@
 
 // ISO 14496 part 10
 // VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
-static const int kTableSarWidth[] = {
-  0, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160, 4, 3, 2
-};
-static const int kTableSarHeight[] = {
-  0, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99, 3, 2, 1
-};
+static const int kTableSarWidth[] = {0,  1,  12, 10, 16,  40, 24, 20, 32,
+                                     80, 18, 15, 64, 160, 4,  3,  2};
+static const int kTableSarHeight[] = {0,  1,  11, 11, 11, 33, 11, 11, 11,
+                                      33, 11, 11, 33, 99, 3,  2,  1};
 static_assert(arraysize(kTableSarWidth) == arraysize(kTableSarHeight),
               "sar tables must have the same size");
 
@@ -147,8 +201,7 @@
   Reset();
 }
 
-H264Parser::~H264Parser() {
-}
+H264Parser::~H264Parser() = default;
 
 void H264Parser::Reset() {
   stream_ = NULL;
@@ -217,6 +270,19 @@
   off_t bytes_left = data_size;
 
   while (bytes_left >= 3) {
+    // The start code is "\0\0\1", ones are more unusual than zeroes, so let's
+    // search for it first.
+    const uint8_t* tmp =
+        reinterpret_cast<const uint8_t*>(memchr(data + 2, 1, bytes_left - 2));
+    if (!tmp) {
+      data += bytes_left - 2;
+      bytes_left = 2;
+      break;
+    }
+    tmp -= 2;
+    bytes_left -= tmp - data;
+    data = tmp;
+
     if (IsStartCode(data)) {
       // Found three-byte start code, set pointer at its beginning.
       *offset = data_size - bytes_left;
@@ -251,8 +317,7 @@
   off_t nalu_start_off = 0;
   off_t annexb_start_code_size = 0;
 
-  if (!FindStartCodeInClearRanges(stream_, bytes_left_,
-                                  encrypted_ranges_,
+  if (!FindStartCodeInClearRanges(stream_, bytes_left_, encrypted_ranges_,
                                   &nalu_start_off, &annexb_start_code_size)) {
     DVLOG(4) << "Could not find start code, end of stream?";
     return false;
@@ -277,10 +342,9 @@
   // belong to the current NALU.
   off_t next_start_code_size = 0;
   off_t nalu_size_without_start_code = 0;
-  if (!FindStartCodeInClearRanges(nalu_data, max_nalu_data_size,
-                                  encrypted_ranges_,
-                                  &nalu_size_without_start_code,
-                                  &next_start_code_size)) {
+  if (!FindStartCodeInClearRanges(
+          nalu_data, max_nalu_data_size, encrypted_ranges_,
+          &nalu_size_without_start_code, &next_start_code_size)) {
     nalu_size_without_start_code = max_nalu_data_size;
   }
   *nalu_size = nalu_size_without_start_code + annexb_start_code_size;
@@ -288,6 +352,7 @@
   return true;
 }
 
+// static
 bool H264Parser::FindStartCodeInClearRanges(
     const uint8_t* data,
     off_t data_size,
@@ -325,6 +390,30 @@
   return true;
 }
 
+// static
+bool H264Parser::ParseNALUs(const uint8_t* stream,
+                            size_t stream_size,
+                            std::vector<H264NALU>* nalus) {
+  DCHECK(nalus);
+  H264Parser parser;
+  parser.SetStream(stream, stream_size);
+
+  while (true) {
+    H264NALU nalu;
+    const H264Parser::Result result = parser.AdvanceToNextNALU(&nalu);
+    if (result == H264Parser::kOk) {
+      nalus->push_back(nalu);
+    } else if (result == media::H264Parser::kEOStream) {
+      return true;
+    } else {
+      DLOG(ERROR) << "Unexpected H264 parser result";
+      return false;
+    }
+  }
+  NOTREACHED();
+  return false;
+}
+
 H264Parser::Result H264Parser::ReadUE(int* val) {
   int num_bits = -1;
   int bit;
@@ -381,6 +470,8 @@
   if (!LocateNALU(&nalu_size_with_start_code, &start_code_size)) {
     DVLOG(4) << "Could not find next NALU, bytes left in stream: "
              << bytes_left_;
+    stream_ = nullptr;
+    bytes_left_ = 0;
     return kEOStream;
   }
 
@@ -389,8 +480,11 @@
   DVLOG(4) << "NALU found: size=" << nalu_size_with_start_code;
 
   // Initialize bit reader at the start of found NALU.
-  if (!br_.Initialize(nalu->data, nalu->size))
+  if (!br_.Initialize(nalu->data, nalu->size)) {
+    stream_ = nullptr;
+    bytes_left_ = 0;
     return kEOStream;
+  }
 
   // Move parser state to after this NALU, so next time AdvanceToNextNALU
   // is called, we will effectively be skipping it;
@@ -417,22 +511,26 @@
 
 // Default scaling lists (per spec).
 static const int kDefault4x4Intra[kH264ScalingList4x4Length] = {
-    6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42, };
+    6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42,
+};
 
 static const int kDefault4x4Inter[kH264ScalingList4x4Length] = {
-    10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34, };
+    10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34,
+};
 
 static const int kDefault8x8Intra[kH264ScalingList8x8Length] = {
     6,  10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, 18, 18, 18, 23,
     23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27,
     27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31,
-    31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42, };
+    31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42,
+};
 
 static const int kDefault8x8Inter[kH264ScalingList8x8Length] = {
     9,  13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, 19, 19, 19, 21,
     21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24,
     24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27,
-    27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35, };
+    27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35,
+};
 
 static inline void DefaultScalingList4x4(
     int i,
@@ -579,8 +677,7 @@
 
     if (seq_scaling_list_present_flag) {
       res = ParseScalingList(arraysize(sps->scaling_list4x4[i]),
-                             sps->scaling_list4x4[i],
-                             &use_default);
+                             sps->scaling_list4x4[i], &use_default);
       if (res != kOk)
         return res;
 
@@ -588,8 +685,8 @@
         DefaultScalingList4x4(i, sps->scaling_list4x4);
 
     } else {
-      FallbackScalingList4x4(
-          i, kDefault4x4Intra, kDefault4x4Inter, sps->scaling_list4x4);
+      FallbackScalingList4x4(i, kDefault4x4Intra, kDefault4x4Inter,
+                             sps->scaling_list4x4);
     }
   }
 
@@ -599,8 +696,7 @@
 
     if (seq_scaling_list_present_flag) {
       res = ParseScalingList(arraysize(sps->scaling_list8x8[i]),
-                             sps->scaling_list8x8[i],
-                             &use_default);
+                             sps->scaling_list8x8[i], &use_default);
       if (res != kOk)
         return res;
 
@@ -608,8 +704,8 @@
         DefaultScalingList8x8(i, sps->scaling_list8x8);
 
     } else {
-      FallbackScalingList8x8(
-          i, kDefault8x8Intra, kDefault8x8Inter, sps->scaling_list8x8);
+      FallbackScalingList8x8(i, kDefault8x8Intra, kDefault8x8Inter,
+                             sps->scaling_list8x8);
     }
   }
 
@@ -628,8 +724,7 @@
 
     if (pic_scaling_list_present_flag) {
       res = ParseScalingList(arraysize(pps->scaling_list4x4[i]),
-                             pps->scaling_list4x4[i],
-                             &use_default);
+                             pps->scaling_list4x4[i], &use_default);
       if (res != kOk)
         return res;
 
@@ -639,14 +734,12 @@
     } else {
       if (!sps.seq_scaling_matrix_present_flag) {
         // Table 7-2 fallback rule A in spec.
-        FallbackScalingList4x4(
-            i, kDefault4x4Intra, kDefault4x4Inter, pps->scaling_list4x4);
+        FallbackScalingList4x4(i, kDefault4x4Intra, kDefault4x4Inter,
+                               pps->scaling_list4x4);
       } else {
         // Table 7-2 fallback rule B in spec.
-        FallbackScalingList4x4(i,
-                               sps.scaling_list4x4[0],
-                               sps.scaling_list4x4[3],
-                               pps->scaling_list4x4);
+        FallbackScalingList4x4(i, sps.scaling_list4x4[0],
+                               sps.scaling_list4x4[3], pps->scaling_list4x4);
       }
     }
   }
@@ -657,8 +750,7 @@
 
       if (pic_scaling_list_present_flag) {
         res = ParseScalingList(arraysize(pps->scaling_list8x8[i]),
-                               pps->scaling_list8x8[i],
-                               &use_default);
+                               pps->scaling_list8x8[i], &use_default);
         if (res != kOk)
           return res;
 
@@ -668,14 +760,12 @@
       } else {
         if (!sps.seq_scaling_matrix_present_flag) {
           // Table 7-2 fallback rule A in spec.
-          FallbackScalingList8x8(
-              i, kDefault8x8Intra, kDefault8x8Inter, pps->scaling_list8x8);
+          FallbackScalingList8x8(i, kDefault8x8Intra, kDefault8x8Inter,
+                                 pps->scaling_list8x8);
         } else {
           // Table 7-2 fallback rule B in spec.
-          FallbackScalingList8x8(i,
-                                 sps.scaling_list8x8[0],
-                                 sps.scaling_list8x8[1],
-                                 pps->scaling_list8x8);
+          FallbackScalingList8x8(i, sps.scaling_list8x8[0],
+                                 sps.scaling_list8x8[1], pps->scaling_list8x8);
         }
       }
     }
@@ -697,8 +787,8 @@
   IN_RANGE_OR_RETURN(cpb_cnt_minus1, 0, 31);
   READ_BITS_OR_RETURN(8, &data);  // bit_rate_scale, cpb_size_scale
   for (int i = 0; i <= cpb_cnt_minus1; ++i) {
-    READ_UE_OR_RETURN(&data);  // bit_rate_value_minus1[i]
-    READ_UE_OR_RETURN(&data);  // cpb_size_value_minus1[i]
+    READ_UE_OR_RETURN(&data);    // bit_rate_value_minus1[i]
+    READ_UE_OR_RETURN(&data);    // cpb_size_value_minus1[i]
     READ_BOOL_OR_RETURN(&data);  // cbr_flag
   }
   READ_BITS_OR_RETURN(20, &data);  // cpb/dpb delays, etc.
@@ -755,7 +845,7 @@
     READ_BITS_OR_RETURN(16, &data);  // num_units_in_tick
     READ_BITS_OR_RETURN(16, &data);  // time_scale
     READ_BITS_OR_RETURN(16, &data);  // time_scale
-    READ_BOOL_OR_RETURN(&data);  // fixed_frame_rate_flag
+    READ_BOOL_OR_RETURN(&data);      // fixed_frame_rate_flag
   }
 
   // Read and ignore NAL HRD parameters, if present.
@@ -769,22 +859,22 @@
   if (res != kOk)
     return res;
 
-  if (hrd_parameters_present)  // One of NAL or VCL params present is enough.
+  if (hrd_parameters_present)    // One of NAL or VCL params present is enough.
     READ_BOOL_OR_RETURN(&data);  // low_delay_hrd_flag
 
   READ_BOOL_OR_RETURN(&data);  // pic_struct_present_flag
   READ_BOOL_OR_RETURN(&sps->bitstream_restriction_flag);
   if (sps->bitstream_restriction_flag) {
     READ_BOOL_OR_RETURN(&data);  // motion_vectors_over_pic_boundaries_flag
-    READ_UE_OR_RETURN(&data);  // max_bytes_per_pic_denom
-    READ_UE_OR_RETURN(&data);  // max_bits_per_mb_denom
-    READ_UE_OR_RETURN(&data);  // log2_max_mv_length_horizontal
-    READ_UE_OR_RETURN(&data);  // log2_max_mv_length_vertical
+    READ_UE_OR_RETURN(&data);    // max_bytes_per_pic_denom
+    READ_UE_OR_RETURN(&data);    // max_bits_per_mb_denom
+    READ_UE_OR_RETURN(&data);    // log2_max_mv_length_horizontal
+    READ_UE_OR_RETURN(&data);    // log2_max_mv_length_vertical
     READ_UE_OR_RETURN(&sps->max_num_reorder_frames);
     READ_UE_OR_RETURN(&sps->max_dec_frame_buffering);
     TRUE_OR_RETURN(sps->max_dec_frame_buffering >= sps->max_num_ref_frames);
-    IN_RANGE_OR_RETURN(
-        sps->max_num_reorder_frames, 0, sps->max_dec_frame_buffering);
+    IN_RANGE_OR_RETURN(sps->max_num_reorder_frames, 0,
+                       sps->max_dec_frame_buffering);
   }
 
   return kOk;
@@ -1072,7 +1162,6 @@
     int luma_log2_weight_denom,
     int chroma_log2_weight_denom,
     H264WeightingFactors* w_facts) {
-
   int def_luma_weight = 1 << luma_log2_weight_denom;
   int def_chroma_weight = 1 << chroma_log2_weight_denom;
 
@@ -1120,20 +1209,18 @@
     READ_UE_OR_RETURN(&shdr->chroma_log2_weight_denom);
   TRUE_OR_RETURN(shdr->chroma_log2_weight_denom < 8);
 
-  Result res = ParseWeightingFactors(shdr->num_ref_idx_l0_active_minus1,
-                                     sps.chroma_array_type,
-                                     shdr->luma_log2_weight_denom,
-                                     shdr->chroma_log2_weight_denom,
-                                     &shdr->pred_weight_table_l0);
+  Result res = ParseWeightingFactors(
+      shdr->num_ref_idx_l0_active_minus1, sps.chroma_array_type,
+      shdr->luma_log2_weight_denom, shdr->chroma_log2_weight_denom,
+      &shdr->pred_weight_table_l0);
   if (res != kOk)
     return res;
 
   if (shdr->IsBSlice()) {
-    res = ParseWeightingFactors(shdr->num_ref_idx_l1_active_minus1,
-                                sps.chroma_array_type,
-                                shdr->luma_log2_weight_denom,
-                                shdr->chroma_log2_weight_denom,
-                                &shdr->pred_weight_table_l1);
+    res = ParseWeightingFactors(
+        shdr->num_ref_idx_l1_active_minus1, sps.chroma_array_type,
+        shdr->luma_log2_weight_denom, shdr->chroma_log2_weight_denom,
+        &shdr->pred_weight_table_l1);
     if (res != kOk)
       return res;
   }
diff --git a/vda/h264_parser.h b/vda/h264_parser.h
index fdd3f77..a29685a 100644
--- a/vda/h264_parser.h
+++ b/vda/h264_parser.h
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 //
 // This file contains an implementation of an H264 Annex-B video stream parser.
+// Note: ported from Chromium commit head: 0a918e9
 
 #ifndef H264_PARSER_H_
 #define H264_PARSER_H_
@@ -19,6 +20,7 @@
 #include "base/optional.h"
 #include "h264_bit_reader.h"
 #include "ranges.h"
+#include "rect.h"
 #include "size.h"
 #include "subsample_entry.h"
 
@@ -142,8 +144,8 @@
   int frame_crop_bottom_offset;
 
   bool vui_parameters_present_flag;
-  int sar_width;    // Set to 0 when not specified.
-  int sar_height;   // Set to 0 when not specified.
+  int sar_width;   // Set to 0 when not specified.
+  int sar_height;  // Set to 0 when not specified.
   bool bitstream_restriction_flag;
   int max_num_reorder_frames;
   int max_dec_frame_buffering;
@@ -181,6 +183,7 @@
   // base::nullopt if they encounter integer overflow. They do not verify that
   // the results are in-spec for the given profile or level.
   base::Optional<Size> GetCodedSize() const;
+  base::Optional<Rect> GetVisibleRect() const;
 };
 
 struct H264PPS {
@@ -239,10 +242,7 @@
 struct H264SliceHeader {
   H264SliceHeader();
 
-  enum {
-    kRefListSize = 32,
-    kRefListModSize = kRefListSize
-  };
+  enum { kRefListSize = 32, kRefListModSize = kRefListSize };
 
   enum Type {
     kPSlice = 0,
@@ -258,11 +258,11 @@
   bool IsSPSlice() const;
   bool IsSISlice() const;
 
-  bool idr_pic_flag;       // from NAL header
-  int nal_ref_idc;         // from NAL header
+  bool idr_pic_flag;         // from NAL header
+  int nal_ref_idc;           // from NAL header
   const uint8_t* nalu_data;  // from NAL header
-  off_t nalu_size;         // from NAL header
-  off_t header_bit_size;   // calculated
+  off_t nalu_size;           // from NAL header
+  off_t header_bit_size;     // calculated
 
   int first_mb_in_slice;
   int slice_type;
@@ -377,6 +377,12 @@
                                          off_t* offset,
                                          off_t* start_code_size);
 
+  // Parses the input stream and returns all the NALUs through |nalus|. Returns
+  // false if the stream is invalid.
+  static bool ParseNALUs(const uint8_t* stream,
+                         size_t stream_size,
+                         std::vector<H264NALU>* nalus);
+
   H264Parser();
   ~H264Parser();
 
diff --git a/vda/native_pixmap_handle.cc b/vda/native_pixmap_handle.cc
new file mode 100644
index 0000000..050a683
--- /dev/null
+++ b/vda/native_pixmap_handle.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a9d98e6
+
+#include "native_pixmap_handle.h"
+
+namespace media {
+
+NativePixmapPlane::NativePixmapPlane()
+    : stride(0), offset(0), size(0), modifier(0) {}
+
+NativePixmapPlane::NativePixmapPlane(int stride,
+                                     int offset,
+                                     uint64_t size,
+                                     uint64_t modifier)
+    : stride(stride), offset(offset), size(size), modifier(modifier) {}
+
+NativePixmapPlane::NativePixmapPlane(const NativePixmapPlane& other) = default;
+
+NativePixmapPlane::~NativePixmapPlane() {}
+
+NativePixmapHandle::NativePixmapHandle() {}
+NativePixmapHandle::NativePixmapHandle(const NativePixmapHandle& other) =
+    default;
+
+NativePixmapHandle::~NativePixmapHandle() {}
+
+}  // namespace media
diff --git a/vda/native_pixmap_handle.h b/vda/native_pixmap_handle.h
new file mode 100644
index 0000000..62e2294
--- /dev/null
+++ b/vda/native_pixmap_handle.h
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a9d98e6
+
+#ifndef NATIVE_PIXMAP_HANDLE_H_
+#define NATIVE_PIXMAP_HANDLE_H_
+
+#include <vector>
+
+#include "base/file_descriptor_posix.h"
+
+namespace media {
+
+// NativePixmapPlane is used to carry the plane related information for GBM
+// buffer. More fields can be added if they are plane specific.
+struct NativePixmapPlane {
+  // This is the same value as DRM_FORMAT_MOD_INVALID, which is not a valid
+  // modifier. We use this to indicate that layout information
+  // (tiling/compression) if any will be communicated out of band.
+  static constexpr uint64_t kNoModifier = 0x00ffffffffffffffULL;
+
+  NativePixmapPlane();
+  NativePixmapPlane(int stride,
+                    int offset,
+                    uint64_t size,
+                    uint64_t modifier = kNoModifier);
+  NativePixmapPlane(const NativePixmapPlane& other);
+  ~NativePixmapPlane();
+
+  // The strides and offsets in bytes to be used when accessing the buffers via
+  // a memory mapping. One per plane per entry.
+  int stride;
+  int offset;
+  // Size in bytes of the plane.
+  // This is necessary to map the buffers.
+  uint64_t size;
+  // The modifier is retrieved from GBM library and passed to EGL driver.
+  // Generally it's platform specific, and we don't need to modify it in
+  // Chromium code. Also one per plane per entry.
+  uint64_t modifier;
+};
+
+struct NativePixmapHandle {
+  NativePixmapHandle();
+  NativePixmapHandle(const NativePixmapHandle& other);
+
+  ~NativePixmapHandle();
+
+  // File descriptors for the underlying memory objects (usually dmabufs).
+  std::vector<base::FileDescriptor> fds;
+  std::vector<NativePixmapPlane> planes;
+};
+
+}  // namespace media
+
+#endif  // NATIVE_PIXMAP_HANDLE_H_
diff --git a/vda/picture.cc b/vda/picture.cc
index a086725..8933bc5 100644
--- a/vda/picture.cc
+++ b/vda/picture.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "picture.h"
 
@@ -18,7 +19,7 @@
 
 PictureBuffer::PictureBuffer(const PictureBuffer& other) = default;
 
-PictureBuffer::~PictureBuffer() {}
+PictureBuffer::~PictureBuffer() = default;
 
 Picture::Picture(int32_t picture_buffer_id,
                  int32_t bitstream_buffer_id,
diff --git a/vda/picture.h b/vda/picture.h
index 3dbf0e9..e07b677 100644
--- a/vda/picture.h
+++ b/vda/picture.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: d264e47
 
 #ifndef PICTURE_H_
 #define PICTURE_H_
diff --git a/vda/ranges.cc b/vda/ranges.cc
index 00400b5..4394011 100644
--- a/vda/ranges.cc
+++ b/vda/ranges.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: a4f94d3
 
 #include "ranges.h"
 
diff --git a/vda/ranges.h b/vda/ranges.h
index 98b32ce..6a76ae4 100644
--- a/vda/ranges.h
+++ b/vda/ranges.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
 
 #ifndef RANGES_H_
 #define RANGES_H_
diff --git a/vda/rect.h b/vda/rect.h
index d9640b2..b23e19d 100644
--- a/vda/rect.h
+++ b/vda/rect.h
@@ -1,6 +1,8 @@
 // Copyright 2017 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 0e161fe
+// Note: only necessary functions are ported from gfx::Rect
 
 // Defines a simple integer rectangle class.  The containment semantics
 // are array-like; that is, the coordinate (x, y) is considered to be
diff --git a/vda/shared_memory_region.cc b/vda/shared_memory_region.cc
index ed56559..775a5f2 100644
--- a/vda/shared_memory_region.cc
+++ b/vda/shared_memory_region.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
 
 #include "base/sys_info.h"
 #include "shared_memory_region.h"
diff --git a/vda/shared_memory_region.h b/vda/shared_memory_region.h
index ce9a322..3c5d4b3 100644
--- a/vda/shared_memory_region.h
+++ b/vda/shared_memory_region.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
 
 #ifndef SHARED_MEMORY_REGION_H_
 #define SHARED_MEMORY_REGION_H_
diff --git a/vda/size.h b/vda/size.h
index 4806ddc..c3e8c82 100644
--- a/vda/size.h
+++ b/vda/size.h
@@ -1,6 +1,8 @@
 // Copyright 2017 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: a8e9f71
+// Note: only necessary functions are ported from gfx::Size
 
 #ifndef SIZE_H_
 #define SIZE_H_
diff --git a/vda/subsample_entry.h b/vda/subsample_entry.h
index e7529fb..1e0bfad 100644
--- a/vda/subsample_entry.h
+++ b/vda/subsample_entry.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 7014d6d
 
 #ifndef SUBSAMPLE_ENTRY_H_
 #define SUBSAMPLE_ENTRY_H_
diff --git a/vda/v4l2-controls.h b/vda/v4l2-controls.h
deleted file mode 100644
index a8ccf58..0000000
--- a/vda/v4l2-controls.h
+++ /dev/null
@@ -1,1368 +0,0 @@
-/*
- *  Video for Linux Two controls header file
- *
- *  Copyright (C) 1999-2012 the contributors
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  Alternatively you can redistribute this file under the terms of the
- *  BSD license as stated below:
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *  3. The names of its contributors may not be used to endorse or promote
- *     products derived from this software without specific prior written
- *     permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- *  TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- *  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  The contents of this header was split off from videodev2.h. All control
- *  definitions should be added to this header, which is included by
- *  videodev2.h.
- */
-
-// Note:
-// This header file is obtained from ChromeOS which is not upstreamed in Linux
-// mainline. This should be removed once it is upstreamed.
-// TODO(johnylin): remove this file once it is upstreamed.
-
-#ifndef __LINUX_V4L2_CONTROLS_H
-#define __LINUX_V4L2_CONTROLS_H
-
-#include <linux/types.h>
-
-/* Control classes */
-#define V4L2_CTRL_CLASS_USER		0x00980000	/* Old-style 'user' controls */
-#define V4L2_CTRL_CLASS_MPEG		0x00990000	/* MPEG-compression controls */
-#define V4L2_CTRL_CLASS_CAMERA		0x009a0000	/* Camera class controls */
-#define V4L2_CTRL_CLASS_FM_TX		0x009b0000	/* FM Modulator controls */
-#define V4L2_CTRL_CLASS_FLASH		0x009c0000	/* Camera flash controls */
-#define V4L2_CTRL_CLASS_JPEG		0x009d0000	/* JPEG-compression controls */
-#define V4L2_CTRL_CLASS_IMAGE_SOURCE	0x009e0000	/* Image source controls */
-#define V4L2_CTRL_CLASS_IMAGE_PROC	0x009f0000	/* Image processing controls */
-#define V4L2_CTRL_CLASS_DV		0x00a00000	/* Digital Video controls */
-#define V4L2_CTRL_CLASS_FM_RX		0x00a10000	/* FM Receiver controls */
-#define V4L2_CTRL_CLASS_RF_TUNER	0x00a20000	/* RF tuner controls */
-#define V4L2_CTRL_CLASS_DETECT		0x00a30000	/* Detection controls */
-
-/* User-class control IDs */
-
-#define V4L2_CID_BASE			(V4L2_CTRL_CLASS_USER | 0x900)
-#define V4L2_CID_USER_BASE 		V4L2_CID_BASE
-#define V4L2_CID_USER_CLASS 		(V4L2_CTRL_CLASS_USER | 1)
-#define V4L2_CID_BRIGHTNESS		(V4L2_CID_BASE+0)
-#define V4L2_CID_CONTRAST		(V4L2_CID_BASE+1)
-#define V4L2_CID_SATURATION		(V4L2_CID_BASE+2)
-#define V4L2_CID_HUE			(V4L2_CID_BASE+3)
-#define V4L2_CID_AUDIO_VOLUME		(V4L2_CID_BASE+5)
-#define V4L2_CID_AUDIO_BALANCE		(V4L2_CID_BASE+6)
-#define V4L2_CID_AUDIO_BASS		(V4L2_CID_BASE+7)
-#define V4L2_CID_AUDIO_TREBLE		(V4L2_CID_BASE+8)
-#define V4L2_CID_AUDIO_MUTE		(V4L2_CID_BASE+9)
-#define V4L2_CID_AUDIO_LOUDNESS		(V4L2_CID_BASE+10)
-#define V4L2_CID_BLACK_LEVEL		(V4L2_CID_BASE+11) /* Deprecated */
-#define V4L2_CID_AUTO_WHITE_BALANCE	(V4L2_CID_BASE+12)
-#define V4L2_CID_DO_WHITE_BALANCE	(V4L2_CID_BASE+13)
-#define V4L2_CID_RED_BALANCE		(V4L2_CID_BASE+14)
-#define V4L2_CID_BLUE_BALANCE		(V4L2_CID_BASE+15)
-#define V4L2_CID_GAMMA			(V4L2_CID_BASE+16)
-#define V4L2_CID_WHITENESS		(V4L2_CID_GAMMA) /* Deprecated */
-#define V4L2_CID_EXPOSURE		(V4L2_CID_BASE+17)
-#define V4L2_CID_AUTOGAIN		(V4L2_CID_BASE+18)
-#define V4L2_CID_GAIN			(V4L2_CID_BASE+19)
-#define V4L2_CID_HFLIP			(V4L2_CID_BASE+20)
-#define V4L2_CID_VFLIP			(V4L2_CID_BASE+21)
-
-#define V4L2_CID_POWER_LINE_FREQUENCY	(V4L2_CID_BASE+24)
-enum v4l2_power_line_frequency {
-	V4L2_CID_POWER_LINE_FREQUENCY_DISABLED	= 0,
-	V4L2_CID_POWER_LINE_FREQUENCY_50HZ	= 1,
-	V4L2_CID_POWER_LINE_FREQUENCY_60HZ	= 2,
-	V4L2_CID_POWER_LINE_FREQUENCY_AUTO	= 3,
-};
-#define V4L2_CID_HUE_AUTO			(V4L2_CID_BASE+25)
-#define V4L2_CID_WHITE_BALANCE_TEMPERATURE	(V4L2_CID_BASE+26)
-#define V4L2_CID_SHARPNESS			(V4L2_CID_BASE+27)
-#define V4L2_CID_BACKLIGHT_COMPENSATION 	(V4L2_CID_BASE+28)
-#define V4L2_CID_CHROMA_AGC                     (V4L2_CID_BASE+29)
-#define V4L2_CID_COLOR_KILLER                   (V4L2_CID_BASE+30)
-#define V4L2_CID_COLORFX			(V4L2_CID_BASE+31)
-enum v4l2_colorfx {
-	V4L2_COLORFX_NONE			= 0,
-	V4L2_COLORFX_BW				= 1,
-	V4L2_COLORFX_SEPIA			= 2,
-	V4L2_COLORFX_NEGATIVE			= 3,
-	V4L2_COLORFX_EMBOSS			= 4,
-	V4L2_COLORFX_SKETCH			= 5,
-	V4L2_COLORFX_SKY_BLUE			= 6,
-	V4L2_COLORFX_GRASS_GREEN		= 7,
-	V4L2_COLORFX_SKIN_WHITEN		= 8,
-	V4L2_COLORFX_VIVID			= 9,
-	V4L2_COLORFX_AQUA			= 10,
-	V4L2_COLORFX_ART_FREEZE			= 11,
-	V4L2_COLORFX_SILHOUETTE			= 12,
-	V4L2_COLORFX_SOLARIZATION		= 13,
-	V4L2_COLORFX_ANTIQUE			= 14,
-	V4L2_COLORFX_SET_CBCR			= 15,
-};
-#define V4L2_CID_AUTOBRIGHTNESS			(V4L2_CID_BASE+32)
-#define V4L2_CID_BAND_STOP_FILTER		(V4L2_CID_BASE+33)
-
-#define V4L2_CID_ROTATE				(V4L2_CID_BASE+34)
-#define V4L2_CID_BG_COLOR			(V4L2_CID_BASE+35)
-
-#define V4L2_CID_CHROMA_GAIN                    (V4L2_CID_BASE+36)
-
-#define V4L2_CID_ILLUMINATORS_1			(V4L2_CID_BASE+37)
-#define V4L2_CID_ILLUMINATORS_2			(V4L2_CID_BASE+38)
-
-#define V4L2_CID_MIN_BUFFERS_FOR_CAPTURE	(V4L2_CID_BASE+39)
-#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT		(V4L2_CID_BASE+40)
-
-#define V4L2_CID_ALPHA_COMPONENT		(V4L2_CID_BASE+41)
-#define V4L2_CID_COLORFX_CBCR			(V4L2_CID_BASE+42)
-
-/* last CID + 1 */
-#define V4L2_CID_LASTP1                         (V4L2_CID_BASE+43)
-
-/* USER-class private control IDs */
-
-/* The base for the meye driver controls. See linux/meye.h for the list
- * of controls. We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_MEYE_BASE			(V4L2_CID_USER_BASE + 0x1000)
-
-/* The base for the bttv driver controls.
- * We reserve 32 controls for this driver. */
-#define V4L2_CID_USER_BTTV_BASE			(V4L2_CID_USER_BASE + 0x1010)
-
-
-/* The base for the s2255 driver controls.
- * We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_S2255_BASE		(V4L2_CID_USER_BASE + 0x1030)
-
-/* The base for the si476x driver controls. See include/media/si476x.h for the list
- * of controls. Total of 16 controls is reserved for this driver */
-#define V4L2_CID_USER_SI476X_BASE		(V4L2_CID_USER_BASE + 0x1040)
-
-/* The base for the TI VPE driver controls. Total of 16 controls is reserved for
- * this driver */
-#define V4L2_CID_USER_TI_VPE_BASE		(V4L2_CID_USER_BASE + 0x1050)
-
-/* The base for the saa7134 driver controls.
- * We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_SAA7134_BASE		(V4L2_CID_USER_BASE + 0x1060)
-
-/* The base for the adv7180 driver controls.
- * We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_ADV7180_BASE		(V4L2_CID_USER_BASE + 0x1070)
-
-/* The base for the tc358743 driver controls.
- * We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_TC358743_BASE		(V4L2_CID_USER_BASE + 0x1080)
-
-/* MPEG-class control IDs */
-/* The MPEG controls are applicable to all codec controls
- * and the 'MPEG' part of the define is historical */
-
-#define V4L2_CID_MPEG_BASE 			(V4L2_CTRL_CLASS_MPEG | 0x900)
-#define V4L2_CID_MPEG_CLASS 			(V4L2_CTRL_CLASS_MPEG | 1)
-
-/*  MPEG streams, specific to multiplexed streams */
-#define V4L2_CID_MPEG_STREAM_TYPE 		(V4L2_CID_MPEG_BASE+0)
-enum v4l2_mpeg_stream_type {
-	V4L2_MPEG_STREAM_TYPE_MPEG2_PS   = 0, /* MPEG-2 program stream */
-	V4L2_MPEG_STREAM_TYPE_MPEG2_TS   = 1, /* MPEG-2 transport stream */
-	V4L2_MPEG_STREAM_TYPE_MPEG1_SS   = 2, /* MPEG-1 system stream */
-	V4L2_MPEG_STREAM_TYPE_MPEG2_DVD  = 3, /* MPEG-2 DVD-compatible stream */
-	V4L2_MPEG_STREAM_TYPE_MPEG1_VCD  = 4, /* MPEG-1 VCD-compatible stream */
-	V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
-};
-#define V4L2_CID_MPEG_STREAM_PID_PMT 		(V4L2_CID_MPEG_BASE+1)
-#define V4L2_CID_MPEG_STREAM_PID_AUDIO 		(V4L2_CID_MPEG_BASE+2)
-#define V4L2_CID_MPEG_STREAM_PID_VIDEO 		(V4L2_CID_MPEG_BASE+3)
-#define V4L2_CID_MPEG_STREAM_PID_PCR 		(V4L2_CID_MPEG_BASE+4)
-#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO 	(V4L2_CID_MPEG_BASE+5)
-#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO 	(V4L2_CID_MPEG_BASE+6)
-#define V4L2_CID_MPEG_STREAM_VBI_FMT 		(V4L2_CID_MPEG_BASE+7)
-enum v4l2_mpeg_stream_vbi_fmt {
-	V4L2_MPEG_STREAM_VBI_FMT_NONE = 0,  /* No VBI in the MPEG stream */
-	V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1,  /* VBI in private packets, IVTV format */
-};
-
-/*  MPEG audio controls specific to multiplexed streams  */
-#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ 	(V4L2_CID_MPEG_BASE+100)
-enum v4l2_mpeg_audio_sampling_freq {
-	V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
-	V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
-	V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
-};
-#define V4L2_CID_MPEG_AUDIO_ENCODING 		(V4L2_CID_MPEG_BASE+101)
-enum v4l2_mpeg_audio_encoding {
-	V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
-	V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
-	V4L2_MPEG_AUDIO_ENCODING_LAYER_3 = 2,
-	V4L2_MPEG_AUDIO_ENCODING_AAC     = 3,
-	V4L2_MPEG_AUDIO_ENCODING_AC3     = 4,
-};
-#define V4L2_CID_MPEG_AUDIO_L1_BITRATE 		(V4L2_CID_MPEG_BASE+102)
-enum v4l2_mpeg_audio_l1_bitrate {
-	V4L2_MPEG_AUDIO_L1_BITRATE_32K  = 0,
-	V4L2_MPEG_AUDIO_L1_BITRATE_64K  = 1,
-	V4L2_MPEG_AUDIO_L1_BITRATE_96K  = 2,
-	V4L2_MPEG_AUDIO_L1_BITRATE_128K = 3,
-	V4L2_MPEG_AUDIO_L1_BITRATE_160K = 4,
-	V4L2_MPEG_AUDIO_L1_BITRATE_192K = 5,
-	V4L2_MPEG_AUDIO_L1_BITRATE_224K = 6,
-	V4L2_MPEG_AUDIO_L1_BITRATE_256K = 7,
-	V4L2_MPEG_AUDIO_L1_BITRATE_288K = 8,
-	V4L2_MPEG_AUDIO_L1_BITRATE_320K = 9,
-	V4L2_MPEG_AUDIO_L1_BITRATE_352K = 10,
-	V4L2_MPEG_AUDIO_L1_BITRATE_384K = 11,
-	V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
-	V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
-};
-#define V4L2_CID_MPEG_AUDIO_L2_BITRATE 		(V4L2_CID_MPEG_BASE+103)
-enum v4l2_mpeg_audio_l2_bitrate {
-	V4L2_MPEG_AUDIO_L2_BITRATE_32K  = 0,
-	V4L2_MPEG_AUDIO_L2_BITRATE_48K  = 1,
-	V4L2_MPEG_AUDIO_L2_BITRATE_56K  = 2,
-	V4L2_MPEG_AUDIO_L2_BITRATE_64K  = 3,
-	V4L2_MPEG_AUDIO_L2_BITRATE_80K  = 4,
-	V4L2_MPEG_AUDIO_L2_BITRATE_96K  = 5,
-	V4L2_MPEG_AUDIO_L2_BITRATE_112K = 6,
-	V4L2_MPEG_AUDIO_L2_BITRATE_128K = 7,
-	V4L2_MPEG_AUDIO_L2_BITRATE_160K = 8,
-	V4L2_MPEG_AUDIO_L2_BITRATE_192K = 9,
-	V4L2_MPEG_AUDIO_L2_BITRATE_224K = 10,
-	V4L2_MPEG_AUDIO_L2_BITRATE_256K = 11,
-	V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
-	V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
-};
-#define V4L2_CID_MPEG_AUDIO_L3_BITRATE 		(V4L2_CID_MPEG_BASE+104)
-enum v4l2_mpeg_audio_l3_bitrate {
-	V4L2_MPEG_AUDIO_L3_BITRATE_32K  = 0,
-	V4L2_MPEG_AUDIO_L3_BITRATE_40K  = 1,
-	V4L2_MPEG_AUDIO_L3_BITRATE_48K  = 2,
-	V4L2_MPEG_AUDIO_L3_BITRATE_56K  = 3,
-	V4L2_MPEG_AUDIO_L3_BITRATE_64K  = 4,
-	V4L2_MPEG_AUDIO_L3_BITRATE_80K  = 5,
-	V4L2_MPEG_AUDIO_L3_BITRATE_96K  = 6,
-	V4L2_MPEG_AUDIO_L3_BITRATE_112K = 7,
-	V4L2_MPEG_AUDIO_L3_BITRATE_128K = 8,
-	V4L2_MPEG_AUDIO_L3_BITRATE_160K = 9,
-	V4L2_MPEG_AUDIO_L3_BITRATE_192K = 10,
-	V4L2_MPEG_AUDIO_L3_BITRATE_224K = 11,
-	V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
-	V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
-};
-#define V4L2_CID_MPEG_AUDIO_MODE 		(V4L2_CID_MPEG_BASE+105)
-enum v4l2_mpeg_audio_mode {
-	V4L2_MPEG_AUDIO_MODE_STEREO       = 0,
-	V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
-	V4L2_MPEG_AUDIO_MODE_DUAL         = 2,
-	V4L2_MPEG_AUDIO_MODE_MONO         = 3,
-};
-#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION 	(V4L2_CID_MPEG_BASE+106)
-enum v4l2_mpeg_audio_mode_extension {
-	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4  = 0,
-	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8  = 1,
-	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
-	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
-};
-#define V4L2_CID_MPEG_AUDIO_EMPHASIS 		(V4L2_CID_MPEG_BASE+107)
-enum v4l2_mpeg_audio_emphasis {
-	V4L2_MPEG_AUDIO_EMPHASIS_NONE         = 0,
-	V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
-	V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17    = 2,
-};
-#define V4L2_CID_MPEG_AUDIO_CRC 		(V4L2_CID_MPEG_BASE+108)
-enum v4l2_mpeg_audio_crc {
-	V4L2_MPEG_AUDIO_CRC_NONE  = 0,
-	V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
-};
-#define V4L2_CID_MPEG_AUDIO_MUTE 		(V4L2_CID_MPEG_BASE+109)
-#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE		(V4L2_CID_MPEG_BASE+110)
-#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE		(V4L2_CID_MPEG_BASE+111)
-enum v4l2_mpeg_audio_ac3_bitrate {
-	V4L2_MPEG_AUDIO_AC3_BITRATE_32K  = 0,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_40K  = 1,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_48K  = 2,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_56K  = 3,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_64K  = 4,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_80K  = 5,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_96K  = 6,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_112K = 7,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_128K = 8,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_160K = 9,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_192K = 10,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_224K = 11,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_256K = 12,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_320K = 13,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_384K = 14,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_448K = 15,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_512K = 16,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17,
-	V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18,
-};
-#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK	(V4L2_CID_MPEG_BASE+112)
-enum v4l2_mpeg_audio_dec_playback {
-	V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO	    = 0,
-	V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO	    = 1,
-	V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT	    = 2,
-	V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT	    = 3,
-	V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO	    = 4,
-	V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5,
-};
-#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
-
-/*  MPEG video controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_VIDEO_ENCODING 		(V4L2_CID_MPEG_BASE+200)
-enum v4l2_mpeg_video_encoding {
-	V4L2_MPEG_VIDEO_ENCODING_MPEG_1     = 0,
-	V4L2_MPEG_VIDEO_ENCODING_MPEG_2     = 1,
-	V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
-};
-#define V4L2_CID_MPEG_VIDEO_ASPECT 		(V4L2_CID_MPEG_BASE+201)
-enum v4l2_mpeg_video_aspect {
-	V4L2_MPEG_VIDEO_ASPECT_1x1     = 0,
-	V4L2_MPEG_VIDEO_ASPECT_4x3     = 1,
-	V4L2_MPEG_VIDEO_ASPECT_16x9    = 2,
-	V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
-};
-#define V4L2_CID_MPEG_VIDEO_B_FRAMES 		(V4L2_CID_MPEG_BASE+202)
-#define V4L2_CID_MPEG_VIDEO_GOP_SIZE 		(V4L2_CID_MPEG_BASE+203)
-#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE 	(V4L2_CID_MPEG_BASE+204)
-#define V4L2_CID_MPEG_VIDEO_PULLDOWN 		(V4L2_CID_MPEG_BASE+205)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE 	(V4L2_CID_MPEG_BASE+206)
-enum v4l2_mpeg_video_bitrate_mode {
-	V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
-	V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
-};
-#define V4L2_CID_MPEG_VIDEO_BITRATE 		(V4L2_CID_MPEG_BASE+207)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK 	(V4L2_CID_MPEG_BASE+208)
-#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
-#define V4L2_CID_MPEG_VIDEO_MUTE 		(V4L2_CID_MPEG_BASE+210)
-#define V4L2_CID_MPEG_VIDEO_MUTE_YUV 		(V4L2_CID_MPEG_BASE+211)
-#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE		(V4L2_CID_MPEG_BASE+212)
-#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER	(V4L2_CID_MPEG_BASE+213)
-#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB		(V4L2_CID_MPEG_BASE+214)
-#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE			(V4L2_CID_MPEG_BASE+215)
-#define V4L2_CID_MPEG_VIDEO_HEADER_MODE				(V4L2_CID_MPEG_BASE+216)
-enum v4l2_mpeg_video_header_mode {
-	V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE			= 0,
-	V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME	= 1,
-
-};
-#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC			(V4L2_CID_MPEG_BASE+217)
-#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE		(V4L2_CID_MPEG_BASE+218)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES	(V4L2_CID_MPEG_BASE+219)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB		(V4L2_CID_MPEG_BASE+220)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE		(V4L2_CID_MPEG_BASE+221)
-enum v4l2_mpeg_video_multi_slice_mode {
-	V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE		= 0,
-	V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB		= 1,
-	V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES	= 2,
-};
-#define V4L2_CID_MPEG_VIDEO_VBV_SIZE			(V4L2_CID_MPEG_BASE+222)
-#define V4L2_CID_MPEG_VIDEO_DEC_PTS			(V4L2_CID_MPEG_BASE+223)
-#define V4L2_CID_MPEG_VIDEO_DEC_FRAME			(V4L2_CID_MPEG_BASE+224)
-#define V4L2_CID_MPEG_VIDEO_VBV_DELAY			(V4L2_CID_MPEG_BASE+225)
-#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER		(V4L2_CID_MPEG_BASE+226)
-#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE		(V4L2_CID_MPEG_BASE+227)
-#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE		(V4L2_CID_MPEG_BASE+228)
-#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME		(V4L2_CID_MPEG_BASE+229)
-
-#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP		(V4L2_CID_MPEG_BASE+300)
-#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP		(V4L2_CID_MPEG_BASE+301)
-#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP		(V4L2_CID_MPEG_BASE+302)
-#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP			(V4L2_CID_MPEG_BASE+303)
-#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP			(V4L2_CID_MPEG_BASE+304)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP		(V4L2_CID_MPEG_BASE+350)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP		(V4L2_CID_MPEG_BASE+351)
-#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP		(V4L2_CID_MPEG_BASE+352)
-#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP			(V4L2_CID_MPEG_BASE+353)
-#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP			(V4L2_CID_MPEG_BASE+354)
-#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM		(V4L2_CID_MPEG_BASE+355)
-#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE		(V4L2_CID_MPEG_BASE+356)
-#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE		(V4L2_CID_MPEG_BASE+357)
-enum v4l2_mpeg_video_h264_entropy_mode {
-	V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC	= 0,
-	V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC	= 1,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD		(V4L2_CID_MPEG_BASE+358)
-#define V4L2_CID_MPEG_VIDEO_H264_LEVEL			(V4L2_CID_MPEG_BASE+359)
-enum v4l2_mpeg_video_h264_level {
-	V4L2_MPEG_VIDEO_H264_LEVEL_1_0	= 0,
-	V4L2_MPEG_VIDEO_H264_LEVEL_1B	= 1,
-	V4L2_MPEG_VIDEO_H264_LEVEL_1_1	= 2,
-	V4L2_MPEG_VIDEO_H264_LEVEL_1_2	= 3,
-	V4L2_MPEG_VIDEO_H264_LEVEL_1_3	= 4,
-	V4L2_MPEG_VIDEO_H264_LEVEL_2_0	= 5,
-	V4L2_MPEG_VIDEO_H264_LEVEL_2_1	= 6,
-	V4L2_MPEG_VIDEO_H264_LEVEL_2_2	= 7,
-	V4L2_MPEG_VIDEO_H264_LEVEL_3_0	= 8,
-	V4L2_MPEG_VIDEO_H264_LEVEL_3_1	= 9,
-	V4L2_MPEG_VIDEO_H264_LEVEL_3_2	= 10,
-	V4L2_MPEG_VIDEO_H264_LEVEL_4_0	= 11,
-	V4L2_MPEG_VIDEO_H264_LEVEL_4_1	= 12,
-	V4L2_MPEG_VIDEO_H264_LEVEL_4_2	= 13,
-	V4L2_MPEG_VIDEO_H264_LEVEL_5_0	= 14,
-	V4L2_MPEG_VIDEO_H264_LEVEL_5_1	= 15,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA	(V4L2_CID_MPEG_BASE+360)
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA	(V4L2_CID_MPEG_BASE+361)
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE	(V4L2_CID_MPEG_BASE+362)
-enum v4l2_mpeg_video_h264_loop_filter_mode {
-	V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED				= 0,
-	V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED				= 1,
-	V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY	= 2,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_PROFILE		(V4L2_CID_MPEG_BASE+363)
-enum v4l2_mpeg_video_h264_profile {
-	V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE			= 0,
-	V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE	= 1,
-	V4L2_MPEG_VIDEO_H264_PROFILE_MAIN			= 2,
-	V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED			= 3,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH			= 4,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10			= 5,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422			= 6,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE	= 7,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA		= 8,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA		= 9,
-	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA		= 10,
-	V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA		= 11,
-	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE		= 12,
-	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH		= 13,
-	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA	= 14,
-	V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH		= 15,
-	V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH		= 16,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT	(V4L2_CID_MPEG_BASE+364)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH	(V4L2_CID_MPEG_BASE+365)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE		(V4L2_CID_MPEG_BASE+366)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC		(V4L2_CID_MPEG_BASE+367)
-enum v4l2_mpeg_video_h264_vui_sar_idc {
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED	= 0,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1		= 1,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11		= 2,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11		= 3,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11		= 4,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33		= 5,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11		= 6,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11		= 7,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11		= 8,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33		= 9,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11		= 10,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11		= 11,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33		= 12,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99		= 13,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3		= 14,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2		= 15,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1		= 16,
-	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED	= 17,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING		(V4L2_CID_MPEG_BASE+368)
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0		(V4L2_CID_MPEG_BASE+369)
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE	(V4L2_CID_MPEG_BASE+370)
-enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
-	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD	= 0,
-	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN		= 1,
-	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW		= 2,
-	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE	= 3,
-	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM		= 4,
-	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL		= 5,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO			(V4L2_CID_MPEG_BASE+371)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE		(V4L2_CID_MPEG_BASE+372)
-enum v4l2_mpeg_video_h264_fmo_map_type {
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES		= 0,
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES		= 1,
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER	= 2,
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT			= 3,
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN			= 4,
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN			= 5,
-	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT			= 6,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP	(V4L2_CID_MPEG_BASE+373)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION	(V4L2_CID_MPEG_BASE+374)
-enum v4l2_mpeg_video_h264_fmo_change_dir {
-	V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT	= 0,
-	V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT	= 1,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE	(V4L2_CID_MPEG_BASE+375)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH		(V4L2_CID_MPEG_BASE+376)
-#define V4L2_CID_MPEG_VIDEO_H264_ASO			(V4L2_CID_MPEG_BASE+377)
-#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER	(V4L2_CID_MPEG_BASE+378)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING		(V4L2_CID_MPEG_BASE+379)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE	(V4L2_CID_MPEG_BASE+380)
-enum v4l2_mpeg_video_h264_hierarchical_coding_type {
-	V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B	= 0,
-	V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P	= 1,
-};
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER	(V4L2_CID_MPEG_BASE+381)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP	(V4L2_CID_MPEG_BASE+382)
-#define V4L2_CID_MPEG_VIDEO_H264_SPS		(V4L2_CID_MPEG_BASE+383)
-#define V4L2_CID_MPEG_VIDEO_H264_PPS		(V4L2_CID_MPEG_BASE+384)
-#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX	(V4L2_CID_MPEG_BASE+385)
-#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM	(V4L2_CID_MPEG_BASE+386)
-#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM	(V4L2_CID_MPEG_BASE+387)
-#define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR		(V4L2_CID_MPEG_BASE+388)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP	(V4L2_CID_MPEG_BASE+400)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP	(V4L2_CID_MPEG_BASE+401)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP	(V4L2_CID_MPEG_BASE+402)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP	(V4L2_CID_MPEG_BASE+403)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP	(V4L2_CID_MPEG_BASE+404)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL		(V4L2_CID_MPEG_BASE+405)
-enum v4l2_mpeg_video_mpeg4_level {
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_0	= 0,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B	= 1,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_1	= 2,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_2	= 3,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_3	= 4,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B	= 5,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_4	= 6,
-	V4L2_MPEG_VIDEO_MPEG4_LEVEL_5	= 7,
-};
-#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE	(V4L2_CID_MPEG_BASE+406)
-enum v4l2_mpeg_video_mpeg4_profile {
-	V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE				= 0,
-	V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE			= 1,
-	V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE				= 2,
-	V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE			= 3,
-	V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY	= 4,
-};
-#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL		(V4L2_CID_MPEG_BASE+407)
-
-/*  Control IDs for VP8 streams
- *  Although VP8 is not part of MPEG we add these controls to the MPEG class
- *  as that class is already handling other video compression standards
- */
-#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS		(V4L2_CID_MPEG_BASE+500)
-enum v4l2_vp8_num_partitions {
-	V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION	= 0,
-	V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS	= 1,
-	V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS	= 2,
-	V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS	= 3,
-};
-#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4		(V4L2_CID_MPEG_BASE+501)
-#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES		(V4L2_CID_MPEG_BASE+502)
-enum v4l2_vp8_num_ref_frames {
-	V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME	= 0,
-	V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME	= 1,
-	V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME	= 2,
-};
-#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL		(V4L2_CID_MPEG_BASE+503)
-#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS	(V4L2_CID_MPEG_BASE+504)
-#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD	(V4L2_CID_MPEG_BASE+505)
-#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL	(V4L2_CID_MPEG_BASE+506)
-enum v4l2_vp8_golden_frame_sel {
-	V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV		= 0,
-	V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD	= 1,
-};
-#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP			(V4L2_CID_MPEG_BASE+507)
-#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP			(V4L2_CID_MPEG_BASE+508)
-#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP		(V4L2_CID_MPEG_BASE+509)
-#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP		(V4L2_CID_MPEG_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE			(V4L2_CID_MPEG_BASE+511)
-
-#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR		(V4L2_CID_MPEG_BASE+512)
-
-#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR		(V4L2_CID_MPEG_BASE+513)
-#define V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM		(V4L2_CID_MPEG_BASE+514)
-#define V4L2_CID_MPEG_VIDEO_VP9_ENTROPY			(V4L2_CID_MPEG_BASE+515)
-
-/*  MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
-#define V4L2_CID_MPEG_CX2341X_BASE 				(V4L2_CTRL_CLASS_MPEG | 0x1000)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE 	(V4L2_CID_MPEG_CX2341X_BASE+0)
-enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
-	V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
-	V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO   = 1,
-};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER 		(V4L2_CID_MPEG_CX2341X_BASE+1)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE 	(V4L2_CID_MPEG_CX2341X_BASE+2)
-enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
-	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF                  = 0,
-	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR               = 1,
-	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT              = 2,
-	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE      = 3,
-	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
-};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE 	(V4L2_CID_MPEG_CX2341X_BASE+3)
-enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
-	V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF    = 0,
-	V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
-};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE 	(V4L2_CID_MPEG_CX2341X_BASE+4)
-enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
-	V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
-	V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO   = 1,
-};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER 		(V4L2_CID_MPEG_CX2341X_BASE+5)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE 		(V4L2_CID_MPEG_CX2341X_BASE+6)
-enum v4l2_mpeg_cx2341x_video_median_filter_type {
-	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF      = 0,
-	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR      = 1,
-	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT     = 2,
-	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
-	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG     = 4,
-};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM 	(V4L2_CID_MPEG_CX2341X_BASE+7)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP 	(V4L2_CID_MPEG_CX2341X_BASE+8)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM	(V4L2_CID_MPEG_CX2341X_BASE+9)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP 	(V4L2_CID_MPEG_CX2341X_BASE+10)
-#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS 	(V4L2_CID_MPEG_CX2341X_BASE+11)
-
-/*  MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
-#define V4L2_CID_MPEG_MFC51_BASE				(V4L2_CTRL_CLASS_MPEG | 0x1100)
-
-#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY		(V4L2_CID_MPEG_MFC51_BASE+0)
-#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE	(V4L2_CID_MPEG_MFC51_BASE+1)
-#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE			(V4L2_CID_MPEG_MFC51_BASE+2)
-enum v4l2_mpeg_mfc51_video_frame_skip_mode {
-	V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED		= 0,
-	V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT	= 1,
-	V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT		= 2,
-};
-#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE			(V4L2_CID_MPEG_MFC51_BASE+3)
-enum v4l2_mpeg_mfc51_video_force_frame_type {
-	V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED		= 0,
-	V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME		= 1,
-	V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED	= 2,
-};
-#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING				(V4L2_CID_MPEG_MFC51_BASE+4)
-#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV				(V4L2_CID_MPEG_MFC51_BASE+5)
-#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT			(V4L2_CID_MPEG_MFC51_BASE+6)
-#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF			(V4L2_CID_MPEG_MFC51_BASE+7)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY		(V4L2_CID_MPEG_MFC51_BASE+50)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK			(V4L2_CID_MPEG_MFC51_BASE+51)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH		(V4L2_CID_MPEG_MFC51_BASE+52)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC		(V4L2_CID_MPEG_MFC51_BASE+53)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P		(V4L2_CID_MPEG_MFC51_BASE+54)
-
-
-/*  Camera class control IDs */
-
-#define V4L2_CID_CAMERA_CLASS_BASE 	(V4L2_CTRL_CLASS_CAMERA | 0x900)
-#define V4L2_CID_CAMERA_CLASS 		(V4L2_CTRL_CLASS_CAMERA | 1)
-
-#define V4L2_CID_EXPOSURE_AUTO			(V4L2_CID_CAMERA_CLASS_BASE+1)
-enum  v4l2_exposure_auto_type {
-	V4L2_EXPOSURE_AUTO = 0,
-	V4L2_EXPOSURE_MANUAL = 1,
-	V4L2_EXPOSURE_SHUTTER_PRIORITY = 2,
-	V4L2_EXPOSURE_APERTURE_PRIORITY = 3
-};
-#define V4L2_CID_EXPOSURE_ABSOLUTE		(V4L2_CID_CAMERA_CLASS_BASE+2)
-#define V4L2_CID_EXPOSURE_AUTO_PRIORITY		(V4L2_CID_CAMERA_CLASS_BASE+3)
-
-#define V4L2_CID_PAN_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+4)
-#define V4L2_CID_TILT_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+5)
-#define V4L2_CID_PAN_RESET			(V4L2_CID_CAMERA_CLASS_BASE+6)
-#define V4L2_CID_TILT_RESET			(V4L2_CID_CAMERA_CLASS_BASE+7)
-
-#define V4L2_CID_PAN_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+8)
-#define V4L2_CID_TILT_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+9)
-
-#define V4L2_CID_FOCUS_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+10)
-#define V4L2_CID_FOCUS_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+11)
-#define V4L2_CID_FOCUS_AUTO			(V4L2_CID_CAMERA_CLASS_BASE+12)
-
-#define V4L2_CID_ZOOM_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+13)
-#define V4L2_CID_ZOOM_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+14)
-#define V4L2_CID_ZOOM_CONTINUOUS		(V4L2_CID_CAMERA_CLASS_BASE+15)
-
-#define V4L2_CID_PRIVACY			(V4L2_CID_CAMERA_CLASS_BASE+16)
-
-#define V4L2_CID_IRIS_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+17)
-#define V4L2_CID_IRIS_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+18)
-
-#define V4L2_CID_AUTO_EXPOSURE_BIAS		(V4L2_CID_CAMERA_CLASS_BASE+19)
-
-#define V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE	(V4L2_CID_CAMERA_CLASS_BASE+20)
-enum v4l2_auto_n_preset_white_balance {
-	V4L2_WHITE_BALANCE_MANUAL		= 0,
-	V4L2_WHITE_BALANCE_AUTO			= 1,
-	V4L2_WHITE_BALANCE_INCANDESCENT		= 2,
-	V4L2_WHITE_BALANCE_FLUORESCENT		= 3,
-	V4L2_WHITE_BALANCE_FLUORESCENT_H	= 4,
-	V4L2_WHITE_BALANCE_HORIZON		= 5,
-	V4L2_WHITE_BALANCE_DAYLIGHT		= 6,
-	V4L2_WHITE_BALANCE_FLASH		= 7,
-	V4L2_WHITE_BALANCE_CLOUDY		= 8,
-	V4L2_WHITE_BALANCE_SHADE		= 9,
-};
-
-#define V4L2_CID_WIDE_DYNAMIC_RANGE		(V4L2_CID_CAMERA_CLASS_BASE+21)
-#define V4L2_CID_IMAGE_STABILIZATION		(V4L2_CID_CAMERA_CLASS_BASE+22)
-
-#define V4L2_CID_ISO_SENSITIVITY		(V4L2_CID_CAMERA_CLASS_BASE+23)
-#define V4L2_CID_ISO_SENSITIVITY_AUTO		(V4L2_CID_CAMERA_CLASS_BASE+24)
-enum v4l2_iso_sensitivity_auto_type {
-	V4L2_ISO_SENSITIVITY_MANUAL		= 0,
-	V4L2_ISO_SENSITIVITY_AUTO		= 1,
-};
-
-#define V4L2_CID_EXPOSURE_METERING		(V4L2_CID_CAMERA_CLASS_BASE+25)
-enum v4l2_exposure_metering {
-	V4L2_EXPOSURE_METERING_AVERAGE		= 0,
-	V4L2_EXPOSURE_METERING_CENTER_WEIGHTED	= 1,
-	V4L2_EXPOSURE_METERING_SPOT		= 2,
-	V4L2_EXPOSURE_METERING_MATRIX		= 3,
-};
-
-#define V4L2_CID_SCENE_MODE			(V4L2_CID_CAMERA_CLASS_BASE+26)
-enum v4l2_scene_mode {
-	V4L2_SCENE_MODE_NONE			= 0,
-	V4L2_SCENE_MODE_BACKLIGHT		= 1,
-	V4L2_SCENE_MODE_BEACH_SNOW		= 2,
-	V4L2_SCENE_MODE_CANDLE_LIGHT		= 3,
-	V4L2_SCENE_MODE_DAWN_DUSK		= 4,
-	V4L2_SCENE_MODE_FALL_COLORS		= 5,
-	V4L2_SCENE_MODE_FIREWORKS		= 6,
-	V4L2_SCENE_MODE_LANDSCAPE		= 7,
-	V4L2_SCENE_MODE_NIGHT			= 8,
-	V4L2_SCENE_MODE_PARTY_INDOOR		= 9,
-	V4L2_SCENE_MODE_PORTRAIT		= 10,
-	V4L2_SCENE_MODE_SPORTS			= 11,
-	V4L2_SCENE_MODE_SUNSET			= 12,
-	V4L2_SCENE_MODE_TEXT			= 13,
-};
-
-#define V4L2_CID_3A_LOCK			(V4L2_CID_CAMERA_CLASS_BASE+27)
-#define V4L2_LOCK_EXPOSURE			(1 << 0)
-#define V4L2_LOCK_WHITE_BALANCE			(1 << 1)
-#define V4L2_LOCK_FOCUS				(1 << 2)
-
-#define V4L2_CID_AUTO_FOCUS_START		(V4L2_CID_CAMERA_CLASS_BASE+28)
-#define V4L2_CID_AUTO_FOCUS_STOP		(V4L2_CID_CAMERA_CLASS_BASE+29)
-#define V4L2_CID_AUTO_FOCUS_STATUS		(V4L2_CID_CAMERA_CLASS_BASE+30)
-#define V4L2_AUTO_FOCUS_STATUS_IDLE		(0 << 0)
-#define V4L2_AUTO_FOCUS_STATUS_BUSY		(1 << 0)
-#define V4L2_AUTO_FOCUS_STATUS_REACHED		(1 << 1)
-#define V4L2_AUTO_FOCUS_STATUS_FAILED		(1 << 2)
-
-#define V4L2_CID_AUTO_FOCUS_RANGE		(V4L2_CID_CAMERA_CLASS_BASE+31)
-enum v4l2_auto_focus_range {
-	V4L2_AUTO_FOCUS_RANGE_AUTO		= 0,
-	V4L2_AUTO_FOCUS_RANGE_NORMAL		= 1,
-	V4L2_AUTO_FOCUS_RANGE_MACRO		= 2,
-	V4L2_AUTO_FOCUS_RANGE_INFINITY		= 3,
-};
-
-#define V4L2_CID_PAN_SPEED			(V4L2_CID_CAMERA_CLASS_BASE+32)
-#define V4L2_CID_TILT_SPEED			(V4L2_CID_CAMERA_CLASS_BASE+33)
-
-/* FM Modulator class control IDs */
-
-#define V4L2_CID_FM_TX_CLASS_BASE		(V4L2_CTRL_CLASS_FM_TX | 0x900)
-#define V4L2_CID_FM_TX_CLASS			(V4L2_CTRL_CLASS_FM_TX | 1)
-
-#define V4L2_CID_RDS_TX_DEVIATION		(V4L2_CID_FM_TX_CLASS_BASE + 1)
-#define V4L2_CID_RDS_TX_PI			(V4L2_CID_FM_TX_CLASS_BASE + 2)
-#define V4L2_CID_RDS_TX_PTY			(V4L2_CID_FM_TX_CLASS_BASE + 3)
-#define V4L2_CID_RDS_TX_PS_NAME			(V4L2_CID_FM_TX_CLASS_BASE + 5)
-#define V4L2_CID_RDS_TX_RADIO_TEXT		(V4L2_CID_FM_TX_CLASS_BASE + 6)
-#define V4L2_CID_RDS_TX_MONO_STEREO		(V4L2_CID_FM_TX_CLASS_BASE + 7)
-#define V4L2_CID_RDS_TX_ARTIFICIAL_HEAD		(V4L2_CID_FM_TX_CLASS_BASE + 8)
-#define V4L2_CID_RDS_TX_COMPRESSED		(V4L2_CID_FM_TX_CLASS_BASE + 9)
-#define V4L2_CID_RDS_TX_DYNAMIC_PTY		(V4L2_CID_FM_TX_CLASS_BASE + 10)
-#define V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT	(V4L2_CID_FM_TX_CLASS_BASE + 11)
-#define V4L2_CID_RDS_TX_TRAFFIC_PROGRAM		(V4L2_CID_FM_TX_CLASS_BASE + 12)
-#define V4L2_CID_RDS_TX_MUSIC_SPEECH		(V4L2_CID_FM_TX_CLASS_BASE + 13)
-#define V4L2_CID_RDS_TX_ALT_FREQS_ENABLE	(V4L2_CID_FM_TX_CLASS_BASE + 14)
-#define V4L2_CID_RDS_TX_ALT_FREQS		(V4L2_CID_FM_TX_CLASS_BASE + 15)
-
-#define V4L2_CID_AUDIO_LIMITER_ENABLED		(V4L2_CID_FM_TX_CLASS_BASE + 64)
-#define V4L2_CID_AUDIO_LIMITER_RELEASE_TIME	(V4L2_CID_FM_TX_CLASS_BASE + 65)
-#define V4L2_CID_AUDIO_LIMITER_DEVIATION	(V4L2_CID_FM_TX_CLASS_BASE + 66)
-
-#define V4L2_CID_AUDIO_COMPRESSION_ENABLED	(V4L2_CID_FM_TX_CLASS_BASE + 80)
-#define V4L2_CID_AUDIO_COMPRESSION_GAIN		(V4L2_CID_FM_TX_CLASS_BASE + 81)
-#define V4L2_CID_AUDIO_COMPRESSION_THRESHOLD	(V4L2_CID_FM_TX_CLASS_BASE + 82)
-#define V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME	(V4L2_CID_FM_TX_CLASS_BASE + 83)
-#define V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME	(V4L2_CID_FM_TX_CLASS_BASE + 84)
-
-#define V4L2_CID_PILOT_TONE_ENABLED		(V4L2_CID_FM_TX_CLASS_BASE + 96)
-#define V4L2_CID_PILOT_TONE_DEVIATION		(V4L2_CID_FM_TX_CLASS_BASE + 97)
-#define V4L2_CID_PILOT_TONE_FREQUENCY		(V4L2_CID_FM_TX_CLASS_BASE + 98)
-
-#define V4L2_CID_TUNE_PREEMPHASIS		(V4L2_CID_FM_TX_CLASS_BASE + 112)
-enum v4l2_preemphasis {
-	V4L2_PREEMPHASIS_DISABLED	= 0,
-	V4L2_PREEMPHASIS_50_uS		= 1,
-	V4L2_PREEMPHASIS_75_uS		= 2,
-};
-#define V4L2_CID_TUNE_POWER_LEVEL		(V4L2_CID_FM_TX_CLASS_BASE + 113)
-#define V4L2_CID_TUNE_ANTENNA_CAPACITOR		(V4L2_CID_FM_TX_CLASS_BASE + 114)
-
-
-/* Flash and privacy (indicator) light controls */
-
-#define V4L2_CID_FLASH_CLASS_BASE		(V4L2_CTRL_CLASS_FLASH | 0x900)
-#define V4L2_CID_FLASH_CLASS			(V4L2_CTRL_CLASS_FLASH | 1)
-
-#define V4L2_CID_FLASH_LED_MODE			(V4L2_CID_FLASH_CLASS_BASE + 1)
-enum v4l2_flash_led_mode {
-	V4L2_FLASH_LED_MODE_NONE,
-	V4L2_FLASH_LED_MODE_FLASH,
-	V4L2_FLASH_LED_MODE_TORCH,
-};
-
-#define V4L2_CID_FLASH_STROBE_SOURCE		(V4L2_CID_FLASH_CLASS_BASE + 2)
-enum v4l2_flash_strobe_source {
-	V4L2_FLASH_STROBE_SOURCE_SOFTWARE,
-	V4L2_FLASH_STROBE_SOURCE_EXTERNAL,
-};
-
-#define V4L2_CID_FLASH_STROBE			(V4L2_CID_FLASH_CLASS_BASE + 3)
-#define V4L2_CID_FLASH_STROBE_STOP		(V4L2_CID_FLASH_CLASS_BASE + 4)
-#define V4L2_CID_FLASH_STROBE_STATUS		(V4L2_CID_FLASH_CLASS_BASE + 5)
-
-#define V4L2_CID_FLASH_TIMEOUT			(V4L2_CID_FLASH_CLASS_BASE + 6)
-#define V4L2_CID_FLASH_INTENSITY		(V4L2_CID_FLASH_CLASS_BASE + 7)
-#define V4L2_CID_FLASH_TORCH_INTENSITY		(V4L2_CID_FLASH_CLASS_BASE + 8)
-#define V4L2_CID_FLASH_INDICATOR_INTENSITY	(V4L2_CID_FLASH_CLASS_BASE + 9)
-
-#define V4L2_CID_FLASH_FAULT			(V4L2_CID_FLASH_CLASS_BASE + 10)
-#define V4L2_FLASH_FAULT_OVER_VOLTAGE		(1 << 0)
-#define V4L2_FLASH_FAULT_TIMEOUT		(1 << 1)
-#define V4L2_FLASH_FAULT_OVER_TEMPERATURE	(1 << 2)
-#define V4L2_FLASH_FAULT_SHORT_CIRCUIT		(1 << 3)
-#define V4L2_FLASH_FAULT_OVER_CURRENT		(1 << 4)
-#define V4L2_FLASH_FAULT_INDICATOR		(1 << 5)
-#define V4L2_FLASH_FAULT_UNDER_VOLTAGE		(1 << 6)
-#define V4L2_FLASH_FAULT_INPUT_VOLTAGE		(1 << 7)
-#define V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE	(1 << 8)
-
-#define V4L2_CID_FLASH_CHARGE			(V4L2_CID_FLASH_CLASS_BASE + 11)
-#define V4L2_CID_FLASH_READY			(V4L2_CID_FLASH_CLASS_BASE + 12)
-
-
-/* JPEG-class control IDs */
-
-#define V4L2_CID_JPEG_CLASS_BASE		(V4L2_CTRL_CLASS_JPEG | 0x900)
-#define V4L2_CID_JPEG_CLASS			(V4L2_CTRL_CLASS_JPEG | 1)
-
-#define	V4L2_CID_JPEG_CHROMA_SUBSAMPLING	(V4L2_CID_JPEG_CLASS_BASE + 1)
-enum v4l2_jpeg_chroma_subsampling {
-	V4L2_JPEG_CHROMA_SUBSAMPLING_444	= 0,
-	V4L2_JPEG_CHROMA_SUBSAMPLING_422	= 1,
-	V4L2_JPEG_CHROMA_SUBSAMPLING_420	= 2,
-	V4L2_JPEG_CHROMA_SUBSAMPLING_411	= 3,
-	V4L2_JPEG_CHROMA_SUBSAMPLING_410	= 4,
-	V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY	= 5,
-};
-#define	V4L2_CID_JPEG_RESTART_INTERVAL		(V4L2_CID_JPEG_CLASS_BASE + 2)
-#define	V4L2_CID_JPEG_COMPRESSION_QUALITY	(V4L2_CID_JPEG_CLASS_BASE + 3)
-
-#define	V4L2_CID_JPEG_ACTIVE_MARKER		(V4L2_CID_JPEG_CLASS_BASE + 4)
-#define	V4L2_JPEG_ACTIVE_MARKER_APP0		(1 << 0)
-#define	V4L2_JPEG_ACTIVE_MARKER_APP1		(1 << 1)
-#define	V4L2_JPEG_ACTIVE_MARKER_COM		(1 << 16)
-#define	V4L2_JPEG_ACTIVE_MARKER_DQT		(1 << 17)
-#define	V4L2_JPEG_ACTIVE_MARKER_DHT		(1 << 18)
-
-
-/* Image source controls */
-#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE	(V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
-#define V4L2_CID_IMAGE_SOURCE_CLASS		(V4L2_CTRL_CLASS_IMAGE_SOURCE | 1)
-
-#define V4L2_CID_VBLANK				(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
-#define V4L2_CID_HBLANK				(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
-#define V4L2_CID_ANALOGUE_GAIN			(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
-#define V4L2_CID_TEST_PATTERN_RED		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
-#define V4L2_CID_TEST_PATTERN_GREENR		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
-#define V4L2_CID_TEST_PATTERN_BLUE		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
-#define V4L2_CID_TEST_PATTERN_GREENB		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
-
-
-/* Image processing controls */
-
-#define V4L2_CID_IMAGE_PROC_CLASS_BASE		(V4L2_CTRL_CLASS_IMAGE_PROC | 0x900)
-#define V4L2_CID_IMAGE_PROC_CLASS		(V4L2_CTRL_CLASS_IMAGE_PROC | 1)
-
-#define V4L2_CID_LINK_FREQ			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
-#define V4L2_CID_PIXEL_RATE			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
-#define V4L2_CID_TEST_PATTERN			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
-
-
-/*  DV-class control IDs defined by V4L2 */
-#define V4L2_CID_DV_CLASS_BASE			(V4L2_CTRL_CLASS_DV | 0x900)
-#define V4L2_CID_DV_CLASS			(V4L2_CTRL_CLASS_DV | 1)
-
-#define	V4L2_CID_DV_TX_HOTPLUG			(V4L2_CID_DV_CLASS_BASE + 1)
-#define	V4L2_CID_DV_TX_RXSENSE			(V4L2_CID_DV_CLASS_BASE + 2)
-#define	V4L2_CID_DV_TX_EDID_PRESENT		(V4L2_CID_DV_CLASS_BASE + 3)
-#define	V4L2_CID_DV_TX_MODE			(V4L2_CID_DV_CLASS_BASE + 4)
-enum v4l2_dv_tx_mode {
-	V4L2_DV_TX_MODE_DVI_D	= 0,
-	V4L2_DV_TX_MODE_HDMI	= 1,
-};
-#define V4L2_CID_DV_TX_RGB_RANGE		(V4L2_CID_DV_CLASS_BASE + 5)
-enum v4l2_dv_rgb_range {
-	V4L2_DV_RGB_RANGE_AUTO	  = 0,
-	V4L2_DV_RGB_RANGE_LIMITED = 1,
-	V4L2_DV_RGB_RANGE_FULL	  = 2,
-};
-
-#define	V4L2_CID_DV_RX_POWER_PRESENT		(V4L2_CID_DV_CLASS_BASE + 100)
-#define V4L2_CID_DV_RX_RGB_RANGE		(V4L2_CID_DV_CLASS_BASE + 101)
-
-#define V4L2_CID_FM_RX_CLASS_BASE		(V4L2_CTRL_CLASS_FM_RX | 0x900)
-#define V4L2_CID_FM_RX_CLASS			(V4L2_CTRL_CLASS_FM_RX | 1)
-
-#define V4L2_CID_TUNE_DEEMPHASIS		(V4L2_CID_FM_RX_CLASS_BASE + 1)
-enum v4l2_deemphasis {
-	V4L2_DEEMPHASIS_DISABLED	= V4L2_PREEMPHASIS_DISABLED,
-	V4L2_DEEMPHASIS_50_uS		= V4L2_PREEMPHASIS_50_uS,
-	V4L2_DEEMPHASIS_75_uS		= V4L2_PREEMPHASIS_75_uS,
-};
-
-#define V4L2_CID_RDS_RECEPTION			(V4L2_CID_FM_RX_CLASS_BASE + 2)
-#define V4L2_CID_RDS_RX_PTY			(V4L2_CID_FM_RX_CLASS_BASE + 3)
-#define V4L2_CID_RDS_RX_PS_NAME			(V4L2_CID_FM_RX_CLASS_BASE + 4)
-#define V4L2_CID_RDS_RX_RADIO_TEXT		(V4L2_CID_FM_RX_CLASS_BASE + 5)
-#define V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT	(V4L2_CID_FM_RX_CLASS_BASE + 6)
-#define V4L2_CID_RDS_RX_TRAFFIC_PROGRAM		(V4L2_CID_FM_RX_CLASS_BASE + 7)
-#define V4L2_CID_RDS_RX_MUSIC_SPEECH		(V4L2_CID_FM_RX_CLASS_BASE + 8)
-
-#define V4L2_CID_RF_TUNER_CLASS_BASE		(V4L2_CTRL_CLASS_RF_TUNER | 0x900)
-#define V4L2_CID_RF_TUNER_CLASS			(V4L2_CTRL_CLASS_RF_TUNER | 1)
-
-#define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO	(V4L2_CID_RF_TUNER_CLASS_BASE + 11)
-#define V4L2_CID_RF_TUNER_BANDWIDTH		(V4L2_CID_RF_TUNER_CLASS_BASE + 12)
-#define V4L2_CID_RF_TUNER_RF_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 32)
-#define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO		(V4L2_CID_RF_TUNER_CLASS_BASE + 41)
-#define V4L2_CID_RF_TUNER_LNA_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 42)
-#define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO	(V4L2_CID_RF_TUNER_CLASS_BASE + 51)
-#define V4L2_CID_RF_TUNER_MIXER_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 52)
-#define V4L2_CID_RF_TUNER_IF_GAIN_AUTO		(V4L2_CID_RF_TUNER_CLASS_BASE + 61)
-#define V4L2_CID_RF_TUNER_IF_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 62)
-#define V4L2_CID_RF_TUNER_PLL_LOCK			(V4L2_CID_RF_TUNER_CLASS_BASE + 91)
-
-
-/*  Detection-class control IDs defined by V4L2 */
-#define V4L2_CID_DETECT_CLASS_BASE		(V4L2_CTRL_CLASS_DETECT | 0x900)
-#define V4L2_CID_DETECT_CLASS			(V4L2_CTRL_CLASS_DETECT | 1)
-
-#define V4L2_CID_DETECT_MD_MODE			(V4L2_CID_DETECT_CLASS_BASE + 1)
-enum v4l2_detect_md_mode {
-	V4L2_DETECT_MD_MODE_DISABLED		= 0,
-	V4L2_DETECT_MD_MODE_GLOBAL		= 1,
-	V4L2_DETECT_MD_MODE_THRESHOLD_GRID	= 2,
-	V4L2_DETECT_MD_MODE_REGION_GRID		= 3,
-};
-#define V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD	(V4L2_CID_DETECT_CLASS_BASE + 2)
-#define V4L2_CID_DETECT_MD_THRESHOLD_GRID	(V4L2_CID_DETECT_CLASS_BASE + 3)
-#define V4L2_CID_DETECT_MD_REGION_GRID		(V4L2_CID_DETECT_CLASS_BASE + 4)
-
-
-/* Complex controls */
-
-#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG			0x01
-#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG			0x02
-#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG			0x04
-#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG			0x08
-#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG			0x10
-#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG			0x20
-
-#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE		0x01
-#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS	0x02
-#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO		0x04
-#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED	0x08
-#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY			0x10
-#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD		0x20
-#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE			0x40
-struct v4l2_ctrl_h264_sps {
-	__u8 profile_idc;
-	__u8 constraint_set_flags;
-	__u8 level_idc;
-	__u8 seq_parameter_set_id;
-	__u8 chroma_format_idc;
-	__u8 bit_depth_luma_minus8;
-	__u8 bit_depth_chroma_minus8;
-	__u8 log2_max_frame_num_minus4;
-	__u8 pic_order_cnt_type;
-	__u8 log2_max_pic_order_cnt_lsb_minus4;
-	__s32 offset_for_non_ref_pic;
-	__s32 offset_for_top_to_bottom_field;
-	__u8 num_ref_frames_in_pic_order_cnt_cycle;
-	__s32 offset_for_ref_frame[255];
-	__u8 max_num_ref_frames;
-	__u16 pic_width_in_mbs_minus1;
-	__u16 pic_height_in_map_units_minus1;
-	__u8 flags;
-};
-
-#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE				0x0001
-#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT	0x0002
-#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED				0x0004
-#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT		0x0008
-#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED			0x0010
-#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT			0x0020
-#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE				0x0040
-#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT			0x0080
-struct v4l2_ctrl_h264_pps {
-	__u8 pic_parameter_set_id;
-	__u8 seq_parameter_set_id;
-	__u8 num_slice_groups_minus1;
-	__u8 num_ref_idx_l0_default_active_minus1;
-	__u8 num_ref_idx_l1_default_active_minus1;
-	__u8 weighted_bipred_idc;
-	__s8 pic_init_qp_minus26;
-	__s8 pic_init_qs_minus26;
-	__s8 chroma_qp_index_offset;
-	__s8 second_chroma_qp_index_offset;
-	__u8 flags;
-};
-
-struct v4l2_ctrl_h264_scaling_matrix {
-	__u8 scaling_list_4x4[6][16];
-	__u8 scaling_list_8x8[6][64];
-};
-
-struct v4l2_h264_weight_factors {
-	__s8 luma_weight[32];
-	__s8 luma_offset[32];
-	__s8 chroma_weight[32][2];
-	__s8 chroma_offset[32][2];
-};
-
-struct v4l2_h264_pred_weight_table {
-	__u8 luma_log2_weight_denom;
-	__u8 chroma_log2_weight_denom;
-	struct v4l2_h264_weight_factors weight_factors[2];
-};
-
-#define V4L2_SLICE_FLAG_FIELD_PIC		0x01
-#define V4L2_SLICE_FLAG_BOTTOM_FIELD		0x02
-#define V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED	0x04
-#define V4L2_SLICE_FLAG_SP_FOR_SWITCH		0x08
-struct v4l2_ctrl_h264_slice_param {
-	/* Size in bytes, including header */
-	__u32 size;
-	/* Offset in bits to slice_data() from the beginning of this slice. */
-	__u32 header_bit_size;
-
-	__u16 first_mb_in_slice;
-	__u8 slice_type;
-	__u8 pic_parameter_set_id;
-	__u8 colour_plane_id;
-	__u16 frame_num;
-	__u16 idr_pic_id;
-	__u16 pic_order_cnt_lsb;
-	__s32 delta_pic_order_cnt_bottom;
-	__s32 delta_pic_order_cnt0;
-	__s32 delta_pic_order_cnt1;
-	__u8 redundant_pic_cnt;
-
-	struct v4l2_h264_pred_weight_table pred_weight_table;
-	/* Size in bits of dec_ref_pic_marking() syntax element. */
-	__u32 dec_ref_pic_marking_bit_size;
-	/* Size in bits of pic order count syntax. */
-	__u32 pic_order_cnt_bit_size;
-
-	__u8 cabac_init_idc;
-	__s8 slice_qp_delta;
-	__s8 slice_qs_delta;
-	__u8 disable_deblocking_filter_idc;
-	__s8 slice_alpha_c0_offset_div2;
-	__s8 slice_beta_offset_div2;
-	__u32 slice_group_change_cycle;
-
-	__u8 num_ref_idx_l0_active_minus1;
-	__u8 num_ref_idx_l1_active_minus1;
-	/*  Entries on each list are indices
-	 *  into v4l2_ctrl_h264_decode_param.dpb[]. */
-	__u8 ref_pic_list0[32];
-	__u8 ref_pic_list1[32];
-
-	__u8 flags;
-};
-
-/* If not set, this entry is unused for reference. */
-#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE		0x01
-#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM	0x02
-struct v4l2_h264_dpb_entry {
-	__u32 buf_index; /* v4l2_buffer index */
-	__u16 frame_num;
-	__u16 pic_num;
-	/* Note that field is indicated by v4l2_buffer.field */
-	__s32 top_field_order_cnt;
-	__s32 bottom_field_order_cnt;
-	__u8 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
-};
-
-struct v4l2_ctrl_h264_decode_param {
-	__u32 num_slices;
-	__u8 idr_pic_flag;
-	__u8 nal_ref_idc;
-	__s32 top_field_order_cnt;
-	__s32 bottom_field_order_cnt;
-	__u8 ref_pic_list_p0[32];
-	__u8 ref_pic_list_b0[32];
-	__u8 ref_pic_list_b1[32];
-	struct v4l2_h264_dpb_entry dpb[16];
-};
-
-#define V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED              0x01
-#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP           0x02
-#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA  0x04
-struct v4l2_vp8_sgmnt_hdr {
-	__u8 segment_feature_mode;
-
-	__s8 quant_update[4];
-	__s8 lf_update[4];
-	__u8 segment_probs[3];
-
-	__u8 flags;
-};
-
-#define V4L2_VP8_LF_HDR_ADJ_ENABLE	0x01
-#define V4L2_VP8_LF_HDR_DELTA_UPDATE	0x02
-struct v4l2_vp8_loopfilter_hdr {
-	__u8 type;
-	__u8 level;
-	__u8 sharpness_level;
-	__s8 ref_frm_delta_magnitude[4];
-	__s8 mb_mode_delta_magnitude[4];
-
-	__u8 flags;
-};
-
-struct v4l2_vp8_quantization_hdr {
-	__u8 y_ac_qi;
-	__s8 y_dc_delta;
-	__s8 y2_dc_delta;
-	__s8 y2_ac_delta;
-	__s8 uv_dc_delta;
-	__s8 uv_ac_delta;
-	__u16 dequant_factors[4][3][2];
-};
-
-struct v4l2_vp8_entropy_hdr {
-	__u8 coeff_probs[4][8][3][11];
-	__u8 y_mode_probs[4];
-	__u8 uv_mode_probs[3];
-	__u8 mv_probs[2][19];
-};
-
-#define V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL		0x01
-#define V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME		0x02
-#define V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF	0x04
-struct v4l2_ctrl_vp8_frame_hdr {
-	/* 0: keyframe, 1: not a keyframe */
-	__u8 key_frame;
-	__u8 version;
-
-	/* Populated also if not a key frame */
-	__u16 width;
-	__u8 horizontal_scale;
-	__u16 height;
-	__u8 vertical_scale;
-
-	struct v4l2_vp8_sgmnt_hdr sgmnt_hdr;
-	struct v4l2_vp8_loopfilter_hdr lf_hdr;
-	struct v4l2_vp8_quantization_hdr quant_hdr;
-	struct v4l2_vp8_entropy_hdr entropy_hdr;
-
-	__u8 sign_bias_golden;
-	__u8 sign_bias_alternate;
-
-	__u8 prob_skip_false;
-	__u8 prob_intra;
-	__u8 prob_last;
-	__u8 prob_gf;
-
-	__u32 first_part_size;
-	__u32 first_part_offset;
-	/*
-	 * Offset in bits of MB data in first partition,
-	 * i.e. bit offset starting from first_part_offset.
-	 */
-	__u32 macroblock_bit_offset;
-
-	__u8 num_dct_parts;
-	__u32 dct_part_sizes[8];
-
-	__u8 bool_dec_range;
-	__u8 bool_dec_value;
-	__u8 bool_dec_count;
-
-	/* v4l2_buffer indices of reference frames */
-	__u32 last_frame;
-	__u32 golden_frame;
-	__u32 alt_frame;
-
-	__u8 flags;
-};
-
-#define V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED		0x01
-#define V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP		0x02
-#define V4L2_VP9_SGMNT_PARAM_FLAG_TEMPORAL_UPDATE	0x04
-#define V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_DATA		0x08
-#define V4L2_VP9_SGMNT_PARAM_FLAG_ABS_OR_DELTA_UPDATE	0x10
-struct v4l2_vp9_segmentation_params {
-	__u8 tree_probs[7];
-	__u8 pred_probs[3];
-	__u8 feature_enabled[8][4];
-	__s16 feature_data[8][4];
-
-	__u8 flags;
-};
-
-#define V4L2_VP9_LOOP_FLTR_FLAG_DELTA_ENABLED		0x01
-#define V4L2_VP9_LOOP_FLTR_FLAG_DELTA_UPDATE		0x02
-struct v4l2_vp9_loop_filter_params {
-	__u8 level;
-	__u8 sharpness;
-	__s8 deltas[4];
-	__s8 mode_deltas[2];
-	__u8 lvl_lookup[8][4][2];
-
-	__u8 flags;
-};
-
-#define V4L2_VP9_QUANT_PARAMS_FLAG_LOSSLESS		0x01
-struct v4l2_vp9_quantization_params {
-	__u8 base_q_idx;
-	__s8 delta_q_y_dc;
-	__s8 delta_q_uv_dc;
-	__s8 delta_q_uv_ac;
-
-	__u8 flags;
-};
-
-#define V4L2_VP9_FRAME_HDR_FLAG_SHOW_FRAME	0x01
-/* Error resilient mode enabled flag */
-#define V4L2_VP9_FRAME_HDR_FLAG_ERR_RES		0x02
-#define V4L2_VP9_FRAME_HDR_FLAG_FRAME_INTRA	0x04
-#define V4L2_VP9_FRAME_HDR_ALLOW_HIGH_PREC_MV	0x08
-#define V4L2_VP9_FRAME_HDR_REFRESH_FRAME_CTX	0x10
-#define V4L2_VP9_FRAME_HDR_PARALLEL_DEC_MODE	0x20
-struct v4l2_ctrl_vp9_frame_hdr {
-	__u8 profile;
-	/* 0: keyframe, 1: non-keyframe */
-	__u8 frame_type;
-
-	__u8 bit_depth;
-	__u8 color_space;
-	__u8 color_range;
-	__u8 subsampling_x;
-	__u8 subsampling_y;
-
-	__u32 frame_width;
-	__u32 frame_height;
-	__u32 render_width;
-	__u32 render_height;
-
-	__u8 reset_frame_context;
-
-	__u8 interpolation_filter;
-	__u8 frame_context_idx;
-
-	struct v4l2_vp9_loop_filter_params lf_params;
-	struct v4l2_vp9_quantization_params quant_params;
-	struct v4l2_vp9_segmentation_params sgmnt_params;
-
-	__u8 tile_cols_log2;
-	__u8 tile_rows_log2;
-
-	__u16 header_size_in_bytes;
-
-	__u8 flags;
-};
-
-struct v4l2_vp9_reference_frame {
-	 /* v4l2_buffer index */
-	__u32 buf_index;
-
-	__u32 frame_width;
-	__u32 frame_height;
-	__u8 bit_depth;
-	__u8 subsampling_x;
-	__u8 subsampling_y;
-};
-
-struct v4l2_ctrl_vp9_decode_param {
-	/* v4l2_buffer index for all reference frames (frame slots). */
-	__u32 ref_frames[8];
-
-	/* Active reference frames, [0]: last, [1]: golden, [2]: altref */
-	struct v4l2_vp9_reference_frame active_ref_frames[3];
-};
-
-struct v4l2_vp9_entropy_ctx {
-	__u8 tx_probs_8x8[2][1];
-	__u8 tx_probs_16x16[2][2];
-	__u8 tx_probs_32x32[2][3];
-
-	__u8 coef_probs[4][2][2][6][6][3];
-	__u8 skip_prob[3];
-	__u8 inter_mode_probs[7][3];
-	__u8 interp_filter_probs[4][2];
-	__u8 is_inter_prob[4];
-
-	__u8 comp_mode_prob[5];
-	__u8 single_ref_prob[5][2];
-	__u8 comp_ref_prob[5];
-
-	__u8 y_mode_probs[4][9];
-	__u8 uv_mode_probs[10][9];
-
-	__u8 partition_probs[16][3];
-
-	__u8 mv_joint_probs[3];
-	__u8 mv_sign_prob[2];
-	__u8 mv_class_probs[2][10];
-	__u8 mv_class0_bit_prob[2];
-	__u8 mv_bits_prob[2][10];
-	__u8 mv_class0_fr_probs[2][2][3];
-	__u8 mv_fr_probs[2][3];
-	__u8 mv_class0_hp_prob[2];
-	__u8 mv_hp_prob[2];
-};
-
-/* Entropy context state for current frame (frame_context_idx). */
-struct v4l2_ctrl_vp9_entropy {
-	__u8 tx_mode;
-	__u8 reference_mode;
-
-	/* Entropy context after load_probs2(). */
-	struct v4l2_vp9_entropy_ctx initial_entropy_ctx;
-
-	/*
-	 * Entropy context for the current decoding state: when passed to the
-	 * driver, contains the state of initial_entropy_ctx after parsing the
-	 * compressed header. After decoding is done (after vb2_buffer_done() is
-	 * called on the associated buffer), state as after refresh_probs().
-	 */
-	struct v4l2_vp9_entropy_ctx current_entropy_ctx;
-};
-
-#endif
diff --git a/vda/v4l2_controls_custom.h b/vda/v4l2_controls_custom.h
new file mode 100644
index 0000000..c34e417
--- /dev/null
+++ b/vda/v4l2_controls_custom.h
@@ -0,0 +1,411 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note:
+// This header file has extracted some new structs and definitions in videodev2.h from ChromeOS
+// which is not upstreamed in Linux mainline. This should be removed once they are upstreamed.
+// TODO(johnylin): remove this file once it is upstreamed.
+
+#ifndef V4L2_CONTROLS_CUSTOM_H_
+#define V4L2_CONTROLS_CUSTOM_H_
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+383)
+#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+384)
+#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+385)
+#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM (V4L2_CID_MPEG_BASE+386)
+#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM (V4L2_CID_MPEG_BASE+387)
+#define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR (V4L2_CID_MPEG_BASE+388)
+
+#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR (V4L2_CID_MPEG_BASE+512)
+
+#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR (V4L2_CID_MPEG_BASE+513)
+#define V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM (V4L2_CID_MPEG_BASE+514)
+#define V4L2_CID_MPEG_VIDEO_VP9_ENTROPY (V4L2_CID_MPEG_BASE+515)
+
+/* Complex controls */
+
+#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
+#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
+#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
+#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
+#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
+#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
+
+#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
+#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
+#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
+#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
+#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
+#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
+#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
+struct v4l2_ctrl_h264_sps {
+  __u8 profile_idc;
+  __u8 constraint_set_flags;
+  __u8 level_idc;
+  __u8 seq_parameter_set_id;
+  __u8 chroma_format_idc;
+  __u8 bit_depth_luma_minus8;
+  __u8 bit_depth_chroma_minus8;
+  __u8 log2_max_frame_num_minus4;
+  __u8 pic_order_cnt_type;
+  __u8 log2_max_pic_order_cnt_lsb_minus4;
+  __s32 offset_for_non_ref_pic;
+  __s32 offset_for_top_to_bottom_field;
+  __u8 num_ref_frames_in_pic_order_cnt_cycle;
+  __s32 offset_for_ref_frame[255];
+  __u8 max_num_ref_frames;
+  __u16 pic_width_in_mbs_minus1;
+  __u16 pic_height_in_map_units_minus1;
+  __u8 flags;
+};
+
+#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
+#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
+#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
+#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
+#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
+#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
+#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
+#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT 0x0080
+struct v4l2_ctrl_h264_pps {
+  __u8 pic_parameter_set_id;
+  __u8 seq_parameter_set_id;
+  __u8 num_slice_groups_minus1;
+  __u8 num_ref_idx_l0_default_active_minus1;
+  __u8 num_ref_idx_l1_default_active_minus1;
+  __u8 weighted_bipred_idc;
+  __s8 pic_init_qp_minus26;
+  __s8 pic_init_qs_minus26;
+  __s8 chroma_qp_index_offset;
+  __s8 second_chroma_qp_index_offset;
+  __u8 flags;
+};
+
+struct v4l2_ctrl_h264_scaling_matrix {
+  __u8 scaling_list_4x4[6][16];
+  __u8 scaling_list_8x8[6][64];
+};
+
+struct v4l2_h264_weight_factors {
+  __s8 luma_weight[32];
+  __s8 luma_offset[32];
+  __s8 chroma_weight[32][2];
+  __s8 chroma_offset[32][2];
+};
+
+struct v4l2_h264_pred_weight_table {
+  __u8 luma_log2_weight_denom;
+  __u8 chroma_log2_weight_denom;
+  struct v4l2_h264_weight_factors weight_factors[2];
+};
+
+#define V4L2_SLICE_FLAG_FIELD_PIC 0x01
+#define V4L2_SLICE_FLAG_BOTTOM_FIELD 0x02
+#define V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x04
+#define V4L2_SLICE_FLAG_SP_FOR_SWITCH 0x08
+struct v4l2_ctrl_h264_slice_param {
+  /* Size in bytes, including header */
+  __u32 size;
+  /* Offset in bits to slice_data() from the beginning of this slice. */
+  __u32 header_bit_size;
+
+  __u16 first_mb_in_slice;
+  __u8 slice_type;
+  __u8 pic_parameter_set_id;
+  __u8 colour_plane_id;
+  __u16 frame_num;
+  __u16 idr_pic_id;
+  __u16 pic_order_cnt_lsb;
+  __s32 delta_pic_order_cnt_bottom;
+  __s32 delta_pic_order_cnt0;
+  __s32 delta_pic_order_cnt1;
+  __u8 redundant_pic_cnt;
+
+  struct v4l2_h264_pred_weight_table pred_weight_table;
+  /* Size in bits of dec_ref_pic_marking() syntax element. */
+  __u32 dec_ref_pic_marking_bit_size;
+  /* Size in bits of pic order count syntax. */
+  __u32 pic_order_cnt_bit_size;
+
+  __u8 cabac_init_idc;
+  __s8 slice_qp_delta;
+  __s8 slice_qs_delta;
+  __u8 disable_deblocking_filter_idc;
+  __s8 slice_alpha_c0_offset_div2;
+  __s8 slice_beta_offset_div2;
+  __u32 slice_group_change_cycle;
+
+  __u8 num_ref_idx_l0_active_minus1;
+  __u8 num_ref_idx_l1_active_minus1;
+  /*  Entries on each list are indices
+   *  into v4l2_ctrl_h264_decode_param.dpb[]. */
+  __u8 ref_pic_list0[32];
+  __u8 ref_pic_list1[32];
+
+  __u8 flags;
+};
+
+/* If not set, this entry is unused for reference. */
+#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x01
+#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x02
+struct v4l2_h264_dpb_entry {
+  __u32 buf_index; /* v4l2_buffer index */
+  __u16 frame_num;
+  __u16 pic_num;
+  /* Note that field is indicated by v4l2_buffer.field */
+  __s32 top_field_order_cnt;
+  __s32 bottom_field_order_cnt;
+  __u8 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
+};
+
+struct v4l2_ctrl_h264_decode_param {
+  __u32 num_slices;
+  __u8 idr_pic_flag;
+  __u8 nal_ref_idc;
+  __s32 top_field_order_cnt;
+  __s32 bottom_field_order_cnt;
+  __u8 ref_pic_list_p0[32];
+  __u8 ref_pic_list_b0[32];
+  __u8 ref_pic_list_b1[32];
+  struct v4l2_h264_dpb_entry dpb[16];
+};
+
+#define V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED 0x01
+#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA 0x04
+struct v4l2_vp8_sgmnt_hdr {
+  __u8 segment_feature_mode;
+
+  __s8 quant_update[4];
+  __s8 lf_update[4];
+  __u8 segment_probs[3];
+
+  __u8 flags;
+};
+
+#define V4L2_VP8_LF_HDR_ADJ_ENABLE 0x01
+#define V4L2_VP8_LF_HDR_DELTA_UPDATE 0x02
+struct v4l2_vp8_loopfilter_hdr {
+  __u8 type;
+  __u8 level;
+  __u8 sharpness_level;
+  __s8 ref_frm_delta_magnitude[4];
+  __s8 mb_mode_delta_magnitude[4];
+
+  __u8 flags;
+};
+
+struct v4l2_vp8_quantization_hdr {
+  __u8 y_ac_qi;
+  __s8 y_dc_delta;
+  __s8 y2_dc_delta;
+  __s8 y2_ac_delta;
+  __s8 uv_dc_delta;
+  __s8 uv_ac_delta;
+  __u16 dequant_factors[4][3][2];
+};
+
+struct v4l2_vp8_entropy_hdr {
+  __u8 coeff_probs[4][8][3][11];
+  __u8 y_mode_probs[4];
+  __u8 uv_mode_probs[3];
+  __u8 mv_probs[2][19];
+};
+
+#define V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL 0x01
+#define V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME 0x02
+#define V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF 0x04
+struct v4l2_ctrl_vp8_frame_hdr {
+  /* 0: keyframe, 1: not a keyframe */
+  __u8 key_frame;
+  __u8 version;
+
+  /* Populated also if not a key frame */
+  __u16 width;
+  __u8 horizontal_scale;
+  __u16 height;
+  __u8 vertical_scale;
+
+  struct v4l2_vp8_sgmnt_hdr sgmnt_hdr;
+  struct v4l2_vp8_loopfilter_hdr lf_hdr;
+  struct v4l2_vp8_quantization_hdr quant_hdr;
+  struct v4l2_vp8_entropy_hdr entropy_hdr;
+
+  __u8 sign_bias_golden;
+  __u8 sign_bias_alternate;
+
+  __u8 prob_skip_false;
+  __u8 prob_intra;
+  __u8 prob_last;
+  __u8 prob_gf;
+
+  __u32 first_part_size;
+  __u32 first_part_offset;
+  /*
+   * Offset in bits of MB data in first partition,
+   * i.e. bit offset starting from first_part_offset.
+   */
+  __u32 macroblock_bit_offset;
+
+  __u8 num_dct_parts;
+  __u32 dct_part_sizes[8];
+
+  __u8 bool_dec_range;
+  __u8 bool_dec_value;
+  __u8 bool_dec_count;
+
+  /* v4l2_buffer indices of reference frames */
+  __u32 last_frame;
+  __u32 golden_frame;
+  __u32 alt_frame;
+
+  __u8 flags;
+};
+
+#define V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED 0x01
+#define V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP9_SGMNT_PARAM_FLAG_TEMPORAL_UPDATE 0x04
+#define V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_DATA 0x08
+#define V4L2_VP9_SGMNT_PARAM_FLAG_ABS_OR_DELTA_UPDATE 0x10
+struct v4l2_vp9_segmentation_params {
+  __u8 tree_probs[7];
+  __u8 pred_probs[3];
+  __u8 feature_enabled[8][4];
+  __s16 feature_data[8][4];
+
+  __u8 flags;
+};
+
+#define V4L2_VP9_LOOP_FLTR_FLAG_DELTA_ENABLED 0x01
+#define V4L2_VP9_LOOP_FLTR_FLAG_DELTA_UPDATE 0x02
+struct v4l2_vp9_loop_filter_params {
+  __u8 level;
+  __u8 sharpness;
+  __s8 deltas[4];
+  __s8 mode_deltas[2];
+  __u8 lvl_lookup[8][4][2];
+
+  __u8 flags;
+};
+
+#define V4L2_VP9_QUANT_PARAMS_FLAG_LOSSLESS 0x01
+struct v4l2_vp9_quantization_params {
+  __u8 base_q_idx;
+  __s8 delta_q_y_dc;
+  __s8 delta_q_uv_dc;
+  __s8 delta_q_uv_ac;
+
+  __u8 flags;
+};
+
+#define V4L2_VP9_FRAME_HDR_FLAG_SHOW_FRAME 0x01
+/* Error resilient mode enabled flag */
+#define V4L2_VP9_FRAME_HDR_FLAG_ERR_RES	0x02
+#define V4L2_VP9_FRAME_HDR_FLAG_FRAME_INTRA 0x04
+#define V4L2_VP9_FRAME_HDR_ALLOW_HIGH_PREC_MV 0x08
+#define V4L2_VP9_FRAME_HDR_REFRESH_FRAME_CTX 0x10
+#define V4L2_VP9_FRAME_HDR_PARALLEL_DEC_MODE 0x20
+struct v4l2_ctrl_vp9_frame_hdr {
+  __u8 profile;
+  /* 0: keyframe, 1: non-keyframe */
+  __u8 frame_type;
+
+  __u8 bit_depth;
+  __u8 color_space;
+  __u8 color_range;
+  __u8 subsampling_x;
+  __u8 subsampling_y;
+
+  __u32 frame_width;
+  __u32 frame_height;
+  __u32 render_width;
+  __u32 render_height;
+
+  __u8 reset_frame_context;
+
+  __u8 interpolation_filter;
+  __u8 frame_context_idx;
+
+  struct v4l2_vp9_loop_filter_params lf_params;
+  struct v4l2_vp9_quantization_params quant_params;
+  struct v4l2_vp9_segmentation_params sgmnt_params;
+
+  __u8 tile_cols_log2;
+  __u8 tile_rows_log2;
+
+  __u16 header_size_in_bytes;
+
+  __u8 flags;
+};
+
+struct v4l2_vp9_reference_frame {
+  /* v4l2_buffer index */
+  __u32 buf_index;
+
+  __u32 frame_width;
+  __u32 frame_height;
+  __u8 bit_depth;
+  __u8 subsampling_x;
+  __u8 subsampling_y;
+};
+
+struct v4l2_ctrl_vp9_decode_param {
+  /* v4l2_buffer index for all reference frames (frame slots). */
+  __u32 ref_frames[8];
+
+  /* Active reference frames, [0]: last, [1]: golden, [2]: altref */
+  struct v4l2_vp9_reference_frame active_ref_frames[3];
+};
+
+struct v4l2_vp9_entropy_ctx {
+  __u8 tx_probs_8x8[2][1];
+  __u8 tx_probs_16x16[2][2];
+  __u8 tx_probs_32x32[2][3];
+
+  __u8 coef_probs[4][2][2][6][6][3];
+  __u8 skip_prob[3];
+  __u8 inter_mode_probs[7][3];
+  __u8 interp_filter_probs[4][2];
+  __u8 is_inter_prob[4];
+
+  __u8 comp_mode_prob[5];
+  __u8 single_ref_prob[5][2];
+  __u8 comp_ref_prob[5];
+
+  __u8 y_mode_probs[4][9];
+  __u8 uv_mode_probs[10][9];
+
+  __u8 partition_probs[16][3];
+
+  __u8 mv_joint_probs[3];
+  __u8 mv_sign_prob[2];
+  __u8 mv_class_probs[2][10];
+  __u8 mv_class0_bit_prob[2];
+  __u8 mv_bits_prob[2][10];
+  __u8 mv_class0_fr_probs[2][2][3];
+  __u8 mv_fr_probs[2][3];
+  __u8 mv_class0_hp_prob[2];
+  __u8 mv_hp_prob[2];
+};
+
+/* Entropy context state for current frame (frame_context_idx). */
+struct v4l2_ctrl_vp9_entropy {
+  __u8 tx_mode;
+  __u8 reference_mode;
+
+  /* Entropy context after load_probs2(). */
+  struct v4l2_vp9_entropy_ctx initial_entropy_ctx;
+
+  /*
+   * Entropy context for the current decoding state: when passed to the
+   * driver, contains the state of initial_entropy_ctx after parsing the
+   * compressed header. After decoding is done (after vb2_buffer_done() is
+   * called on the associated buffer), state as after refresh_probs().
+   */
+  struct v4l2_vp9_entropy_ctx current_entropy_ctx;
+};
+
+#endif  // V4L2_CONTROLS_CUSTOM_H_
diff --git a/vda/v4l2_device.cc b/vda/v4l2_device.cc
index 16446d3..78ef474 100644
--- a/vda/v4l2_device.cc
+++ b/vda/v4l2_device.cc
@@ -1,6 +1,8 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 09ea0d2
+// Note: it's also merged with generic_v4l2_device.cc (head: a9d98e6)
 
 #include <errno.h>
 #include <fcntl.h>
@@ -15,6 +17,10 @@
 #include "base/strings/stringprintf.h"
 #include "v4l2_device.h"
 
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
+
 namespace media {
 
 V4L2Device::V4L2Device() {}
@@ -47,7 +53,7 @@
       return PIXEL_FORMAT_ARGB;
 
     default:
-      DVLOG(1) << "Add more cases as needed";
+      DVLOGF(1) << "Add more cases as needed";
       return PIXEL_FORMAT_UNKNOWN;
   }
 }
@@ -131,7 +137,7 @@
       break;
 
     default:
-      DVLOG(1) << "Unhandled pixelformat " << std::hex << "0x" << pix_fmt;
+      VLOGF(1) << "Unhandled pixelformat " << std::hex << "0x" << pix_fmt;
       return profiles;
   }
 
@@ -156,7 +162,7 @@
   nfds = 1;
 
   if (poll_device) {
-    DVLOG(3) << "Poll(): adding device fd to poll() set";
+    DVLOGF(5) << "Poll(): adding device fd to poll() set";
     pollfds[nfds].fd = device_fd_.get();
     pollfds[nfds].events = POLLIN | POLLOUT | POLLERR | POLLPRI;
     pollfd = nfds;
@@ -164,7 +170,7 @@
   }
 
   if (HANDLE_EINTR(poll(pollfds, nfds, -1)) == -1) {
-    DPLOG(ERROR) << "poll() failed";
+    VPLOGF(1) << "poll() failed";
     return false;
   }
   *event_pending = (pollfd != -1 && pollfds[pollfd].revents & POLLPRI);
@@ -185,19 +191,19 @@
 }
 
 bool V4L2Device::SetDevicePollInterrupt() {
-  DVLOG(3) << "SetDevicePollInterrupt()";
+  DVLOGF(4);
 
   const uint64_t buf = 1;
   if (HANDLE_EINTR(write(device_poll_interrupt_fd_.get(), &buf, sizeof(buf))) ==
       -1) {
-    DPLOG(ERROR) << "SetDevicePollInterrupt(): write() failed";
+    VPLOGF(1) << "write() failed";
     return false;
   }
   return true;
 }
 
 bool V4L2Device::ClearDevicePollInterrupt() {
-  DVLOG(3) << "ClearDevicePollInterrupt()";
+  DVLOGF(5);
 
   uint64_t buf;
   if (HANDLE_EINTR(read(device_poll_interrupt_fd_.get(), &buf, sizeof(buf))) ==
@@ -206,7 +212,7 @@
       // No interrupt flag set, and we're reading nonblocking.  Not an error.
       return true;
     } else {
-      DPLOG(ERROR) << "ClearDevicePollInterrupt(): read() failed";
+      VPLOGF(1) << "read() failed";
       return false;
     }
   }
@@ -214,22 +220,23 @@
 }
 
 bool V4L2Device::Open(Type type, uint32_t v4l2_pixfmt) {
+  VLOGF(2);
   std::string path = GetDevicePathFor(type, v4l2_pixfmt);
 
   if (path.empty()) {
-    DVLOG(1) << "No devices supporting " << std::hex << "0x" << v4l2_pixfmt
+    VLOGF(1) << "No devices supporting " << std::hex << "0x" << v4l2_pixfmt
              << " for type: " << static_cast<int>(type);
     return false;
   }
 
   if (!OpenDevicePath(path, type)) {
-    LOG(ERROR) << "Failed opening " << path;
+    VLOGF(1) << "Failed opening " << path;
     return false;
   }
 
   device_poll_interrupt_fd_.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
   if (!device_poll_interrupt_fd_.is_valid()) {
-    LOG(ERROR) << "Failed creating a poll interrupt fd";
+    VLOGF(1) << "Failed creating a poll interrupt fd";
     return false;
   }
 
@@ -240,6 +247,7 @@
     int index,
     size_t num_planes,
     enum v4l2_buf_type buf_type) {
+  VLOGF(2);
   DCHECK(V4L2_TYPE_IS_MULTIPLANAR(buf_type));
 
   std::vector<base::ScopedFD> dmabuf_fds;
@@ -270,7 +278,7 @@
   const auto& devices = GetDevicesForType(type);
   for (const auto& device : devices) {
     if (!OpenDevicePath(device.first, type)) {
-      LOG(ERROR) << "Failed opening " << device.first;
+      VLOGF(1) << "Failed opening " << device.first;
       continue;
     }
 
@@ -320,15 +328,15 @@
   }
   if (max_resolution->IsEmpty()) {
     max_resolution->SetSize(1920, 1088);
-    LOG(ERROR) << "GetSupportedResolution failed to get maximum resolution for "
-               << "fourcc " << std::hex << pixelformat
-               << ", fall back to " << max_resolution->ToString();
+    VLOGF(1) << "GetSupportedResolution failed to get maximum resolution for "
+             << "fourcc " << std::hex << pixelformat
+             << ", fall back to " << max_resolution->ToString();
   }
   if (min_resolution->IsEmpty()) {
     min_resolution->SetSize(16, 16);
-    LOG(ERROR) << "GetSupportedResolution failed to get minimum resolution for "
-               << "fourcc " << std::hex << pixelformat
-               << ", fall back to " << min_resolution->ToString();
+    VLOGF(1) << "GetSupportedResolution failed to get minimum resolution for "
+             << "fourcc " << std::hex << pixelformat
+             << ", fall back to " << min_resolution->ToString();
   }
 }
 
@@ -341,8 +349,8 @@
   fmtdesc.type = buf_type;
 
   for (; Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0; ++fmtdesc.index) {
-    DVLOG(1) << "Found " << fmtdesc.description << std::hex << " (0x"
-             << fmtdesc.pixelformat << ")";
+    DVLOGF(3) << "Found " << fmtdesc.description << std::hex << " (0x"
+              << fmtdesc.pixelformat << ")";
     pixelformats.push_back(fmtdesc.pixelformat);
   }
 
@@ -373,9 +381,9 @@
       profile.profile = video_codec_profile;
       profiles.push_back(profile);
 
-      DVLOG(1) << "Found decoder profile " << GetProfileName(profile.profile)
-               << ", resolutions: " << profile.min_resolution.ToString() << " "
-               << profile.max_resolution.ToString();
+      DVLOGF(3) << "Found decoder profile " << GetProfileName(profile.profile)
+                << ", resolutions: " << profile.min_resolution.ToString() << " "
+                << profile.max_resolution.ToString();
     }
   }
 
@@ -394,6 +402,7 @@
 }
 
 void V4L2Device::CloseDevice() {
+  VLOGF(2);
   device_fd_.reset();
 }
 
@@ -433,7 +442,7 @@
     const auto& supported_pixelformats =
         EnumerateSupportedPixelformats(buf_type);
     if (!supported_pixelformats.empty()) {
-      DVLOG(1) << "Found device: " << path;
+      DVLOGF(3) << "Found device: " << path;
       devices.push_back(std::make_pair(path, supported_pixelformats));
     }
 
diff --git a/vda/v4l2_device.h b/vda/v4l2_device.h
index 41dd616..e54a1e7 100644
--- a/vda/v4l2_device.h
+++ b/vda/v4l2_device.h
@@ -5,6 +5,8 @@
 // This file defines the V4L2Device interface which is used by the
 // V4L2DecodeAccelerator class to delegate/pass the device specific
 // handling of any of the functionalities.
+// Note: ported from Chromium commit head: fb70f64
+// Note: it's also merged with generic_v4l2_device.h (head: fb70f64)
 
 #ifndef V4L2_DEVICE_H_
 #define V4L2_DEVICE_H_
@@ -19,7 +21,7 @@
 #include "video_codecs.h"
 #include "video_decode_accelerator.h"
 #include "video_pixel_format.h"
-#include "videodev2.h"
+#include "videodev2_custom.h"
 
 // TODO(posciak): remove this once V4L2 headers are updated.
 #define V4L2_PIX_FMT_MT21 v4l2_fourcc('M', 'T', '2', '1')
diff --git a/vda/v4l2_slice_video_decode_accelerator.cc b/vda/v4l2_slice_video_decode_accelerator.cc
index 2d81020..4e82dbd 100644
--- a/vda/v4l2_slice_video_decode_accelerator.cc
+++ b/vda/v4l2_slice_video_decode_accelerator.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #include "v4l2_slice_video_decode_accelerator.h"
 
@@ -27,21 +28,20 @@
 #include "base/threading/thread_task_runner_handle.h"
 #include "shared_memory_region.h"
 
-#define LOGF(level) LOG(level) << __func__ << "(): "
-#define DLOGF(level) DLOG(level) << __func__ << "(): "
 #define DVLOGF(level) DVLOG(level) << __func__ << "(): "
-#define PLOGF(level) PLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
 
-#define NOTIFY_ERROR(x)                         \
-  do {                                          \
-    LOGF(ERROR) << "Setting error state:" << x; \
-    SetErrorState(x);                           \
+#define NOTIFY_ERROR(x)                       \
+  do {                                        \
+    VLOGF(1) << "Setting error state: " << x; \
+    SetErrorState(x);                         \
   } while (0)
 
 #define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
   do {                                                          \
     if (device_->Ioctl(type, arg) != 0) {                       \
-      PLOGF(ERROR) << "ioctl() failed: " << type_str;           \
+      VPLOGF(1) << "ioctl() failed: " << type_str;              \
       return value;                                             \
     }                                                           \
   } while (0)
@@ -52,10 +52,10 @@
 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
   IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
 
-#define IOCTL_OR_LOG_ERROR(type, arg)              \
-  do {                                             \
-    if (device_->Ioctl(type, arg) != 0)            \
-      PLOGF(ERROR) << "ioctl() failed: " << #type; \
+#define IOCTL_OR_LOG_ERROR(type, arg)           \
+  do {                                          \
+    if (device_->Ioctl(type, arg) != 0)         \
+      VPLOGF(1) << "ioctl() failed: " << #type; \
   } while (0)
 
 namespace media {
@@ -84,6 +84,11 @@
   int input_record() const { return input_record_; }
   int output_record() const { return output_record_; }
   uint32_t config_store() const { return config_store_; }
+  Rect visible_rect() const { return visible_rect_; }
+
+  void set_visible_rect(const Rect& visible_rect) {
+    visible_rect_ = visible_rect;
+  }
 
   // Take references to each reference surface and keep them until the
   // target surface is decoded.
@@ -108,6 +113,7 @@
   int input_record_;
   int output_record_;
   uint32_t config_store_;
+  Rect visible_rect_;
 
   bool decoded_;
   ReleaseCB release_cb_;
@@ -474,7 +480,7 @@
 
 bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
                                                  Client* client) {
-  DVLOGF(3) << "profile: " << config.profile;
+  VLOGF(3) << "profile: " << config.profile;
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kUninitialized);
 
@@ -506,8 +512,8 @@
       V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
 
   if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) {
-    DVLOGF(1) << "Failed to open device for profile: " << config.profile
-              << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
+    VLOGF(1) << "Failed to open device for profile: " << config.profile
+             << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
     return false;
   }
 
@@ -532,8 +538,8 @@
   const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
   if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
-    LOGF(ERROR) << "ioctl() failed: VIDIOC_QUERYCAP"
-                << ", caps check failed: 0x" << std::hex << caps.capabilities;
+    VLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP"
+             << ", caps check failed: 0x" << std::hex << caps.capabilities;
     return false;
   }
 
@@ -541,7 +547,7 @@
     return false;
 
   if (!decoder_thread_.Start()) {
-    DLOGF(ERROR) << "device thread failed to start";
+    VLOGF(1) << "device thread failed to start";
     return false;
   }
   decoder_thread_task_runner_ = decoder_thread_.task_runner();
@@ -554,12 +560,12 @@
       FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
                             base::Unretained(this)));
 
-  DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
+  VLOGF(2) << "V4L2SliceVideoDecodeAccelerator initialized";
   return true;
 }
 
 void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kInitialized);
 
@@ -572,7 +578,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Destroy() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   if (decoder_thread_.IsRunning()) {
@@ -585,11 +591,11 @@
   }
 
   delete this;
-  DVLOGF(3) << "Destroyed";
+  VLOGF(2) << "Destroyed";
 }
 
 void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
-  DVLOGF(3);
+  DVLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   state_ = kError;
@@ -675,7 +681,7 @@
   }
 
   if (output_format_fourcc_ == 0) {
-    LOGF(ERROR) << "Could not find a usable output format";
+    VLOGF(1) << "Could not find a usable output format";
     return false;
   }
 
@@ -691,7 +697,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK(!input_streamon_);
   DCHECK(input_buffer_map_.empty());
@@ -703,7 +709,7 @@
   reqbufs.memory = V4L2_MEMORY_MMAP;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
   if (reqbufs.count < kNumInputBuffers) {
-    PLOGF(ERROR) << "Could not allocate enough output buffers";
+    VLOGF(1) << "Could not allocate enough output buffers";
     return false;
   }
   input_buffer_map_.resize(reqbufs.count);
@@ -712,7 +718,7 @@
 
     // Query for the MEMORY_MMAP pointer.
     struct v4l2_plane planes[VIDEO_MAX_PLANES];
-    struct v4l2_buffer buffer;
+    struct v4l2_buffer_custom buffer;
     memset(&buffer, 0, sizeof(buffer));
     memset(planes, 0, sizeof(planes));
     buffer.index = i;
@@ -727,7 +733,7 @@
                                   MAP_SHARED,
                                   buffer.m.planes[0].m.mem_offset);
     if (address == MAP_FAILED) {
-      PLOGF(ERROR) << "mmap() failed";
+      VLOGF(1) << "mmap() failed";
       return false;
     }
     input_buffer_map_[i].address = address;
@@ -738,29 +744,29 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK(!output_streamon_);
   DCHECK(output_buffer_map_.empty());
   DCHECK(surfaces_at_display_.empty());
   DCHECK(surfaces_at_device_.empty());
 
-  visible_size_ = decoder_->GetPicSize();
+  Size pic_size = decoder_->GetPicSize();
   size_t num_pictures = decoder_->GetRequiredNumOfPictures();
 
   DCHECK_GT(num_pictures, 0u);
-  DCHECK(!visible_size_.IsEmpty());
+  DCHECK(!pic_size.IsEmpty());
 
   struct v4l2_format format;
   memset(&format, 0, sizeof(format));
   format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
   format.fmt.pix_mp.pixelformat = output_format_fourcc_;
-  format.fmt.pix_mp.width = visible_size_.width();
-  format.fmt.pix_mp.height = visible_size_.height();
+  format.fmt.pix_mp.width = pic_size.width();
+  format.fmt.pix_mp.height = pic_size.height();
   format.fmt.pix_mp.num_planes = input_planes_count_;
 
   if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
-    PLOGF(ERROR) << "Failed setting format to: " << output_format_fourcc_;
+    VPLOGF(1) << "Failed setting format to: " << output_format_fourcc_;
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
@@ -770,22 +776,17 @@
   DCHECK_EQ(coded_size_.width() % 16, 0);
   DCHECK_EQ(coded_size_.height() % 16, 0);
 
-  if (!Rect(coded_size_).Contains(Rect(visible_size_))) {
-    LOGF(ERROR) << "Got invalid adjusted coded size: "
-                << coded_size_.ToString();
+  if (!Rect(coded_size_).Contains(Rect(pic_size))) {
+    VLOGF(1) << "Got invalid adjusted coded size: " << coded_size_.ToString();
     return false;
   }
 
   DVLOGF(3) << "buffer_count=" << num_pictures
-            << ", visible size=" << visible_size_.ToString()
+            << ", pic size=" << pic_size.ToString()
             << ", coded size=" << coded_size_.ToString();
 
-  // With ALLOCATE mode the client can sample it as RGB and doesn't need to
-  // know the precise format.
   VideoPixelFormat pixel_format =
-      (output_mode_ == Config::OutputMode::IMPORT)
-          ? V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)
-          : PIXEL_FORMAT_UNKNOWN;
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
 
   child_task_runner_->PostTask(
       FROM_HERE,
@@ -805,7 +806,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
          !decoder_thread_.IsRunning());
   DCHECK(!input_streamon_);
@@ -836,7 +837,7 @@
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   for (auto picture_buffer_id : picture_buffer_ids) {
-    DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
+    DVLOGF(4) << "dismissing PictureBuffer id=" << picture_buffer_id;
     client_->DismissPictureBuffer(picture_buffer_id);
   }
 
@@ -844,7 +845,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
-  DVLOGF(4);
+  DVLOGF(3);
   DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread());
 
   bool event_pending;
@@ -874,7 +875,7 @@
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!device_poll_thread_.IsRunning()) {
-    DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
+    DVLOGF(4) << "Device poll thread stopped, will not schedule poll";
     return;
   }
 
@@ -891,7 +892,7 @@
       FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                             base::Unretained(this), true));
 
-  DVLOGF(2) << "buffer counts: "
+  DVLOGF(3) << "buffer counts: "
             << "INPUT[" << decoder_input_queue_.size() << "]"
             << " => DEVICE["
             << free_input_buffers_.size() << "+"
@@ -913,13 +914,13 @@
 
   if (!EnqueueInputRecord(dec_surface->input_record(),
                           dec_surface->config_store())) {
-    DVLOGF(1) << "Failed queueing an input buffer";
+    VLOGF(1) << "Failed queueing an input buffer";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
 
   if (!EnqueueOutputRecord(dec_surface->output_record())) {
-    DVLOGF(1) << "Failed queueing an output buffer";
+    VLOGF(1) << "Failed queueing an output buffer";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
@@ -935,10 +936,10 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Dequeue() {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
-  struct v4l2_buffer dqbuf;
+  struct v4l2_buffer_custom dqbuf;
   struct v4l2_plane planes[VIDEO_MAX_PLANES];
   while (input_buffer_queued_count_ > 0) {
     DCHECK(input_streamon_);
@@ -953,7 +954,7 @@
         // EAGAIN if we're just out of buffers to dequeue.
         break;
       }
-      PLOGF(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
+      VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
       NOTIFY_ERROR(PLATFORM_FAILURE);
       return;
     }
@@ -981,7 +982,7 @@
         // EAGAIN if we're just out of buffers to dequeue.
         break;
       }
-      PLOGF(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
+      VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
       NOTIFY_ERROR(PLATFORM_FAILURE);
       return;
     }
@@ -989,13 +990,13 @@
     DCHECK(output_record.at_device);
     output_record.at_device = false;
     output_buffer_queued_count_--;
-    DVLOGF(3) << "Dequeued output=" << dqbuf.index
-              << " count " << output_buffer_queued_count_;
+    DVLOGF(4) << "Dequeued output=" << dqbuf.index << " count "
+              << output_buffer_queued_count_;
 
     V4L2DecodeSurfaceByOutputId::iterator it =
         surfaces_at_device_.find(dqbuf.index);
     if (it == surfaces_at_device_.end()) {
-      DLOGF(ERROR) << "Got invalid surface from device.";
+      VLOGF(1) << "Got invalid surface from device.";
       NOTIFY_ERROR(PLATFORM_FAILURE);
       return;
     }
@@ -1091,14 +1092,14 @@
 bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
     int index,
     uint32_t config_store) {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
   DCHECK_GT(config_store, 0u);
 
   // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
   InputRecord& input_record = input_buffer_map_[index];
   DCHECK(!input_record.at_device);
-  struct v4l2_buffer qbuf;
+  struct v4l2_buffer_custom qbuf;
   struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
   memset(&qbuf, 0, sizeof(qbuf));
   memset(qbuf_planes, 0, sizeof(qbuf_planes));
@@ -1119,7 +1120,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
 
   // Enqueue an output (VIDEO_CAPTURE) buffer.
@@ -1128,7 +1129,7 @@
   DCHECK(!output_record.at_client);
   DCHECK_NE(output_record.picture_id, -1);
 
-  struct v4l2_buffer qbuf;
+  struct v4l2_buffer_custom qbuf;
   struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
   memset(&qbuf, 0, sizeof(qbuf));
   memset(qbuf_planes, 0, sizeof(qbuf_planes));
@@ -1162,7 +1163,7 @@
 
   // Start up the device poll thread and schedule its first DevicePollTask().
   if (!device_poll_thread_.Start()) {
-    DLOGF(ERROR) << "Device thread failed to start";
+    VLOGF(1) << "Device thread failed to start";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
@@ -1192,7 +1193,7 @@
 
   // Signal the DevicePollTask() to stop, and stop the device poll thread.
   if (!device_->SetDevicePollInterrupt()) {
-    PLOGF(ERROR) << "SetDevicePollInterrupt(): failed";
+    VPLOGF(1) << "SetDevicePollInterrupt(): failed";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
@@ -1254,12 +1255,12 @@
 
 void V4L2SliceVideoDecodeAccelerator::Decode(
     const BitstreamBuffer& bitstream_buffer) {
-  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id()
             << ", size=" << bitstream_buffer.size();
   DCHECK(decode_task_runner_->BelongsToCurrentThread());
 
   if (bitstream_buffer.id() < 0) {
-    LOGF(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+    VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
     if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
       base::SharedMemory::CloseHandle(bitstream_buffer.handle());
     NOTIFY_ERROR(INVALID_ARGUMENT);
@@ -1273,7 +1274,7 @@
 
 void V4L2SliceVideoDecodeAccelerator::DecodeTask(
     const BitstreamBuffer& bitstream_buffer) {
-  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id()
             << " size=" << bitstream_buffer.size();
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
@@ -1286,11 +1287,11 @@
     return;
 
   if (!bitstream_record->shm->Map()) {
-    LOGF(ERROR) << "Could not map bitstream_buffer";
+    VLOGF(1) << "Could not map bitstream_buffer";
     NOTIFY_ERROR(UNREADABLE_INPUT);
     return;
   }
-  DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();
+  DVLOGF(4) << "mapped at=" << bitstream_record->shm->memory();
 
   decoder_input_queue_.push(
       linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
@@ -1334,7 +1335,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (state_ != kDecoding) {
@@ -1347,7 +1348,7 @@
     res = decoder_->Decode();
     switch (res) {
       case AcceleratedVideoDecoder::kAllocateNewSurfaces:
-        DVLOGF(2) << "Decoder requesting a new set of surfaces";
+        VLOGF(2) << "Decoder requesting a new set of surfaces";
         InitiateSurfaceSetChange();
         return;
 
@@ -1368,7 +1369,7 @@
         return;
 
       case AcceleratedVideoDecoder::kDecodeError:
-        DVLOGF(1) << "Error decoding stream";
+        VLOGF(1) << "Error decoding stream";
         NOTIFY_ERROR(PLATFORM_FAILURE);
         return;
     }
@@ -1376,7 +1377,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
-  DVLOGF(2);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kDecoding);
 
@@ -1386,7 +1387,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange() {
-  DVLOGF(2);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!surface_set_change_pending_)
@@ -1409,6 +1410,12 @@
     return false;
   }
 
+  // Dequeued decoded surfaces may be pended in pending_picture_ready_ if they
+  // are waiting for some pictures to be cleared. We should post them right away
+  // because they are about to be dismissed and destroyed for surface set
+  // change.
+  SendPictureReady();
+
   // This will return only once all buffers are dismissed and destroyed.
   // This does not wait until they are displayed however, as display retains
   // references to the buffers bound to textures and will release them
@@ -1424,12 +1431,12 @@
   }
 
   surface_set_change_pending_ = false;
-  DVLOGF(3) << "Surface set change finished";
+  VLOGF(2) << "Surface set change finished";
   return true;
 }
 
 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   std::vector<int32_t> picture_buffers_to_dismiss;
 
@@ -1442,7 +1449,7 @@
   }
 
   if (dismiss) {
-    DVLOGF(2) << "Scheduling picture dismissal";
+    VLOGF(2) << "Scheduling picture dismissal";
     base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                              base::WaitableEvent::InitialState::NOT_SIGNALED);
     child_task_runner_->PostTask(
@@ -1457,7 +1464,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
          !decoder_thread_.IsRunning());
   DCHECK(!output_streamon_);
@@ -1499,7 +1506,7 @@
 
 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
     const std::vector<PictureBuffer>& buffers) {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   decoder_thread_task_runner_->PostTask(
@@ -1510,16 +1517,16 @@
 
 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
     const std::vector<PictureBuffer>& buffers) {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kAwaitingPictureBuffers);
 
   const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures();
 
   if (buffers.size() < req_buffer_count) {
-    DLOG(ERROR) << "Failed to provide requested picture buffers. "
-                << "(Got " << buffers.size()
-                << ", requested " << req_buffer_count << ")";
+    VLOGF(1) << "Failed to provide requested picture buffers. "
+             << "(Got " << buffers.size() << ", requested " << req_buffer_count
+             << ")";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1535,7 +1542,7 @@
   IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
 
   if (reqbufs.count != buffers.size()) {
-    DLOGF(ERROR) << "Could not allocate enough output buffers";
+    VLOGF(1) << "Could not allocate enough output buffers";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
@@ -1588,18 +1595,26 @@
 
 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
     int32_t picture_buffer_id,
-    const std::vector<base::FileDescriptor>& dmabuf_fds) {
+    VideoPixelFormat pixel_format,
+    const NativePixmapHandle& native_pixmap_handle) {
   DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>()));
-  for (const auto& fd : dmabuf_fds) {
+  for (const auto& fd : native_pixmap_handle.fds) {
     DCHECK_NE(fd.fd, -1);
     passed_dmabuf_fds->push_back(base::ScopedFD(fd.fd));
   }
 
   if (output_mode_ != Config::OutputMode::IMPORT) {
-    LOGF(ERROR) << "Cannot import in non-import mode";
+    VLOGF(1) << "Cannot import in non-import mode";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  if (pixel_format !=
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)) {
+    VLOGF(1) << "Unsupported import format: " << pixel_format;
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1633,7 +1648,7 @@
   }
 
   if (!iter->at_client) {
-    LOGF(ERROR) << "Cannot import buffer that not owned by client";
+    VLOGF(1) << "Cannot import buffer that not owned by client";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1665,7 +1680,7 @@
 
 void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
     int32_t picture_buffer_id) {
-  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   V4L2DecodeSurfaceByPictureBufferId::iterator it =
@@ -1683,7 +1698,7 @@
 
   OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
   if (output_record.at_device || !output_record.at_client) {
-    DVLOGF(1) << "picture_buffer_id not reusable";
+    VLOGF(1) << "picture_buffer_id not reusable";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1695,7 +1710,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Flush() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   decoder_thread_task_runner_->PostTask(
@@ -1704,7 +1719,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::FlushTask() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   // Queue an empty buffer which - when reached - will trigger flush sequence.
@@ -1716,7 +1731,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   // This will trigger output for all remaining surfaces in the decoder.
@@ -1737,7 +1752,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::FinishFlush() {
-  DVLOGF(3);
+  VLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!decoder_flushing_)
@@ -1763,7 +1778,7 @@
   SendPictureReady();
 
   decoder_flushing_ = false;
-  DVLOGF(3) << "Flush finished";
+  VLOGF(2) << "Flush finished";
 
   child_task_runner_->PostTask(FROM_HERE,
                                base::Bind(&Client::NotifyFlushDone, client_));
@@ -1772,7 +1787,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Reset() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   decoder_thread_task_runner_->PostTask(
@@ -1781,7 +1796,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::ResetTask() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (decoder_resetting_) {
@@ -1804,7 +1819,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::FinishReset() {
-  DVLOGF(3);
+  VLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!decoder_resetting_)
@@ -1835,7 +1850,7 @@
   }
 
   decoder_resetting_ = false;
-  DVLOGF(3) << "Reset finished";
+  VLOGF(2) << "Reset finished";
 
   child_task_runner_->PostTask(FROM_HERE,
                                base::Bind(&Client::NotifyResetDone, client_));
@@ -1899,7 +1914,7 @@
   size_t i = 0;
   for (const auto& pic : dpb) {
     if (i >= arraysize(v4l2_decode_param_.dpb)) {
-      DVLOGF(1) << "Invalid DPB size";
+      VLOGF(1) << "Invalid DPB size";
       break;
     }
 
@@ -1930,8 +1945,8 @@
     const H264Picture::Vector& ref_pic_listb0,
     const H264Picture::Vector& ref_pic_listb1,
     const scoped_refptr<H264Picture>& pic) {
-  struct v4l2_ext_control ctrl;
-  std::vector<struct v4l2_ext_control> ctrls;
+  struct v4l2_ext_control_custom ctrl;
+  std::vector<struct v4l2_ext_control_custom> ctrls;
 
   struct v4l2_ctrl_h264_sps v4l2_sps;
   memset(&v4l2_sps, 0, sizeof(v4l2_sps));
@@ -2079,7 +2094,7 @@
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       H264PictureToV4L2DecodeSurface(pic);
 
-  struct v4l2_ext_controls ext_ctrls;
+  struct v4l2_ext_controls_custom ext_ctrls;
   memset(&ext_ctrls, 0, sizeof(ext_ctrls));
   ext_ctrls.count = ctrls.size();
   ext_ctrls.controls = &ctrls[0];
@@ -2109,7 +2124,7 @@
     const uint8_t* data,
     size_t size) {
   if (num_slices_ == kMaxSlices) {
-    LOGF(ERROR) << "Over limit of supported slices per frame";
+    VLOGF(1) << "Over limit of supported slices per frame";
     return false;
   }
 
@@ -2229,7 +2244,7 @@
   InputRecord& input_record = input_buffer_map_[index];
 
   if (input_record.bytes_used + size > input_record.length) {
-    DVLOGF(1) << "Input buffer too small";
+    VLOGF(1) << "Input buffer too small";
     return false;
   }
 
@@ -2241,7 +2256,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
-    struct v4l2_ext_controls* ext_ctrls) {
+    struct v4l2_ext_controls_custom* ext_ctrls) {
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_GT(ext_ctrls->config_store, 0u);
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
@@ -2249,7 +2264,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::GetExtControls(
-    struct v4l2_ext_controls* ext_ctrls) {
+    struct v4l2_ext_controls_custom* ext_ctrls) {
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_GT(ext_ctrls->config_store, 0u);
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_EXT_CTRLS, ext_ctrls);
@@ -2274,8 +2289,8 @@
   v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
   v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;
 
-  struct v4l2_ext_control ctrl;
-  std::vector<struct v4l2_ext_control> ctrls;
+  struct v4l2_ext_control_custom ctrl;
+  std::vector<struct v4l2_ext_control_custom> ctrls;
 
   memset(&ctrl, 0, sizeof(ctrl));
   ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
@@ -2289,7 +2304,7 @@
   ctrl.p_h264_decode_param = &v4l2_decode_param_;
   ctrls.push_back(ctrl);
 
-  struct v4l2_ext_controls ext_ctrls;
+  struct v4l2_ext_controls_custom ext_ctrls;
   memset(&ext_ctrls, 0, sizeof(ext_ctrls));
   ext_ctrls.count = ctrls.size();
   ext_ctrls.controls = &ctrls[0];
@@ -2307,6 +2322,7 @@
     const scoped_refptr<H264Picture>& pic) {
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       H264PictureToV4L2DecodeSurface(pic);
+  dec_surface->set_visible_rect(pic->visible_rect);
   v4l2_dec_->SurfaceReady(dec_surface);
   return true;
 }
@@ -2509,13 +2525,13 @@
     v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
   }
 
-  struct v4l2_ext_control ctrl;
+  struct v4l2_ext_control_custom ctrl;
   memset(&ctrl, 0, sizeof(ctrl));
   ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
   ctrl.size = sizeof(v4l2_frame_hdr);
   ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;
 
-  struct v4l2_ext_controls ext_ctrls;
+  struct v4l2_ext_controls_custom ext_ctrls;
   memset(&ext_ctrls, 0, sizeof(ext_ctrls));
   ext_ctrls.count = 1;
   ext_ctrls.controls = &ctrl;
@@ -2538,7 +2554,7 @@
     const scoped_refptr<VP8Picture>& pic) {
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       VP8PictureToV4L2DecodeSurface(pic);
-
+  dec_surface->set_visible_rect(pic->visible_rect);
   v4l2_dec_->SurfaceReady(dec_surface);
   return true;
 }
@@ -2735,9 +2751,9 @@
   FillV4L2VP9LoopFilterParams(lf_params, &v4l2_frame_hdr.lf_params);
   FillV4L2VP9SegmentationParams(segm_params, &v4l2_frame_hdr.sgmnt_params);
 
-  std::vector<struct v4l2_ext_control> ctrls;
+  std::vector<struct v4l2_ext_control_custom> ctrls;
 
-  struct v4l2_ext_control ctrl;
+  struct v4l2_ext_control_custom ctrl;
   memset(&ctrl, 0, sizeof(ctrl));
   ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR;
   ctrl.size = sizeof(v4l2_frame_hdr);
@@ -2818,7 +2834,7 @@
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       VP9PictureToV4L2DecodeSurface(pic);
 
-  struct v4l2_ext_controls ext_ctrls;
+  struct v4l2_ext_controls_custom ext_ctrls;
   memset(&ext_ctrls, 0, sizeof(ext_ctrls));
   ext_ctrls.count = ctrls.size();
   ext_ctrls.controls = &ctrls[0];
@@ -2841,7 +2857,7 @@
     const scoped_refptr<VP9Picture>& pic) {
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       VP9PictureToV4L2DecodeSurface(pic);
-
+  dec_surface->set_visible_rect(pic->visible_rect);
   v4l2_dec_->SurfaceReady(dec_surface);
   return true;
 }
@@ -2887,7 +2903,7 @@
   struct v4l2_ctrl_vp9_entropy v4l2_entropy;
   memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
 
-  struct v4l2_ext_control ctrl;
+  struct v4l2_ext_control_custom ctrl;
   memset(&ctrl, 0, sizeof(ctrl));
   ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
   ctrl.size = sizeof(v4l2_entropy);
@@ -2896,7 +2912,7 @@
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       VP9PictureToV4L2DecodeSurface(pic);
 
-  struct v4l2_ext_controls ext_ctrls;
+  struct v4l2_ext_controls_custom ext_ctrls;
   memset(&ext_ctrls, 0, sizeof(ext_ctrls));
   ext_ctrls.count = 1;
   ext_ctrls.controls = &ctrl;
@@ -2921,13 +2937,13 @@
     const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
-  DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
+  DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
   Enqueue(dec_surface);
 }
 
 void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
     const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   decoder_display_queue_.push(dec_surface);
@@ -2965,14 +2981,12 @@
   DCHECK_NE(output_record.picture_id, -1);
   output_record.at_client = true;
 
-  // TODO(posciak): Use visible size from decoder here instead
-  // (crbug.com/402760). Passing (0, 0) results in the client using the
-  // visible size extracted from the container instead.
   Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
-                  Rect(0, 0), false);
-  DVLOGF(3) << dec_surface->ToString()
+                  dec_surface->visible_rect(), true /* allow_overlay */);
+  DVLOGF(4) << dec_surface->ToString()
             << ", bitstream_id: " << picture.bitstream_buffer_id()
-            << ", picture_id: " << picture.picture_buffer_id();
+            << ", picture_id: " << picture.picture_buffer_id()
+            << ", visible_rect: " << picture.visible_rect().ToString();
   pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
   SendPictureReady();
   output_record.cleared = true;
@@ -3007,9 +3021,10 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
-  bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
+  bool send_now =
+      (decoder_resetting_ || decoder_flushing_ || surface_set_change_pending_);
   while (!pending_picture_ready_.empty()) {
     bool cleared = pending_picture_ready_.front().cleared;
     const Picture& picture = pending_picture_ready_.front().picture;
@@ -3023,17 +3038,20 @@
           FROM_HERE,
           base::Bind(&Client::PictureReady, decode_client_, picture));
       pending_picture_ready_.pop();
-    } else if (!cleared || resetting_or_flushing) {
-      DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
+    } else if (!cleared || send_now) {
+      DVLOGF(4) << "cleared=" << pending_picture_ready_.front().cleared
                 << ", decoder_resetting_=" << decoder_resetting_
                 << ", decoder_flushing_=" << decoder_flushing_
+                << ", surface_set_change_pending_="
+                << surface_set_change_pending_
                 << ", picture_clearing_count_=" << picture_clearing_count_;
       DVLOGF(4) << "Posting picture ready to GPU for: "
                 << picture.picture_buffer_id();
       // If the picture is not cleared, post it to the child thread because it
       // has to be cleared in the child thread. A picture only needs to be
-      // cleared once. If the decoder is resetting or flushing, send all
-      // pictures to ensure PictureReady arrive before reset or flush done.
+      // cleared once. If the decoder is resetting or flushing or changing
+      // resolution, send all pictures to ensure PictureReady arrive before
+      // reset done, flush done, or picture dismissed.
       child_task_runner_->PostTaskAndReply(
           FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
           // Unretained is safe. If Client::PictureReady gets to run, |this| is
@@ -3052,7 +3070,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
-  DVLOGF(3) << "clearing count=" << picture_clearing_count_;
+  DVLOGF(4) << "clearing count=" << picture_clearing_count_;
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_GT(picture_clearing_count_, 0);
   picture_clearing_count_--;
diff --git a/vda/v4l2_slice_video_decode_accelerator.h b/vda/v4l2_slice_video_decode_accelerator.h
index 929066f..a85dfa1 100644
--- a/vda/v4l2_slice_video_decode_accelerator.h
+++ b/vda/v4l2_slice_video_decode_accelerator.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #ifndef V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
 #define V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
@@ -22,7 +23,7 @@
 #include "h264_decoder.h"
 #include "v4l2_device.h"
 #include "video_decode_accelerator.h"
-#include "videodev2.h"
+#include "videodev2_custom.h"
 #include "vp8_decoder.h"
 #include "vp9_decoder.h"
 
@@ -47,7 +48,8 @@
   void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ImportBufferForPicture(
       int32_t picture_buffer_id,
-      const std::vector<base::FileDescriptor>& dmabuf_fds) override;
+      VideoPixelFormat pixel_format,
+      const NativePixmapHandle& native_pixmap_handle) override;
   void ReusePictureBuffer(int32_t picture_buffer_id) override;
   void Flush() override;
   void Reset() override;
@@ -77,6 +79,7 @@
   // Record for output buffers.
   struct OutputRecord {
     OutputRecord();
+    OutputRecord(OutputRecord&&) = default;
     bool at_device;
     bool at_client;
     int32_t picture_id;
@@ -103,11 +106,11 @@
   bool SubmitSlice(int index, const uint8_t* data, size_t size);
 
   // Submit controls in |ext_ctrls| to hardware. Return true on success.
-  bool SubmitExtControls(struct v4l2_ext_controls* ext_ctrls);
+  bool SubmitExtControls(struct v4l2_ext_controls_custom* ext_ctrls);
 
   // Gets current control values for controls in |ext_ctrls| from the driver.
   // Return true on success.
-  bool GetExtControls(struct v4l2_ext_controls* ext_ctrls);
+  bool GetExtControls(struct v4l2_ext_controls_custom* ext_ctrls);
 
   // Return true if the driver exposes V4L2 control |ctrl_id|, false otherwise.
   bool IsCtrlExposed(uint32_t ctrl_id);
@@ -234,9 +237,9 @@
   // file descriptors.
   void ImportBufferForPictureTask(
       int32_t picture_buffer_id,
-      // TODO(posciak): (crbug.com/561749) we should normally be able to pass
-      // the vector by itself via std::move, but it's not possible to do this
-      // if this method is used as a callback.
+      // TODO(posciak): (https://crbug.com/561749) we should normally be able to
+      // pass the vector by itself via std::move, but it's not possible to do
+      // this if this method is used as a callback.
       std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds);
 
   // Performed on decoder_thread_ as a consequence of poll() on decoder_thread_
@@ -366,7 +369,6 @@
   VideoCodecProfile video_profile_;
   uint32_t input_format_fourcc_;
   uint32_t output_format_fourcc_;
-  Size visible_size_;
   Size coded_size_;
 
   struct BitstreamBufferRef;
@@ -417,6 +419,7 @@
     bool cleared;  // Whether the texture is cleared and safe to render from.
     Picture picture;  // The decoded picture.
   };
+
   // Pictures that are ready but not sent to PictureReady yet.
   std::queue<PictureRecord> pending_picture_ready_;
 
diff --git a/vda/v4l2_video_decode_accelerator.cc b/vda/v4l2_video_decode_accelerator.cc
new file mode 100644
index 0000000..19b5b38
--- /dev/null
+++ b/vda/v4l2_video_decode_accelerator.cc
@@ -0,0 +1,2088 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 91175b1
+// Note: image processor is not ported.
+
+#include "v4l2_video_decode_accelerator.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/single_thread_task_runner.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "h264_parser.h"
+#include "rect.h"
+#include "shared_memory_region.h"
+#include "videodev2_custom.h"
+
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
+
+#define NOTIFY_ERROR(x)                      \
+  do {                                       \
+    VLOGF(1) << "Setting error state:" << x; \
+    SetErrorState(x);                        \
+  } while (0)
+
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
+  do {                                                          \
+    if (device_->Ioctl(type, arg) != 0) {                       \
+      VPLOGF(1) << "ioctl() failed: " << type_str;              \
+      NOTIFY_ERROR(PLATFORM_FAILURE);                           \
+      return value;                                             \
+    }                                                           \
+  } while (0)
+
+#define IOCTL_OR_ERROR_RETURN(type, arg) \
+  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)
+
+#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
+  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
+
+#define IOCTL_OR_LOG_ERROR(type, arg)           \
+  do {                                          \
+    if (device_->Ioctl(type, arg) != 0)         \
+      VPLOGF(1) << "ioctl() failed: " << #type; \
+  } while (0)
+
+namespace media {
+
+// static
+const uint32_t V4L2VideoDecodeAccelerator::supported_input_fourccs_[] = {
+    V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9,
+};
+
+struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
+  BitstreamBufferRef(
+      base::WeakPtr<Client>& client,
+      scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+      std::unique_ptr<SharedMemoryRegion> shm,
+      int32_t input_id);
+  ~BitstreamBufferRef();
+  const base::WeakPtr<Client> client;
+  const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
+  const std::unique_ptr<SharedMemoryRegion> shm;
+  size_t bytes_used;
+  const int32_t input_id;
+};
+
+V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
+    base::WeakPtr<Client>& client,
+    scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+    std::unique_ptr<SharedMemoryRegion> shm,
+    int32_t input_id)
+    : client(client),
+      client_task_runner(client_task_runner),
+      shm(std::move(shm)),
+      bytes_used(0),
+      input_id(input_id) {}
+
+V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
+  if (input_id >= 0) {
+    client_task_runner->PostTask(
+        FROM_HERE,
+        base::Bind(&Client::NotifyEndOfBitstreamBuffer, client, input_id));
+  }
+}
+
+V4L2VideoDecodeAccelerator::InputRecord::InputRecord()
+    : at_device(false), address(NULL), length(0), bytes_used(0), input_id(-1) {}
+
+V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() {}
+
+V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
+    : state(kFree),
+      picture_id(-1),
+      cleared(false) {}
+
+V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
+
+V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(bool cleared,
+                                                         const Picture& picture)
+    : cleared(cleared), picture(picture) {}
+
+V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
+
+V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
+    const scoped_refptr<V4L2Device>& device)
+    : child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+      decoder_thread_("V4L2DecoderThread"),
+      decoder_state_(kUninitialized),
+      output_mode_(Config::OutputMode::ALLOCATE),
+      device_(device),
+      decoder_delay_bitstream_buffer_id_(-1),
+      decoder_current_input_buffer_(-1),
+      decoder_decode_buffer_tasks_scheduled_(0),
+      decoder_frames_at_client_(0),
+      decoder_flushing_(false),
+      decoder_cmd_supported_(false),
+      flush_awaiting_last_output_buffer_(false),
+      reset_pending_(false),
+      decoder_partial_frame_pending_(false),
+      input_streamon_(false),
+      input_buffer_queued_count_(0),
+      output_streamon_(false),
+      output_buffer_queued_count_(0),
+      output_dpb_size_(0),
+      output_planes_count_(0),
+      picture_clearing_count_(0),
+      device_poll_thread_("V4L2DevicePollThread"),
+      video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
+      input_format_fourcc_(0),
+      output_format_fourcc_(0),
+      weak_this_factory_(this) {
+  weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() {
+  DCHECK(!decoder_thread_.IsRunning());
+  DCHECK(!device_poll_thread_.IsRunning());
+  DVLOGF(2);
+
+  // These maps have members that should be manually destroyed, e.g. file
+  // descriptors, mmap() segments, etc.
+  DCHECK(input_buffer_map_.empty());
+  DCHECK(output_buffer_map_.empty());
+}
+
+bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
+                                            Client* client) {
+  // Child-thread half of initialization: validates the config, opens the
+  // V4L2 device, checks capabilities and formats, then hands the rest off to
+  // InitializeTask() on the decoder thread. Returns false on synchronous
+  // failure; asynchronous failures are reported via NOTIFY_ERROR.
+  VLOGF(2) << "profile: " << config.profile
+           << ", output_mode=" << static_cast<int>(config.output_mode);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kUninitialized);
+
+  // This implementation only supports client-imported (dmabuf) output
+  // buffers; see ImportBufferForPicture().
+  if (config.output_mode != Config::OutputMode::IMPORT) {
+    NOTREACHED() << "Only IMPORT OutputModes are supported";
+    return false;
+  }
+
+  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
+  client_ = client_ptr_factory_->GetWeakPtr();
+  // If we haven't been set up to decode on separate thread via
+  // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+  // decode tasks.
+  if (!decode_task_runner_) {
+    decode_task_runner_ = child_task_runner_;
+    DCHECK(!decode_client_);
+    decode_client_ = client_;
+  }
+
+  video_profile_ = config.profile;
+
+  input_format_fourcc_ =
+      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, false);
+
+  if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) {
+    VLOGF(1) << "Failed to open device for profile: " << config.profile
+             << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
+    return false;
+  }
+
+  // Capabilities check: we need a multi-planar memory-to-memory device that
+  // supports streaming I/O.
+  struct v4l2_capability caps;
+  const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
+  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
+    VLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP"
+             << ", caps check failed: 0x" << std::hex << caps.capabilities;
+    return false;
+  }
+
+  if (!SetupFormats())
+    return false;
+
+  // H.264 input must be split into frames by our own parser (see
+  // AdvanceFrameFragment()); VP8/VP9 need no such parsing.
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    decoder_h264_parser_.reset(new H264Parser());
+  }
+
+  if (!decoder_thread_.Start()) {
+    VLOGF(1) << "decoder thread failed to start";
+    return false;
+  }
+
+  decoder_state_ = kInitialized;
+  output_mode_ = config.output_mode;
+
+  // InitializeTask will NOTIFY_ERROR on failure.
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::InitializeTask,
+                            base::Unretained(this)));
+
+  return true;
+}
+
+void V4L2VideoDecodeAccelerator::InitializeTask() {
+  // Decoder-thread half of initialization: subscribes to resolution-change
+  // events, allocates input buffers and starts device polling. Failures are
+  // reported via NOTIFY_ERROR (or by the callees themselves).
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kInitialized);
+
+  // Subscribe to the resolution change event.
+  struct v4l2_event_subscription sub;
+  memset(&sub, 0, sizeof(sub));
+  sub.type = V4L2_EVENT_SOURCE_CHANGE;
+  IOCTL_OR_ERROR_RETURN(VIDIOC_SUBSCRIBE_EVENT, &sub);
+
+  if (!CreateInputBuffers()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // Whether V4L2_DEC_CMD_STOP is available determines how flushes are
+  // signalled to the driver (see Enqueue()).
+  decoder_cmd_supported_ = IsDecoderCmdSupported();
+
+  // NOTE(review): StartDevicePoll() presumably reports its own errors —
+  // confirm against its definition; nothing further to do here on failure.
+  if (!StartDevicePoll())
+    return;
+}
+
+void V4L2VideoDecodeAccelerator::Decode(
+    const BitstreamBuffer& bitstream_buffer) {
+  // Client API entry point: validates |bitstream_buffer| and forwards it to
+  // the decoder thread. Runs on the decode task runner (client or child
+  // thread, see Initialize()).
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id()
+            << ", size=" << bitstream_buffer.size();
+  DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+  if (bitstream_buffer.id() < 0) {
+    VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+    // Close the shared-memory handle ourselves since no BitstreamBufferRef
+    // will be created to own it; otherwise the handle would leak.
+    if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+      base::SharedMemory::CloseHandle(bitstream_buffer.handle())&#59;
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // DecodeTask() will take care of running a DecodeBufferTask().
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeTask,
+                            base::Unretained(this), bitstream_buffer));
+}
+
+void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
+    const std::vector<PictureBuffer>& buffers) {
+  // Client API entry point: trampolines the picture buffers (by copy) to the
+  // decoder thread, where AssignPictureBuffersTask() does the real work.
+  VLOGF(2) << "buffer_count=" << buffers.size();
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2VideoDecodeAccelerator::AssignPictureBuffersTask,
+                 base::Unretained(this), buffers));
+}
+
+void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask(
+    const std::vector<PictureBuffer>& buffers) {
+  // Decoder-thread handler for AssignPictureBuffers(): requests matching
+  // CAPTURE buffers from the driver and initializes output_buffer_map_.
+  // Buffers start in kAtClient state until imported (see
+  // ImportBufferForPictureTask()).
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kAwaitingPictureBuffers);
+
+  // DPB size plus headroom for buffers in flight (see
+  // kDpbOutputBufferExtraCount).
+  uint32_t req_buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
+
+  if (buffers.size() < req_buffer_count) {
+    VLOGF(1) << "Failed to provide requested picture buffers. (Got "
+             << buffers.size() << ", requested " << req_buffer_count << ")";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // Allocate the output buffers.
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = buffers.size();
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
+
+  // The driver may grant fewer buffers than requested.
+  if (reqbufs.count != buffers.size()) {
+    VLOGF(1) << "Could not allocate enough output buffers";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  DCHECK(free_output_buffers_.empty());
+  DCHECK(output_buffer_map_.empty());
+  output_buffer_map_.resize(buffers.size());
+
+  // Always use IMPORT output mode for Android solution.
+  DCHECK_EQ(output_mode_, Config::OutputMode::IMPORT);
+
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    OutputRecord& output_record = output_buffer_map_[i];
+    DCHECK_EQ(output_record.state, kFree);
+    DCHECK_EQ(output_record.picture_id, -1);
+    DCHECK_EQ(output_record.cleared, false);
+
+    output_record.picture_id = buffers[i].id();
+
+    // This will remain kAtClient until ImportBufferForPicture is called, either
+    // by the client, or by ourselves, if we are allocating.
+    output_record.state = kAtClient;
+
+    DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
+  }
+}
+
+void V4L2VideoDecodeAccelerator::ImportBufferForPicture(
+    int32_t picture_buffer_id,
+    VideoPixelFormat pixel_format,
+    const NativePixmapHandle& native_pixmap_handle) {
+  // Client API entry point: validates mode and pixel format, takes ownership
+  // of the handle's dmabuf fds, and forwards them to the decoder thread.
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  if (output_mode_ != Config::OutputMode::IMPORT) {
+    VLOGF(1) << "Cannot import in non-import mode";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // The imported format must match the negotiated CAPTURE queue format.
+  if (pixel_format !=
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)) {
+    VLOGF(1) << "Unsupported import format: " << pixel_format;
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // Wrap the raw fds in ScopedFDs: from here on they are owned by us and
+  // will be closed automatically on every early-exit path.
+  std::vector<base::ScopedFD> dmabuf_fds;
+  for (const auto& fd : native_pixmap_handle.fds) {
+    DCHECK_NE(fd.fd, -1);
+    dmabuf_fds.push_back(base::ScopedFD(fd.fd));
+  }
+
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask,
+                 base::Unretained(this), picture_buffer_id,
+                 base::Passed(&dmabuf_fds)));
+}
+
+void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask(
+    int32_t picture_buffer_id,
+    std::vector<base::ScopedFD> dmabuf_fds) {
+  // Decoder-thread handler for ImportBufferForPicture(): attaches the
+  // client's dmabuf fds to the matching output record and returns the buffer
+  // to the free list so it can be queued to the driver.
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id
+            << ", dmabuf_fds.size()=" << dmabuf_fds.size();
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // Locate the output record by its picture id.
+  const auto iter =
+      std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
+                   [picture_buffer_id](const OutputRecord& output_record) {
+                     return output_record.picture_id == picture_buffer_id;
+                   });
+  if (iter == output_buffer_map_.end()) {
+    // It's possible that we've already posted a DismissPictureBuffer for this
+    // picture, but it has not yet executed when this ImportBufferForPicture was
+    // posted to us by the client. In that case just ignore this (we've already
+    // dismissed it and accounted for that).
+    DVLOGF(3) << "got picture id=" << picture_buffer_id
+              << " not in use (anymore?).";
+    return;
+  }
+
+  if (iter->state != kAtClient) {
+    VLOGF(1) << "Cannot import buffer not owned by client";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // A buffer owned by the client must not also be on the free list.
+  size_t index = iter - output_buffer_map_.begin();
+  DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+                       index),
+            0);
+
+  iter->state = kFree;
+
+  DCHECK_EQ(output_planes_count_, dmabuf_fds.size());
+
+  // Take ownership of the fds and make the buffer available for enqueueing.
+  iter->processor_output_fds.swap(dmabuf_fds);
+  free_output_buffers_.push_back(index);
+  // During a resolution change the queues are being torn down; don't kick the
+  // pipeline until the change completes.
+  if (decoder_state_ != kChangingResolution) {
+    Enqueue();
+    ScheduleDecodeBufferTaskIfNeeded();
+  }
+}
+
+void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
+  // Client API entry point: the client is done displaying this picture;
+  // forward to the decoder thread so the buffer can be recycled.
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+  // Must be run on child thread, as we'll insert a sync in the EGL context.
+  // NOTE(review): no EGL work is visible here anymore — this comment looks
+  // stale (likely inherited from the Chromium original); confirm and prune.
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
+                            base::Unretained(this), picture_buffer_id));
+}
+
+void V4L2VideoDecodeAccelerator::Flush() {
+  // Client API entry point: trampolines to FlushTask() on the decoder thread.
+  VLOGF(2);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::FlushTask,
+                            base::Unretained(this)));
+}
+
+void V4L2VideoDecodeAccelerator::Reset() {
+  // Client API entry point: trampolines to ResetTask() on the decoder thread.
+  VLOGF(2);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetTask,
+                            base::Unretained(this)));
+}
+
+void V4L2VideoDecodeAccelerator::Destroy() {
+  // Client API entry point: tears down the decoder and deletes |this|.
+  // Callers must not touch the object afterwards.
+  VLOGF(2);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  // We're destroying; cancel all callbacks.
+  client_ptr_factory_.reset();
+  weak_this_factory_.InvalidateWeakPtrs();
+
+  // If the decoder thread is running, destroy using posted task.
+  if (decoder_thread_.IsRunning()) {
+    decoder_thread_.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
+    // DestroyTask() will cause the decoder_thread_ to flush all tasks.
+    decoder_thread_.Stop();
+  } else {
+    // Otherwise, call the destroy task directly.
+    DestroyTask();
+  }
+
+  // NOTE: the log below runs after |this| is freed; it must not (and does
+  // not) touch any member state.
+  delete this;
+  VLOGF(2) << "Destroyed.";
+}
+
+bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+    const base::WeakPtr<Client>& decode_client,
+    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+  VLOGF(2);
+  // Record where Decode() and its client callbacks should run; when this is
+  // never called, Initialize() falls back to the child thread/client.
+  decode_task_runner_ = decode_task_runner;
+  decode_client_ = decode_client;
+  // This implementation always supports decoding on a separate thread.
+  return true;
+}
+
+// static
+VideoDecodeAccelerator::SupportedProfiles
+V4L2VideoDecodeAccelerator::GetSupportedProfiles() {
+  // Probe a throwaway device instance for the decode profiles it supports.
+  scoped_refptr<V4L2Device> device(new V4L2Device());
+  if (!device)
+    return SupportedProfiles();
+
+  return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
+                                            supported_input_fourccs_);
+}
+
+void V4L2VideoDecodeAccelerator::DecodeTask(
+    const BitstreamBuffer& bitstream_buffer) {
+  // Decoder-thread handler for Decode(): wraps the buffer in a
+  // BitstreamBufferRef (whose destructor notifies the client that the buffer
+  // was consumed — so every early return below still notifies), maps it, and
+  // queues it for DecodeBufferTask().
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id();
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
+      decode_client_, decode_task_runner_,
+      std::unique_ptr<SharedMemoryRegion>(
+          new SharedMemoryRegion(bitstream_buffer, true)),
+      bitstream_buffer.id()));
+
+  // Skip empty buffer.
+  if (bitstream_buffer.size() == 0)
+    return;
+
+  if (!bitstream_record->shm->Map()) {
+    VLOGF(1) << "could not map bitstream_buffer";
+    NOTIFY_ERROR(UNREADABLE_INPUT);
+    return;
+  }
+  DVLOGF(4) << "mapped at=" << bitstream_record->shm->memory();
+
+  if (decoder_state_ == kResetting || decoder_flushing_) {
+    // In the case that we're resetting or flushing, we need to delay decoding
+    // the BitstreamBuffers that come after the Reset() or Flush() call.  When
+    // we're here, we know that this DecodeTask() was scheduled by a Decode()
+    // call that came after (in the client thread) the Reset() or Flush() call;
+    // thus set up the delay if necessary.
+    if (decoder_delay_bitstream_buffer_id_ == -1)
+      decoder_delay_bitstream_buffer_id_ = bitstream_record->input_id;
+  } else if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  // Queue it up and account for the extra DecodeBufferTask() we run inline.
+  decoder_input_queue_.push(
+      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
+  decoder_decode_buffer_tasks_scheduled_++;
+  DecodeBufferTask();
+}
+
+void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
+  // Core decode pump: takes the current (or next queued) bitstream buffer,
+  // advances through it one frame fragment at a time, and feeds fragments to
+  // the device via AppendToInputFrame()/FlushInputFrame(). Reschedules itself
+  // through ScheduleDecodeBufferTaskIfNeeded() while work remains.
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  decoder_decode_buffer_tasks_scheduled_--;
+
+  if (decoder_state_ != kInitialized && decoder_state_ != kDecoding) {
+    DVLOGF(3) << "early out: state=" << decoder_state_;
+    return;
+  }
+
+  if (decoder_current_bitstream_buffer_ == NULL) {
+    if (decoder_input_queue_.empty()) {
+      // We're waiting for a new buffer -- exit without scheduling a new task.
+      return;
+    }
+    linked_ptr<BitstreamBufferRef>& buffer_ref = decoder_input_queue_.front();
+    if (decoder_delay_bitstream_buffer_id_ == buffer_ref->input_id) {
+      // We're asked to delay decoding on this and subsequent buffers.
+      return;
+    }
+
+    // Setup to use the next buffer.
+    decoder_current_bitstream_buffer_.reset(buffer_ref.release());
+    decoder_input_queue_.pop();
+    const auto& shm = decoder_current_bitstream_buffer_->shm;
+    if (shm) {
+      DVLOGF(4) << "reading input_id="
+                << decoder_current_bitstream_buffer_->input_id
+                << ", addr=" << shm->memory() << ", size=" << shm->size();
+    } else {
+      // A null shm marks the internal flush sentinel (kFlushBufferId).
+      DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+      DVLOGF(4) << "reading input_id=kFlushBufferId";
+    }
+  }
+  bool schedule_task = false;
+  size_t decoded_size = 0;
+  const auto& shm = decoder_current_bitstream_buffer_->shm;
+  if (!shm) {
+    // This is a dummy buffer, queued to flush the pipe.  Flush.
+    DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+    // Enqueue a buffer guaranteed to be empty.  To do that, we flush the
+    // current input, enqueue no data to the next frame, then flush that down.
+    schedule_task = true;
+    if (decoder_current_input_buffer_ != -1 &&
+        input_buffer_map_[decoder_current_input_buffer_].input_id !=
+            kFlushBufferId)
+      schedule_task = FlushInputFrame();
+
+    if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
+      VLOGF(2) << "enqueued flush buffer";
+      decoder_partial_frame_pending_ = false;
+      schedule_task = true;
+    } else {
+      // If we failed to enqueue the empty buffer (due to pipeline
+      // backpressure), don't advance the bitstream buffer queue, and don't
+      // schedule the next task.  This bitstream buffer queue entry will get
+      // reprocessed when the pipeline frees up.
+      schedule_task = false;
+    }
+  } else if (shm->size() == 0) {
+    // This is a buffer queued from the client that has zero size.  Skip.
+    schedule_task = true;
+  } else {
+    // This is a buffer queued from the client, with actual contents.  Decode.
+    const uint8_t* const data =
+        reinterpret_cast<const uint8_t*>(shm->memory()) +
+        decoder_current_bitstream_buffer_->bytes_used;
+    const size_t data_size =
+        shm->size() - decoder_current_bitstream_buffer_->bytes_used;
+    if (!AdvanceFrameFragment(data, data_size, &decoded_size)) {
+      NOTIFY_ERROR(UNREADABLE_INPUT);
+      return;
+    }
+    // AdvanceFrameFragment should not return a size larger than the buffer
+    // size, even on invalid data.
+    CHECK_LE(decoded_size, data_size);
+
+    switch (decoder_state_) {
+      case kInitialized:
+        schedule_task = DecodeBufferInitial(data, decoded_size, &decoded_size);
+        break;
+      case kDecoding:
+        schedule_task = DecodeBufferContinue(data, decoded_size);
+        break;
+      default:
+        NOTIFY_ERROR(ILLEGAL_STATE);
+        return;
+    }
+  }
+  if (decoder_state_ == kError) {
+    // Failed during decode.
+    return;
+  }
+
+  if (schedule_task) {
+    decoder_current_bitstream_buffer_->bytes_used += decoded_size;
+    if ((shm ? shm->size() : 0) ==
+        decoder_current_bitstream_buffer_->bytes_used) {
+      // Our current bitstream buffer is done; return it.
+      int32_t input_id = decoder_current_bitstream_buffer_->input_id;
+      DVLOGF(4) << "finished input_id=" << input_id;
+      // BitstreamBufferRef destructor calls NotifyEndOfBitstreamBuffer().
+      decoder_current_bitstream_buffer_.reset();
+    }
+    ScheduleDecodeBufferTaskIfNeeded();
+  }
+}
+
+bool V4L2VideoDecodeAccelerator::AdvanceFrameFragment(const uint8_t* data,
+                                                      size_t size,
+                                                      size_t* endpos) {
+  // Determines how much of |data| belongs to the current frame, writing the
+  // cut point to |*endpos|. For H.264 this parses NALUs to find a frame
+  // boundary (setting |decoder_partial_frame_pending_| when the buffer ends
+  // mid-frame); for VP8/VP9 every buffer is a whole frame. Returns false on
+  // an invalid/unsupported stream.
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    // For H264, we need to feed HW one frame at a time.  This is going to take
+    // some parsing of our input stream.
+    decoder_h264_parser_->SetStream(data, size);
+    H264NALU nalu;
+    H264Parser::Result result;
+    *endpos = 0;
+
+    // Keep on peeking the next NALs while they don't indicate a frame
+    // boundary.
+    for (;;) {
+      bool end_of_frame = false;
+      result = decoder_h264_parser_->AdvanceToNextNALU(&nalu);
+      if (result == H264Parser::kInvalidStream ||
+          result == H264Parser::kUnsupportedStream)
+        return false;
+      if (result == H264Parser::kEOStream) {
+        // We've reached the end of the buffer before finding a frame boundary.
+        decoder_partial_frame_pending_ = true;
+        *endpos = size;
+        return true;
+      }
+      switch (nalu.nal_unit_type) {
+        case H264NALU::kNonIDRSlice:
+        case H264NALU::kIDRSlice:
+          // We read nalu.data[1] below, so we need the NAL header byte plus at
+          // least one payload byte. (The previous `< 1` check permitted a
+          // one-byte over-read on a degenerate header-only slice NAL.)
+          if (nalu.size < 2)
+            return false;
+          // For these two, if the "first_mb_in_slice" field is zero, start a
+          // new frame and return.  This field is Exp-Golomb coded starting on
+          // the eighth data bit of the NAL; a zero value is encoded with a
+          // leading '1' bit in the byte, which we can detect as the byte being
+          // (unsigned) greater than or equal to 0x80.
+          if (nalu.data[1] >= 0x80) {
+            end_of_frame = true;
+            break;
+          }
+          break;
+        case H264NALU::kSEIMessage:
+        case H264NALU::kSPS:
+        case H264NALU::kPPS:
+        case H264NALU::kAUD:
+        case H264NALU::kEOSeq:
+        case H264NALU::kEOStream:
+        case H264NALU::kReserved14:
+        case H264NALU::kReserved15:
+        case H264NALU::kReserved16:
+        case H264NALU::kReserved17:
+        case H264NALU::kReserved18:
+          // These unconditionally signal a frame boundary.
+          end_of_frame = true;
+          break;
+        default:
+          // For all others, keep going.
+          break;
+      }
+      if (end_of_frame) {
+        if (!decoder_partial_frame_pending_ && *endpos == 0) {
+          // The frame was previously restarted, and we haven't filled the
+          // current frame with any contents yet.  Start the new frame here and
+          // continue parsing NALs.
+        } else {
+          // The frame wasn't previously restarted and/or we have contents for
+          // the current frame; signal the start of a new frame here: we don't
+          // have a partial frame anymore.
+          decoder_partial_frame_pending_ = false;
+          return true;
+        }
+      }
+      *endpos = (nalu.data + nalu.size) - data;
+    }
+    NOTREACHED();
+    return false;
+  } else {
+    DCHECK_GE(video_profile_, VP8PROFILE_MIN);
+    DCHECK_LE(video_profile_, VP9PROFILE_MAX);
+    // For VP8/9, we can just dump the entire buffer.  No fragmentation needed,
+    // and we never return a partial frame.
+    *endpos = size;
+    decoder_partial_frame_pending_ = false;
+    return true;
+  }
+}
+
+void V4L2VideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
+  // Posts another DecodeBufferTask() unless enough are already scheduled to
+  // cover every pending input buffer (queued plus the one in progress).
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // If we're behind on tasks, schedule another one.
+  // Explicit cast: size() is size_t; the scheduled-task counter is an int.
+  int buffers_to_decode = static_cast<int>(decoder_input_queue_.size());
+  if (decoder_current_bitstream_buffer_ != NULL)
+    buffers_to_decode++;
+  if (decoder_decode_buffer_tasks_scheduled_ < buffers_to_decode) {
+    decoder_decode_buffer_tasks_scheduled_++;
+    decoder_thread_.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeBufferTask,
+                              base::Unretained(this)));
+  }
+}
+
+bool V4L2VideoDecodeAccelerator::DecodeBufferInitial(const void* data,
+                                                     size_t size,
+                                                     size_t* endpos) {
+  // Decode path used while still in kInitialized: feeds data to the device
+  // so the driver can discover the stream format. Returns false on failure;
+  // returns true with |*endpos| = |size| when the fragment was consumed.
+  DVLOGF(3) << "data=" << data << ", size=" << size;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kInitialized);
+  // Initial decode.  We haven't been able to get output stream format info yet.
+  // Get it, and start decoding.
+
+  // Copy in and send to HW.
+  if (!AppendToInputFrame(data, size))
+    return false;
+
+  // If we only have a partial frame, don't flush and process yet.
+  if (decoder_partial_frame_pending_)
+    return true;
+
+  if (!FlushInputFrame())
+    return false;
+
+  // Recycle buffers.
+  Dequeue();
+
+  *endpos = size;
+
+  // If an initial resolution change event is not done yet, a driver probably
+  // needs more stream to decode format.
+  // Return true and schedule next buffer without changing status to kDecoding.
+  // If the initial resolution change is done and coded size is known, we may
+  // still have to wait for AssignPictureBuffers() and output buffers to be
+  // allocated.
+  if (coded_size_.IsEmpty() || output_buffer_map_.empty()) {
+    // Need more stream to decode format, return true and schedule next buffer.
+    return true;
+  }
+
+  // Format is known and output buffers exist: switch to steady-state decode.
+  decoder_state_ = kDecoding;
+  ScheduleDecodeBufferTaskIfNeeded();
+  return true;
+}
+
+bool V4L2VideoDecodeAccelerator::DecodeBufferContinue(const void* data,
+                                                      size_t size) {
+  // Steady-state decode path: append the fragment to the current input
+  // buffer, and flush it to the device only once the frame is complete.
+  DVLOGF(4) << "data=" << data << ", size=" << size;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kDecoding);
+
+  // AppendToInputFrame()/FlushInputFrame() set kError state if they fail.
+  if (!AppendToInputFrame(data, size))
+    return false;
+  // A partial frame stays buffered until the rest of it arrives.
+  if (decoder_partial_frame_pending_)
+    return true;
+  return FlushInputFrame();
+}
+
+bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data,
+                                                    size_t size) {
+  // Copies |size| bytes of |data| into the current input (OUTPUT-queue)
+  // buffer, acquiring a free buffer first if needed. Returns false when no
+  // buffer is available (backpressure) or on over-size input.
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DCHECK_NE(decoder_state_, kResetting);
+  DCHECK_NE(decoder_state_, kError);
+  // This routine can handle data == NULL and size == 0, which occurs when
+  // we queue an empty buffer for the purposes of flushing the pipe.
+
+  // Flush if we're too big
+  if (decoder_current_input_buffer_ != -1) {
+    InputRecord& input_record =
+        input_buffer_map_[decoder_current_input_buffer_];
+    if (input_record.bytes_used + size > input_record.length) {
+      if (!FlushInputFrame())
+        return false;
+      decoder_current_input_buffer_ = -1;
+    }
+  }
+
+  // Try to get an available input buffer
+  if (decoder_current_input_buffer_ == -1) {
+    if (free_input_buffers_.empty()) {
+      // See if we can get more free buffers from HW
+      Dequeue();
+      if (free_input_buffers_.empty()) {
+        // Nope!
+        DVLOGF(4) << "stalled for input buffers";
+        return false;
+      }
+    }
+    decoder_current_input_buffer_ = free_input_buffers_.back();
+    free_input_buffers_.pop_back();
+    InputRecord& input_record =
+        input_buffer_map_[decoder_current_input_buffer_];
+    DCHECK_EQ(input_record.bytes_used, 0);
+    DCHECK_EQ(input_record.input_id, -1);
+    DCHECK(decoder_current_bitstream_buffer_ != NULL);
+    // Tag the device buffer with the client's bitstream buffer id so the
+    // client can be notified when it completes.
+    input_record.input_id = decoder_current_bitstream_buffer_->input_id;
+  }
+
+  DCHECK(data != NULL || size == 0);
+  if (size == 0) {
+    // If we asked for an empty buffer, return now.  We return only after
+    // getting the next input buffer, since we might actually want an empty
+    // input buffer for flushing purposes.
+    return true;
+  }
+
+  // Copy in to the buffer.
+  InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_];
+  if (size > input_record.length - input_record.bytes_used) {
+    // A single fragment larger than a whole input buffer cannot be split.
+    VLOGF(1) << "over-size frame, erroring";
+    NOTIFY_ERROR(UNREADABLE_INPUT);
+    return false;
+  }
+  memcpy(reinterpret_cast<uint8_t*>(input_record.address) +
+             input_record.bytes_used,
+         data, size);
+  input_record.bytes_used += size;
+
+  return true;
+}
+
+bool V4L2VideoDecodeAccelerator::FlushInputFrame() {
+  // Finalizes the current input buffer: recycles it if it is an empty
+  // client buffer, otherwise moves it to input_ready_queue_ and kicks
+  // Enqueue(). Returns false only if Enqueue() drove us into kError.
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DCHECK_NE(decoder_state_, kResetting);
+  DCHECK_NE(decoder_state_, kError);
+
+  // Nothing buffered; trivially done.
+  if (decoder_current_input_buffer_ == -1)
+    return true;
+
+  InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_];
+  DCHECK_NE(input_record.input_id, -1);
+  DCHECK(input_record.input_id != kFlushBufferId ||
+         input_record.bytes_used == 0);
+  // * if input_id >= 0, this input buffer was prompted by a bitstream buffer we
+  //   got from the client.  We can skip it if it is empty.
+  // * if input_id < 0 (should be kFlushBufferId in this case), this input
+  //   buffer was prompted by a flush buffer, and should be queued even when
+  //   empty.
+  if (input_record.input_id >= 0 && input_record.bytes_used == 0) {
+    input_record.input_id = -1;
+    free_input_buffers_.push_back(decoder_current_input_buffer_);
+    decoder_current_input_buffer_ = -1;
+    return true;
+  }
+
+  // Queue it.
+  input_ready_queue_.push(decoder_current_input_buffer_);
+  decoder_current_input_buffer_ = -1;
+  DVLOGF(4) << "submitting input_id=" << input_record.input_id;
+  // Enqueue once since there's new available input for it.
+  Enqueue();
+
+  return (decoder_state_ != kError);
+}
+
+void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) {
+  // Scheduled by DevicePollTask() whenever the device becomes serviceable:
+  // handles pending V4L2 events, dequeues/enqueues buffers, and re-arms the
+  // poll. |event_pending| is true when the device signalled an event.
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  if (decoder_state_ == kResetting) {
+    DVLOGF(3) << "early out: kResetting state";
+    return;
+  } else if (decoder_state_ == kError) {
+    DVLOGF(3) << "early out: kError state";
+    return;
+  } else if (decoder_state_ == kChangingResolution) {
+    DVLOGF(3) << "early out: kChangingResolution state";
+    return;
+  }
+
+  bool resolution_change_pending = false;
+  if (event_pending)
+    resolution_change_pending = DequeueResolutionChangeEvent();
+
+  if (!resolution_change_pending && coded_size_.IsEmpty()) {
+    // Some platforms do not send an initial resolution change event.
+    // To work around this, we need to keep checking if the initial resolution
+    // is known already by explicitly querying the format after each decode,
+    // regardless of whether we received an event.
+    // This needs to be done on initial resolution change,
+    // i.e. when coded_size_.IsEmpty().
+
+    // Try GetFormatInfo to check if an initial resolution change can be done.
+    struct v4l2_format format;
+    Size visible_size;
+    bool again;
+    if (GetFormatInfo(&format, &visible_size, &again) && !again) {
+      resolution_change_pending = true;
+      DequeueResolutionChangeEvent();
+    }
+  }
+
+  // Reclaim completed buffers, then feed the device whatever is ready.
+  Dequeue();
+  Enqueue();
+
+  // Clear the interrupt fd.
+  if (!device_->ClearDevicePollInterrupt()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  bool poll_device = false;
+  // Add fd, if we should poll on it.
+  // Can be polled as soon as either input or output buffers are queued.
+  if (input_buffer_queued_count_ + output_buffer_queued_count_ > 0)
+    poll_device = true;
+
+  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
+  // so either:
+  // * device_poll_thread_ is running normally
+  // * device_poll_thread_ scheduled us, but then a ResetTask() or DestroyTask()
+  //   shut it down, in which case we're either in kResetting or kError states
+  //   respectively, and we should have early-outed already.
+  DCHECK(device_poll_thread_.message_loop());
+  // Queue the DevicePollTask() now.
+  device_poll_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), poll_device));
+
+  DVLOGF(3) << "ServiceDeviceTask(): buffer counts: DEC["
+            << decoder_input_queue_.size() << "->"
+            << input_ready_queue_.size() << "] => DEVICE["
+            << free_input_buffers_.size() << "+"
+            << input_buffer_queued_count_ << "/"
+            << input_buffer_map_.size() << "->"
+            << free_output_buffers_.size() << "+"
+            << output_buffer_queued_count_ << "/"
+            << output_buffer_map_.size() << "] => CLIENT["
+            << decoder_frames_at_client_ << "]";
+
+  ScheduleDecodeBufferTaskIfNeeded();
+  // Handle the resolution change last, after the queues have been serviced.
+  if (resolution_change_pending)
+    StartResolutionChange();
+}
+
+void V4L2VideoDecodeAccelerator::Enqueue() {
+  // Queues all ready input buffers and all free output buffers to the
+  // driver, starting streaming and signalling the poll thread when a queue
+  // transitions from empty to non-empty.
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  // Drain the pipe of completed decode buffers.
+  const int old_inputs_queued = input_buffer_queued_count_;
+  while (!input_ready_queue_.empty()) {
+    const int buffer = input_ready_queue_.front();
+    InputRecord& input_record = input_buffer_map_[buffer];
+    if (input_record.input_id == kFlushBufferId && decoder_cmd_supported_) {
+      // Send the flush command after all input buffers are dequeued. This makes
+      // sure all previous resolution changes have been handled because the
+      // driver must hold the input buffer that triggers resolution change. The
+      // driver cannot decode data in it without new output buffers. If we send
+      // the flush now and a queued input buffer triggers resolution change
+      // later, the driver will send an output buffer that has
+      // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
+      // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
+      // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
+      // to the decoder.
+      if (input_buffer_queued_count_ == 0) {
+        if (!SendDecoderCmdStop())
+          return;
+        // The flush sentinel never reaches the driver as a buffer; recycle it.
+        input_ready_queue_.pop();
+        free_input_buffers_.push_back(buffer);
+        input_record.input_id = -1;
+      } else {
+        break;
+      }
+    } else if (!EnqueueInputRecord())
+      return;
+  }
+  if (old_inputs_queued == 0 && input_buffer_queued_count_ != 0) {
+    // We just started up a previously empty queue.
+    // Queue state changed; signal interrupt.
+    if (!device_->SetDevicePollInterrupt()) {
+      VPLOGF(1) << "SetDevicePollInterrupt failed";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+    // Start VIDIOC_STREAMON if we haven't yet.
+    if (!input_streamon_) {
+      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+      IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+      input_streamon_ = true;
+    }
+  }
+
+  // Enqueue all the outputs we can.
+  const int old_outputs_queued = output_buffer_queued_count_;
+  while (!free_output_buffers_.empty()) {
+    if (!EnqueueOutputRecord())
+      return;
+  }
+  if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
+    // We just started up a previously empty queue.
+    // Queue state changed; signal interrupt.
+    if (!device_->SetDevicePollInterrupt()) {
+      VPLOGF(1) << "SetDevicePollInterrupt(): failed";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+    // Start VIDIOC_STREAMON if we haven't yet.
+    if (!output_streamon_) {
+      __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+      IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+      output_streamon_ = true;
+    }
+  }
+}
+
+bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() {
+  // Drains all pending V4L2 events; returns true as soon as a source-change
+  // event carrying a resolution change is found, false when none remain.
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DVLOGF(3);
+
+  struct v4l2_event ev;
+  memset(&ev, 0, sizeof(ev));
+
+  while (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
+    // We only subscribed to source-change events; log anything else.
+    if (ev.type != V4L2_EVENT_SOURCE_CHANGE) {
+      VLOGF(1) << "got an event (" << ev.type << ") we haven't subscribed to.";
+      continue;
+    }
+    if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
+      VLOGF(2) << "got resolution change event.";
+      return true;
+    }
+  }
+  return false;
+}
+
+// Dequeues as many completed input and output buffers from the device as are
+// available, then checks whether a pending flush can now be completed.
+// Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::Dequeue() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  // Each helper returns false on EAGAIN (nothing left to dequeue) or error.
+  while (input_buffer_queued_count_ > 0) {
+    if (!DequeueInputBuffer())
+      break;
+  }
+  while (output_buffer_queued_count_ > 0) {
+    if (!DequeueOutputBuffer())
+      break;
+  }
+  NotifyFlushDoneIfNeeded();
+}
+
+// Dequeues one completed input (VIDEO_OUTPUT) buffer from the device and
+// returns it to the free list. Returns false when no buffer is ready
+// (EAGAIN) or on device error; notifies PLATFORM_FAILURE in the error case.
+// Runs on the decoder thread.
+bool V4L2VideoDecodeAccelerator::DequeueInputBuffer() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_GT(input_buffer_queued_count_, 0);
+  DCHECK(input_streamon_);
+
+  // Dequeue a completed input (VIDEO_OUTPUT) buffer, and recycle to the free
+  // list.
+  struct v4l2_buffer_custom dqbuf;
+  struct v4l2_plane planes[1];
+  memset(&dqbuf, 0, sizeof(dqbuf));
+  memset(planes, 0, sizeof(planes));
+  dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  dqbuf.memory = V4L2_MEMORY_MMAP;
+  dqbuf.m.planes = planes;
+  dqbuf.length = 1;
+  if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+    if (errno == EAGAIN) {
+      // EAGAIN if we're just out of buffers to dequeue.
+      return false;
+    }
+    VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  // Reset the record's bookkeeping so the buffer can be reused.
+  InputRecord& input_record = input_buffer_map_[dqbuf.index];
+  DCHECK(input_record.at_device);
+  free_input_buffers_.push_back(dqbuf.index);
+  input_record.at_device = false;
+  input_record.bytes_used = 0;
+  input_record.input_id = -1;
+  input_buffer_queued_count_--;
+
+  return true;
+}
+
+// Dequeues one completed output (VIDEO_CAPTURE) buffer. A non-empty buffer
+// becomes a Picture handed to the client; an empty one (bytesused == 0, as
+// produced during a flush) goes straight back to the free list. Also resumes
+// the decoder with V4L2_DEC_CMD_START when the buffer carries the
+// V4L2_BUF_FLAG_LAST marker of a pending flush. Returns false on EAGAIN,
+// EPIPE (last buffer already dequeued), or device error. Decoder thread only.
+bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_GT(output_buffer_queued_count_, 0);
+  DCHECK(output_streamon_);
+
+  // Dequeue a completed output (VIDEO_CAPTURE) buffer, and queue to the
+  // completed queue.
+  struct v4l2_buffer_custom dqbuf;
+  std::unique_ptr<struct v4l2_plane[]> planes(
+      new v4l2_plane[output_planes_count_]);
+  memset(&dqbuf, 0, sizeof(dqbuf));
+  memset(planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
+  dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  dqbuf.memory = V4L2_MEMORY_MMAP;
+  dqbuf.m.planes = planes.get();
+  dqbuf.length = output_planes_count_;
+  if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+    if (errno == EAGAIN) {
+      // EAGAIN if we're just out of buffers to dequeue.
+      return false;
+    } else if (errno == EPIPE) {
+      DVLOGF(3) << "Got EPIPE. Last output buffer was already dequeued.";
+      return false;
+    }
+    VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  OutputRecord& output_record = output_buffer_map_[dqbuf.index];
+  DCHECK_EQ(output_record.state, kAtDevice);
+  DCHECK_NE(output_record.picture_id, -1);
+  output_buffer_queued_count_--;
+  if (dqbuf.m.planes[0].bytesused == 0) {
+    // This is an empty output buffer returned as part of a flush.
+    output_record.state = kFree;
+    free_output_buffers_.push_back(dqbuf.index);
+  } else {
+    // The bitstream buffer id was stashed in the timestamp when the matching
+    // input buffer was queued (see EnqueueInputRecord()).
+    int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec;
+    DCHECK_GE(bitstream_buffer_id, 0);
+    DVLOGF(4) << "Dequeue output buffer: dqbuf index=" << dqbuf.index
+              << " bitstream input_id=" << bitstream_buffer_id;
+    output_record.state = kAtClient;
+    decoder_frames_at_client_++;
+
+    const Picture picture(output_record.picture_id, bitstream_buffer_id,
+                          Rect(visible_size_), false);
+    pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
+    SendPictureReady();
+    // The first delivery clears the picture; subsequent reuses are cleared.
+    output_record.cleared = true;
+  }
+  if (dqbuf.flags & V4L2_BUF_FLAG_LAST) {
+    DVLOGF(3) << "Got last output buffer. Waiting last buffer="
+              << flush_awaiting_last_output_buffer_;
+    if (flush_awaiting_last_output_buffer_) {
+      flush_awaiting_last_output_buffer_ = false;
+      // Restart the decoder after the flush completed on the device side.
+      struct v4l2_decoder_cmd cmd;
+      memset(&cmd, 0, sizeof(cmd));
+      cmd.cmd = V4L2_DEC_CMD_START;
+      IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
+    }
+  }
+  return true;
+}
+
+// Queues the front entry of input_ready_queue_ to the device as a
+// VIDEO_OUTPUT buffer. The bitstream input id is smuggled through the V4L2
+// timestamp so it can be matched with the decoded picture on dequeue.
+// Returns false if VIDIOC_QBUF fails (the entry then stays in the queue).
+bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() {
+  DVLOGF(4);
+  DCHECK(!input_ready_queue_.empty());
+
+  // Enqueue an input (VIDEO_OUTPUT) buffer.
+  const int buffer = input_ready_queue_.front();
+  InputRecord& input_record = input_buffer_map_[buffer];
+  DCHECK(!input_record.at_device);
+  struct v4l2_buffer_custom qbuf;
+  struct v4l2_plane qbuf_plane;
+  memset(&qbuf, 0, sizeof(qbuf));
+  memset(&qbuf_plane, 0, sizeof(qbuf_plane));
+  qbuf.index = buffer;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  // Carry the input id in the timestamp; read back in DequeueOutputBuffer().
+  qbuf.timestamp.tv_sec = input_record.input_id;
+  qbuf.memory = V4L2_MEMORY_MMAP;
+  qbuf.m.planes = &qbuf_plane;
+  qbuf.m.planes[0].bytesused = input_record.bytes_used;
+  qbuf.length = 1;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+  // Only pop after the QBUF succeeded so a failure leaves state consistent.
+  input_ready_queue_.pop();
+  input_record.at_device = true;
+  input_buffer_queued_count_++;
+  DVLOGF(4) << "enqueued input_id=" << input_record.input_id
+            << " size=" << input_record.bytes_used;
+  return true;
+}
+
+// Queues the front entry of free_output_buffers_ to the device as a
+// VIDEO_CAPTURE buffer and marks the record kAtDevice. Returns false if
+// VIDIOC_QBUF fails (the buffer then remains on the free list).
+bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
+  DCHECK(!free_output_buffers_.empty());
+
+  // Enqueue an output (VIDEO_CAPTURE) buffer.
+  const int buffer = free_output_buffers_.front();
+  DVLOGF(4) << "buffer " << buffer;
+  OutputRecord& output_record = output_buffer_map_[buffer];
+  DCHECK_EQ(output_record.state, kFree);
+  DCHECK_NE(output_record.picture_id, -1);
+  struct v4l2_buffer_custom qbuf;
+  std::unique_ptr<struct v4l2_plane[]> qbuf_planes(
+      new v4l2_plane[output_planes_count_]);
+  memset(&qbuf, 0, sizeof(qbuf));
+  memset(qbuf_planes.get(), 0,
+         sizeof(struct v4l2_plane) * output_planes_count_);
+  qbuf.index = buffer;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  qbuf.memory = V4L2_MEMORY_MMAP;
+  qbuf.m.planes = qbuf_planes.get();
+  qbuf.length = output_planes_count_;
+  DVLOGF(4) << "qbuf.index=" << qbuf.index;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+  // Only pop after the QBUF succeeded so a failure leaves state consistent.
+  free_output_buffers_.pop_front();
+  output_record.state = kAtDevice;
+  output_buffer_queued_count_++;
+  return true;
+}
+
+// Handles the client returning a picture buffer: finds the matching output
+// record by picture id, moves it back to the free list, and re-enqueues
+// buffers to the device. A picture id that is no longer in the map is
+// silently ignored (it may have been dismissed already); a record that is
+// not kAtClient is an INVALID_ARGUMENT error. Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(int32_t picture_buffer_id) {
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // We run ReusePictureBufferTask even if we're in kResetting.
+  if (decoder_state_ == kError) {
+    DVLOGF(4) << "early out: kError state";
+    return;
+  }
+
+  if (decoder_state_ == kChangingResolution) {
+    DVLOGF(4) << "early out: kChangingResolution";
+    return;
+  }
+
+  // Linear scan: the map is small (DPB size plus a few extras).
+  size_t index;
+  for (index = 0; index < output_buffer_map_.size(); ++index)
+    if (output_buffer_map_[index].picture_id == picture_buffer_id)
+      break;
+
+  if (index >= output_buffer_map_.size()) {
+    // It's possible that we've already posted a DismissPictureBuffer for this
+    // picture, but it has not yet executed when this ReusePictureBuffer was
+    // posted to us by the client. In that case just ignore this (we've already
+    // dismissed it and accounted for that) and let the sync object get
+    // destroyed.
+    DVLOGF(3) << "got picture id= " << picture_buffer_id
+              << " not in use (anymore?).";
+    return;
+  }
+
+  OutputRecord& output_record = output_buffer_map_[index];
+  if (output_record.state != kAtClient) {
+    VLOGF(1) << "picture_buffer_id not reusable";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  output_record.state = kFree;
+  free_output_buffers_.push_back(index);
+  decoder_frames_at_client_--;
+  // We got a buffer back, so enqueue it back.
+  Enqueue();
+}
+
+// Begins a flush: appends a sentinel empty buffer (kFlushBufferId) to the
+// decoder input queue, marks decoder_flushing_, pushes out any pending
+// pictures, and kicks the decode loop. Nested flushes are not supported.
+// Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::FlushTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  // We don't support stacked flushing.
+  DCHECK(!decoder_flushing_);
+
+  // Queue up an empty buffer -- this triggers the flush.
+  decoder_input_queue_.push(
+      linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
+          decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
+  decoder_flushing_ = true;
+  SendPictureReady();  // Send all pending PictureReady.
+
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+// Completes a pending flush once the whole pipeline has drained: input queue
+// empty (except a delayed buffer), no buffer being filled, nothing queued at
+// the device, and the last output buffer received. On completion it restarts
+// the streams, clears flush state, and posts NotifyFlushDone to the client.
+// No-op if no flush is pending. Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  if (!decoder_flushing_)
+    return;
+
+  // Pipeline is empty when:
+  // * Decoder input queue is empty of non-delayed buffers.
+  // * There is no currently filling input buffer.
+  // * Input holding queue is empty.
+  // * All input (VIDEO_OUTPUT) buffers are returned.
+  // * All image processor buffers are returned.
+  if (!decoder_input_queue_.empty()) {
+    if (decoder_input_queue_.front()->input_id !=
+        decoder_delay_bitstream_buffer_id_) {
+      DVLOGF(3) << "Some input bitstream buffers are not queued.";
+      return;
+    }
+  }
+  if (decoder_current_input_buffer_ != -1) {
+    DVLOGF(3) << "Current input buffer != -1";
+    return;
+  }
+  if ((input_ready_queue_.size() + input_buffer_queued_count_) != 0) {
+    DVLOGF(3) << "Some input buffers are not dequeued.";
+    return;
+  }
+  if (flush_awaiting_last_output_buffer_) {
+    DVLOGF(3) << "Waiting for last output buffer.";
+    return;
+  }
+
+  // TODO(posciak): https://crbug.com/270039. Exynos requires a
+  // streamoff-streamon sequence after flush to continue, even if we are not
+  // resetting. This would make sense, because we don't really want to resume
+  // from a non-resume point (e.g. not from an IDR) if we are flushed.
+  // MSE player however triggers a Flush() on chunk end, but never Reset(). One
+  // could argue either way, or even say that Flush() is not needed/harmful when
+  // transitioning to next chunk.
+  // For now, do the streamoff-streamon cycle to satisfy Exynos and not freeze
+  // when doing MSE. This should be harmless otherwise.
+  if (!(StopDevicePoll() && StopOutputStream() && StopInputStream()))
+    return;
+
+  if (!StartDevicePoll())
+    return;
+
+  decoder_delay_bitstream_buffer_id_ = -1;
+  decoder_flushing_ = false;
+  VLOGF(2) << "returning flush";
+  child_task_runner_->PostTask(FROM_HERE,
+                               base::Bind(&Client::NotifyFlushDone, client_));
+
+  // While we were flushing, we early-outed DecodeBufferTask()s.
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+// Probes (via VIDIOC_TRY_DECODER_CMD, which does not execute the command)
+// whether the driver supports V4L2_DEC_CMD_STOP, used for flushing.
+bool V4L2VideoDecodeAccelerator::IsDecoderCmdSupported() {
+  // CMD_STOP should always succeed. If the decoder is started, the command can
+  // flush it. If the decoder is stopped, the command does nothing. We use this
+  // to know if a driver supports V4L2_DEC_CMD_STOP to flush.
+  struct v4l2_decoder_cmd cmd;
+  memset(&cmd, 0, sizeof(cmd));
+  cmd.cmd = V4L2_DEC_CMD_STOP;
+  if (device_->Ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
+    VLOGF(2) << "V4L2_DEC_CMD_STOP is not supported.";
+    return false;
+  }
+
+  return true;
+}
+
+// Issues V4L2_DEC_CMD_STOP to start a device-side flush and records that we
+// now expect a buffer flagged V4L2_BUF_FLAG_LAST (handled in
+// DequeueOutputBuffer()). Returns false if the ioctl fails.
+// Runs on the decoder thread.
+bool V4L2VideoDecodeAccelerator::SendDecoderCmdStop() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK(!flush_awaiting_last_output_buffer_);
+
+  struct v4l2_decoder_cmd cmd;
+  memset(&cmd, 0, sizeof(cmd));
+  cmd.cmd = V4L2_DEC_CMD_STOP;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
+  flush_awaiting_last_output_buffer_ = true;
+
+  return true;
+}
+
+// Begins a reset: drops the current bitstream buffer and all queued input.
+// If a resolution change or picture-buffer allocation is in flight, the
+// reset is postponed (reset_pending_) and resumed when that completes;
+// otherwise FinishReset() runs immediately. Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::ResetTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+  decoder_current_bitstream_buffer_.reset();
+  while (!decoder_input_queue_.empty())
+    decoder_input_queue_.pop();
+
+  decoder_current_input_buffer_ = -1;
+
+  // If we are in the middle of switching resolutions or awaiting picture
+  // buffers, postpone reset until it's done. We don't have to worry about
+  // timing of this wrt to decoding, because output pipe is already
+  // stopped if we are changing resolution. We will come back here after
+  // we are done.
+  DCHECK(!reset_pending_);
+  if (decoder_state_ == kChangingResolution ||
+      decoder_state_ == kAwaitingPictureBuffers) {
+    reset_pending_ = true;
+    return;
+  }
+  FinishReset();
+}
+
+// Performs the actual reset work: stops polling and the output stream,
+// handles a resolution change event that may have raced with the reset
+// (re-arming reset_pending_ in that case), stops the input stream, settles a
+// pending flush, and transitions to kResetting before posting ResetDoneTask.
+// Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::FinishReset() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  reset_pending_ = false;
+  // After the output stream is stopped, the codec should not post any
+  // resolution change events. So we dequeue the resolution change event
+  // afterwards. The event could be posted before or while stopping the output
+  // stream. The codec will expect the buffer of new size after the seek, so
+  // we need to handle the resolution change event first.
+  if (!(StopDevicePoll() && StopOutputStream()))
+    return;
+
+  if (DequeueResolutionChangeEvent()) {
+    // Resolution change takes priority; the reset resumes once it finishes.
+    reset_pending_ = true;
+    StartResolutionChange();
+    return;
+  }
+
+  if (!StopInputStream())
+    return;
+
+  // If we were flushing, we'll never return any more BitstreamBuffers or
+  // PictureBuffers; they have all been dropped and returned by now.
+  NotifyFlushDoneIfNeeded();
+
+  // Mark that we're resetting, then enqueue a ResetDoneTask().  All intervening
+  // jobs will early-out in the kResetting state.
+  decoder_state_ = kResetting;
+  SendPictureReady();  // Send all pending PictureReady.
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetDoneTask,
+                            base::Unretained(this)));
+}
+
+// Finishes a reset after all intervening tasks drained: restarts device
+// polling if needed, resets per-format parser state, returns to
+// kInitialized, notifies the client, and reschedules decoding.
+// Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::ResetDoneTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  // Start poll thread if NotifyFlushDoneIfNeeded has not already.
+  if (!device_poll_thread_.IsRunning()) {
+    if (!StartDevicePoll())
+      return;
+  }
+
+  // Reset format-specific bits.
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    decoder_h264_parser_.reset(new H264Parser());
+  }
+
+  // Jobs drained, we're finished resetting.
+  DCHECK_EQ(decoder_state_, kResetting);
+  decoder_state_ = kInitialized;
+
+  decoder_partial_frame_pending_ = false;
+  decoder_delay_bitstream_buffer_id_ = -1;
+  child_task_runner_->PostTask(FROM_HERE,
+                               base::Bind(&Client::NotifyResetDone, client_));
+
+  // While we were resetting, we early-outed DecodeBufferTask()s.
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+// Tears everything down on the decoder thread: stops polling and both
+// streams, drops all queued/in-progress input, frees input and output
+// buffers, and parks the state machine in kError so no further work runs.
+void V4L2VideoDecodeAccelerator::DestroyTask() {
+  VLOGF(2);
+
+  // DestroyTask() should run regardless of decoder_state_.
+
+  StopDevicePoll();
+  StopOutputStream();
+  StopInputStream();
+
+  decoder_current_bitstream_buffer_.reset();
+  decoder_current_input_buffer_ = -1;
+  decoder_decode_buffer_tasks_scheduled_ = 0;
+  decoder_frames_at_client_ = 0;
+  while (!decoder_input_queue_.empty())
+    decoder_input_queue_.pop();
+  decoder_flushing_ = false;
+
+  // Set our state to kError.  Just in case.
+  decoder_state_ = kError;
+
+  DestroyInputBuffers();
+  DestroyOutputBuffers();
+}
+
+// Starts the device poll thread and posts the first DevicePollTask (with
+// poll_device=false for the initial pass). Returns false and notifies
+// PLATFORM_FAILURE if the thread cannot start. Runs on the decoder thread.
+bool V4L2VideoDecodeAccelerator::StartDevicePoll() {
+  DVLOGF(3);
+  DCHECK(!device_poll_thread_.IsRunning());
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // Start up the device poll thread and schedule its first DevicePollTask().
+  if (!device_poll_thread_.Start()) {
+    VLOGF(1) << "Device thread failed to start";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  device_poll_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), 0));
+
+  return true;
+}
+
+// Stops the device poll thread: interrupts any in-flight device poll, joins
+// the thread, and clears the interrupt so a later restart polls cleanly.
+// Trivially succeeds if the thread is not running. Returns false (with
+// PLATFORM_FAILURE) if the interrupt cannot be set or cleared.
+bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
+  DVLOGF(3);
+
+  if (!device_poll_thread_.IsRunning())
+    return true;
+
+  // May also be called during teardown when the decoder thread is gone.
+  if (decoder_thread_.IsRunning())
+    DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // Signal the DevicePollTask() to stop, and stop the device poll thread.
+  if (!device_->SetDevicePollInterrupt()) {
+    VPLOGF(1) << "SetDevicePollInterrupt(): failed";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  device_poll_thread_.Stop();
+  // Clear the interrupt now, to be sure.
+  if (!device_->ClearDevicePollInterrupt()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  DVLOGF(3) << "device poll stopped";
+  return true;
+}
+
+// Issues STREAMOFF on the CAPTURE queue. Streamoff makes the device drop
+// ownership of every queued buffer, so all kAtDevice records are reclaimed
+// to the free list; buffers held by the client stay with the client.
+// Trivially succeeds if the stream is already off.
+bool V4L2VideoDecodeAccelerator::StopOutputStream() {
+  VLOGF(2);
+  if (!output_streamon_)
+    return true;
+
+  __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+  output_streamon_ = false;
+
+  // Output stream is stopped. No need to wait for the buffer anymore.
+  flush_awaiting_last_output_buffer_ = false;
+
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    // After streamoff, the device drops ownership of all buffers, even if we
+    // don't dequeue them explicitly. Some of them may still be owned by the
+    // client however. Reuse only those that aren't.
+    OutputRecord& output_record = output_buffer_map_[i];
+    if (output_record.state == kAtDevice) {
+      output_record.state = kFree;
+      free_output_buffers_.push_back(i);
+    }
+  }
+  output_buffer_queued_count_ = 0;
+  return true;
+}
+
+// Issues STREAMOFF on the OUTPUT (input-to-decoder) queue and resets all
+// input bookkeeping: the ready queue is drained and every input buffer is
+// returned to the free list with its record cleared. Trivially succeeds if
+// the stream is already off.
+bool V4L2VideoDecodeAccelerator::StopInputStream() {
+  VLOGF(2);
+  if (!input_streamon_)
+    return true;
+
+  __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+  input_streamon_ = false;
+
+  // Reset accounting info for input.
+  while (!input_ready_queue_.empty())
+    input_ready_queue_.pop();
+  free_input_buffers_.clear();
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    free_input_buffers_.push_back(i);
+    input_buffer_map_[i].at_device = false;
+    input_buffer_map_[i].bytes_used = 0;
+    input_buffer_map_[i].input_id = -1;
+  }
+  input_buffer_queued_count_ = 0;
+
+  return true;
+}
+
+// Initiates a resolution change: stops polling and the output stream, enters
+// kChangingResolution, flushes pending pictures to the client, destroys the
+// old output buffers, and proceeds to FinishResolutionChange().
+// Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::StartResolutionChange() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DCHECK_NE(decoder_state_, kResetting);
+
+  VLOGF(2) << "Initiate resolution change";
+
+  if (!(StopDevicePoll() && StopOutputStream()))
+    return;
+
+  decoder_state_ = kChangingResolution;
+  SendPictureReady();  // Send all pending PictureReady.
+
+  if (!DestroyOutputBuffers()) {
+    VLOGF(1) << "Failed destroying output buffers.";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  FinishResolutionChange();
+}
+
+// Completes a resolution change: queries the new format from the driver,
+// allocates output buffers for it, and restarts device polling.
+// Runs on the decoder thread.
+void V4L2VideoDecodeAccelerator::FinishResolutionChange() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kChangingResolution);
+  VLOGF(2);
+
+  // NOTE(review): this branch can only be reached in release builds, since
+  // the DCHECK_EQ above already requires kChangingResolution in debug.
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  struct v4l2_format format;
+  bool again;
+  Size visible_size;
+  // 'again' means the driver has not yet seen enough stream to know the
+  // format; after a resolution change event that is treated as an error.
+  bool ret = GetFormatInfo(&format, &visible_size, &again);
+  if (!ret || again) {
+    VLOGF(1) << "Couldn't get format information after resolution change";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  if (!CreateBuffersForFormat(format, visible_size)) {
+    VLOGF(1) << "Couldn't reallocate buffers after resolution change";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  if (!StartDevicePoll())
+    return;
+}
+
+// One iteration of the device poll loop, run on the poll thread. Blocks in
+// device_->Poll() until the device is ready or the poll is interrupted, then
+// hands control back to the decoder thread via ServiceDeviceTask, which will
+// repost this task. Decoder state is never touched from this thread.
+void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) {
+  DVLOGF(4);
+  DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread());
+
+  bool event_pending = false;
+
+  if (!device_->Poll(poll_device, &event_pending)) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // All processing should happen on ServiceDeviceTask(), since we shouldn't
+  // touch decoder state from this thread.
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ServiceDeviceTask,
+                            base::Unretained(this), event_pending));
+}
+
+// Reports an error to the client on the child (client) thread, rebounding
+// there first if called from another thread. After notifying, the client
+// pointer factory is reset so no further client callbacks are made.
+void V4L2VideoDecodeAccelerator::NotifyError(Error error) {
+  VLOGF(1);
+
+  if (!child_task_runner_->BelongsToCurrentThread()) {
+    child_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::NotifyError,
+                              weak_this_, error));
+    return;
+  }
+
+  if (client_) {
+    client_->NotifyError(error);
+    client_ptr_factory_.reset();
+  }
+}
+
+// Transitions the state machine to kError, bouncing to the decoder thread
+// first if necessary (decoder_state_ may only be mutated there, or when that
+// thread is not running). NotifyError is raised only if we were past
+// initialization, since the API forbids error callbacks before that.
+void V4L2VideoDecodeAccelerator::SetErrorState(Error error) {
+  // We can touch decoder_state_ only if this is the decoder thread or the
+  // decoder thread isn't running.
+  if (decoder_thread_.task_runner() &&
+      !decoder_thread_.task_runner()->BelongsToCurrentThread()) {
+    decoder_thread_.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::SetErrorState,
+                              base::Unretained(this), error));
+    return;
+  }
+
+  // Post NotifyError only if we are already initialized, as the API does
+  // not allow doing so before that.
+  if (decoder_state_ != kError && decoder_state_ != kUninitialized)
+    NotifyError(error);
+
+  decoder_state_ = kError;
+}
+
+// Queries the current CAPTURE format via VIDIOC_G_FMT.
+// |format| receives the raw v4l2_format; |visible_size| (optional, may be
+// null) receives the visible rectangle derived from the coded size; |again|
+// is set true (with a true return) when the driver has not yet parsed enough
+// stream to know the format (EINVAL). Returns false on ioctl failure or if
+// the reported fourcc differs from the one negotiated at initialization.
+// Runs on the decoder thread.
+bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format,
+                                               Size* visible_size,
+                                               bool* again) {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  *again = false;
+  memset(format, 0, sizeof(*format));
+  format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  if (device_->Ioctl(VIDIOC_G_FMT, format) != 0) {
+    if (errno == EINVAL) {
+      // EINVAL means we haven't seen sufficient stream to decode the format.
+      *again = true;
+      return true;
+    } else {
+      VPLOGF(1) << "ioctl() failed: VIDIOC_G_FMT";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return false;
+    }
+  }
+
+  // Make sure we are still getting the format we set on initialization.
+  if (format->fmt.pix_mp.pixelformat != output_format_fourcc_) {
+    VLOGF(1) << "Unexpected format from G_FMT on output";
+    return false;
+  }
+
+  Size coded_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
+  if (visible_size != nullptr)
+    *visible_size = GetVisibleSize(coded_size);
+
+  return true;
+}
+
+// Records the new format's plane count, coded size, and visible size, then
+// allocates output buffers for it. Returns CreateOutputBuffers()'s result.
+// Runs on the decoder thread.
+bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat(
+    const struct v4l2_format& format,
+    const Size& visible_size) {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  output_planes_count_ = format.fmt.pix_mp.num_planes;
+  coded_size_.SetSize(format.fmt.pix_mp.width, format.fmt.pix_mp.height);
+  visible_size_ = visible_size;
+
+  VLOGF(2) << "new resolution: " << coded_size_.ToString()
+           << ", visible size: " << visible_size_.ToString()
+           << ", decoder output planes count: " << output_planes_count_;
+
+  return CreateOutputBuffers();
+}
+
+// Computes the visible (displayable) size within |coded_size| by asking the
+// driver for the compose rectangle via VIDIOC_G_SELECTION, falling back to
+// the legacy VIDIOC_G_CROP. Falls back to |coded_size| whenever the reported
+// rectangle is unavailable, empty, outside the coded area, or not anchored
+// at the origin (the client assumes frames are coded at (0, 0)).
+// Runs on the decoder thread.
+Size V4L2VideoDecodeAccelerator::GetVisibleSize(
+    const Size& coded_size) {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  struct v4l2_rect* visible_rect;
+  struct v4l2_selection selection_arg;
+  memset(&selection_arg, 0, sizeof(selection_arg));
+  selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  selection_arg.target = V4L2_SEL_TGT_COMPOSE;
+
+  if (device_->Ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
+    VLOGF(2) << "VIDIOC_G_SELECTION is supported";
+    visible_rect = &selection_arg.r;
+  } else {
+    VLOGF(2) << "Fallback to VIDIOC_G_CROP";
+    struct v4l2_crop crop_arg;
+    memset(&crop_arg, 0, sizeof(crop_arg));
+    crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+    if (device_->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
+      VPLOGF(1) << "ioctl() VIDIOC_G_CROP failed";
+      return coded_size;
+    }
+    visible_rect = &crop_arg.c;
+  }
+
+  Rect rect(visible_rect->left, visible_rect->top, visible_rect->width,
+            visible_rect->height);
+  VLOGF(2) << "visible rectangle is " << rect.ToString();
+  if (!Rect(coded_size).Contains(rect)) {
+    DVLOGF(3) << "visible rectangle " << rect.ToString()
+              << " is not inside coded size " << coded_size.ToString();
+    return coded_size;
+  }
+  if (rect.IsEmpty()) {
+    VLOGF(1) << "visible size is empty";
+    return coded_size;
+  }
+
+  // Chrome assume picture frame is coded at (0, 0).
+  if (rect.x() != 0 || rect.y() != 0) {
+    VLOGF(1) << "Unexpected visible rectangle " << rect.ToString()
+             << ", top-left is not origin";
+    return coded_size;
+  }
+
+  return rect.size();
+}
+
+// Allocates kInputBufferCount MMAP input (VIDEO_OUTPUT) buffers via
+// VIDIOC_REQBUFS, queries and mmaps each one, and records its address and
+// length in input_buffer_map_; all indices start on the free list. Returns
+// false on ioctl or mmap failure. Runs on the decoder thread.
+bool V4L2VideoDecodeAccelerator::CreateInputBuffers() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  // We always run this as we prepare to initialize.
+  DCHECK_EQ(decoder_state_, kInitialized);
+  DCHECK(!input_streamon_);
+  DCHECK(input_buffer_map_.empty());
+
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = kInputBufferCount;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
+  // The driver may grant a different count than requested; use reqbufs.count.
+  input_buffer_map_.resize(reqbufs.count);
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    free_input_buffers_.push_back(i);
+
+    // Query for the MEMORY_MMAP pointer.
+    struct v4l2_plane planes[1];
+    struct v4l2_buffer_custom buffer;
+    memset(&buffer, 0, sizeof(buffer));
+    memset(planes, 0, sizeof(planes));
+    buffer.index = i;
+    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    buffer.memory = V4L2_MEMORY_MMAP;
+    buffer.m.planes = planes;
+    buffer.length = 1;
+    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
+    void* address = device_->Mmap(NULL,
+                                  buffer.m.planes[0].length,
+                                  PROT_READ | PROT_WRITE,
+                                  MAP_SHARED,
+                                  buffer.m.planes[0].m.mem_offset);
+    if (address == MAP_FAILED) {
+      VPLOGF(1) << "mmap() failed";
+      return false;
+    }
+    input_buffer_map_[i].address = address;
+    input_buffer_map_[i].length = buffer.m.planes[0].length;
+  }
+
+  return true;
+}
+
+// File-local predicate: returns true if |v4l2_format| is an output fourcc
+// this decoder can hand to the client (currently NV12 only).
+static bool IsSupportedOutputFormat(uint32_t v4l2_format) {
+  // Only support V4L2_PIX_FMT_NV12 output format for now.
+  // TODO(johnylin): add more supported format if necessary.
+  uint32_t kSupportedOutputFmtFourcc[] = { V4L2_PIX_FMT_NV12 };
+  return std::find(
+      kSupportedOutputFmtFourcc,
+      kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc),
+      v4l2_format) !=
+          kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc);
+}
+
+// Negotiates input and output formats with the driver during initialization:
+// sizes the input buffer by the device's max resolution (4K vs 1080p),
+// verifies the input fourcc is supported via VIDIOC_ENUM_FMT, sets the input
+// format, then enumerates CAPTURE formats and picks the first supported one
+// as the output fourcc. Returns false if either side cannot be satisfied.
+// Runs on the child thread, before the decoder thread exists.
+bool V4L2VideoDecodeAccelerator::SetupFormats() {
+  // We always run this as we prepare to initialize.
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kUninitialized);
+  DCHECK(!input_streamon_);
+  DCHECK(!output_streamon_);
+
+  size_t input_size;
+  Size max_resolution, min_resolution;
+  device_->GetSupportedResolution(input_format_fourcc_, &min_resolution,
+                                  &max_resolution);
+  // Larger-than-1080p capable devices get the bigger compressed-input buffer.
+  if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
+    input_size = kInputBufferMaxSizeFor4k;
+  else
+    input_size = kInputBufferMaxSizeFor1080p;
+
+  struct v4l2_fmtdesc fmtdesc;
+  memset(&fmtdesc, 0, sizeof(fmtdesc));
+  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  bool is_format_supported = false;
+  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+    if (fmtdesc.pixelformat == input_format_fourcc_) {
+      is_format_supported = true;
+      break;
+    }
+    ++fmtdesc.index;
+  }
+
+  if (!is_format_supported) {
+    VLOGF(1) << "Input fourcc " << input_format_fourcc_
+             << " not supported by device.";
+    return false;
+  }
+
+  struct v4l2_format format;
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  format.fmt.pix_mp.pixelformat = input_format_fourcc_;
+  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
+  format.fmt.pix_mp.num_planes = 1;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+  // We have to set up the format for output, because the driver may not allow
+  // changing it once we start streaming; whether it can support our chosen
+  // output format or not may depend on the input format.
+  memset(&fmtdesc, 0, sizeof(fmtdesc));
+  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+    if (IsSupportedOutputFormat(fmtdesc.pixelformat)) {
+      output_format_fourcc_ = fmtdesc.pixelformat;
+      break;
+    }
+    ++fmtdesc.index;
+  }
+
+  if (output_format_fourcc_ == 0) {
+    VLOGF(2) << "Image processor not available";
+    return false;
+  }
+  VLOGF(2) << "Output format=" << output_format_fourcc_;
+
+  // Just set the fourcc for output; resolution, etc., will come from the
+  // driver once it extracts it from the stream.
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+  return true;
+}
+
+// Determines how many output buffers are needed (driver DPB minimum plus
+// extras), asks the client for picture buffers of the current coded size,
+// and parks the state machine in kAwaitingPictureBuffers until
+// AssignPictureBuffers() arrives. Import output mode only.
+bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
+  VLOGF(2);
+  DCHECK(decoder_state_ == kInitialized ||
+         decoder_state_ == kChangingResolution);
+  DCHECK(!output_streamon_);
+  DCHECK(output_buffer_map_.empty());
+  DCHECK_EQ(output_mode_, Config::OutputMode::IMPORT);
+
+  // Number of output buffers we need.
+  struct v4l2_control ctrl;
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_CTRL, &ctrl);
+  output_dpb_size_ = ctrl.value;
+
+  // Output format setup in Initialize().
+
+  uint32_t buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
+
+  VideoPixelFormat pixel_format =
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
+
+  child_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_,
+                            buffer_count, pixel_format, coded_size_));
+
+
+  // Go into kAwaitingPictureBuffers to prevent us from doing any more decoding
+  // or event handling while we are waiting for AssignPictureBuffers(). Not
+  // having Pictures available would not have prevented us from making decoding
+  // progress entirely e.g. in the case of H.264 where we could further decode
+  // non-slice NALUs and could even get another resolution change before we were
+  // done with this one. After we get the buffers, we'll go back into kIdle and
+  // kick off further event processing, and eventually go back into kDecoding
+  // once no more events are pending (if any).
+  decoder_state_ = kAwaitingPictureBuffers;
+
+  return true;
+}
+
+// Unmaps every mmapped input buffer and releases the driver-side buffers by
+// issuing VIDIOC_REQBUFS with count 0, then clears the local maps. No-op if
+// no buffers were ever allocated. Must not run while the input stream is on.
+void V4L2VideoDecodeAccelerator::DestroyInputBuffers() {
+  VLOGF(2);
+  DCHECK(!decoder_thread_.IsRunning() ||
+         decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK(!input_streamon_);
+
+  if (input_buffer_map_.empty())
+    return;
+
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    if (input_buffer_map_[i].address != NULL) {
+      device_->Munmap(input_buffer_map_[i].address,
+                      input_buffer_map_[i].length);
+    }
+  }
+
+  // count = 0 frees the driver-side buffer allocation.
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = 0;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
+
+  input_buffer_map_.clear();
+  free_input_buffers_.clear();
+}
+
+// Dismisses every picture buffer with the client, frees the driver-side
+// CAPTURE buffers (REQBUFS count 0), and clears all output bookkeeping.
+// Returns false (after NOTIFY_ERROR) only if the REQBUFS ioctl fails; the
+// local state is cleared regardless. No-op (returning true) if no buffers
+// exist. Must not run while the output stream is on.
+bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
+  VLOGF(2);
+  DCHECK(!decoder_thread_.IsRunning() ||
+         decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK(!output_streamon_);
+  bool success = true;
+
+  if (output_buffer_map_.empty())
+    return true;
+
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    OutputRecord& output_record = output_buffer_map_[i];
+
+    DVLOGF(3) << "dismissing PictureBuffer id=" << output_record.picture_id;
+    child_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&Client::DismissPictureBuffer, client_,
+                              output_record.picture_id));
+  }
+
+  // count = 0 frees the driver-side buffer allocation.
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = 0;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
+    VPLOGF(1) << "ioctl() failed: VIDIOC_REQBUFS";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    success = false;
+  }
+
+  output_buffer_map_.clear();
+  while (!free_output_buffers_.empty())
+    free_output_buffers_.pop_front();
+  output_buffer_queued_count_ = 0;
+  // The client may still hold some buffers. The texture holds a reference to
+  // the buffer. It is OK to free the buffer and destroy EGLImage here.
+  decoder_frames_at_client_ = 0;
+
+  return success;
+}
+
+void V4L2VideoDecodeAccelerator::SendPictureReady() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  bool send_now = (decoder_state_ == kChangingResolution ||
+                   decoder_state_ == kResetting || decoder_flushing_);
+  while (pending_picture_ready_.size() > 0) {
+    bool cleared = pending_picture_ready_.front().cleared;
+    const Picture& picture = pending_picture_ready_.front().picture;
+    if (cleared && picture_clearing_count_ == 0) {
+      // This picture is cleared. It can be posted to a thread different than
+      // the main GPU thread to reduce latency. This should be the case after
+      // all pictures are cleared at the beginning.
+      decode_task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(&Client::PictureReady, decode_client_, picture));
+      pending_picture_ready_.pop();
+    } else if (!cleared || send_now) {
+      DVLOGF(4) << "cleared=" << pending_picture_ready_.front().cleared
+                << ", decoder_state_=" << decoder_state_
+                << ", decoder_flushing_=" << decoder_flushing_
+                << ", picture_clearing_count_=" << picture_clearing_count_;
+      // If the picture is not cleared, post it to the child thread because it
+      // has to be cleared in the child thread. A picture only needs to be
+      // cleared once. If the decoder is changing resolution, resetting or
+      // flushing, send all pictures to ensure PictureReady arrive before
+      // ProvidePictureBuffers, NotifyResetDone, or NotifyFlushDone.
+      child_task_runner_->PostTaskAndReply(
+          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
+          // Unretained is safe. If Client::PictureReady gets to run, |this| is
+          // alive. Destroy() will wait for the decode thread to finish.
+          base::Bind(&V4L2VideoDecodeAccelerator::PictureCleared,
+                     base::Unretained(this)));
+      picture_clearing_count_++;
+      pending_picture_ready_.pop();
+    } else {
+      // This picture is cleared. But some pictures are about to be cleared on
+      // the child thread. To preserve the order, do not send this until those
+      // pictures are cleared.
+      break;
+    }
+  }
+}
+
+void V4L2VideoDecodeAccelerator::PictureCleared() {
+  DVLOGF(4) << "clearing count=" << picture_clearing_count_;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_GT(picture_clearing_count_, 0);
+  picture_clearing_count_--;
+  SendPictureReady();
+}
+
+}  // namespace media
diff --git a/vda/v4l2_video_decode_accelerator.h b/vda/v4l2_video_decode_accelerator.h
new file mode 100644
index 0000000..e18cab4
--- /dev/null
+++ b/vda/v4l2_video_decode_accelerator.h
@@ -0,0 +1,513 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of VideoDecodeAccelerator
+// that utilizes hardware video decoders, which expose Video4Linux 2 API
+// (http://linuxtv.org/downloads/v4l-dvb-apis/).
+// Note: ported from Chromium commit head: 85fdf90
+// Note: image processor is not ported.
+
+#ifndef MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "picture.h"
+#include "size.h"
+#include "v4l2_device.h"
+#include "video_decode_accelerator.h"
+
+namespace media {
+
+class H264Parser;
+
+// This class handles video accelerators directly through a V4L2 device exported
+// by the hardware blocks.
+//
+// The threading model of this class is driven by the fact that it needs to
+// interface two fundamentally different event queues -- the one Chromium
+// provides through MessageLoop, and the one driven by the V4L2 devices which
+// is waited on with epoll().  There are three threads involved in this class:
+//
+// * The child thread, which is the main GPU process thread which calls the
+//   VideoDecodeAccelerator entry points.  Calls from this thread
+//   generally do not block (with the exception of Initialize() and Destroy()).
+//   They post tasks to the decoder_thread_, which actually services the task
+//   and calls back when complete through the
+//   VideoDecodeAccelerator::Client interface.
+// * The decoder_thread_, owned by this class.  It services API tasks, through
+//   the *Task() routines, as well as V4L2 device events, through
+//   ServiceDeviceTask().  Almost all state modification is done on this thread
+//   (this doesn't include buffer (re)allocation sequence, see below).
+// * The device_poll_thread_, owned by this class.  All it does is epoll() on
+//   the V4L2 in DevicePollTask() and schedule a ServiceDeviceTask() on the
+//   decoder_thread_ when something interesting happens.
+//   TODO(sheu): replace this thread with a TYPE_IO decoder_thread_.
+//
+// Note that this class has (almost) no locks, apart from the pictures_assigned_
+// WaitableEvent. Everything (apart from buffer (re)allocation) is serviced on
+// the decoder_thread_, so there are no synchronization issues.
+// ... well, there are, but it's a matter of getting messages posted in the
+// right order, not fiddling with locks.
+// Buffer creation is a two-step process that is serviced partially on the
+// Child thread, because we need to wait for the client to provide textures
+// for the buffers we allocate. We cannot keep the decoder thread running while
+// the client allocates Pictures for us, because we need to REQBUFS first to get
+// the required number of output buffers from the device and that cannot be done
+// unless we free the previous set of buffers, leaving the decoding in an
+// inoperable state for the duration of the wait for Pictures. So to prevent
+// subtle races (esp. if we get Reset() in the meantime), we block the decoder
+// thread while we wait for AssignPictureBuffers from the client.
+//
+// V4L2VideoDecodeAccelerator may use image processor to convert the output.
+// There are three cases:
+// Flush: V4L2VDA should wait until image processor returns all processed
+//   frames.
+// Reset: V4L2VDA doesn't need to wait for image processor. When image processor
+//   returns an old frame, drop it.
+// Resolution change: V4L2VDA destroy image processor when destroying output
+//   buffers. We cannot drop any frame during resolution change. So V4L2VDA
+//   should destroy output buffers after image processor returns all the frames.
+class V4L2VideoDecodeAccelerator
+    : public VideoDecodeAccelerator {
+ public:
+  V4L2VideoDecodeAccelerator(
+      const scoped_refptr<V4L2Device>& device);
+  ~V4L2VideoDecodeAccelerator() override;
+
+  // VideoDecodeAccelerator implementation.
+  // Note: Initialize() and Destroy() are synchronous.
+  bool Initialize(const Config& config, Client* client) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
+  void ImportBufferForPicture(
+      int32_t picture_buffer_id,
+      VideoPixelFormat pixel_format,
+      const NativePixmapHandle& native_pixmap_handle) override;
+  void ReusePictureBuffer(int32_t picture_buffer_id) override;
+  void Flush() override;
+  void Reset() override;
+  void Destroy() override;
+  bool TryToSetupDecodeOnSeparateThread(
+      const base::WeakPtr<Client>& decode_client,
+      const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+      override;
+
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
+
+ private:
+  // These are rather subjectively tuned.
+  enum {
+    kInputBufferCount = 8,
+    // TODO(posciak): determine input buffer size based on level limits.
+    // See http://crbug.com/255116.
+    // Input bitstream buffer size for up to 1080p streams.
+    kInputBufferMaxSizeFor1080p = 1024 * 1024,
+    // Input bitstream buffer size for up to 4k streams.
+    kInputBufferMaxSizeFor4k = 4 * kInputBufferMaxSizeFor1080p,
+    // This is originally from media/base/limits.h in Chromium.
+    kMaxVideoFrames = 4,
+    // Number of output buffers to use for each VDA stage above what's required
+    // by the decoder (e.g. DPB size, in H264).  We need
+    // limits::kMaxVideoFrames to fill up the GpuVideoDecode pipeline,
+    // and +1 for a frame in transit.
+    kDpbOutputBufferExtraCount = kMaxVideoFrames + 1,
+    // Number of extra output buffers if image processor is used.
+    kDpbOutputBufferExtraCountForImageProcessor = 1,
+  };
+
+  // Internal state of the decoder.
+  enum State {
+    kUninitialized,  // Initialize() not yet called.
+    kInitialized,    // Initialize() returned true; ready to start decoding.
+    kDecoding,       // DecodeBufferInitial() successful; decoding frames.
+    kResetting,      // Presently resetting.
+    // Performing resolution change and waiting for image processor to return
+    // all frames.
+    kChangingResolution,
+    // Requested new PictureBuffers via ProvidePictureBuffers(), awaiting
+    // AssignPictureBuffers().
+    kAwaitingPictureBuffers,
+    kError,  // Error in kDecoding state.
+  };
+
+  enum OutputRecordState {
+    kFree,         // Ready to be queued to the device.
+    kAtDevice,     // Held by device.
+    kAtProcessor,  // Held by image processor.
+    kAtClient,     // Held by client of V4L2VideoDecodeAccelerator.
+  };
+
+  enum BufferId {
+    kFlushBufferId = -2  // Buffer id for flush buffer, queued by FlushTask().
+  };
+
+  // Auto-destruction reference for BitstreamBuffer, for message-passing from
+  // Decode() to DecodeTask().
+  struct BitstreamBufferRef;
+
+  // Record for decoded pictures that can be sent to PictureReady.
+  struct PictureRecord {
+    PictureRecord(bool cleared, const Picture& picture);
+    ~PictureRecord();
+    bool cleared;     // Whether the texture is cleared and safe to render from.
+    Picture picture;  // The decoded picture.
+  };
+
+  // Record for input buffers.
+  struct InputRecord {
+    InputRecord();
+    ~InputRecord();
+    bool at_device;    // held by device.
+    void* address;     // mmap() address.
+    size_t length;     // mmap() length.
+    off_t bytes_used;  // bytes filled in the mmap() segment.
+    int32_t input_id;  // triggering input_id as given to Decode().
+  };
+
+  // Record for output buffers.
+  struct OutputRecord {
+    OutputRecord();
+    OutputRecord(OutputRecord&&) = default;
+    ~OutputRecord();
+    OutputRecordState state;
+    int32_t picture_id;     // picture buffer id as returned to PictureReady().
+    bool cleared;           // Whether the texture is cleared and safe to render
+                            // from. See TextureManager for details.
+    // Output fds of the processor. Used only when OutputMode is IMPORT.
+    std::vector<base::ScopedFD> processor_output_fds;
+  };
+
+  //
+  // Decoding tasks, to be run on decode_thread_.
+  //
+
+  // Task to finish initialization on decoder_thread_.
+  void InitializeTask();
+
+  // Enqueue a BitstreamBuffer to decode.  This will enqueue a buffer to the
+  // decoder_input_queue_, then queue a DecodeBufferTask() to actually decode
+  // the buffer.
+  void DecodeTask(const BitstreamBuffer& bitstream_buffer);
+
+  // Decode from the buffers queued in decoder_input_queue_.  Calls
+  // DecodeBufferInitial() or DecodeBufferContinue() as appropriate.
+  void DecodeBufferTask();
+  // Advance to the next fragment that begins a frame.
+  bool AdvanceFrameFragment(const uint8_t* data, size_t size, size_t* endpos);
+  // Schedule another DecodeBufferTask() if we're behind.
+  void ScheduleDecodeBufferTaskIfNeeded();
+
+  // Return true if we should continue to schedule DecodeBufferTask()s after
+  // completion.  Store the amount of input actually consumed in |endpos|.
+  bool DecodeBufferInitial(const void* data, size_t size, size_t* endpos);
+  bool DecodeBufferContinue(const void* data, size_t size);
+
+  // Accumulate data for the next frame to decode.  May return false in
+  // non-error conditions; for example when pipeline is full and should be
+  // retried later.
+  bool AppendToInputFrame(const void* data, size_t size);
+  // Flush data for one decoded frame.
+  bool FlushInputFrame();
+
+  // Allocate V4L2 buffers and assign them to |buffers| provided by the client
+  // via AssignPictureBuffers() on decoder thread.
+  void AssignPictureBuffersTask(const std::vector<PictureBuffer>& buffers);
+
+  // Use buffer backed by dmabuf file descriptors in |dmabuf_fds| for the
+  // OutputRecord associated with |picture_buffer_id|, taking ownership of the
+  // file descriptors.
+  void ImportBufferForPictureTask(int32_t picture_buffer_id,
+                                  std::vector<base::ScopedFD> dmabuf_fds);
+
+  // Service I/O on the V4L2 devices.  This task should only be scheduled from
+  // DevicePollTask().  If |event_pending| is true, one or more events
+  // on file descriptor are pending.
+  void ServiceDeviceTask(bool event_pending);
+  // Handle the various device queues.
+  void Enqueue();
+  void Dequeue();
+  // Dequeue one input buffer. Return true if success.
+  bool DequeueInputBuffer();
+  // Dequeue one output buffer. Return true if success.
+  bool DequeueOutputBuffer();
+
+  // Return true if there is a resolution change event pending.
+  bool DequeueResolutionChangeEvent();
+
+  // Enqueue a buffer on the corresponding queue.
+  bool EnqueueInputRecord();
+  bool EnqueueOutputRecord();
+
+  // Process a ReusePictureBuffer() API call.  The API call creates an EGLSync
+  // object on the main (GPU process) thread; we will record this object so we
+  // can wait on it before reusing the buffer.
+  void ReusePictureBufferTask(int32_t picture_buffer_id);
+
+  // Flush() task.  Child thread should not submit any more buffers until it
+  // receives the NotifyFlushDone callback.  This task will schedule an empty
+  // BitstreamBufferRef (with input_id == kFlushBufferId) to perform the flush.
+  void FlushTask();
+  // Notify the client of a flush completion, if required.  This should be
+  // called any time a relevant queue could potentially be emptied: see
+  // function definition.
+  void NotifyFlushDoneIfNeeded();
+  // Returns true if VIDIOC_DECODER_CMD is supported.
+  bool IsDecoderCmdSupported();
+  // Send V4L2_DEC_CMD_STOP to the driver. Return true if success.
+  bool SendDecoderCmdStop();
+
+  // Reset() task.  Drop all input buffers. If V4L2VDA is not doing resolution
+  // change or waiting for picture buffers, call FinishReset.
+  void ResetTask();
+  // This will schedule a ResetDoneTask() that will send the NotifyResetDone
+  // callback, then set the decoder state to kResetting so that all intervening
+  // tasks will drain.
+  void FinishReset();
+  void ResetDoneTask();
+
+  // Device destruction task.
+  void DestroyTask();
+
+  // Start |device_poll_thread_|.
+  bool StartDevicePoll();
+
+  // Stop |device_poll_thread_|.
+  bool StopDevicePoll();
+
+  bool StopInputStream();
+  bool StopOutputStream();
+
+  void StartResolutionChange();
+  void FinishResolutionChange();
+
+  // Try to get output format and visible size, detected after parsing the
+  // beginning of the stream. Sets |again| to true if more parsing is needed.
+  // |visible_size| could be nullptr and ignored.
+  bool GetFormatInfo(struct v4l2_format* format,
+                     Size* visible_size,
+                     bool* again);
+  // Create output buffers for the given |format| and |visible_size|.
+  bool CreateBuffersForFormat(const struct v4l2_format& format,
+                              const Size& visible_size);
+
+  // Try to get |visible_size|. Return visible size, or, if querying it is not
+  // supported or produces invalid size, return |coded_size| instead.
+  Size GetVisibleSize(const Size& coded_size);
+
+  //
+  // Device tasks, to be run on device_poll_thread_.
+  //
+
+  // The device task.
+  void DevicePollTask(bool poll_device);
+
+  //
+  // Safe from any thread.
+  //
+
+  // Error notification (using PostTask() to child thread, if necessary).
+  void NotifyError(Error error);
+
+  // Set the decoder_state_ to kError and notify the client (if necessary).
+  void SetErrorState(Error error);
+
+  //
+  // Other utility functions.  Called on decoder_thread_, unless
+  // decoder_thread_ is not yet started, in which case the child thread can call
+  // these (e.g. in Initialize() or Destroy()).
+  //
+
+  // Create the buffers we need.
+  bool CreateInputBuffers();
+  bool CreateOutputBuffers();
+
+  // Destroy buffers.
+  void DestroyInputBuffers();
+  // In contrast to DestroyInputBuffers, which is called only on destruction,
+  // we call DestroyOutputBuffers also during playback, on resolution change.
+  // Even if anything fails along the way, we still want to go on and clean
+  // up as much as possible, so return false if this happens, so that the
+  // caller can error out on resolution change.
+  bool DestroyOutputBuffers();
+
+  // Set input and output formats before starting decode.
+  bool SetupFormats();
+
+  //
+  // Methods run on child thread.
+  //
+
+  // Send decoded pictures to PictureReady.
+  void SendPictureReady();
+
+  // Callback that indicates a picture has been cleared.
+  void PictureCleared();
+
+  // Our original calling task runner for the child thread.
+  scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
+
+  // Task runner Decode() and PictureReady() run on.
+  scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
+
+  // WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
+  // device worker threads back to the child thread.  Because the worker threads
+  // are members of this class, any task running on those threads is guaranteed
+  // that this object is still alive.  As a result, tasks posted from the child
+  // thread to the decoder or device thread should use base::Unretained(this),
+  // and tasks posted the other way should use |weak_this_|.
+  base::WeakPtr<V4L2VideoDecodeAccelerator> weak_this_;
+
+  // To expose client callbacks from VideoDecodeAccelerator.
+  // NOTE: all calls to these objects *MUST* be executed on
+  // child_task_runner_.
+  std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
+  base::WeakPtr<Client> client_;
+  // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+  base::WeakPtr<Client> decode_client_;
+
+  //
+  // Decoder state, owned and operated by decoder_thread_.
+  // Before decoder_thread_ has started, the decoder state is managed by
+  // the child (main) thread.  After decoder_thread_ has started, the decoder
+  // thread should be the only one managing these.
+  //
+
+  // This thread services tasks posted from the VDA API entry points by the
+  // child thread and device service callbacks posted from the device thread.
+  base::Thread decoder_thread_;
+  // Decoder state machine state.
+  State decoder_state_;
+
+  Config::OutputMode output_mode_;
+
+  // BitstreamBuffer we're presently reading.
+  std::unique_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_;
+  // The V4L2Device this class is operating upon.
+  scoped_refptr<V4L2Device> device_;
+  // FlushTask() and ResetTask() should not affect buffers that have been
+  // queued afterwards.  For flushing or resetting the pipeline then, we will
+  // delay these buffers until after the flush or reset completes.
+  int decoder_delay_bitstream_buffer_id_;
+  // Input buffer we're presently filling.
+  int decoder_current_input_buffer_;
+  // We track the number of buffer decode tasks we have scheduled, since each
+  // task execution should complete one buffer.  If we fall behind (due to
+  // resource backpressure, etc.), we'll have to schedule more to catch up.
+  int decoder_decode_buffer_tasks_scheduled_;
+  // Picture buffers held by the client.
+  int decoder_frames_at_client_;
+
+  // Are we flushing?
+  bool decoder_flushing_;
+  // True if VIDIOC_DECODER_CMD is supported.
+  bool decoder_cmd_supported_;
+  // True if flushing is waiting for last output buffer. After
+  // VIDIOC_DECODER_CMD is sent to the driver, this flag will be set to true to
+  // wait for the last output buffer. When this flag is true, flush done will
+  // not be sent. After an output buffer that has the flag V4L2_BUF_FLAG_LAST is
+  // received, this is set to false.
+  bool flush_awaiting_last_output_buffer_;
+
+  // Got a reset request while we were performing resolution change or waiting
+  // for picture buffers.
+  bool reset_pending_;
+  // Input queue for decoder_thread_: BitstreamBuffers in.
+  std::queue<linked_ptr<BitstreamBufferRef>> decoder_input_queue_;
+  // For H264 decode, hardware requires that we send it frame-sized chunks.
+  // We'll need to parse the stream.
+  std::unique_ptr<H264Parser> decoder_h264_parser_;
+  // Set if the decoder has a pending incomplete frame in an input buffer.
+  bool decoder_partial_frame_pending_;
+
+  //
+  // Hardware state and associated queues.  Since decoder_thread_ services
+  // the hardware, decoder_thread_ owns these too.
+  // output_buffer_map_, free_output_buffers_ and output_planes_count_ are an
+  // exception during the buffer (re)allocation sequence, when the
+  // decoder_thread_ is blocked briefly while the Child thread manipulates
+  // them.
+  //
+
+  // Completed decode buffers.
+  std::queue<int> input_ready_queue_;
+
+  // Input buffer state.
+  bool input_streamon_;
+  // Input buffers enqueued to device.
+  int input_buffer_queued_count_;
+  // Input buffers ready to use, as a LIFO since we don't care about ordering.
+  std::vector<int> free_input_buffers_;
+  // Mapping of int index to input buffer record.
+  std::vector<InputRecord> input_buffer_map_;
+
+  // Output buffer state.
+  bool output_streamon_;
+  // Output buffers enqueued to device.
+  int output_buffer_queued_count_;
+  // Output buffers ready to use, as a FIFO since we want oldest-first to hide
+  // synchronization latency with GL.
+  std::list<int> free_output_buffers_;
+  // Mapping of int index to output buffer record.
+  std::vector<OutputRecord> output_buffer_map_;
+  // Required size of DPB for decoding.
+  int output_dpb_size_;
+
+  // Number of planes (i.e. separate memory buffers) for output.
+  size_t output_planes_count_;
+
+  // Pictures that are ready but not sent to PictureReady yet.
+  std::queue<PictureRecord> pending_picture_ready_;
+
+  // The number of pictures that are sent to PictureReady and will be cleared.
+  int picture_clearing_count_;
+
+  // Output picture coded size.
+  Size coded_size_;
+
+  // Output picture visible size.
+  Size visible_size_;
+
+  //
+  // The device polling thread handles notifications of V4L2 device changes.
+  //
+
+  // The thread.
+  base::Thread device_poll_thread_;
+
+  //
+  // Other state, held by the child (main) thread.
+  //
+
+  // The codec we'll be decoding for.
+  VideoCodecProfile video_profile_;
+  // Chosen input format for video_profile_.
+  uint32_t input_format_fourcc_;
+  // Chosen output format.
+  uint32_t output_format_fourcc_;
+
+  // Input format V4L2 fourccs this class supports.
+  static const uint32_t supported_input_fourccs_[];
+
+  // The WeakPtrFactory for |weak_this_|.
+  base::WeakPtrFactory<V4L2VideoDecodeAccelerator> weak_this_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2VideoDecodeAccelerator);
+};
+
+}  // namespace media
+
+#endif  // MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/vda/video_codecs.cc b/vda/video_codecs.cc
index 995ee38..61d0708 100644
--- a/vda/video_codecs.cc
+++ b/vda/video_codecs.cc
@@ -1,6 +1,8 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: b03fc92
+// Note: only necessary functions are ported.
 
 #include "video_codecs.h"
 
diff --git a/vda/video_codecs.h b/vda/video_codecs.h
index 30df7ec..2c88d50 100644
--- a/vda/video_codecs.h
+++ b/vda/video_codecs.h
@@ -1,6 +1,8 @@
 // Copyright 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: b03fc92
+// Note: only necessary functions are ported.
 
 #ifndef VIDEO_CODECS_H_
 #define VIDEO_CODECS_H_
diff --git a/vda/video_decode_accelerator.cc b/vda/video_decode_accelerator.cc
index 49afd44..e74d1ec 100644
--- a/vda/video_decode_accelerator.cc
+++ b/vda/video_decode_accelerator.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #include "base/logging.h"
 
@@ -27,7 +28,7 @@
   NOTREACHED() << "By default deferred initialization is not supported.";
 }
 
-VideoDecodeAccelerator::~VideoDecodeAccelerator() {}
+VideoDecodeAccelerator::~VideoDecodeAccelerator() = default;
 
 bool VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
     const base::WeakPtr<Client>& decode_client,
@@ -39,21 +40,22 @@
 
 void VideoDecodeAccelerator::ImportBufferForPicture(
     int32_t picture_buffer_id,
-    const std::vector<base::FileDescriptor>& dmabuf_fds) {
+    VideoPixelFormat pixel_format,
+    const NativePixmapHandle& native_pixmap_handle) {
   NOTREACHED() << "Buffer import not supported.";
 }
 
 VideoDecodeAccelerator::SupportedProfile::SupportedProfile()
     : profile(VIDEO_CODEC_PROFILE_UNKNOWN), encrypted_only(false) {}
 
-VideoDecodeAccelerator::SupportedProfile::~SupportedProfile() {}
+VideoDecodeAccelerator::SupportedProfile::~SupportedProfile() = default;
 
 VideoDecodeAccelerator::Capabilities::Capabilities() : flags(NO_FLAGS) {}
 
 VideoDecodeAccelerator::Capabilities::Capabilities(const Capabilities& other) =
     default;
 
-VideoDecodeAccelerator::Capabilities::~Capabilities() {}
+VideoDecodeAccelerator::Capabilities::~Capabilities() = default;
 
 std::string VideoDecodeAccelerator::Capabilities::AsHumanReadableString()
     const {
diff --git a/vda/video_decode_accelerator.h b/vda/video_decode_accelerator.h
index 8343abe..10601be 100644
--- a/vda/video_decode_accelerator.h
+++ b/vda/video_decode_accelerator.h
@@ -1,17 +1,18 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #ifndef VIDEO_DECODE_ACCELERATOR_H_
 #define VIDEO_DECODE_ACCELERATOR_H_
 
 #include <vector>
 
-#include "base/file_descriptor_posix.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
 
 #include "bitstream_buffer.h"
+#include "native_pixmap_handle.h"
 #include "picture.h"
 #include "size.h"
 #include "video_codecs.h"
@@ -245,16 +246,21 @@
   virtual void AssignPictureBuffers(
       const std::vector<PictureBuffer>& buffers) = 0;
 
-  // Imports |dmabuf_fds| as backing memory for picture buffer
-  // associated with |picture_buffer_id|. This can only be be used if the VDA
-  // has been Initialize()d with config.output_mode = IMPORT, and should be
-  // preceded by a call to AssignPictureBuffers() to set up the number of
-  // PictureBuffers and their details.
+  // Imports |gpu_memory_buffer_handle|, pointing to a buffer in |pixel_format|,
+  // as backing memory for picture buffer associated with |picture_buffer_id|.
+  // This can only be be used if the VDA has been Initialize()d with
+  // config.output_mode = IMPORT, and should be preceded by a call to
+  // AssignPictureBuffers() to set up the number of PictureBuffers and their
+  // details.
+  // The |pixel_format| used here may be different from the |pixel_format|
+  // required in ProvidePictureBuffers(). If the buffer cannot be imported an
+  // error should be notified via NotifyError().
   // After this call, the VDA becomes the owner of those file descriptors,
   // and is responsible for closing it after use, also on import failure.
   virtual void ImportBufferForPicture(
       int32_t picture_buffer_id,
-      const std::vector<base::FileDescriptor>& dmabuf_fds);
+      VideoPixelFormat pixel_format,
+      const NativePixmapHandle& native_pixmap_handle);
 
   // Sends picture buffers to be reused by the decoder. This needs to be called
   // for each buffer that has been processed so that decoder may know onto which
diff --git a/vda/video_pixel_format.h b/vda/video_pixel_format.h
index d593dad..7f75cc4 100644
--- a/vda/video_pixel_format.h
+++ b/vda/video_pixel_format.h
@@ -1,6 +1,8 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 006301b
+// Note: only necessary functions are ported from video_types.h
 
 #ifndef VIDEO_PIXEL_FORMAT_H_
 #define VIDEO_PIXEL_FORMAT_H_
@@ -15,10 +17,14 @@
   PIXEL_FORMAT_UNKNOWN = 0,  // Unknown or unspecified format value.
   PIXEL_FORMAT_I420 =
       1,  // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
-  PIXEL_FORMAT_YV12 = 2,   // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
-  PIXEL_FORMAT_YV16 = 3,   // 16bpp YVU planar 1x1 Y, 2x1 VU samples.
-  PIXEL_FORMAT_YV12A = 4,  // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
-  PIXEL_FORMAT_YV24 = 5,   // 24bpp YUV planar, no subsampling.
+
+  // Note: Chrome does not actually support YVU compositing, so you probably
+  // don't actually want to use this. See http://crbug.com/784627.
+  PIXEL_FORMAT_YV12 = 2,  // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
+
+  PIXEL_FORMAT_I422 = 3,   // 16bpp YUV planar 1x1 Y, 2x1 UV samples.
+  PIXEL_FORMAT_I420A = 4,  // 20bpp YUVA planar 1x1 Y, 2x2 UV, 1x1 A samples.
+  PIXEL_FORMAT_I444 = 5,   // 24bpp YUV planar, no subsampling.
   PIXEL_FORMAT_NV12 =
       6,  // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
   PIXEL_FORMAT_NV21 =
@@ -42,6 +48,8 @@
   // Plane size = Row pitch * (((height+31)/32)*32)
   PIXEL_FORMAT_MT21 = 15,
 
+  // The P* in the formats below designates the number of bits per pixel. I.e.
+  // P9 is 9-bits per pixel, P10 is 10-bits per pixel, etc.
   PIXEL_FORMAT_YUV420P9 = 16,
   PIXEL_FORMAT_YUV420P10 = 17,
   PIXEL_FORMAT_YUV422P9 = 18,
@@ -53,15 +61,12 @@
   PIXEL_FORMAT_YUV422P12 = 23,
   PIXEL_FORMAT_YUV444P12 = 24,
 
-  PIXEL_FORMAT_Y8 = 25,   // single 8bpp plane.
+  /* PIXEL_FORMAT_Y8 = 25, Deprecated */
   PIXEL_FORMAT_Y16 = 26,  // single 16bpp plane.
 
-  PIXEL_FORMAT_I422 =
-      27,  // 16bpp YUV planar 1x1 Y, 2x1 UV samples, a.k.a. YU16.
-
   // Please update UMA histogram enumeration when adding new formats here.
   PIXEL_FORMAT_MAX =
-      PIXEL_FORMAT_I422,  // Must always be equal to largest entry logged.
+      PIXEL_FORMAT_Y16,  // Must always be equal to largest entry logged.
 };
 
 }  // namespace media
diff --git a/vda/videodev2.h b/vda/videodev2.h
deleted file mode 100644
index 54dc343..0000000
--- a/vda/videodev2.h
+++ /dev/null
@@ -1,2338 +0,0 @@
-/*
- *  Video for Linux Two header file
- *
- *  Copyright (C) 1999-2012 the contributors
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  Alternatively you can redistribute this file under the terms of the
- *  BSD license as stated below:
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *  3. The names of its contributors may not be used to endorse or promote
- *     products derived from this software without specific prior written
- *     permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- *  TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- *  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *	Header file for v4l or V4L2 drivers and applications
- * with public API.
- * All kernel-specific stuff were moved to media/v4l2-dev.h, so
- * no #if __KERNEL tests are allowed here
- *
- *	See http://linuxtv.org for more info
- *
- *	Author: Bill Dirks <bill@thedirks.org>
- *		Justin Schoeman
- *              Hans Verkuil <hverkuil@xs4all.nl>
- *		et al.
- */
-
-// Note:
-// This header file is obtained from ChromeOS which is not upstreamed in Linux
-// mainline. This should be removed once it is upstreamed.
-// TODO(johnylin): remove this file once it is upstreamed.
-
-#ifndef _UAPI__LINUX_VIDEODEV2_H
-#define _UAPI__LINUX_VIDEODEV2_H
-
-#ifndef __KERNEL__
-#include <sys/time.h>
-#endif
-#include <linux/compiler.h>
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/v4l2-common.h>
-//#include <linux/v4l2-controls.h>
-#include <v4l2-controls.h>  // use local v4l2-controls.h file
-
-/*
- * Common stuff for both V4L1 and V4L2
- * Moved from videodev.h
- */
-#define VIDEO_MAX_FRAME               32
-#define VIDEO_MAX_PLANES               8
-
-/*
- *	M I S C E L L A N E O U S
- */
-
-/*  Four-character-code (FOURCC) */
-#define v4l2_fourcc(a, b, c, d)\
-	((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
-#define v4l2_fourcc_be(a, b, c, d)	(v4l2_fourcc(a, b, c, d) | (1 << 31))
-
-/*
- *	E N U M S
- */
-enum v4l2_field {
-	V4L2_FIELD_ANY           = 0, /* driver can choose from none,
-					 top, bottom, interlaced
-					 depending on whatever it thinks
-					 is approximate ... */
-	V4L2_FIELD_NONE          = 1, /* this device has no fields ... */
-	V4L2_FIELD_TOP           = 2, /* top field only */
-	V4L2_FIELD_BOTTOM        = 3, /* bottom field only */
-	V4L2_FIELD_INTERLACED    = 4, /* both fields interlaced */
-	V4L2_FIELD_SEQ_TB        = 5, /* both fields sequential into one
-					 buffer, top-bottom order */
-	V4L2_FIELD_SEQ_BT        = 6, /* same as above + bottom-top order */
-	V4L2_FIELD_ALTERNATE     = 7, /* both fields alternating into
-					 separate buffers */
-	V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field
-					 first and the top field is
-					 transmitted first */
-	V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field
-					 first and the bottom field is
-					 transmitted first */
-};
-#define V4L2_FIELD_HAS_TOP(field)	\
-	((field) == V4L2_FIELD_TOP 	||\
-	 (field) == V4L2_FIELD_INTERLACED ||\
-	 (field) == V4L2_FIELD_INTERLACED_TB ||\
-	 (field) == V4L2_FIELD_INTERLACED_BT ||\
-	 (field) == V4L2_FIELD_SEQ_TB	||\
-	 (field) == V4L2_FIELD_SEQ_BT)
-#define V4L2_FIELD_HAS_BOTTOM(field)	\
-	((field) == V4L2_FIELD_BOTTOM 	||\
-	 (field) == V4L2_FIELD_INTERLACED ||\
-	 (field) == V4L2_FIELD_INTERLACED_TB ||\
-	 (field) == V4L2_FIELD_INTERLACED_BT ||\
-	 (field) == V4L2_FIELD_SEQ_TB	||\
-	 (field) == V4L2_FIELD_SEQ_BT)
-#define V4L2_FIELD_HAS_BOTH(field)	\
-	((field) == V4L2_FIELD_INTERLACED ||\
-	 (field) == V4L2_FIELD_INTERLACED_TB ||\
-	 (field) == V4L2_FIELD_INTERLACED_BT ||\
-	 (field) == V4L2_FIELD_SEQ_TB ||\
-	 (field) == V4L2_FIELD_SEQ_BT)
-#define V4L2_FIELD_HAS_T_OR_B(field)	\
-	((field) == V4L2_FIELD_BOTTOM ||\
-	 (field) == V4L2_FIELD_TOP ||\
-	 (field) == V4L2_FIELD_ALTERNATE)
-
-enum v4l2_buf_type {
-	V4L2_BUF_TYPE_VIDEO_CAPTURE        = 1,
-	V4L2_BUF_TYPE_VIDEO_OUTPUT         = 2,
-	V4L2_BUF_TYPE_VIDEO_OVERLAY        = 3,
-	V4L2_BUF_TYPE_VBI_CAPTURE          = 4,
-	V4L2_BUF_TYPE_VBI_OUTPUT           = 5,
-	V4L2_BUF_TYPE_SLICED_VBI_CAPTURE   = 6,
-	V4L2_BUF_TYPE_SLICED_VBI_OUTPUT    = 7,
-#if 1
-	/* Experimental */
-	V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
-#endif
-	V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
-	V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE  = 10,
-	V4L2_BUF_TYPE_SDR_CAPTURE          = 11,
-	V4L2_BUF_TYPE_SDR_OUTPUT           = 12,
-	/* Deprecated, do not use */
-	V4L2_BUF_TYPE_PRIVATE              = 0x80,
-};
-
-#define V4L2_TYPE_IS_MULTIPLANAR(type)			\
-	((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE	\
-	 || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-
-#define V4L2_TYPE_IS_OUTPUT(type)				\
-	((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT			\
-	 || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE		\
-	 || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY		\
-	 || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY	\
-	 || (type) == V4L2_BUF_TYPE_VBI_OUTPUT			\
-	 || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT		\
-	 || (type) == V4L2_BUF_TYPE_SDR_OUTPUT)
-
-enum v4l2_tuner_type {
-	V4L2_TUNER_RADIO	     = 1,
-	V4L2_TUNER_ANALOG_TV	     = 2,
-	V4L2_TUNER_DIGITAL_TV	     = 3,
-	V4L2_TUNER_SDR               = 4,
-	V4L2_TUNER_RF                = 5,
-};
-
-/* Deprecated, do not use */
-#define V4L2_TUNER_ADC  V4L2_TUNER_SDR
-
-enum v4l2_memory {
-	V4L2_MEMORY_MMAP             = 1,
-	V4L2_MEMORY_USERPTR          = 2,
-	V4L2_MEMORY_OVERLAY          = 3,
-	V4L2_MEMORY_DMABUF           = 4,
-};
-
-/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
-enum v4l2_colorspace {
-	/*
-	 * Default colorspace, i.e. let the driver figure it out.
-	 * Can only be used with video capture.
-	 */
-	V4L2_COLORSPACE_DEFAULT       = 0,
-
-	/* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
-	V4L2_COLORSPACE_SMPTE170M     = 1,
-
-	/* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */
-	V4L2_COLORSPACE_SMPTE240M     = 2,
-
-	/* Rec.709: used for HDTV */
-	V4L2_COLORSPACE_REC709        = 3,
-
-	/*
-	 * Deprecated, do not use. No driver will ever return this. This was
-	 * based on a misunderstanding of the bt878 datasheet.
-	 */
-	V4L2_COLORSPACE_BT878         = 4,
-
-	/*
-	 * NTSC 1953 colorspace. This only makes sense when dealing with
-	 * really, really old NTSC recordings. Superseded by SMPTE 170M.
-	 */
-	V4L2_COLORSPACE_470_SYSTEM_M  = 5,
-
-	/*
-	 * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
-	 * dealing with really old PAL/SECAM recordings. Superseded by
-	 * SMPTE 170M.
-	 */
-	V4L2_COLORSPACE_470_SYSTEM_BG = 6,
-
-	/*
-	 * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601
-	 * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG.
-	 */
-	V4L2_COLORSPACE_JPEG          = 7,
-
-	/* For RGB colorspaces such as produces by most webcams. */
-	V4L2_COLORSPACE_SRGB          = 8,
-
-	/* AdobeRGB colorspace */
-	V4L2_COLORSPACE_ADOBERGB      = 9,
-
-	/* BT.2020 colorspace, used for UHDTV. */
-	V4L2_COLORSPACE_BT2020        = 10,
-
-	/* Raw colorspace: for RAW unprocessed images */
-	V4L2_COLORSPACE_RAW           = 11,
-
-	/* DCI-P3 colorspace, used by cinema projectors */
-	V4L2_COLORSPACE_DCI_P3        = 12,
-};
-
-/*
- * Determine how COLORSPACE_DEFAULT should map to a proper colorspace.
- * This depends on whether this is a SDTV image (use SMPTE 170M), an
- * HDTV image (use Rec. 709), or something else (use sRGB).
- */
-#define V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv) \
-	((is_sdtv) ? V4L2_COLORSPACE_SMPTE170M : \
-	 ((is_hdtv) ? V4L2_COLORSPACE_REC709 : V4L2_COLORSPACE_SRGB))
-
-enum v4l2_xfer_func {
-	/*
-	 * Mapping of V4L2_XFER_FUNC_DEFAULT to actual transfer functions
-	 * for the various colorspaces:
-	 *
-	 * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
-	 * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_REC709 and
-	 * V4L2_COLORSPACE_BT2020: V4L2_XFER_FUNC_709
-	 *
-	 * V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
-	 *
-	 * V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
-	 *
-	 * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
-	 *
-	 * V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE
-	 *
-	 * V4L2_COLORSPACE_DCI_P3: V4L2_XFER_FUNC_DCI_P3
-	 */
-	V4L2_XFER_FUNC_DEFAULT     = 0,
-	V4L2_XFER_FUNC_709         = 1,
-	V4L2_XFER_FUNC_SRGB        = 2,
-	V4L2_XFER_FUNC_ADOBERGB    = 3,
-	V4L2_XFER_FUNC_SMPTE240M   = 4,
-	V4L2_XFER_FUNC_NONE        = 5,
-	V4L2_XFER_FUNC_DCI_P3      = 6,
-	V4L2_XFER_FUNC_SMPTE2084   = 7,
-};
-
-/*
- * Determine how XFER_FUNC_DEFAULT should map to a proper transfer function.
- * This depends on the colorspace.
- */
-#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
-	((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
-	 ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
-	  ((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
-	   ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
-	    ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
-	     V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709)))))
-
-enum v4l2_ycbcr_encoding {
-	/*
-	 * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
-	 * various colorspaces:
-	 *
-	 * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
-	 * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and
-	 * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
-	 *
-	 * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
-	 *
-	 * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC
-	 *
-	 * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
-	 *
-	 * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
-	 */
-	V4L2_YCBCR_ENC_DEFAULT        = 0,
-
-	/* ITU-R 601 -- SDTV */
-	V4L2_YCBCR_ENC_601            = 1,
-
-	/* Rec. 709 -- HDTV */
-	V4L2_YCBCR_ENC_709            = 2,
-
-	/* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */
-	V4L2_YCBCR_ENC_XV601          = 3,
-
-	/* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
-	V4L2_YCBCR_ENC_XV709          = 4,
-
-	/* sYCC (Y'CbCr encoding of sRGB) */
-	V4L2_YCBCR_ENC_SYCC           = 5,
-
-	/* BT.2020 Non-constant Luminance Y'CbCr */
-	V4L2_YCBCR_ENC_BT2020         = 6,
-
-	/* BT.2020 Constant Luminance Y'CbcCrc */
-	V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7,
-
-	/* SMPTE 240M -- Obsolete HDTV */
-	V4L2_YCBCR_ENC_SMPTE240M      = 8,
-};
-
-/*
- * Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
- * This depends on the colorspace.
- */
-#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \
-	(((colsp) == V4L2_COLORSPACE_REC709 || \
-	  (colsp) == V4L2_COLORSPACE_DCI_P3) ? V4L2_YCBCR_ENC_709 : \
-	 ((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \
-	  ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \
-	   V4L2_YCBCR_ENC_601)))
-
-enum v4l2_quantization {
-	/*
-	 * The default for R'G'B' quantization is always full range, except
-	 * for the BT2020 colorspace. For Y'CbCr the quantization is always
-	 * limited range, except for COLORSPACE_JPEG, SYCC, XV601 or XV709:
-	 * those are full range.
-	 */
-	V4L2_QUANTIZATION_DEFAULT     = 0,
-	V4L2_QUANTIZATION_FULL_RANGE  = 1,
-	V4L2_QUANTIZATION_LIM_RANGE   = 2,
-};
-
-/*
- * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
- * This depends on whether the image is RGB or not, the colorspace and the
- * Y'CbCr encoding.
- */
-#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
-	(((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
-	 (((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
-	  (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
-	 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
-
-enum v4l2_priority {
-	V4L2_PRIORITY_UNSET       = 0,  /* not initialized */
-	V4L2_PRIORITY_BACKGROUND  = 1,
-	V4L2_PRIORITY_INTERACTIVE = 2,
-	V4L2_PRIORITY_RECORD      = 3,
-	V4L2_PRIORITY_DEFAULT     = V4L2_PRIORITY_INTERACTIVE,
-};
-
-struct v4l2_rect {
-	__s32   left;
-	__s32   top;
-	__u32   width;
-	__u32   height;
-};
-
-struct v4l2_fract {
-	__u32   numerator;
-	__u32   denominator;
-};
-
-/**
-  * struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
-  *
-  * @driver:	   name of the driver module (e.g. "bttv")
-  * @card:	   name of the card (e.g. "Hauppauge WinTV")
-  * @bus_info:	   name of the bus (e.g. "PCI:" + pci_name(pci_dev) )
-  * @version:	   KERNEL_VERSION
-  * @capabilities: capabilities of the physical device as a whole
-  * @device_caps:  capabilities accessed via this particular device (node)
-  * @reserved:	   reserved fields for future extensions
-  */
-struct v4l2_capability {
-	__u8	driver[16];
-	__u8	card[32];
-	__u8	bus_info[32];
-	__u32   version;
-	__u32	capabilities;
-	__u32	device_caps;
-	__u32	reserved[3];
-};
-
-/* Values for 'capabilities' field */
-#define V4L2_CAP_VIDEO_CAPTURE		0x00000001  /* Is a video capture device */
-#define V4L2_CAP_VIDEO_OUTPUT		0x00000002  /* Is a video output device */
-#define V4L2_CAP_VIDEO_OVERLAY		0x00000004  /* Can do video overlay */
-#define V4L2_CAP_VBI_CAPTURE		0x00000010  /* Is a raw VBI capture device */
-#define V4L2_CAP_VBI_OUTPUT		0x00000020  /* Is a raw VBI output device */
-#define V4L2_CAP_SLICED_VBI_CAPTURE	0x00000040  /* Is a sliced VBI capture device */
-#define V4L2_CAP_SLICED_VBI_OUTPUT	0x00000080  /* Is a sliced VBI output device */
-#define V4L2_CAP_RDS_CAPTURE		0x00000100  /* RDS data capture */
-#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY	0x00000200  /* Can do video output overlay */
-#define V4L2_CAP_HW_FREQ_SEEK		0x00000400  /* Can do hardware frequency seek  */
-#define V4L2_CAP_RDS_OUTPUT		0x00000800  /* Is an RDS encoder */
-
-/* Is a video capture device that supports multiplanar formats */
-#define V4L2_CAP_VIDEO_CAPTURE_MPLANE	0x00001000
-/* Is a video output device that supports multiplanar formats */
-#define V4L2_CAP_VIDEO_OUTPUT_MPLANE	0x00002000
-/* Is a video mem-to-mem device that supports multiplanar formats */
-#define V4L2_CAP_VIDEO_M2M_MPLANE	0x00004000
-/* Is a video mem-to-mem device */
-#define V4L2_CAP_VIDEO_M2M		0x00008000
-
-#define V4L2_CAP_TUNER			0x00010000  /* has a tuner */
-#define V4L2_CAP_AUDIO			0x00020000  /* has audio support */
-#define V4L2_CAP_RADIO			0x00040000  /* is a radio device */
-#define V4L2_CAP_MODULATOR		0x00080000  /* has a modulator */
-
-#define V4L2_CAP_SDR_CAPTURE		0x00100000  /* Is a SDR capture device */
-#define V4L2_CAP_EXT_PIX_FORMAT		0x00200000  /* Supports the extended pixel format */
-#define V4L2_CAP_SDR_OUTPUT		0x00400000  /* Is a SDR output device */
-
-#define V4L2_CAP_READWRITE              0x01000000  /* read/write systemcalls */
-#define V4L2_CAP_ASYNCIO                0x02000000  /* async I/O */
-#define V4L2_CAP_STREAMING              0x04000000  /* streaming I/O ioctls */
-
-#define V4L2_CAP_DEVICE_CAPS            0x80000000  /* sets device capabilities field */
-
-/*
- *	V I D E O   I M A G E   F O R M A T
- */
-struct v4l2_pix_format {
-	__u32         		width;
-	__u32			height;
-	__u32			pixelformat;
-	__u32			field;		/* enum v4l2_field */
-	__u32            	bytesperline;	/* for padding, zero if unused */
-	__u32          		sizeimage;
-	__u32			colorspace;	/* enum v4l2_colorspace */
-	__u32			priv;		/* private data, depends on pixelformat */
-	__u32			flags;		/* format flags (V4L2_PIX_FMT_FLAG_*) */
-	__u32			ycbcr_enc;	/* enum v4l2_ycbcr_encoding */
-	__u32			quantization;	/* enum v4l2_quantization */
-	__u32			xfer_func;	/* enum v4l2_xfer_func */
-};
-
-/*      Pixel format         FOURCC                          depth  Description  */
-
-/* RGB formats */
-#define V4L2_PIX_FMT_RGB332  v4l2_fourcc('R', 'G', 'B', '1') /*  8  RGB-3-3-2     */
-#define V4L2_PIX_FMT_RGB444  v4l2_fourcc('R', '4', '4', '4') /* 16  xxxxrrrr ggggbbbb */
-#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16  aaaarrrr ggggbbbb */
-#define V4L2_PIX_FMT_XRGB444 v4l2_fourcc('X', 'R', '1', '2') /* 16  xxxxrrrr ggggbbbb */
-#define V4L2_PIX_FMT_RGB555  v4l2_fourcc('R', 'G', 'B', 'O') /* 16  RGB-5-5-5     */
-#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16  ARGB-1-5-5-5  */
-#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16  XRGB-1-5-5-5  */
-#define V4L2_PIX_FMT_RGB565  v4l2_fourcc('R', 'G', 'B', 'P') /* 16  RGB-5-6-5     */
-#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16  RGB-5-5-5 BE  */
-#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16  ARGB-5-5-5 BE */
-#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16  XRGB-5-5-5 BE */
-#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16  RGB-5-6-5 BE  */
-#define V4L2_PIX_FMT_BGR666  v4l2_fourcc('B', 'G', 'R', 'H') /* 18  BGR-6-6-6	  */
-#define V4L2_PIX_FMT_BGR24   v4l2_fourcc('B', 'G', 'R', '3') /* 24  BGR-8-8-8     */
-#define V4L2_PIX_FMT_RGB24   v4l2_fourcc('R', 'G', 'B', '3') /* 24  RGB-8-8-8     */
-#define V4L2_PIX_FMT_BGR32   v4l2_fourcc('B', 'G', 'R', '4') /* 32  BGR-8-8-8-8   */
-#define V4L2_PIX_FMT_ABGR32  v4l2_fourcc('A', 'R', '2', '4') /* 32  BGRA-8-8-8-8  */
-#define V4L2_PIX_FMT_XBGR32  v4l2_fourcc('X', 'R', '2', '4') /* 32  BGRX-8-8-8-8  */
-#define V4L2_PIX_FMT_RGB32   v4l2_fourcc('R', 'G', 'B', '4') /* 32  RGB-8-8-8-8   */
-#define V4L2_PIX_FMT_ARGB32  v4l2_fourcc('B', 'A', '2', '4') /* 32  ARGB-8-8-8-8  */
-#define V4L2_PIX_FMT_XRGB32  v4l2_fourcc('B', 'X', '2', '4') /* 32  XRGB-8-8-8-8  */
-
-/* Grey formats */
-#define V4L2_PIX_FMT_GREY    v4l2_fourcc('G', 'R', 'E', 'Y') /*  8  Greyscale     */
-#define V4L2_PIX_FMT_Y4      v4l2_fourcc('Y', '0', '4', ' ') /*  4  Greyscale     */
-#define V4L2_PIX_FMT_Y6      v4l2_fourcc('Y', '0', '6', ' ') /*  6  Greyscale     */
-#define V4L2_PIX_FMT_Y10     v4l2_fourcc('Y', '1', '0', ' ') /* 10  Greyscale     */
-#define V4L2_PIX_FMT_Y12     v4l2_fourcc('Y', '1', '2', ' ') /* 12  Greyscale     */
-#define V4L2_PIX_FMT_Y16     v4l2_fourcc('Y', '1', '6', ' ') /* 16  Greyscale     */
-#define V4L2_PIX_FMT_Y16_BE  v4l2_fourcc_be('Y', '1', '6', ' ') /* 16  Greyscale BE  */
-
-/* Grey bit-packed formats */
-#define V4L2_PIX_FMT_Y10BPACK    v4l2_fourcc('Y', '1', '0', 'B') /* 10  Greyscale bit-packed */
-
-/* Palette formats */
-#define V4L2_PIX_FMT_PAL8    v4l2_fourcc('P', 'A', 'L', '8') /*  8  8-bit palette */
-
-/* Chrominance formats */
-#define V4L2_PIX_FMT_UV8     v4l2_fourcc('U', 'V', '8', ' ') /*  8  UV 4:4 */
-
-/* Luminance+Chrominance formats */
-#define V4L2_PIX_FMT_YVU410  v4l2_fourcc('Y', 'V', 'U', '9') /*  9  YVU 4:1:0     */
-#define V4L2_PIX_FMT_YVU420  v4l2_fourcc('Y', 'V', '1', '2') /* 12  YVU 4:2:0     */
-#define V4L2_PIX_FMT_YUYV    v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16  YUV 4:2:2     */
-#define V4L2_PIX_FMT_YYUV    v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16  YUV 4:2:2     */
-#define V4L2_PIX_FMT_YVYU    v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
-#define V4L2_PIX_FMT_UYVY    v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16  YUV 4:2:2     */
-#define V4L2_PIX_FMT_VYUY    v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16  YUV 4:2:2     */
-#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16  YVU422 planar */
-#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16  YVU411 planar */
-#define V4L2_PIX_FMT_Y41P    v4l2_fourcc('Y', '4', '1', 'P') /* 12  YUV 4:1:1     */
-#define V4L2_PIX_FMT_YUV444  v4l2_fourcc('Y', '4', '4', '4') /* 16  xxxxyyyy uuuuvvvv */
-#define V4L2_PIX_FMT_YUV555  v4l2_fourcc('Y', 'U', 'V', 'O') /* 16  YUV-5-5-5     */
-#define V4L2_PIX_FMT_YUV565  v4l2_fourcc('Y', 'U', 'V', 'P') /* 16  YUV-5-6-5     */
-#define V4L2_PIX_FMT_YUV32   v4l2_fourcc('Y', 'U', 'V', '4') /* 32  YUV-8-8-8-8   */
-#define V4L2_PIX_FMT_YUV410  v4l2_fourcc('Y', 'U', 'V', '9') /*  9  YUV 4:1:0     */
-#define V4L2_PIX_FMT_YUV420  v4l2_fourcc('Y', 'U', '1', '2') /* 12  YUV 4:2:0     */
-#define V4L2_PIX_FMT_HI240   v4l2_fourcc('H', 'I', '2', '4') /*  8  8-bit color   */
-#define V4L2_PIX_FMT_HM12    v4l2_fourcc('H', 'M', '1', '2') /*  8  YUV 4:2:0 16x16 macroblocks */
-#define V4L2_PIX_FMT_M420    v4l2_fourcc('M', '4', '2', '0') /* 12  YUV 4:2:0 2 lines y, 1 line uv interleaved */
-
-/* two planes -- one Y, one Cr + Cb interleaved  */
-#define V4L2_PIX_FMT_NV12    v4l2_fourcc('N', 'V', '1', '2') /* 12  Y/CbCr 4:2:0  */
-#define V4L2_PIX_FMT_NV21    v4l2_fourcc('N', 'V', '2', '1') /* 12  Y/CrCb 4:2:0  */
-#define V4L2_PIX_FMT_NV16    v4l2_fourcc('N', 'V', '1', '6') /* 16  Y/CbCr 4:2:2  */
-#define V4L2_PIX_FMT_NV61    v4l2_fourcc('N', 'V', '6', '1') /* 16  Y/CrCb 4:2:2  */
-#define V4L2_PIX_FMT_NV24    v4l2_fourcc('N', 'V', '2', '4') /* 24  Y/CbCr 4:4:4  */
-#define V4L2_PIX_FMT_NV42    v4l2_fourcc('N', 'V', '4', '2') /* 24  Y/CrCb 4:4:4  */
-
-/* two non contiguous planes - one Y, one Cr + Cb interleaved  */
-#define V4L2_PIX_FMT_NV12M   v4l2_fourcc('N', 'M', '1', '2') /* 12  Y/CbCr 4:2:0  */
-#define V4L2_PIX_FMT_NV21M   v4l2_fourcc('N', 'M', '2', '1') /* 21  Y/CrCb 4:2:0  */
-#define V4L2_PIX_FMT_NV16M   v4l2_fourcc('N', 'M', '1', '6') /* 16  Y/CbCr 4:2:2  */
-#define V4L2_PIX_FMT_NV61M   v4l2_fourcc('N', 'M', '6', '1') /* 16  Y/CrCb 4:2:2  */
-#define V4L2_PIX_FMT_NV12MT  v4l2_fourcc('T', 'M', '1', '2') /* 12  Y/CbCr 4:2:0 64x32 macroblocks */
-#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12  Y/CbCr 4:2:0 16x16 macroblocks */
-
-/* three non contiguous planes - Y, Cb, Cr */
-#define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12  YUV420 planar */
-#define V4L2_PIX_FMT_YVU420M v4l2_fourcc('Y', 'M', '2', '1') /* 12  YVU420 planar */
-#define V4L2_PIX_FMT_YUV422M v4l2_fourcc('Y', 'M', '1', '6') /* 16  YUV422 planar */
-#define V4L2_PIX_FMT_YVU422M v4l2_fourcc('Y', 'M', '6', '1') /* 16  YVU422 planar */
-#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24  YUV444 planar */
-#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24  YVU444 planar */
-
-/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
-#define V4L2_PIX_FMT_SBGGR8  v4l2_fourcc('B', 'A', '8', '1') /*  8  BGBG.. GRGR.. */
-#define V4L2_PIX_FMT_SGBRG8  v4l2_fourcc('G', 'B', 'R', 'G') /*  8  GBGB.. RGRG.. */
-#define V4L2_PIX_FMT_SGRBG8  v4l2_fourcc('G', 'R', 'B', 'G') /*  8  GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SRGGB8  v4l2_fourcc('R', 'G', 'G', 'B') /*  8  RGRG.. GBGB.. */
-#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10  BGBG.. GRGR.. */
-#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10  GBGB.. RGRG.. */
-#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10  GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10  RGRG.. GBGB.. */
-	/* 10bit raw bayer packed, 5 bytes for every 4 pixels */
-#define V4L2_PIX_FMT_SBGGR10P v4l2_fourcc('p', 'B', 'A', 'A')
-#define V4L2_PIX_FMT_SGBRG10P v4l2_fourcc('p', 'G', 'A', 'A')
-#define V4L2_PIX_FMT_SGRBG10P v4l2_fourcc('p', 'g', 'A', 'A')
-#define V4L2_PIX_FMT_SRGGB10P v4l2_fourcc('p', 'R', 'A', 'A')
-	/* 10bit raw bayer a-law compressed to 8 bits */
-#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8')
-#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8')
-#define V4L2_PIX_FMT_SGRBG10ALAW8 v4l2_fourcc('a', 'g', 'A', '8')
-#define V4L2_PIX_FMT_SRGGB10ALAW8 v4l2_fourcc('a', 'R', 'A', '8')
-	/* 10bit raw bayer DPCM compressed to 8 bits */
-#define V4L2_PIX_FMT_SBGGR10DPCM8 v4l2_fourcc('b', 'B', 'A', '8')
-#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
-#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
-#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8')
-#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12  BGBG.. GRGR.. */
-#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12  GBGB.. RGRG.. */
-#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12  GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12  RGRG.. GBGB.. */
-#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16  BGBG.. GRGR.. */
-
-/* compressed formats */
-#define V4L2_PIX_FMT_MJPEG    v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG   */
-#define V4L2_PIX_FMT_JPEG     v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG     */
-#define V4L2_PIX_FMT_DV       v4l2_fourcc('d', 'v', 's', 'd') /* 1394          */
-#define V4L2_PIX_FMT_MPEG     v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */
-#define V4L2_PIX_FMT_H264     v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */
-#define V4L2_PIX_FMT_H264_NO_SC v4l2_fourcc('A', 'V', 'C', '1') /* H264 without start codes */
-#define V4L2_PIX_FMT_H264_MVC v4l2_fourcc('M', '2', '6', '4') /* H264 MVC */
-#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
-#define V4L2_PIX_FMT_H263     v4l2_fourcc('H', '2', '6', '3') /* H263          */
-#define V4L2_PIX_FMT_MPEG1    v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES     */
-#define V4L2_PIX_FMT_MPEG2    v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES     */
-#define V4L2_PIX_FMT_MPEG4    v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */
-#define V4L2_PIX_FMT_XVID     v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid           */
-#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
-#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
-#define V4L2_PIX_FMT_VP8      v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
-#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frames */
-#define V4L2_PIX_FMT_VP9      v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
-#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frames */
-
-/*  Vendor-specific formats   */
-#define V4L2_PIX_FMT_CPIA1    v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
-#define V4L2_PIX_FMT_WNVA     v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
-#define V4L2_PIX_FMT_SN9C10X  v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
-#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
-#define V4L2_PIX_FMT_PWC1     v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
-#define V4L2_PIX_FMT_PWC2     v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
-#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */
-#define V4L2_PIX_FMT_SPCA501  v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */
-#define V4L2_PIX_FMT_SPCA505  v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */
-#define V4L2_PIX_FMT_SPCA508  v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
-#define V4L2_PIX_FMT_SPCA561  v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
-#define V4L2_PIX_FMT_PAC207   v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
-#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
-#define V4L2_PIX_FMT_JL2005BCD v4l2_fourcc('J', 'L', '2', '0') /* compressed RGGB bayer */
-#define V4L2_PIX_FMT_SN9C2028 v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */
-#define V4L2_PIX_FMT_SQ905C   v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
-#define V4L2_PIX_FMT_PJPG     v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
-#define V4L2_PIX_FMT_OV511    v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
-#define V4L2_PIX_FMT_OV518    v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
-#define V4L2_PIX_FMT_STV0680  v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
-#define V4L2_PIX_FMT_TM6000   v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
-#define V4L2_PIX_FMT_CIT_YYVYUY v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */
-#define V4L2_PIX_FMT_KONICA420  v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */
-#define V4L2_PIX_FMT_JPGL	v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
-#define V4L2_PIX_FMT_SE401      v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
-#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
-#define V4L2_PIX_FMT_Y8I      v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
-#define V4L2_PIX_FMT_Y12I     v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
-#define V4L2_PIX_FMT_Z16      v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
-#define V4L2_PIX_FMT_MT21C    v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode  */
-
-/* SDR formats - used only for Software Defined Radio devices */
-#define V4L2_SDR_FMT_CU8          v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
-#define V4L2_SDR_FMT_CU16LE       v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
-#define V4L2_SDR_FMT_CS8          v4l2_fourcc('C', 'S', '0', '8') /* complex s8 */
-#define V4L2_SDR_FMT_CS14LE       v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */
-#define V4L2_SDR_FMT_RU12LE       v4l2_fourcc('R', 'U', '1', '2') /* real u12le */
-
-/* priv field value to indicates that subsequent fields are valid. */
-#define V4L2_PIX_FMT_PRIV_MAGIC		0xfeedcafe
-
-/* Flags */
-#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA	0x00000001
-
-/*
- *	F O R M A T   E N U M E R A T I O N
- */
-struct v4l2_fmtdesc {
-	__u32		    index;             /* Format number      */
-	__u32		    type;              /* enum v4l2_buf_type */
-	__u32               flags;
-	__u8		    description[32];   /* Description string */
-	__u32		    pixelformat;       /* Format fourcc      */
-	__u32		    reserved[4];
-};
-
-#define V4L2_FMT_FLAG_COMPRESSED 0x0001
-#define V4L2_FMT_FLAG_EMULATED   0x0002
-
-#if 1
-	/* Experimental Frame Size and frame rate enumeration */
-/*
- *	F R A M E   S I Z E   E N U M E R A T I O N
- */
-enum v4l2_frmsizetypes {
-	V4L2_FRMSIZE_TYPE_DISCRETE	= 1,
-	V4L2_FRMSIZE_TYPE_CONTINUOUS	= 2,
-	V4L2_FRMSIZE_TYPE_STEPWISE	= 3,
-};
-
-struct v4l2_frmsize_discrete {
-	__u32			width;		/* Frame width [pixel] */
-	__u32			height;		/* Frame height [pixel] */
-};
-
-struct v4l2_frmsize_stepwise {
-	__u32			min_width;	/* Minimum frame width [pixel] */
-	__u32			max_width;	/* Maximum frame width [pixel] */
-	__u32			step_width;	/* Frame width step size [pixel] */
-	__u32			min_height;	/* Minimum frame height [pixel] */
-	__u32			max_height;	/* Maximum frame height [pixel] */
-	__u32			step_height;	/* Frame height step size [pixel] */
-};
-
-struct v4l2_frmsizeenum {
-	__u32			index;		/* Frame size number */
-	__u32			pixel_format;	/* Pixel format */
-	__u32			type;		/* Frame size type the device supports. */
-
-	union {					/* Frame size */
-		struct v4l2_frmsize_discrete	discrete;
-		struct v4l2_frmsize_stepwise	stepwise;
-	};
-
-	__u32   reserved[2];			/* Reserved space for future use */
-};
-
-/*
- *	F R A M E   R A T E   E N U M E R A T I O N
- */
-enum v4l2_frmivaltypes {
-	V4L2_FRMIVAL_TYPE_DISCRETE	= 1,
-	V4L2_FRMIVAL_TYPE_CONTINUOUS	= 2,
-	V4L2_FRMIVAL_TYPE_STEPWISE	= 3,
-};
-
-struct v4l2_frmival_stepwise {
-	struct v4l2_fract	min;		/* Minimum frame interval [s] */
-	struct v4l2_fract	max;		/* Maximum frame interval [s] */
-	struct v4l2_fract	step;		/* Frame interval step size [s] */
-};
-
-struct v4l2_frmivalenum {
-	__u32			index;		/* Frame format index */
-	__u32			pixel_format;	/* Pixel format */
-	__u32			width;		/* Frame width */
-	__u32			height;		/* Frame height */
-	__u32			type;		/* Frame interval type the device supports. */
-
-	union {					/* Frame interval */
-		struct v4l2_fract		discrete;
-		struct v4l2_frmival_stepwise	stepwise;
-	};
-
-	__u32	reserved[2];			/* Reserved space for future use */
-};
-#endif
-
-/*
- *	T I M E C O D E
- */
-struct v4l2_timecode {
-	__u32	type;
-	__u32	flags;
-	__u8	frames;
-	__u8	seconds;
-	__u8	minutes;
-	__u8	hours;
-	__u8	userbits[4];
-};
-
-/*  Type  */
-#define V4L2_TC_TYPE_24FPS		1
-#define V4L2_TC_TYPE_25FPS		2
-#define V4L2_TC_TYPE_30FPS		3
-#define V4L2_TC_TYPE_50FPS		4
-#define V4L2_TC_TYPE_60FPS		5
-
-/*  Flags  */
-#define V4L2_TC_FLAG_DROPFRAME		0x0001 /* "drop-frame" mode */
-#define V4L2_TC_FLAG_COLORFRAME		0x0002
-#define V4L2_TC_USERBITS_field		0x000C
-#define V4L2_TC_USERBITS_USERDEFINED	0x0000
-#define V4L2_TC_USERBITS_8BITCHARS	0x0008
-/* The above is based on SMPTE timecodes */
-
-struct v4l2_jpegcompression {
-	int quality;
-
-	int  APPn;              /* Number of APP segment to be written,
-				 * must be 0..15 */
-	int  APP_len;           /* Length of data in JPEG APPn segment */
-	char APP_data[60];      /* Data in the JPEG APPn segment. */
-
-	int  COM_len;           /* Length of data in JPEG COM segment */
-	char COM_data[60];      /* Data in JPEG COM segment */
-
-	__u32 jpeg_markers;     /* Which markers should go into the JPEG
-				 * output. Unless you exactly know what
-				 * you do, leave them untouched.
-				 * Including less markers will make the
-				 * resulting code smaller, but there will
-				 * be fewer applications which can read it.
-				 * The presence of the APP and COM marker
-				 * is influenced by APP_len and COM_len
-				 * ONLY, not by this property! */
-
-#define V4L2_JPEG_MARKER_DHT (1<<3)    /* Define Huffman Tables */
-#define V4L2_JPEG_MARKER_DQT (1<<4)    /* Define Quantization Tables */
-#define V4L2_JPEG_MARKER_DRI (1<<5)    /* Define Restart Interval */
-#define V4L2_JPEG_MARKER_COM (1<<6)    /* Comment segment */
-#define V4L2_JPEG_MARKER_APP (1<<7)    /* App segment, driver will
-					* always use APP0 */
-};
-
-/*
- *	M E M O R Y - M A P P I N G   B U F F E R S
- */
-struct v4l2_requestbuffers {
-	__u32			count;
-	__u32			type;		/* enum v4l2_buf_type */
-	__u32			memory;		/* enum v4l2_memory */
-	__u32			reserved[2];
-};
-
-/**
- * struct v4l2_plane - plane info for multi-planar buffers
- * @bytesused:		number of bytes occupied by data in the plane (payload)
- * @length:		size of this plane (NOT the payload) in bytes
- * @mem_offset:		when memory in the associated struct v4l2_buffer is
- *			V4L2_MEMORY_MMAP, equals the offset from the start of
- *			the device memory for this plane (or is a "cookie" that
- *			should be passed to mmap() called on the video node)
- * @userptr:		when memory is V4L2_MEMORY_USERPTR, a userspace pointer
- *			pointing to this plane
- * @fd:			when memory is V4L2_MEMORY_DMABUF, a userspace file
- *			descriptor associated with this plane
- * @data_offset:	offset in the plane to the start of data; usually 0,
- *			unless there is a header in front of the data
- *
- * Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer
- * with two planes can have one plane for Y, and another for interleaved CbCr
- * components. Each plane can reside in a separate memory buffer, or even in
- * a completely separate memory node (e.g. in embedded devices).
- */
-struct v4l2_plane {
-	__u32			bytesused;
-	__u32			length;
-	union {
-		__u32		mem_offset;
-		unsigned long	userptr;
-		__s32		fd;
-	} m;
-	__u32			data_offset;
-	__u32			reserved[11];
-};
-
-/**
- * struct v4l2_buffer - video buffer info
- * @index:	id number of the buffer
- * @type:	enum v4l2_buf_type; buffer type (type == *_MPLANE for
- *		multiplanar buffers);
- * @bytesused:	number of bytes occupied by data in the buffer (payload);
- *		unused (set to 0) for multiplanar buffers
- * @flags:	buffer informational flags
- * @field:	enum v4l2_field; field order of the image in the buffer
- * @timestamp:	frame timestamp
- * @timecode:	frame timecode
- * @sequence:	sequence count of this frame
- * @memory:	enum v4l2_memory; the method, in which the actual video data is
- *		passed
- * @offset:	for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
- *		offset from the start of the device memory for this plane,
- *		(or a "cookie" that should be passed to mmap() as offset)
- * @userptr:	for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
- *		a userspace pointer pointing to this buffer
- * @fd:		for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
- *		a userspace file descriptor associated with this buffer
- * @planes:	for multiplanar buffers; userspace pointer to the array of plane
- *		info structs for this buffer
- * @length:	size in bytes of the buffer (NOT its payload) for single-plane
- *		buffers (when type != *_MPLANE); number of elements in the
- *		planes array for multi-plane buffers
- * @config_store: this buffer should use this configuration store
- *
- * Contains data exchanged by application and driver using one of the Streaming
- * I/O methods.
- */
-struct v4l2_buffer {
-	__u32			index;
-	__u32			type;
-	__u32			bytesused;
-	__u32			flags;
-	__u32			field;
-	struct timeval		timestamp;
-	struct v4l2_timecode	timecode;
-	__u32			sequence;
-
-	/* memory location */
-	__u32			memory;
-	union {
-		__u32           offset;
-		unsigned long   userptr;
-		struct v4l2_plane *planes;
-		__s32		fd;
-	} m;
-	__u32			length;
-	__u32			config_store;
-	__u32			reserved;
-};
-
-/*  Flags for 'flags' field */
-/* Buffer is mapped (flag) */
-#define V4L2_BUF_FLAG_MAPPED			0x00000001
-/* Buffer is queued for processing */
-#define V4L2_BUF_FLAG_QUEUED			0x00000002
-/* Buffer is ready */
-#define V4L2_BUF_FLAG_DONE			0x00000004
-/* Image is a keyframe (I-frame) */
-#define V4L2_BUF_FLAG_KEYFRAME			0x00000008
-/* Image is a P-frame */
-#define V4L2_BUF_FLAG_PFRAME			0x00000010
-/* Image is a B-frame */
-#define V4L2_BUF_FLAG_BFRAME			0x00000020
-/* Buffer is ready, but the data contained within is corrupted. */
-#define V4L2_BUF_FLAG_ERROR			0x00000040
-/* timecode field is valid */
-#define V4L2_BUF_FLAG_TIMECODE			0x00000100
-/* Buffer is prepared for queuing */
-#define V4L2_BUF_FLAG_PREPARED			0x00000400
-/* Cache handling flags */
-#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE	0x00000800
-#define V4L2_BUF_FLAG_NO_CACHE_CLEAN		0x00001000
-/* Timestamp type */
-#define V4L2_BUF_FLAG_TIMESTAMP_MASK		0x0000e000
-#define V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN		0x00000000
-#define V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC	0x00002000
-#define V4L2_BUF_FLAG_TIMESTAMP_COPY		0x00004000
-/* Timestamp sources. */
-#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK		0x00070000
-#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF		0x00000000
-#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE		0x00010000
-/* mem2mem encoder/decoder */
-#define V4L2_BUF_FLAG_LAST			0x00100000
-
-/**
- * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
- *
- * @index:	id number of the buffer
- * @type:	enum v4l2_buf_type; buffer type (type == *_MPLANE for
- *		multiplanar buffers);
- * @plane:	index of the plane to be exported, 0 for single plane queues
- * @flags:	flags for newly created file, currently only O_CLOEXEC is
- *		supported, refer to manual of open syscall for more details
- * @fd:		file descriptor associated with DMABUF (set by driver)
- *
- * Contains data used for exporting a video buffer as DMABUF file descriptor.
- * The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF
- * (identical to the cookie used to mmap() the buffer to userspace). All
- * reserved fields must be set to zero. The field reserved0 is expected to
- * become a structure 'type' allowing an alternative layout of the structure
- * content. Therefore this field should not be used for any other extensions.
- */
-struct v4l2_exportbuffer {
-	__u32		type; /* enum v4l2_buf_type */
-	__u32		index;
-	__u32		plane;
-	__u32		flags;
-	__s32		fd;
-	__u32		reserved[11];
-};
-
-/*
- *	O V E R L A Y   P R E V I E W
- */
-struct v4l2_framebuffer {
-	__u32			capability;
-	__u32			flags;
-/* FIXME: in theory we should pass something like PCI device + memory
- * region + offset instead of some physical address */
-	void                    *base;
-	struct {
-		__u32		width;
-		__u32		height;
-		__u32		pixelformat;
-		__u32		field;		/* enum v4l2_field */
-		__u32		bytesperline;	/* for padding, zero if unused */
-		__u32		sizeimage;
-		__u32		colorspace;	/* enum v4l2_colorspace */
-		__u32		priv;		/* reserved field, set to 0 */
-	} fmt;
-};
-/*  Flags for the 'capability' field. Read only */
-#define V4L2_FBUF_CAP_EXTERNOVERLAY	0x0001
-#define V4L2_FBUF_CAP_CHROMAKEY		0x0002
-#define V4L2_FBUF_CAP_LIST_CLIPPING     0x0004
-#define V4L2_FBUF_CAP_BITMAP_CLIPPING	0x0008
-#define V4L2_FBUF_CAP_LOCAL_ALPHA	0x0010
-#define V4L2_FBUF_CAP_GLOBAL_ALPHA	0x0020
-#define V4L2_FBUF_CAP_LOCAL_INV_ALPHA	0x0040
-#define V4L2_FBUF_CAP_SRC_CHROMAKEY	0x0080
-/*  Flags for the 'flags' field. */
-#define V4L2_FBUF_FLAG_PRIMARY		0x0001
-#define V4L2_FBUF_FLAG_OVERLAY		0x0002
-#define V4L2_FBUF_FLAG_CHROMAKEY	0x0004
-#define V4L2_FBUF_FLAG_LOCAL_ALPHA	0x0008
-#define V4L2_FBUF_FLAG_GLOBAL_ALPHA	0x0010
-#define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA	0x0020
-#define V4L2_FBUF_FLAG_SRC_CHROMAKEY	0x0040
-
-struct v4l2_clip {
-	struct v4l2_rect        c;
-	struct v4l2_clip	__user *next;
-};
-
-struct v4l2_window {
-	struct v4l2_rect        w;
-	__u32			field;	 /* enum v4l2_field */
-	__u32			chromakey;
-	struct v4l2_clip	__user *clips;
-	__u32			clipcount;
-	void			__user *bitmap;
-	__u8                    global_alpha;
-};
-
-/*
- *	C A P T U R E   P A R A M E T E R S
- */
-struct v4l2_captureparm {
-	__u32		   capability;	  /*  Supported modes */
-	__u32		   capturemode;	  /*  Current mode */
-	struct v4l2_fract  timeperframe;  /*  Time per frame in seconds */
-	__u32		   extendedmode;  /*  Driver-specific extensions */
-	__u32              readbuffers;   /*  # of buffers for read */
-	__u32		   reserved[4];
-};
-
-/*  Flags for 'capability' and 'capturemode' fields */
-#define V4L2_MODE_HIGHQUALITY	0x0001	/*  High quality imaging mode */
-#define V4L2_CAP_TIMEPERFRAME	0x1000	/*  timeperframe field is supported */
-
-struct v4l2_outputparm {
-	__u32		   capability;	 /*  Supported modes */
-	__u32		   outputmode;	 /*  Current mode */
-	struct v4l2_fract  timeperframe; /*  Time per frame in seconds */
-	__u32		   extendedmode; /*  Driver-specific extensions */
-	__u32              writebuffers; /*  # of buffers for write */
-	__u32		   reserved[4];
-};
-
-/*
- *	I N P U T   I M A G E   C R O P P I N G
- */
-struct v4l2_cropcap {
-	__u32			type;	/* enum v4l2_buf_type */
-	struct v4l2_rect        bounds;
-	struct v4l2_rect        defrect;
-	struct v4l2_fract       pixelaspect;
-};
-
-struct v4l2_crop {
-	__u32			type;	/* enum v4l2_buf_type */
-	struct v4l2_rect        c;
-};
-
-/**
- * struct v4l2_selection - selection info
- * @type:	buffer type (do not use *_MPLANE types)
- * @target:	Selection target, used to choose one of possible rectangles;
- *		defined in v4l2-common.h; V4L2_SEL_TGT_* .
- * @flags:	constraints flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
- * @r:		coordinates of selection window
- * @reserved:	for future use, rounds structure size to 64 bytes, set to zero
- *
- * Hardware may use multiple helper windows to process a video stream.
- * The structure is used to exchange this selection areas between
- * an application and a driver.
- */
-struct v4l2_selection {
-	__u32			type;
-	__u32			target;
-	__u32                   flags;
-	struct v4l2_rect        r;
-	__u32                   reserved[9];
-};
-
-
-/*
- *      A N A L O G   V I D E O   S T A N D A R D
- */
-
-typedef __u64 v4l2_std_id;
-
-/* one bit for each */
-#define V4L2_STD_PAL_B          ((v4l2_std_id)0x00000001)
-#define V4L2_STD_PAL_B1         ((v4l2_std_id)0x00000002)
-#define V4L2_STD_PAL_G          ((v4l2_std_id)0x00000004)
-#define V4L2_STD_PAL_H          ((v4l2_std_id)0x00000008)
-#define V4L2_STD_PAL_I          ((v4l2_std_id)0x00000010)
-#define V4L2_STD_PAL_D          ((v4l2_std_id)0x00000020)
-#define V4L2_STD_PAL_D1         ((v4l2_std_id)0x00000040)
-#define V4L2_STD_PAL_K          ((v4l2_std_id)0x00000080)
-
-#define V4L2_STD_PAL_M          ((v4l2_std_id)0x00000100)
-#define V4L2_STD_PAL_N          ((v4l2_std_id)0x00000200)
-#define V4L2_STD_PAL_Nc         ((v4l2_std_id)0x00000400)
-#define V4L2_STD_PAL_60         ((v4l2_std_id)0x00000800)
-
-#define V4L2_STD_NTSC_M         ((v4l2_std_id)0x00001000)	/* BTSC */
-#define V4L2_STD_NTSC_M_JP      ((v4l2_std_id)0x00002000)	/* EIA-J */
-#define V4L2_STD_NTSC_443       ((v4l2_std_id)0x00004000)
-#define V4L2_STD_NTSC_M_KR      ((v4l2_std_id)0x00008000)	/* FM A2 */
-
-#define V4L2_STD_SECAM_B        ((v4l2_std_id)0x00010000)
-#define V4L2_STD_SECAM_D        ((v4l2_std_id)0x00020000)
-#define V4L2_STD_SECAM_G        ((v4l2_std_id)0x00040000)
-#define V4L2_STD_SECAM_H        ((v4l2_std_id)0x00080000)
-#define V4L2_STD_SECAM_K        ((v4l2_std_id)0x00100000)
-#define V4L2_STD_SECAM_K1       ((v4l2_std_id)0x00200000)
-#define V4L2_STD_SECAM_L        ((v4l2_std_id)0x00400000)
-#define V4L2_STD_SECAM_LC       ((v4l2_std_id)0x00800000)
-
-/* ATSC/HDTV */
-#define V4L2_STD_ATSC_8_VSB     ((v4l2_std_id)0x01000000)
-#define V4L2_STD_ATSC_16_VSB    ((v4l2_std_id)0x02000000)
-
-/* FIXME:
-   Although std_id is 64 bits, there is an issue on PPC32 architecture that
-   makes switch(__u64) to break. So, there's a hack on v4l2-common.c rounding
-   this value to 32 bits.
-   As, currently, the max value is for V4L2_STD_ATSC_16_VSB (30 bits wide),
-   it should work fine. However, if needed to add more than two standards,
-   v4l2-common.c should be fixed.
- */
-
-/*
- * Some macros to merge video standards in order to make live easier for the
- * drivers and V4L2 applications
- */
-
-/*
- * "Common" NTSC/M - It should be noticed that V4L2_STD_NTSC_443 is
- * Missing here.
- */
-#define V4L2_STD_NTSC           (V4L2_STD_NTSC_M	|\
-				 V4L2_STD_NTSC_M_JP     |\
-				 V4L2_STD_NTSC_M_KR)
-/* Secam macros */
-#define V4L2_STD_SECAM_DK      	(V4L2_STD_SECAM_D	|\
-				 V4L2_STD_SECAM_K	|\
-				 V4L2_STD_SECAM_K1)
-/* All Secam Standards */
-#define V4L2_STD_SECAM		(V4L2_STD_SECAM_B	|\
-				 V4L2_STD_SECAM_G	|\
-				 V4L2_STD_SECAM_H	|\
-				 V4L2_STD_SECAM_DK	|\
-				 V4L2_STD_SECAM_L       |\
-				 V4L2_STD_SECAM_LC)
-/* PAL macros */
-#define V4L2_STD_PAL_BG		(V4L2_STD_PAL_B		|\
-				 V4L2_STD_PAL_B1	|\
-				 V4L2_STD_PAL_G)
-#define V4L2_STD_PAL_DK		(V4L2_STD_PAL_D		|\
-				 V4L2_STD_PAL_D1	|\
-				 V4L2_STD_PAL_K)
-/*
- * "Common" PAL - This macro is there to be compatible with the old
- * V4L1 concept of "PAL": /BGDKHI.
- * Several PAL standards are missing here: /M, /N and /Nc
- */
-#define V4L2_STD_PAL		(V4L2_STD_PAL_BG	|\
-				 V4L2_STD_PAL_DK	|\
-				 V4L2_STD_PAL_H		|\
-				 V4L2_STD_PAL_I)
-/* Chroma "agnostic" standards */
-#define V4L2_STD_B		(V4L2_STD_PAL_B		|\
-				 V4L2_STD_PAL_B1	|\
-				 V4L2_STD_SECAM_B)
-#define V4L2_STD_G		(V4L2_STD_PAL_G		|\
-				 V4L2_STD_SECAM_G)
-#define V4L2_STD_H		(V4L2_STD_PAL_H		|\
-				 V4L2_STD_SECAM_H)
-#define V4L2_STD_L		(V4L2_STD_SECAM_L	|\
-				 V4L2_STD_SECAM_LC)
-#define V4L2_STD_GH		(V4L2_STD_G		|\
-				 V4L2_STD_H)
-#define V4L2_STD_DK		(V4L2_STD_PAL_DK	|\
-				 V4L2_STD_SECAM_DK)
-#define V4L2_STD_BG		(V4L2_STD_B		|\
-				 V4L2_STD_G)
-#define V4L2_STD_MN		(V4L2_STD_PAL_M		|\
-				 V4L2_STD_PAL_N		|\
-				 V4L2_STD_PAL_Nc	|\
-				 V4L2_STD_NTSC)
-
-/* Standards where MTS/BTSC stereo could be found */
-#define V4L2_STD_MTS		(V4L2_STD_NTSC_M	|\
-				 V4L2_STD_PAL_M		|\
-				 V4L2_STD_PAL_N		|\
-				 V4L2_STD_PAL_Nc)
-
-/* Standards for Countries with 60Hz Line frequency */
-#define V4L2_STD_525_60		(V4L2_STD_PAL_M		|\
-				 V4L2_STD_PAL_60	|\
-				 V4L2_STD_NTSC		|\
-				 V4L2_STD_NTSC_443)
-/* Standards for Countries with 50Hz Line frequency */
-#define V4L2_STD_625_50		(V4L2_STD_PAL		|\
-				 V4L2_STD_PAL_N		|\
-				 V4L2_STD_PAL_Nc	|\
-				 V4L2_STD_SECAM)
-
-#define V4L2_STD_ATSC           (V4L2_STD_ATSC_8_VSB    |\
-				 V4L2_STD_ATSC_16_VSB)
-/* Macros with none and all analog standards */
-#define V4L2_STD_UNKNOWN        0
-#define V4L2_STD_ALL            (V4L2_STD_525_60	|\
-				 V4L2_STD_625_50)
-
-struct v4l2_standard {
-	__u32		     index;
-	v4l2_std_id          id;
-	__u8		     name[24];
-	struct v4l2_fract    frameperiod; /* Frames, not fields */
-	__u32		     framelines;
-	__u32		     reserved[4];
-};
-
-/*
- *	D V 	B T	T I M I N G S
- */
-
-/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
- * @width:	total width of the active video in pixels
- * @height:	total height of the active video in lines
- * @interlaced:	Interlaced or progressive
- * @polarities:	Positive or negative polarities
- * @pixelclock:	Pixel clock in HZ. Ex. 74.25MHz->74250000
- * @hfrontporch:Horizontal front porch in pixels
- * @hsync:	Horizontal Sync length in pixels
- * @hbackporch:	Horizontal back porch in pixels
- * @vfrontporch:Vertical front porch in lines
- * @vsync:	Vertical Sync length in lines
- * @vbackporch:	Vertical back porch in lines
- * @il_vfrontporch:Vertical front porch for the even field
- *		(aka field 2) of interlaced field formats
- * @il_vsync:	Vertical Sync length for the even field
- *		(aka field 2) of interlaced field formats
- * @il_vbackporch:Vertical back porch for the even field
- *		(aka field 2) of interlaced field formats
- * @standards:	Standards the timing belongs to
- * @flags:	Flags
- * @reserved:	Reserved fields, must be zeroed.
- *
- * A note regarding vertical interlaced timings: height refers to the total
- * height of the active video frame (= two fields). The blanking timings refer
- * to the blanking of each field. So the height of the total frame is
- * calculated as follows:
- *
- * tot_height = height + vfrontporch + vsync + vbackporch +
- *                       il_vfrontporch + il_vsync + il_vbackporch
- *
- * The active height of each field is height / 2.
- */
-struct v4l2_bt_timings {
-	__u32	width;
-	__u32	height;
-	__u32	interlaced;
-	__u32	polarities;
-	__u64	pixelclock;
-	__u32	hfrontporch;
-	__u32	hsync;
-	__u32	hbackporch;
-	__u32	vfrontporch;
-	__u32	vsync;
-	__u32	vbackporch;
-	__u32	il_vfrontporch;
-	__u32	il_vsync;
-	__u32	il_vbackporch;
-	__u32	standards;
-	__u32	flags;
-	__u32	reserved[14];
-} __attribute__ ((packed));
-
-/* Interlaced or progressive format */
-#define	V4L2_DV_PROGRESSIVE	0
-#define	V4L2_DV_INTERLACED	1
-
-/* Polarities. If bit is not set, it is assumed to be negative polarity */
-#define V4L2_DV_VSYNC_POS_POL	0x00000001
-#define V4L2_DV_HSYNC_POS_POL	0x00000002
-
-/* Timings standards */
-#define V4L2_DV_BT_STD_CEA861	(1 << 0)  /* CEA-861 Digital TV Profile */
-#define V4L2_DV_BT_STD_DMT	(1 << 1)  /* VESA Discrete Monitor Timings */
-#define V4L2_DV_BT_STD_CVT	(1 << 2)  /* VESA Coordinated Video Timings */
-#define V4L2_DV_BT_STD_GTF	(1 << 3)  /* VESA Generalized Timings Formula */
-
-/* Flags */
-
-/* CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
-   GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
-   intervals are reduced, allowing a higher resolution over the same
-   bandwidth. This is a read-only flag. */
-#define V4L2_DV_FL_REDUCED_BLANKING		(1 << 0)
-/* CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
-   of six. These formats can be optionally played at 1 / 1.001 speed.
-   This is a read-only flag. */
-#define V4L2_DV_FL_CAN_REDUCE_FPS		(1 << 1)
-/* CEA-861 specific: only valid for video transmitters, the flag is cleared
-   by receivers.
-   If the framerate of the format is a multiple of six, then the pixelclock
-   used to set up the transmitter is divided by 1.001 to make it compatible
-   with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
-   29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
-   such frequencies, then the flag will also be cleared. */
-#define V4L2_DV_FL_REDUCED_FPS			(1 << 2)
-/* Specific to interlaced formats: if set, then field 1 is really one half-line
-   longer and field 2 is really one half-line shorter, so each field has
-   exactly the same number of half-lines. Whether half-lines can be detected
-   or used depends on the hardware. */
-#define V4L2_DV_FL_HALF_LINE			(1 << 3)
-/* If set, then this is a Consumer Electronics (CE) video format. Such formats
- * differ from other formats (commonly called IT formats) in that if RGB
- * encoding is used then by default the RGB values use limited range (i.e.
- * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
- * except for the 640x480 format are CE formats. */
-#define V4L2_DV_FL_IS_CE_VIDEO			(1 << 4)
-
-/* A few useful defines to calculate the total blanking and frame sizes */
-#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
-	((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)
-#define V4L2_DV_BT_FRAME_WIDTH(bt) \
-	((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
-#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
-	((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
-	 (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
-#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
-	((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
-
-/** struct v4l2_dv_timings - DV timings
- * @type:	the type of the timings
- * @bt:	BT656/1120 timings
- */
-struct v4l2_dv_timings {
-	__u32 type;
-	union {
-		struct v4l2_bt_timings	bt;
-		__u32	reserved[32];
-	};
-} __attribute__ ((packed));
-
-/* Values for the type field */
-#define V4L2_DV_BT_656_1120	0	/* BT.656/1120 timing type */
-
-
-/** struct v4l2_enum_dv_timings - DV timings enumeration
- * @index:	enumeration index
- * @pad:	the pad number for which to enumerate timings (used with
- *		v4l-subdev nodes only)
- * @reserved:	must be zeroed
- * @timings:	the timings for the given index
- */
-struct v4l2_enum_dv_timings {
-	__u32 index;
-	__u32 pad;
-	__u32 reserved[2];
-	struct v4l2_dv_timings timings;
-};
-
-/** struct v4l2_bt_timings_cap - BT.656/BT.1120 timing capabilities
- * @min_width:		width in pixels
- * @max_width:		width in pixels
- * @min_height:		height in lines
- * @max_height:		height in lines
- * @min_pixelclock:	Pixel clock in HZ. Ex. 74.25MHz->74250000
- * @max_pixelclock:	Pixel clock in HZ. Ex. 74.25MHz->74250000
- * @standards:		Supported standards
- * @capabilities:	Supported capabilities
- * @reserved:		Must be zeroed
- */
-struct v4l2_bt_timings_cap {
-	__u32	min_width;
-	__u32	max_width;
-	__u32	min_height;
-	__u32	max_height;
-	__u64	min_pixelclock;
-	__u64	max_pixelclock;
-	__u32	standards;
-	__u32	capabilities;
-	__u32	reserved[16];
-} __attribute__ ((packed));
-
-/* Supports interlaced formats */
-#define V4L2_DV_BT_CAP_INTERLACED	(1 << 0)
-/* Supports progressive formats */
-#define V4L2_DV_BT_CAP_PROGRESSIVE	(1 << 1)
-/* Supports CVT/GTF reduced blanking */
-#define V4L2_DV_BT_CAP_REDUCED_BLANKING	(1 << 2)
-/* Supports custom formats */
-#define V4L2_DV_BT_CAP_CUSTOM		(1 << 3)
-
-/** struct v4l2_dv_timings_cap - DV timings capabilities
- * @type:	the type of the timings (same as in struct v4l2_dv_timings)
- * @pad:	the pad number for which to query capabilities (used with
- *		v4l-subdev nodes only)
- * @bt:		the BT656/1120 timings capabilities
- */
-struct v4l2_dv_timings_cap {
-	__u32 type;
-	__u32 pad;
-	__u32 reserved[2];
-	union {
-		struct v4l2_bt_timings_cap bt;
-		__u32 raw_data[32];
-	};
-};
-
-
-/*
- *	V I D E O   I N P U T S
- */
-struct v4l2_input {
-	__u32	     index;		/*  Which input */
-	__u8	     name[32];		/*  Label */
-	__u32	     type;		/*  Type of input */
-	__u32	     audioset;		/*  Associated audios (bitfield) */
-	__u32        tuner;             /*  enum v4l2_tuner_type */
-	v4l2_std_id  std;
-	__u32	     status;
-	__u32	     capabilities;
-	__u32	     reserved[3];
-};
-
-/*  Values for the 'type' field */
-#define V4L2_INPUT_TYPE_TUNER		1
-#define V4L2_INPUT_TYPE_CAMERA		2
-
-/* field 'status' - general */
-#define V4L2_IN_ST_NO_POWER    0x00000001  /* Attached device is off */
-#define V4L2_IN_ST_NO_SIGNAL   0x00000002
-#define V4L2_IN_ST_NO_COLOR    0x00000004
-
-/* field 'status' - sensor orientation */
-/* If sensor is mounted upside down set both bits */
-#define V4L2_IN_ST_HFLIP       0x00000010 /* Frames are flipped horizontally */
-#define V4L2_IN_ST_VFLIP       0x00000020 /* Frames are flipped vertically */
-
-/* field 'status' - analog */
-#define V4L2_IN_ST_NO_H_LOCK   0x00000100  /* No horizontal sync lock */
-#define V4L2_IN_ST_COLOR_KILL  0x00000200  /* Color killer is active */
-
-/* field 'status' - digital */
-#define V4L2_IN_ST_NO_SYNC     0x00010000  /* No synchronization lock */
-#define V4L2_IN_ST_NO_EQU      0x00020000  /* No equalizer lock */
-#define V4L2_IN_ST_NO_CARRIER  0x00040000  /* Carrier recovery failed */
-
-/* field 'status' - VCR and set-top box */
-#define V4L2_IN_ST_MACROVISION 0x01000000  /* Macrovision detected */
-#define V4L2_IN_ST_NO_ACCESS   0x02000000  /* Conditional access denied */
-#define V4L2_IN_ST_VTR         0x04000000  /* VTR time constant */
-
-/* capabilities flags */
-#define V4L2_IN_CAP_DV_TIMINGS		0x00000002 /* Supports S_DV_TIMINGS */
-#define V4L2_IN_CAP_CUSTOM_TIMINGS	V4L2_IN_CAP_DV_TIMINGS /* For compatibility */
-#define V4L2_IN_CAP_STD			0x00000004 /* Supports S_STD */
-#define V4L2_IN_CAP_NATIVE_SIZE		0x00000008 /* Supports setting native size */
-
-/*
- *	V I D E O   O U T P U T S
- */
-struct v4l2_output {
-	__u32	     index;		/*  Which output */
-	__u8	     name[32];		/*  Label */
-	__u32	     type;		/*  Type of output */
-	__u32	     audioset;		/*  Associated audios (bitfield) */
-	__u32	     modulator;         /*  Associated modulator */
-	v4l2_std_id  std;
-	__u32	     capabilities;
-	__u32	     reserved[3];
-};
-/*  Values for the 'type' field */
-#define V4L2_OUTPUT_TYPE_MODULATOR		1
-#define V4L2_OUTPUT_TYPE_ANALOG			2
-#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY	3
-
-/* capabilities flags */
-#define V4L2_OUT_CAP_DV_TIMINGS		0x00000002 /* Supports S_DV_TIMINGS */
-#define V4L2_OUT_CAP_CUSTOM_TIMINGS	V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */
-#define V4L2_OUT_CAP_STD		0x00000004 /* Supports S_STD */
-#define V4L2_OUT_CAP_NATIVE_SIZE	0x00000008 /* Supports setting native size */
-
-/*
- *	C O N T R O L S
- */
-struct v4l2_control {
-	__u32		     id;
-	__s32		     value;
-};
-
-struct v4l2_ext_control {
-	__u32 id;
-	__u32 size;
-	__u32 reserved2[1];
-	union {
-		__s32 value;
-		__s64 value64;
-		char __user *string;
-		__u8 __user *p_u8;
-		__u16 __user *p_u16;
-		__u32 __user *p_u32;
-		struct v4l2_ctrl_h264_sps __user *p_h264_sps;
-		struct v4l2_ctrl_h264_pps __user *p_h264_pps;
-		struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scal_mtrx;
-		struct v4l2_ctrl_h264_slice_param __user *p_h264_slice_param;
-		struct v4l2_ctrl_h264_decode_param __user *p_h264_decode_param;
-		struct v4l2_ctrl_vp8_frame_hdr __user *p_vp8_frame_hdr;
-		struct v4l2_ctrl_vp9_frame_hdr __user *p_vp9_frame_hdr;
-		struct v4l2_ctrl_vp9_decode_param __user *p_vp9_decode_param;
-		struct v4l2_ctrl_vp9_entropy __user *p_vp9_entropy;
-		void __user *ptr;
-	};
-} __attribute__ ((packed));
-
-struct v4l2_ext_controls {
-	union {
-		__u32 ctrl_class;
-		__u32 config_store;
-	};
-	__u32 count;
-	__u32 error_idx;
-	__u32 reserved[2];
-	struct v4l2_ext_control *controls;
-};
-
-#define V4L2_CTRL_ID_MASK      	  (0x0fffffff)
-#define V4L2_CTRL_ID2CLASS(id)    ((id) & 0x0fff0000UL)
-#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
-#define V4L2_CTRL_MAX_DIMS	  (4)
-
-enum v4l2_ctrl_type {
-	V4L2_CTRL_TYPE_INTEGER	     = 1,
-	V4L2_CTRL_TYPE_BOOLEAN	     = 2,
-	V4L2_CTRL_TYPE_MENU	     = 3,
-	V4L2_CTRL_TYPE_BUTTON	     = 4,
-	V4L2_CTRL_TYPE_INTEGER64     = 5,
-	V4L2_CTRL_TYPE_CTRL_CLASS    = 6,
-	V4L2_CTRL_TYPE_STRING        = 7,
-	V4L2_CTRL_TYPE_BITMASK       = 8,
-	V4L2_CTRL_TYPE_INTEGER_MENU  = 9,
-
-	/* Compound types are >= 0x0100 */
-	V4L2_CTRL_COMPOUND_TYPES     = 0x0100,
-	V4L2_CTRL_TYPE_U8	     = 0x0100,
-	V4L2_CTRL_TYPE_U16	     = 0x0101,
-	V4L2_CTRL_TYPE_U32	     = 0x0102,
-	V4L2_CTRL_TYPE_H264_SPS      = 0x0103,
-	V4L2_CTRL_TYPE_H264_PPS      = 0x0104,
-	V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0105,
-	V4L2_CTRL_TYPE_H264_SLICE_PARAM = 0x0106,
-	V4L2_CTRL_TYPE_H264_DECODE_PARAM = 0x0107,
-	V4L2_CTRL_TYPE_VP8_FRAME_HDR	= 0x108,
-	V4L2_CTRL_TYPE_VP9_FRAME_HDR	= 0x109,
-	V4L2_CTRL_TYPE_VP9_DECODE_PARAM	= 0x110,
-	V4L2_CTRL_TYPE_VP9_ENTROPY	= 0x111,
-
-	V4L2_CTRL_TYPE_PRIVATE       = 0xffff,
-};
-
-/*  Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
-struct v4l2_queryctrl {
-	__u32		     id;
-	__u32		     type;	/* enum v4l2_ctrl_type */
-	__u8		     name[32];	/* Whatever */
-	__s32		     minimum;	/* Note signedness */
-	__s32		     maximum;
-	__s32		     step;
-	__s32		     default_value;
-	__u32                flags;
-	__u32		     reserved[2];
-};
-
-/*  Used in the VIDIOC_QUERY_EXT_CTRL ioctl for querying extended controls */
-struct v4l2_query_ext_ctrl {
-	__u32		     id;
-	__u32		     type;
-	char		     name[32];
-	__s64		     minimum;
-	__s64		     maximum;
-	__u64		     step;
-	__s64		     default_value;
-	__u32                flags;
-	__u32                elem_size;
-	__u32                elems;
-	__u32                nr_of_dims;
-	__u32                dims[V4L2_CTRL_MAX_DIMS];
-	__u32		     reserved[32];
-};
-
-/*  Used in the VIDIOC_QUERYMENU ioctl for querying menu items */
-struct v4l2_querymenu {
-	__u32		id;
-	__u32		index;
-	union {
-		__u8	name[32];	/* Whatever */
-		__s64	value;
-	};
-	__u32		reserved;
-} __attribute__ ((packed));
-
-/*  Control flags  */
-#define V4L2_CTRL_FLAG_DISABLED		0x0001
-#define V4L2_CTRL_FLAG_GRABBED		0x0002
-#define V4L2_CTRL_FLAG_READ_ONLY 	0x0004
-#define V4L2_CTRL_FLAG_UPDATE 		0x0008
-#define V4L2_CTRL_FLAG_INACTIVE 	0x0010
-#define V4L2_CTRL_FLAG_SLIDER 		0x0020
-#define V4L2_CTRL_FLAG_WRITE_ONLY 	0x0040
-#define V4L2_CTRL_FLAG_VOLATILE		0x0080
-#define V4L2_CTRL_FLAG_HAS_PAYLOAD	0x0100
-#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE	0x0200
-#define V4L2_CTRL_FLAG_CAN_STORE	0x0400
-
-/*  Query flags, to be ORed with the control ID */
-#define V4L2_CTRL_FLAG_NEXT_CTRL	0x80000000
-#define V4L2_CTRL_FLAG_NEXT_COMPOUND	0x40000000
-
-/*  User-class control IDs defined by V4L2 */
-#define V4L2_CID_MAX_CTRLS		1024
-/*  IDs reserved for driver specific controls */
-#define V4L2_CID_PRIVATE_BASE		0x08000000
-
-
-/*
- *	T U N I N G
- */
-struct v4l2_tuner {
-	__u32                   index;
-	__u8			name[32];
-	__u32			type;	/* enum v4l2_tuner_type */
-	__u32			capability;
-	__u32			rangelow;
-	__u32			rangehigh;
-	__u32			rxsubchans;
-	__u32			audmode;
-	__s32			signal;
-	__s32			afc;
-	__u32			reserved[4];
-};
-
-struct v4l2_modulator {
-	__u32			index;
-	__u8			name[32];
-	__u32			capability;
-	__u32			rangelow;
-	__u32			rangehigh;
-	__u32			txsubchans;
-	__u32			type;	/* enum v4l2_tuner_type */
-	__u32			reserved[3];
-};
-
-/*  Flags for the 'capability' field */
-#define V4L2_TUNER_CAP_LOW		0x0001
-#define V4L2_TUNER_CAP_NORM		0x0002
-#define V4L2_TUNER_CAP_HWSEEK_BOUNDED	0x0004
-#define V4L2_TUNER_CAP_HWSEEK_WRAP	0x0008
-#define V4L2_TUNER_CAP_STEREO		0x0010
-#define V4L2_TUNER_CAP_LANG2		0x0020
-#define V4L2_TUNER_CAP_SAP		0x0020
-#define V4L2_TUNER_CAP_LANG1		0x0040
-#define V4L2_TUNER_CAP_RDS		0x0080
-#define V4L2_TUNER_CAP_RDS_BLOCK_IO	0x0100
-#define V4L2_TUNER_CAP_RDS_CONTROLS	0x0200
-#define V4L2_TUNER_CAP_FREQ_BANDS	0x0400
-#define V4L2_TUNER_CAP_HWSEEK_PROG_LIM	0x0800
-#define V4L2_TUNER_CAP_1HZ		0x1000
-
-/*  Flags for the 'rxsubchans' field */
-#define V4L2_TUNER_SUB_MONO		0x0001
-#define V4L2_TUNER_SUB_STEREO		0x0002
-#define V4L2_TUNER_SUB_LANG2		0x0004
-#define V4L2_TUNER_SUB_SAP		0x0004
-#define V4L2_TUNER_SUB_LANG1		0x0008
-#define V4L2_TUNER_SUB_RDS		0x0010
-
-/*  Values for the 'audmode' field */
-#define V4L2_TUNER_MODE_MONO		0x0000
-#define V4L2_TUNER_MODE_STEREO		0x0001
-#define V4L2_TUNER_MODE_LANG2		0x0002
-#define V4L2_TUNER_MODE_SAP		0x0002
-#define V4L2_TUNER_MODE_LANG1		0x0003
-#define V4L2_TUNER_MODE_LANG1_LANG2	0x0004
-
-struct v4l2_frequency {
-	__u32	tuner;
-	__u32	type;	/* enum v4l2_tuner_type */
-	__u32	frequency;
-	__u32	reserved[8];
-};
-
-#define V4L2_BAND_MODULATION_VSB	(1 << 1)
-#define V4L2_BAND_MODULATION_FM		(1 << 2)
-#define V4L2_BAND_MODULATION_AM		(1 << 3)
-
-struct v4l2_frequency_band {
-	__u32	tuner;
-	__u32	type;	/* enum v4l2_tuner_type */
-	__u32	index;
-	__u32	capability;
-	__u32	rangelow;
-	__u32	rangehigh;
-	__u32	modulation;
-	__u32	reserved[9];
-};
-
-struct v4l2_hw_freq_seek {
-	__u32	tuner;
-	__u32	type;	/* enum v4l2_tuner_type */
-	__u32	seek_upward;
-	__u32	wrap_around;
-	__u32	spacing;
-	__u32	rangelow;
-	__u32	rangehigh;
-	__u32	reserved[5];
-};
-
-/*
- *	R D S
- */
-
-struct v4l2_rds_data {
-	__u8 	lsb;
-	__u8 	msb;
-	__u8 	block;
-} __attribute__ ((packed));
-
-#define V4L2_RDS_BLOCK_MSK 	 0x7
-#define V4L2_RDS_BLOCK_A 	 0
-#define V4L2_RDS_BLOCK_B 	 1
-#define V4L2_RDS_BLOCK_C 	 2
-#define V4L2_RDS_BLOCK_D 	 3
-#define V4L2_RDS_BLOCK_C_ALT 	 4
-#define V4L2_RDS_BLOCK_INVALID 	 7
-
-#define V4L2_RDS_BLOCK_CORRECTED 0x40
-#define V4L2_RDS_BLOCK_ERROR 	 0x80
-
-/*
- *	A U D I O
- */
-struct v4l2_audio {
-	__u32	index;
-	__u8	name[32];
-	__u32	capability;
-	__u32	mode;
-	__u32	reserved[2];
-};
-
-/*  Flags for the 'capability' field */
-#define V4L2_AUDCAP_STEREO		0x00001
-#define V4L2_AUDCAP_AVL			0x00002
-
-/*  Flags for the 'mode' field */
-#define V4L2_AUDMODE_AVL		0x00001
-
-struct v4l2_audioout {
-	__u32	index;
-	__u8	name[32];
-	__u32	capability;
-	__u32	mode;
-	__u32	reserved[2];
-};
-
-/*
- *	M P E G   S E R V I C E S
- *
- *	NOTE: EXPERIMENTAL API
- */
-#if 1
-#define V4L2_ENC_IDX_FRAME_I    (0)
-#define V4L2_ENC_IDX_FRAME_P    (1)
-#define V4L2_ENC_IDX_FRAME_B    (2)
-#define V4L2_ENC_IDX_FRAME_MASK (0xf)
-
-struct v4l2_enc_idx_entry {
-	__u64 offset;
-	__u64 pts;
-	__u32 length;
-	__u32 flags;
-	__u32 reserved[2];
-};
-
-#define V4L2_ENC_IDX_ENTRIES (64)
-struct v4l2_enc_idx {
-	__u32 entries;
-	__u32 entries_cap;
-	__u32 reserved[4];
-	struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES];
-};
-
-
-#define V4L2_ENC_CMD_START      (0)
-#define V4L2_ENC_CMD_STOP       (1)
-#define V4L2_ENC_CMD_PAUSE      (2)
-#define V4L2_ENC_CMD_RESUME     (3)
-
-/* Flags for V4L2_ENC_CMD_STOP */
-#define V4L2_ENC_CMD_STOP_AT_GOP_END    (1 << 0)
-
-struct v4l2_encoder_cmd {
-	__u32 cmd;
-	__u32 flags;
-	union {
-		struct {
-			__u32 data[8];
-		} raw;
-	};
-};
-
-/* Decoder commands */
-#define V4L2_DEC_CMD_START       (0)
-#define V4L2_DEC_CMD_STOP        (1)
-#define V4L2_DEC_CMD_PAUSE       (2)
-#define V4L2_DEC_CMD_RESUME      (3)
-
-/* Flags for V4L2_DEC_CMD_START */
-#define V4L2_DEC_CMD_START_MUTE_AUDIO	(1 << 0)
-
-/* Flags for V4L2_DEC_CMD_PAUSE */
-#define V4L2_DEC_CMD_PAUSE_TO_BLACK	(1 << 0)
-
-/* Flags for V4L2_DEC_CMD_STOP */
-#define V4L2_DEC_CMD_STOP_TO_BLACK	(1 << 0)
-#define V4L2_DEC_CMD_STOP_IMMEDIATELY	(1 << 1)
-
-/* Play format requirements (returned by the driver): */
-
-/* The decoder has no special format requirements */
-#define V4L2_DEC_START_FMT_NONE		(0)
-/* The decoder requires full GOPs */
-#define V4L2_DEC_START_FMT_GOP		(1)
-
-/* The structure must be zeroed before use by the application
-   This ensures it can be extended safely in the future. */
-struct v4l2_decoder_cmd {
-	__u32 cmd;
-	__u32 flags;
-	union {
-		struct {
-			__u64 pts;
-		} stop;
-
-		struct {
-			/* 0 or 1000 specifies normal speed,
-			   1 specifies forward single stepping,
-			   -1 specifies backward single stepping,
-			   >1: playback at speed/1000 of the normal speed,
-			   <-1: reverse playback at (-speed/1000) of the normal speed. */
-			__s32 speed;
-			__u32 format;
-		} start;
-
-		struct {
-			__u32 data[16];
-		} raw;
-	};
-};
-#endif
-
-
-/*
- *	D A T A   S E R V I C E S   ( V B I )
- *
- *	Data services API by Michael Schimek
- */
-
-/* Raw VBI */
-struct v4l2_vbi_format {
-	__u32	sampling_rate;		/* in 1 Hz */
-	__u32	offset;
-	__u32	samples_per_line;
-	__u32	sample_format;		/* V4L2_PIX_FMT_* */
-	__s32	start[2];
-	__u32	count[2];
-	__u32	flags;			/* V4L2_VBI_* */
-	__u32	reserved[2];		/* must be zero */
-};
-
-/*  VBI flags  */
-#define V4L2_VBI_UNSYNC		(1 << 0)
-#define V4L2_VBI_INTERLACED	(1 << 1)
-
-/* ITU-R start lines for each field */
-#define V4L2_VBI_ITU_525_F1_START (1)
-#define V4L2_VBI_ITU_525_F2_START (264)
-#define V4L2_VBI_ITU_625_F1_START (1)
-#define V4L2_VBI_ITU_625_F2_START (314)
-
-/* Sliced VBI
- *
- *    This implements is a proposal V4L2 API to allow SLICED VBI
- * required for some hardware encoders. It should change without
- * notice in the definitive implementation.
- */
-
-struct v4l2_sliced_vbi_format {
-	__u16   service_set;
-	/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
-	   service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
-				 (equals frame lines 313-336 for 625 line video
-				  standards, 263-286 for 525 line standards) */
-	__u16   service_lines[2][24];
-	__u32   io_size;
-	__u32   reserved[2];            /* must be zero */
-};
-
-/* Teletext World System Teletext
-   (WST), defined on ITU-R BT.653-2 */
-#define V4L2_SLICED_TELETEXT_B          (0x0001)
-/* Video Program System, defined on ETS 300 231*/
-#define V4L2_SLICED_VPS                 (0x0400)
-/* Closed Caption, defined on EIA-608 */
-#define V4L2_SLICED_CAPTION_525         (0x1000)
-/* Wide Screen System, defined on ITU-R BT1119.1 */
-#define V4L2_SLICED_WSS_625             (0x4000)
-
-#define V4L2_SLICED_VBI_525             (V4L2_SLICED_CAPTION_525)
-#define V4L2_SLICED_VBI_625             (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
-
-struct v4l2_sliced_vbi_cap {
-	__u16   service_set;
-	/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
-	   service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
-				 (equals frame lines 313-336 for 625 line video
-				  standards, 263-286 for 525 line standards) */
-	__u16   service_lines[2][24];
-	__u32	type;		/* enum v4l2_buf_type */
-	__u32   reserved[3];    /* must be 0 */
-};
-
-struct v4l2_sliced_vbi_data {
-	__u32   id;
-	__u32   field;          /* 0: first field, 1: second field */
-	__u32   line;           /* 1-23 */
-	__u32   reserved;       /* must be 0 */
-	__u8    data[48];
-};
-
-/*
- * Sliced VBI data inserted into MPEG Streams
- */
-
-/*
- * V4L2_MPEG_STREAM_VBI_FMT_IVTV:
- *
- * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an
- * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI
- * data
- *
- * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header
- * definitions are not included here.  See the MPEG-2 specifications for details
- * on these headers.
- */
-
-/* Line type IDs */
-#define V4L2_MPEG_VBI_IVTV_TELETEXT_B     (1)
-#define V4L2_MPEG_VBI_IVTV_CAPTION_525    (4)
-#define V4L2_MPEG_VBI_IVTV_WSS_625        (5)
-#define V4L2_MPEG_VBI_IVTV_VPS            (7)
-
-struct v4l2_mpeg_vbi_itv0_line {
-	__u8 id;	/* One of V4L2_MPEG_VBI_IVTV_* above */
-	__u8 data[42];	/* Sliced VBI data for the line */
-} __attribute__ ((packed));
-
-struct v4l2_mpeg_vbi_itv0 {
-	__le32 linemask[2]; /* Bitmasks of VBI service lines present */
-	struct v4l2_mpeg_vbi_itv0_line line[35];
-} __attribute__ ((packed));
-
-struct v4l2_mpeg_vbi_ITV0 {
-	struct v4l2_mpeg_vbi_itv0_line line[36];
-} __attribute__ ((packed));
-
-#define V4L2_MPEG_VBI_IVTV_MAGIC0	"itv0"
-#define V4L2_MPEG_VBI_IVTV_MAGIC1	"ITV0"
-
-struct v4l2_mpeg_vbi_fmt_ivtv {
-	__u8 magic[4];
-	union {
-		struct v4l2_mpeg_vbi_itv0 itv0;
-		struct v4l2_mpeg_vbi_ITV0 ITV0;
-	};
-} __attribute__ ((packed));
-
-/*
- *	A G G R E G A T E   S T R U C T U R E S
- */
-
-/**
- * struct v4l2_plane_pix_format - additional, per-plane format definition
- * @sizeimage:		maximum size in bytes required for data, for which
- *			this plane will be used
- * @bytesperline:	distance in bytes between the leftmost pixels in two
- *			adjacent lines
- */
-struct v4l2_plane_pix_format {
-	__u32		sizeimage;
-	__u32		bytesperline;
-	__u16		reserved[6];
-} __attribute__ ((packed));
-
-/**
- * struct v4l2_pix_format_mplane - multiplanar format definition
- * @width:		image width in pixels
- * @height:		image height in pixels
- * @pixelformat:	little endian four character code (fourcc)
- * @field:		enum v4l2_field; field order (for interlaced video)
- * @colorspace:		enum v4l2_colorspace; supplemental to pixelformat
- * @plane_fmt:		per-plane information
- * @num_planes:		number of planes for this format
- * @flags:		format flags (V4L2_PIX_FMT_FLAG_*)
- * @ycbcr_enc:		enum v4l2_ycbcr_encoding, Y'CbCr encoding
- * @quantization:	enum v4l2_quantization, colorspace quantization
- * @xfer_func:		enum v4l2_xfer_func, colorspace transfer function
- */
-struct v4l2_pix_format_mplane {
-	__u32				width;
-	__u32				height;
-	__u32				pixelformat;
-	__u32				field;
-	__u32				colorspace;
-
-	struct v4l2_plane_pix_format	plane_fmt[VIDEO_MAX_PLANES];
-	__u8				num_planes;
-	__u8				flags;
-	__u8				ycbcr_enc;
-	__u8				quantization;
-	__u8				xfer_func;
-	__u8				reserved[7];
-} __attribute__ ((packed));
-
-/**
- * struct v4l2_sdr_format - SDR format definition
- * @pixelformat:	little endian four character code (fourcc)
- * @buffersize:		maximum size in bytes required for data
- */
-struct v4l2_sdr_format {
-	__u32				pixelformat;
-	__u32				buffersize;
-	__u8				reserved[24];
-} __attribute__ ((packed));
-
-/**
- * struct v4l2_format - stream data format
- * @type:	enum v4l2_buf_type; type of the data stream
- * @pix:	definition of an image format
- * @pix_mp:	definition of a multiplanar image format
- * @win:	definition of an overlaid image
- * @vbi:	raw VBI capture or output parameters
- * @sliced:	sliced VBI capture or output parameters
- * @raw_data:	placeholder for future extensions and custom formats
- */
-struct v4l2_format {
-	__u32	 type;
-	union {
-		struct v4l2_pix_format		pix;     /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
-		struct v4l2_pix_format_mplane	pix_mp;  /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
-		struct v4l2_window		win;     /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
-		struct v4l2_vbi_format		vbi;     /* V4L2_BUF_TYPE_VBI_CAPTURE */
-		struct v4l2_sliced_vbi_format	sliced;  /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
-		struct v4l2_sdr_format		sdr;     /* V4L2_BUF_TYPE_SDR_CAPTURE */
-		__u8	raw_data[200];                   /* user-defined */
-	} fmt;
-};
-
-/*	Stream type-dependent parameters
- */
-struct v4l2_streamparm {
-	__u32	 type;			/* enum v4l2_buf_type */
-	union {
-		struct v4l2_captureparm	capture;
-		struct v4l2_outputparm	output;
-		__u8	raw_data[200];  /* user-defined */
-	} parm;
-};
-
-/*
- *	E V E N T S
- */
-
-#define V4L2_EVENT_ALL				0
-#define V4L2_EVENT_VSYNC			1
-#define V4L2_EVENT_EOS				2
-#define V4L2_EVENT_CTRL				3
-#define V4L2_EVENT_FRAME_SYNC			4
-#define V4L2_EVENT_SOURCE_CHANGE		5
-#define V4L2_EVENT_MOTION_DET			6
-#define V4L2_EVENT_PRIVATE_START		0x08000000
-
-/* Payload for V4L2_EVENT_VSYNC */
-struct v4l2_event_vsync {
-	/* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
-	__u8 field;
-} __attribute__ ((packed));
-
-/* Payload for V4L2_EVENT_CTRL */
-#define V4L2_EVENT_CTRL_CH_VALUE		(1 << 0)
-#define V4L2_EVENT_CTRL_CH_FLAGS		(1 << 1)
-#define V4L2_EVENT_CTRL_CH_RANGE		(1 << 2)
-
-struct v4l2_event_ctrl {
-	__u32 changes;
-	__u32 type;
-	union {
-		__s32 value;
-		__s64 value64;
-	};
-	__u32 flags;
-	__s32 minimum;
-	__s32 maximum;
-	__s32 step;
-	__s32 default_value;
-};
-
-struct v4l2_event_frame_sync {
-	__u32 frame_sequence;
-};
-
-#define V4L2_EVENT_SRC_CH_RESOLUTION		(1 << 0)
-
-struct v4l2_event_src_change {
-	__u32 changes;
-};
-
-#define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ	(1 << 0)
-
-/**
- * struct v4l2_event_motion_det - motion detection event
- * @flags:             if V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ is set, then the
- *                     frame_sequence field is valid.
- * @frame_sequence:    the frame sequence number associated with this event.
- * @region_mask:       which regions detected motion.
- */
-struct v4l2_event_motion_det {
-	__u32 flags;
-	__u32 frame_sequence;
-	__u32 region_mask;
-};
-
-struct v4l2_event {
-	__u32				type;
-	union {
-		struct v4l2_event_vsync		vsync;
-		struct v4l2_event_ctrl		ctrl;
-		struct v4l2_event_frame_sync	frame_sync;
-		struct v4l2_event_src_change	src_change;
-		struct v4l2_event_motion_det	motion_det;
-		__u8				data[64];
-	} u;
-	__u32				pending;
-	__u32				sequence;
-	struct timespec			timestamp;
-	__u32				id;
-	__u32				reserved[8];
-};
-
-#define V4L2_EVENT_SUB_FL_SEND_INITIAL		(1 << 0)
-#define V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK	(1 << 1)
-
-struct v4l2_event_subscription {
-	__u32				type;
-	__u32				id;
-	__u32				flags;
-	__u32				reserved[5];
-};
-
-/*
- *	A D V A N C E D   D E B U G G I N G
- *
- *	NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS!
- *	FOR DEBUGGING, TESTING AND INTERNAL USE ONLY!
- */
-
-/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */
-
-#define V4L2_CHIP_MATCH_BRIDGE      0  /* Match against chip ID on the bridge (0 for the bridge) */
-#define V4L2_CHIP_MATCH_SUBDEV      4  /* Match against subdev index */
-
-/* The following four defines are no longer in use */
-#define V4L2_CHIP_MATCH_HOST V4L2_CHIP_MATCH_BRIDGE
-#define V4L2_CHIP_MATCH_I2C_DRIVER  1  /* Match against I2C driver name */
-#define V4L2_CHIP_MATCH_I2C_ADDR    2  /* Match against I2C 7-bit address */
-#define V4L2_CHIP_MATCH_AC97        3  /* Match against ancillary AC97 chip */
-
-struct v4l2_dbg_match {
-	__u32 type; /* Match type */
-	union {     /* Match this chip, meaning determined by type */
-		__u32 addr;
-		char name[32];
-	};
-} __attribute__ ((packed));
-
-struct v4l2_dbg_register {
-	struct v4l2_dbg_match match;
-	__u32 size;	/* register size in bytes */
-	__u64 reg;
-	__u64 val;
-} __attribute__ ((packed));
-
-#define V4L2_CHIP_FL_READABLE (1 << 0)
-#define V4L2_CHIP_FL_WRITABLE (1 << 1)
-
-/* VIDIOC_DBG_G_CHIP_INFO */
-struct v4l2_dbg_chip_info {
-	struct v4l2_dbg_match match;
-	char name[32];
-	__u32 flags;
-	__u32 reserved[32];
-} __attribute__ ((packed));
-
-/**
- * struct v4l2_create_buffers - VIDIOC_CREATE_BUFS argument
- * @index:	on return, index of the first created buffer
- * @count:	entry: number of requested buffers,
- *		return: number of created buffers
- * @memory:	enum v4l2_memory; buffer memory type
- * @format:	frame format, for which buffers are requested
- * @reserved:	future extensions
- */
-struct v4l2_create_buffers {
-	__u32			index;
-	__u32			count;
-	__u32			memory;
-	struct v4l2_format	format;
-	__u32			reserved[8];
-};
-
-/*
- *	I O C T L   C O D E S   F O R   V I D E O   D E V I C E S
- *
- */
-#define VIDIOC_QUERYCAP		 _IOR('V',  0, struct v4l2_capability)
-#define VIDIOC_RESERVED		  _IO('V',  1)
-#define VIDIOC_ENUM_FMT         _IOWR('V',  2, struct v4l2_fmtdesc)
-#define VIDIOC_G_FMT		_IOWR('V',  4, struct v4l2_format)
-#define VIDIOC_S_FMT		_IOWR('V',  5, struct v4l2_format)
-#define VIDIOC_REQBUFS		_IOWR('V',  8, struct v4l2_requestbuffers)
-#define VIDIOC_QUERYBUF		_IOWR('V',  9, struct v4l2_buffer)
-#define VIDIOC_G_FBUF		 _IOR('V', 10, struct v4l2_framebuffer)
-#define VIDIOC_S_FBUF		 _IOW('V', 11, struct v4l2_framebuffer)
-#define VIDIOC_OVERLAY		 _IOW('V', 14, int)
-#define VIDIOC_QBUF		_IOWR('V', 15, struct v4l2_buffer)
-#define VIDIOC_EXPBUF		_IOWR('V', 16, struct v4l2_exportbuffer)
-#define VIDIOC_DQBUF		_IOWR('V', 17, struct v4l2_buffer)
-#define VIDIOC_STREAMON		 _IOW('V', 18, int)
-#define VIDIOC_STREAMOFF	 _IOW('V', 19, int)
-#define VIDIOC_G_PARM		_IOWR('V', 21, struct v4l2_streamparm)
-#define VIDIOC_S_PARM		_IOWR('V', 22, struct v4l2_streamparm)
-#define VIDIOC_G_STD		 _IOR('V', 23, v4l2_std_id)
-#define VIDIOC_S_STD		 _IOW('V', 24, v4l2_std_id)
-#define VIDIOC_ENUMSTD		_IOWR('V', 25, struct v4l2_standard)
-#define VIDIOC_ENUMINPUT	_IOWR('V', 26, struct v4l2_input)
-#define VIDIOC_G_CTRL		_IOWR('V', 27, struct v4l2_control)
-#define VIDIOC_S_CTRL		_IOWR('V', 28, struct v4l2_control)
-#define VIDIOC_G_TUNER		_IOWR('V', 29, struct v4l2_tuner)
-#define VIDIOC_S_TUNER		 _IOW('V', 30, struct v4l2_tuner)
-#define VIDIOC_G_AUDIO		 _IOR('V', 33, struct v4l2_audio)
-#define VIDIOC_S_AUDIO		 _IOW('V', 34, struct v4l2_audio)
-#define VIDIOC_QUERYCTRL	_IOWR('V', 36, struct v4l2_queryctrl)
-#define VIDIOC_QUERYMENU	_IOWR('V', 37, struct v4l2_querymenu)
-#define VIDIOC_G_INPUT		 _IOR('V', 38, int)
-#define VIDIOC_S_INPUT		_IOWR('V', 39, int)
-#define VIDIOC_G_EDID		_IOWR('V', 40, struct v4l2_edid)
-#define VIDIOC_S_EDID		_IOWR('V', 41, struct v4l2_edid)
-#define VIDIOC_G_OUTPUT		 _IOR('V', 46, int)
-#define VIDIOC_S_OUTPUT		_IOWR('V', 47, int)
-#define VIDIOC_ENUMOUTPUT	_IOWR('V', 48, struct v4l2_output)
-#define VIDIOC_G_AUDOUT		 _IOR('V', 49, struct v4l2_audioout)
-#define VIDIOC_S_AUDOUT		 _IOW('V', 50, struct v4l2_audioout)
-#define VIDIOC_G_MODULATOR	_IOWR('V', 54, struct v4l2_modulator)
-#define VIDIOC_S_MODULATOR	 _IOW('V', 55, struct v4l2_modulator)
-#define VIDIOC_G_FREQUENCY	_IOWR('V', 56, struct v4l2_frequency)
-#define VIDIOC_S_FREQUENCY	 _IOW('V', 57, struct v4l2_frequency)
-#define VIDIOC_CROPCAP		_IOWR('V', 58, struct v4l2_cropcap)
-#define VIDIOC_G_CROP		_IOWR('V', 59, struct v4l2_crop)
-#define VIDIOC_S_CROP		 _IOW('V', 60, struct v4l2_crop)
-#define VIDIOC_G_JPEGCOMP	 _IOR('V', 61, struct v4l2_jpegcompression)
-#define VIDIOC_S_JPEGCOMP	 _IOW('V', 62, struct v4l2_jpegcompression)
-#define VIDIOC_QUERYSTD      	 _IOR('V', 63, v4l2_std_id)
-#define VIDIOC_TRY_FMT      	_IOWR('V', 64, struct v4l2_format)
-#define VIDIOC_ENUMAUDIO	_IOWR('V', 65, struct v4l2_audio)
-#define VIDIOC_ENUMAUDOUT	_IOWR('V', 66, struct v4l2_audioout)
-#define VIDIOC_G_PRIORITY	 _IOR('V', 67, __u32) /* enum v4l2_priority */
-#define VIDIOC_S_PRIORITY	 _IOW('V', 68, __u32) /* enum v4l2_priority */
-#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap)
-#define VIDIOC_LOG_STATUS         _IO('V', 70)
-#define VIDIOC_G_EXT_CTRLS	_IOWR('V', 71, struct v4l2_ext_controls)
-#define VIDIOC_S_EXT_CTRLS	_IOWR('V', 72, struct v4l2_ext_controls)
-#define VIDIOC_TRY_EXT_CTRLS	_IOWR('V', 73, struct v4l2_ext_controls)
-#define VIDIOC_ENUM_FRAMESIZES	_IOWR('V', 74, struct v4l2_frmsizeenum)
-#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum)
-#define VIDIOC_G_ENC_INDEX       _IOR('V', 76, struct v4l2_enc_idx)
-#define VIDIOC_ENCODER_CMD      _IOWR('V', 77, struct v4l2_encoder_cmd)
-#define VIDIOC_TRY_ENCODER_CMD  _IOWR('V', 78, struct v4l2_encoder_cmd)
-
-/* Experimental, meant for debugging, testing and internal use.
-   Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
-   You must be root to use these ioctls. Never use these in applications! */
-#define	VIDIOC_DBG_S_REGISTER 	 _IOW('V', 79, struct v4l2_dbg_register)
-#define	VIDIOC_DBG_G_REGISTER 	_IOWR('V', 80, struct v4l2_dbg_register)
-
-#define VIDIOC_S_HW_FREQ_SEEK	 _IOW('V', 82, struct v4l2_hw_freq_seek)
-
-#define	VIDIOC_S_DV_TIMINGS	_IOWR('V', 87, struct v4l2_dv_timings)
-#define	VIDIOC_G_DV_TIMINGS	_IOWR('V', 88, struct v4l2_dv_timings)
-#define	VIDIOC_DQEVENT		 _IOR('V', 89, struct v4l2_event)
-#define	VIDIOC_SUBSCRIBE_EVENT	 _IOW('V', 90, struct v4l2_event_subscription)
-#define	VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
-
-/* Experimental, the below two ioctls may change over the next couple of kernel
-   versions */
-#define VIDIOC_CREATE_BUFS	_IOWR('V', 92, struct v4l2_create_buffers)
-#define VIDIOC_PREPARE_BUF	_IOWR('V', 93, struct v4l2_buffer)
-
-/* Experimental selection API */
-#define VIDIOC_G_SELECTION	_IOWR('V', 94, struct v4l2_selection)
-#define VIDIOC_S_SELECTION	_IOWR('V', 95, struct v4l2_selection)
-
-/* Experimental, these two ioctls may change over the next couple of kernel
-   versions. */
-#define VIDIOC_DECODER_CMD	_IOWR('V', 96, struct v4l2_decoder_cmd)
-#define VIDIOC_TRY_DECODER_CMD	_IOWR('V', 97, struct v4l2_decoder_cmd)
-
-/* Experimental, these three ioctls may change over the next couple of kernel
-   versions. */
-#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 98, struct v4l2_enum_dv_timings)
-#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 99, struct v4l2_dv_timings)
-#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 100, struct v4l2_dv_timings_cap)
-
-/* Experimental, this ioctl may change over the next couple of kernel
-   versions. */
-#define VIDIOC_ENUM_FREQ_BANDS	_IOWR('V', 101, struct v4l2_frequency_band)
-
-/* Experimental, meant for debugging, testing and internal use.
-   Never use these in applications! */
-#define VIDIOC_DBG_G_CHIP_INFO  _IOWR('V', 102, struct v4l2_dbg_chip_info)
-
-#define VIDIOC_QUERY_EXT_CTRL	_IOWR('V', 103, struct v4l2_query_ext_ctrl)
-
-/* Reminder: when adding new ioctls please add support for them to
-   drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */
-
-#define BASE_VIDIOC_PRIVATE	192		/* 192-255 are private */
-
-#endif /* _UAPI__LINUX_VIDEODEV2_H */
diff --git a/vda/videodev2_custom.h b/vda/videodev2_custom.h
new file mode 100644
index 0000000..258b8fe
--- /dev/null
+++ b/vda/videodev2_custom.h
@@ -0,0 +1,111 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note:
+// This header file contains some new structs and definitions extracted from the ChromeOS
+// version of videodev2.h which are not yet upstreamed to Linux mainline. It should be removed
+// TODO(johnylin): remove this file once it is upstreamed.
+
+#ifndef VIDEODEV2_CUSTOM_H_
+#define VIDEODEV2_CUSTOM_H_
+
+#include "v4l2_controls_custom.h"
+
+#include <linux/videodev2.h>
+
+/* compressed formats */
+#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
+#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frames */
+#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frames */
+
+struct v4l2_ext_control_custom {
+  __u32 id;
+  __u32 size;
+  __u32 reserved2[1];
+  union {
+    __s32 value;
+    __s64 value64;
+    char __user *string;
+    __u8 __user *p_u8;
+    __u16 __user *p_u16;
+    __u32 __user *p_u32;
+    struct v4l2_ctrl_h264_sps __user *p_h264_sps;
+    struct v4l2_ctrl_h264_pps __user *p_h264_pps;
+    struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scal_mtrx;
+    struct v4l2_ctrl_h264_slice_param __user *p_h264_slice_param;
+    struct v4l2_ctrl_h264_decode_param __user *p_h264_decode_param;
+    struct v4l2_ctrl_vp8_frame_hdr __user *p_vp8_frame_hdr;
+    struct v4l2_ctrl_vp9_frame_hdr __user *p_vp9_frame_hdr;
+    struct v4l2_ctrl_vp9_decode_param __user *p_vp9_decode_param;
+    struct v4l2_ctrl_vp9_entropy __user *p_vp9_entropy;
+    void __user *ptr;
+  };
+} __attribute__ ((packed));
+
+struct v4l2_ext_controls_custom {
+  union {
+    __u32 ctrl_class;
+    __u32 config_store;
+  };
+  __u32 count;
+  __u32 error_idx;
+  __u32 reserved[2];
+  struct v4l2_ext_control_custom *controls;
+};
+
+/**
+ * struct v4l2_buffer - video buffer info
+ * @index:	id number of the buffer
+ * @type:	enum v4l2_buf_type; buffer type (type == *_MPLANE for
+ *		multiplanar buffers);
+ * @bytesused:	number of bytes occupied by data in the buffer (payload);
+ *		unused (set to 0) for multiplanar buffers
+ * @flags:	buffer informational flags
+ * @field:	enum v4l2_field; field order of the image in the buffer
+ * @timestamp:	frame timestamp
+ * @timecode:	frame timecode
+ * @sequence:	sequence count of this frame
+ * @memory:	enum v4l2_memory; the method, in which the actual video data is
+ *		passed
+ * @offset:	for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
+ *		offset from the start of the device memory for this plane,
+ *		(or a "cookie" that should be passed to mmap() as offset)
+ * @userptr:	for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
+ *		a userspace pointer pointing to this buffer
+ * @fd:		for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
+ *		a userspace file descriptor associated with this buffer
+ * @planes:	for multiplanar buffers; userspace pointer to the array of plane
+ *		info structs for this buffer
+ * @length:	size in bytes of the buffer (NOT its payload) for single-plane
+ *		buffers (when type != *_MPLANE); number of elements in the
+ *		planes array for multi-plane buffers
+ * @config_store: this buffer should use this configuration store
+ *
+ * Contains data exchanged by application and driver using one of the Streaming
+ * I/O methods.
+ */
+struct v4l2_buffer_custom {
+  __u32	index;
+  __u32	type;
+  __u32	bytesused;
+  __u32	flags;
+  __u32	field;
+  struct timeval timestamp;
+  struct v4l2_timecode timecode;
+  __u32 sequence;
+
+  /* memory location */
+  __u32 memory;
+  union {
+    __u32 offset;
+    unsigned long userptr;
+    struct v4l2_plane *planes;
+    __s32 fd;
+  } m;
+  __u32	length;
+  __u32	config_store;
+  __u32 reserved;
+};
+
+#endif  // VIDEODEV2_CUSTOM_H_
diff --git a/vda/vp8_bool_decoder.cc b/vda/vp8_bool_decoder.cc
index e42aef0..68f06d0 100644
--- a/vda/vp8_bool_decoder.cc
+++ b/vda/vp8_bool_decoder.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 //
+// Note: ported from Chromium commit head: 9b6f429
 
 /*
  * Copyright (c) 2010, The WebM Project authors. All rights reserved.
@@ -104,7 +105,7 @@
   int shift = VP8_BD_VALUE_BIT - CHAR_BIT - (count_ + CHAR_BIT);
   size_t bytes_left = user_buffer_end_ - user_buffer_;
   size_t bits_left = bytes_left * CHAR_BIT;
-  int x = static_cast<int>(shift + CHAR_BIT - bits_left);
+  int x = shift + CHAR_BIT - static_cast<int>(bits_left);
   int loop_end = 0;
 
   if (x >= 0) {
@@ -140,7 +141,7 @@
   size_t shift = kVp8Norm[range_];
   range_ <<= shift;
   value_ <<= shift;
-  count_ -= shift;
+  count_ -= static_cast<int>(shift);
 
   DCHECK_EQ(1U, (range_ >> 7));  // In the range [128, 255].
 
diff --git a/vda/vp8_bool_decoder.h b/vda/vp8_bool_decoder.h
index 445fd68..4b8e3a5 100644
--- a/vda/vp8_bool_decoder.h
+++ b/vda/vp8_bool_decoder.h
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 //
+// Note: ported from Chromium commit head: 1323b9c
 
 /*
  * Copyright (c) 2010, The WebM Project authors. All rights reserved.
diff --git a/vda/vp8_decoder.cc b/vda/vp8_decoder.cc
index d9ee6e4..cd2d58b 100644
--- a/vda/vp8_decoder.cc
+++ b/vda/vp8_decoder.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 7441087
 
 #include "vp8_decoder.h"
 
@@ -93,6 +94,7 @@
   if (!curr_pic_)
     return kRanOutOfSurfaces;
 
+  curr_pic_->visible_rect = Rect(pic_size_);
   if (!DecodeAndOutputCurrentFrame())
     return kDecodeError;
 
diff --git a/vda/vp8_decoder.h b/vda/vp8_decoder.h
index 653da40..58211f6 100644
--- a/vda/vp8_decoder.h
+++ b/vda/vp8_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
 
 #ifndef VP8_DECODER_H_
 #define VP8_DECODER_H_
diff --git a/vda/vp8_parser.cc b/vda/vp8_parser.cc
index 46eb669..5367545 100644
--- a/vda/vp8_parser.cc
+++ b/vda/vp8_parser.cc
@@ -4,6 +4,8 @@
 //
 // This file contains an implementation of a VP8 raw stream parser,
 // as defined in RFC 6386.
+// Note: ported from Chromium commit head: 2de6929
+
 
 #include "base/logging.h"
 #include "vp8_parser.h"
@@ -51,8 +53,7 @@
 Vp8Parser::Vp8Parser() : stream_(nullptr), bytes_left_(0) {
 }
 
-Vp8Parser::~Vp8Parser() {
-}
+Vp8Parser::~Vp8Parser() = default;
 
 bool Vp8Parser::ParseFrame(const uint8_t* ptr,
                            size_t frame_size,
diff --git a/vda/vp8_parser.h b/vda/vp8_parser.h
index ef9326c..c75e6cc 100644
--- a/vda/vp8_parser.h
+++ b/vda/vp8_parser.h
@@ -4,6 +4,7 @@
 //
 // This file contains an implementation of a VP8 raw stream parser,
 // as defined in RFC 6386.
+// Note: ported from Chromium commit head: 1323b9c
 
 #ifndef VP8_PARSER_H_
 #define VP8_PARSER_H_
diff --git a/vda/vp8_picture.cc b/vda/vp8_picture.cc
index 59938aa..b9030ce 100644
--- a/vda/vp8_picture.cc
+++ b/vda/vp8_picture.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 6e70beb
 
 #include "vp8_picture.h"
 
diff --git a/vda/vp8_picture.h b/vda/vp8_picture.h
index eb253a4..bd04ec7 100644
--- a/vda/vp8_picture.h
+++ b/vda/vp8_picture.h
@@ -1,25 +1,30 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 70340ce
 
 #ifndef VP8_PICTURE_H_
 #define VP8_PICTURE_H_
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "rect.h"
 
 namespace media {
 
 class V4L2VP8Picture;
 
-class VP8Picture : public base::RefCounted<VP8Picture> {
+class VP8Picture : public base::RefCountedThreadSafe<VP8Picture> {
  public:
   VP8Picture();
 
   virtual V4L2VP8Picture* AsV4L2VP8Picture();
 
+  // The visible size of picture.
+  Rect visible_rect;
+
  protected:
-  friend class base::RefCounted<VP8Picture>;
+  friend class base::RefCountedThreadSafe<VP8Picture>;
   virtual ~VP8Picture();
 
   DISALLOW_COPY_AND_ASSIGN(VP8Picture);
diff --git a/vda/vp9_bool_decoder.cc b/vda/vp9_bool_decoder.cc
index bf227b2..1d2b6f4 100644
--- a/vda/vp9_bool_decoder.cc
+++ b/vda/vp9_bool_decoder.cc
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
 
 #include "vp9_bool_decoder.h"
 
@@ -35,9 +36,9 @@
 };
 }  // namespace
 
-Vp9BoolDecoder::Vp9BoolDecoder() {}
+Vp9BoolDecoder::Vp9BoolDecoder() = default;
 
-Vp9BoolDecoder::~Vp9BoolDecoder() {}
+Vp9BoolDecoder::~Vp9BoolDecoder() = default;
 
 // 9.2.1 Initialization process for Boolean decoder
 bool Vp9BoolDecoder::Initialize(const uint8_t* data, size_t size) {
diff --git a/vda/vp9_bool_decoder.h b/vda/vp9_bool_decoder.h
index 3862e51..50c386f 100644
--- a/vda/vp9_bool_decoder.h
+++ b/vda/vp9_bool_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_BOOL_DECODER_H_
 #define VP9_BOOL_DECODER_H_
diff --git a/vda/vp9_compressed_header_parser.cc b/vda/vp9_compressed_header_parser.cc
index d5ee772..524472f 100644
--- a/vda/vp9_compressed_header_parser.cc
+++ b/vda/vp9_compressed_header_parser.cc
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "vp9_compressed_header_parser.h"
 
@@ -59,7 +60,7 @@
 
 }  // namespace
 
-Vp9CompressedHeaderParser::Vp9CompressedHeaderParser() {}
+Vp9CompressedHeaderParser::Vp9CompressedHeaderParser() = default;
 
 // 6.3.1 Tx mode syntax
 void Vp9CompressedHeaderParser::ReadTxMode(Vp9FrameHeader* fhdr) {
diff --git a/vda/vp9_compressed_header_parser.h b/vda/vp9_compressed_header_parser.h
index 032a880..5f5ff56 100644
--- a/vda/vp9_compressed_header_parser.h
+++ b/vda/vp9_compressed_header_parser.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_COMPRESSED_HEADER_PARSER_H_
 #define VP9_COMPRESSED_HEADER_PARSER_H_
diff --git a/vda/vp9_decoder.cc b/vda/vp9_decoder.cc
index 2ea6d16..d8af03d 100644
--- a/vda/vp9_decoder.cc
+++ b/vda/vp9_decoder.cc
@@ -1,7 +1,9 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 7441087
 
+#include "rect.h"
 #include "vp9_decoder.h"
 
 #include <memory>
@@ -136,6 +138,18 @@
     if (!pic)
       return kRanOutOfSurfaces;
 
+    Rect new_render_rect(curr_frame_hdr_->render_width,
+                         curr_frame_hdr_->render_height);
+    // For safety, check the validity of render size or leave it as (0, 0).
+    if (!Rect(pic_size_).Contains(new_render_rect)) {
+      DVLOG(1) << "Render size exceeds picture size. render size: "
+               << new_render_rect.ToString()
+               << ", picture size: " << pic_size_.ToString();
+      new_render_rect = Rect();
+    }
+    DVLOG(2) << "Render resolution: " << new_render_rect.ToString();
+
+    pic->visible_rect = new_render_rect;
     pic->frame_hdr.reset(curr_frame_hdr_.release());
 
     if (!DecodeAndOutputPicture(pic)) {
diff --git a/vda/vp9_decoder.h b/vda/vp9_decoder.h
index 77a8d88..cdbcd69 100644
--- a/vda/vp9_decoder.h
+++ b/vda/vp9_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 77118c9
 
 #ifndef VP9_DECODER_H_
 #define VP9_DECODER_H_
diff --git a/vda/vp9_parser.cc b/vda/vp9_parser.cc
index de51c7b..bbd90b9 100644
--- a/vda/vp9_parser.cc
+++ b/vda/vp9_parser.cc
@@ -8,6 +8,7 @@
 //  1 something wrong in bitstream
 //  2 parsing steps
 //  3 parsed values (selected)
+// Note: ported from Chromium commit head: 2de6929
 
 #include "vp9_parser.h"
 
@@ -22,6 +23,133 @@
 
 namespace media {
 
+namespace {
+
+// Coefficients extracted verbatim from "VP9 Bitstream & Decoding Process
+// Specification" Version 0.6, Sec 8.6.1 Dequantization functions, see:
+// https://www.webmproject.org/vp9/#draft-vp9-bitstream-and-decoding-process-specification
+constexpr size_t kQIndexRange = 256;
+// clang-format off
+// libva is the only user of high bit depth VP9 formats and only supports
+// 10 bits per component, see https://github.com/01org/libva/issues/137.
+// TODO(mcasas): Add the 12 bit versions of these tables.
+const int16_t kDcQLookup[][kQIndexRange] = {
+    {
+        4,    8,    8,    9,    10,   11,   12,   12,  13,   14,   15,   16,
+        17,   18,   19,   19,   20,   21,   22,   23,  24,   25,   26,   26,
+        27,   28,   29,   30,   31,   32,   32,   33,  34,   35,   36,   37,
+        38,   38,   39,   40,   41,   42,   43,   43,  44,   45,   46,   47,
+        48,   48,   49,   50,   51,   52,   53,   53,  54,   55,   56,   57,
+        57,   58,   59,   60,   61,   62,   62,   63,  64,   65,   66,   66,
+        67,   68,   69,   70,   70,   71,   72,   73,  74,   74,   75,   76,
+        77,   78,   78,   79,   80,   81,   81,   82,  83,   84,   85,   85,
+        87,   88,   90,   92,   93,   95,   96,   98,  99,   101,  102,  104,
+        105,  107,  108,  110,  111,  113,  114,  116, 117,  118,  120,  121,
+        123,  125,  127,  129,  131,  134,  136,  138, 140,  142,  144,  146,
+        148,  150,  152,  154,  156,  158,  161,  164, 166,  169,  172,  174,
+        177,  180,  182,  185,  187,  190,  192,  195, 199,  202,  205,  208,
+        211,  214,  217,  220,  223,  226,  230,  233, 237,  240,  243,  247,
+        250,  253,  257,  261,  265,  269,  272,  276, 280,  284,  288,  292,
+        296,  300,  304,  309,  313,  317,  322,  326, 330,  335,  340,  344,
+        349,  354,  359,  364,  369,  374,  379,  384, 389,  395,  400,  406,
+        411,  417,  423,  429,  435,  441,  447,  454, 461,  467,  475,  482,
+        489,  497,  505,  513,  522,  530,  539,  549, 559,  569,  579,  590,
+        602,  614,  626,  640,  654,  668,  684,  700, 717,  736,  755,  775,
+        796,  819,  843,  869,  896,  925,  955,  988, 1022, 1058, 1098, 1139,
+        1184, 1232, 1282, 1336,
+    },
+    {
+        4,    9,    10,   13,   15,   17,   20,   22,   25,   28,   31,   34,
+        37,   40,   43,   47,   50,   53,   57,   60,   64,   68,   71,   75,
+        78,   82,   86,   90,   93,   97,   101,  105,  109,  113,  116,  120,
+        124,  128,  132,  136,  140,  143,  147,  151,  155,  159,  163,  166,
+        170,  174,  178,  182,  185,  189,  193,  197,  200,  204,  208,  212,
+        215,  219,  223,  226,  230,  233,  237,  241,  244,  248,  251,  255,
+        259,  262,  266,  269,  273,  276,  280,  283,  287,  290,  293,  297,
+        300,  304,  307,  310,  314,  317,  321,  324,  327,  331,  334,  337,
+        343,  350,  356,  362,  369,  375,  381,  387,  394,  400,  406,  412,
+        418,  424,  430,  436,  442,  448,  454,  460,  466,  472,  478,  484,
+        490,  499,  507,  516,  525,  533,  542,  550,  559,  567,  576,  584,
+        592,  601,  609,  617,  625,  634,  644,  655,  666,  676,  687,  698,
+        708,  718,  729,  739,  749,  759,  770,  782,  795,  807,  819,  831,
+        844,  856,  868,  880,  891,  906,  920,  933,  947,  961,  975,  988,
+        1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105, 1120, 1137, 1153, 1170,
+        1186, 1202, 1218, 1236, 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379,
+        1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537, 1559, 1580, 1601, 1624,
+        1647, 1670, 1692, 1717, 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929,
+        1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197, 2236, 2276, 2319, 2363,
+        2410, 2458, 2508, 2561, 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102,
+        3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953, 4089, 4236, 4394, 4559,
+        4737, 4929, 5130, 5347
+   }
+};
+
+const int16_t kAcQLookup[][kQIndexRange] = {
+    {
+        4,    8,    9,    10,   11,   12,   13,   14,   15,   16,   17,   18,
+        19,   20,   21,   22,   23,   24,   25,   26,   27,   28,   29,   30,
+        31,   32,   33,   34,   35,   36,   37,   38,   39,   40,   41,   42,
+        43,   44,   45,   46,   47,   48,   49,   50,   51,   52,   53,   54,
+        55,   56,   57,   58,   59,   60,   61,   62,   63,   64,   65,   66,
+        67,   68,   69,   70,   71,   72,   73,   74,   75,   76,   77,   78,
+        79,   80,   81,   82,   83,   84,   85,   86,   87,   88,   89,   90,
+        91,   92,   93,   94,   95,   96,   97,   98,   99,   100,  101,  102,
+        104,  106,  108,  110,  112,  114,  116,  118,  120,  122,  124,  126,
+        128,  130,  132,  134,  136,  138,  140,  142,  144,  146,  148,  150,
+        152,  155,  158,  161,  164,  167,  170,  173,  176,  179,  182,  185,
+        188,  191,  194,  197,  200,  203,  207,  211,  215,  219,  223,  227,
+        231,  235,  239,  243,  247,  251,  255,  260,  265,  270,  275,  280,
+        285,  290,  295,  300,  305,  311,  317,  323,  329,  335,  341,  347,
+        353,  359,  366,  373,  380,  387,  394,  401,  408,  416,  424,  432,
+        440,  448,  456,  465,  474,  483,  492,  501,  510,  520,  530,  540,
+        550,  560,  571,  582,  593,  604,  615,  627,  639,  651,  663,  676,
+        689,  702,  715,  729,  743,  757,  771,  786,  801,  816,  832,  848,
+        864,  881,  898,  915,  933,  951,  969,  988,  1007, 1026, 1046, 1066,
+        1087, 1108, 1129, 1151, 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
+        1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567, 1597, 1628, 1660, 1692,
+        1725, 1759, 1793, 1828,
+    },
+    {
+        4,    9,    11,   13,   16,   18,   21,   24,   27,   30,   33,   37,
+        40,   44,   48,   51,   55,   59,   63,   67,   71,   75,   79,   83,
+        88,   92,   96,   100,  105,  109,  114,  118,  122,  127,  131,  136,
+        140,  145,  149,  154,  158,  163,  168,  172,  177,  181,  186,  190,
+        195,  199,  204,  208,  213,  217,  222,  226,  231,  235,  240,  244,
+        249,  253,  258,  262,  267,  271,  275,  280,  284,  289,  293,  297,
+        302,  306,  311,  315,  319,  324,  328,  332,  337,  341,  345,  349,
+        354,  358,  362,  367,  371,  375,  379,  384,  388,  392,  396,  401,
+        409,  417,  425,  433,  441,  449,  458,  466,  474,  482,  490,  498,
+        506,  514,  523,  531,  539,  547,  555,  563,  571,  579,  588,  596,
+        604,  616,  628,  640,  652,  664,  676,  688,  700,  713,  725,  737,
+        749,  761,  773,  785,  797,  809,  825,  841,  857,  873,  889,  905,
+        922,  938,  954,  970,  986,  1002, 1018, 1038, 1058, 1078, 1098, 1118,
+        1138, 1158, 1178, 1198, 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386,
+        1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603, 1631, 1663, 1695, 1727,
+        1759, 1791, 1823, 1859, 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159,
+        2199, 2239, 2283, 2327, 2371, 2415, 2459, 2507, 2555, 2603, 2651, 2703,
+        2755, 2807, 2859, 2915, 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391,
+        3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952, 4028, 4104, 4184, 4264,
+        4348, 4432, 4516, 4604, 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372,
+        5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268, 6388, 6512, 6640, 6768,
+        6900, 7036, 7172, 7312
+   }
+};
+// clang-format on
+
+static_assert(arraysize(kDcQLookup[0]) == arraysize(kAcQLookup[0]),
+              "quantizer lookup arrays of incorrect size");
+
+size_t ClampQ(size_t q) {
+  return std::min(q, kQIndexRange - 1);
+}
+
+int ClampLf(int lf) {
+  const int kMaxLoopFilterLevel = 63;
+  return std::min(std::max(0, lf), kMaxLoopFilterLevel);
+}
+
+}  // namespace
+
 bool Vp9FrameHeader::IsKeyframe() const {
   // When show_existing_frame is true, the frame header does not precede an
   // actual frame to be decoded, so frame_type does not apply (and is not read
@@ -107,7 +235,7 @@
 Vp9Parser::Context::Vp9FrameContextManager::Vp9FrameContextManager()
     : weak_ptr_factory_(this) {}
 
-Vp9Parser::Context::Vp9FrameContextManager::~Vp9FrameContextManager() {}
+Vp9Parser::Context::Vp9FrameContextManager::~Vp9FrameContextManager() = default;
 
 const Vp9FrameContext&
 Vp9Parser::Context::Vp9FrameContextManager::frame_context() const {
@@ -205,7 +333,7 @@
   Reset();
 }
 
-Vp9Parser::~Vp9Parser() {}
+Vp9Parser::~Vp9Parser() = default;
 
 void Vp9Parser::SetStream(const uint8_t* stream, off_t stream_size) {
   DCHECK(stream);
@@ -223,14 +351,106 @@
   context_.Reset();
 }
 
+bool Vp9Parser::ParseUncompressedHeader(const FrameInfo& frame_info,
+                                        Vp9FrameHeader* fhdr,
+                                        Result* result) {
+  memset(&curr_frame_header_, 0, sizeof(curr_frame_header_));
+  *result = kInvalidStream;
+
+  Vp9UncompressedHeaderParser uncompressed_parser(&context_);
+  if (!uncompressed_parser.Parse(frame_info.ptr, frame_info.size,
+                                 &curr_frame_header_)) {
+    *result = kInvalidStream;
+    return true;
+  }
+
+  if (curr_frame_header_.header_size_in_bytes == 0) {
+    // Verify padding bits are zero.
+    for (off_t i = curr_frame_header_.uncompressed_header_size;
+         i < frame_info.size; i++) {
+      if (frame_info.ptr[i] != 0) {
+        DVLOG(1) << "Padding bits are not zeros.";
+        *result = kInvalidStream;
+        return true;
+      }
+    }
+    *fhdr = curr_frame_header_;
+    *result = kOk;
+    return true;
+  }
+  if (curr_frame_header_.uncompressed_header_size +
+          curr_frame_header_.header_size_in_bytes >
+      base::checked_cast<size_t>(frame_info.size)) {
+    DVLOG(1) << "header_size_in_bytes="
+             << curr_frame_header_.header_size_in_bytes
+             << " is larger than bytes left in buffer: "
+             << frame_info.size - curr_frame_header_.uncompressed_header_size;
+    *result = kInvalidStream;
+    return true;
+  }
+
+  return false;
+}
+
+bool Vp9Parser::ParseCompressedHeader(const FrameInfo& frame_info,
+                                      Result* result) {
+  *result = kInvalidStream;
+  size_t frame_context_idx = curr_frame_header_.frame_context_idx;
+  const Context::Vp9FrameContextManager& context_to_load =
+      context_.frame_context_managers_[frame_context_idx];
+  if (!context_to_load.initialized()) {
+    // 8.2 Frame order constraints
+    // must load an initialized set of probabilities.
+    DVLOG(1) << "loading uninitialized frame context, index="
+             << frame_context_idx;
+    *result = kInvalidStream;
+    return true;
+  }
+  if (context_to_load.needs_client_update()) {
+    DVLOG(3) << "waiting frame_context_idx=" << frame_context_idx
+             << " to update";
+    curr_frame_info_ = frame_info;
+    *result = kAwaitingRefresh;
+    return true;
+  }
+  curr_frame_header_.initial_frame_context = curr_frame_header_.frame_context =
+      context_to_load.frame_context();
+
+  Vp9CompressedHeaderParser compressed_parser;
+  if (!compressed_parser.Parse(
+          frame_info.ptr + curr_frame_header_.uncompressed_header_size,
+          curr_frame_header_.header_size_in_bytes, &curr_frame_header_)) {
+    *result = kInvalidStream;
+    return true;
+  }
+
+  if (curr_frame_header_.refresh_frame_context) {
+    // In frame parallel mode, we can refresh the context without decoding
+    // tile data.
+    if (curr_frame_header_.frame_parallel_decoding_mode) {
+      context_.UpdateFrameContext(frame_context_idx,
+                                  curr_frame_header_.frame_context);
+    } else {
+      context_.MarkFrameContextForUpdate(frame_context_idx);
+    }
+  }
+  return false;
+}
+
 Vp9Parser::Result Vp9Parser::ParseNextFrame(Vp9FrameHeader* fhdr) {
   DCHECK(fhdr);
   DVLOG(2) << "ParseNextFrame";
+  FrameInfo frame_info;
+  Result result;
 
   // If |curr_frame_info_| is valid, uncompressed header was parsed into
   // |curr_frame_header_| and we are awaiting context update to proceed with
   // compressed header parsing.
-  if (!curr_frame_info_.IsValid()) {
+  if (curr_frame_info_.IsValid()) {
+    DCHECK(parsing_compressed_header_);
+    frame_info = curr_frame_info_;
+    curr_frame_info_.Reset();
+  } else {
     if (frames_.empty()) {
       // No frames to be decoded, if there is no more stream, request more.
       if (!stream_)
@@ -244,85 +464,26 @@
       }
     }
 
-    curr_frame_info_ = frames_.front();
+    frame_info = frames_.front();
     frames_.pop_front();
 
-    memset(&curr_frame_header_, 0, sizeof(curr_frame_header_));
-
-    Vp9UncompressedHeaderParser uncompressed_parser(&context_);
-    if (!uncompressed_parser.Parse(curr_frame_info_.ptr, curr_frame_info_.size,
-                                   &curr_frame_header_))
-      return kInvalidStream;
-
-    if (curr_frame_header_.header_size_in_bytes == 0) {
-      // Verify padding bits are zero.
-      for (off_t i = curr_frame_header_.uncompressed_header_size;
-           i < curr_frame_info_.size; i++) {
-        if (curr_frame_info_.ptr[i] != 0) {
-          DVLOG(1) << "Padding bits are not zeros.";
-          return kInvalidStream;
-        }
-      }
-      *fhdr = curr_frame_header_;
-      curr_frame_info_.Reset();
-      return kOk;
-    }
-    if (curr_frame_header_.uncompressed_header_size +
-            curr_frame_header_.header_size_in_bytes >
-        base::checked_cast<size_t>(curr_frame_info_.size)) {
-      DVLOG(1) << "header_size_in_bytes="
-               << curr_frame_header_.header_size_in_bytes
-               << " is larger than bytes left in buffer: "
-               << curr_frame_info_.size -
-                      curr_frame_header_.uncompressed_header_size;
-      return kInvalidStream;
-    }
+    if (ParseUncompressedHeader(frame_info, fhdr, &result))
+      return result;
   }
 
   if (parsing_compressed_header_) {
-    size_t frame_context_idx = curr_frame_header_.frame_context_idx;
-    const Context::Vp9FrameContextManager& context_to_load =
-        context_.frame_context_managers_[frame_context_idx];
-    if (!context_to_load.initialized()) {
-      // 8.2 Frame order constraints
-      // must load an initialized set of probabilities.
-      DVLOG(1) << "loading uninitialized frame context, index="
-               << frame_context_idx;
-      return kInvalidStream;
-    }
-    if (context_to_load.needs_client_update()) {
-      DVLOG(3) << "waiting frame_context_idx=" << frame_context_idx
-               << " to update";
-      return kAwaitingRefresh;
-    }
-    curr_frame_header_.initial_frame_context =
-        curr_frame_header_.frame_context = context_to_load.frame_context();
-
-    Vp9CompressedHeaderParser compressed_parser;
-    if (!compressed_parser.Parse(
-            curr_frame_info_.ptr + curr_frame_header_.uncompressed_header_size,
-            curr_frame_header_.header_size_in_bytes, &curr_frame_header_)) {
-      return kInvalidStream;
-    }
-
-    if (curr_frame_header_.refresh_frame_context) {
-      // In frame parallel mode, we can refresh the context without decoding
-      // tile data.
-      if (curr_frame_header_.frame_parallel_decoding_mode) {
-        context_.UpdateFrameContext(frame_context_idx,
-                                    curr_frame_header_.frame_context);
-      } else {
-        context_.MarkFrameContextForUpdate(frame_context_idx);
-      }
+    if (ParseCompressedHeader(frame_info, &result)) {
+      DCHECK(result != kAwaitingRefresh || curr_frame_info_.IsValid());
+      return result;
     }
   }
 
-  SetupSegmentationDequant();
+  if (!SetupSegmentationDequant())
+    return kInvalidStream;
   SetupLoopFilter();
   UpdateSlots();
 
   *fhdr = curr_frame_header_;
-  curr_frame_info_.Reset();
   return kOk;
 }
 
@@ -398,86 +559,6 @@
   return frames;
 }
 
-// 8.6.1
-const size_t QINDEX_RANGE = 256;
-const int16_t kDcQLookup[QINDEX_RANGE] = {
-  4,       8,    8,    9,   10,   11,   12,   12,
-  13,     14,   15,   16,   17,   18,   19,   19,
-  20,     21,   22,   23,   24,   25,   26,   26,
-  27,     28,   29,   30,   31,   32,   32,   33,
-  34,     35,   36,   37,   38,   38,   39,   40,
-  41,     42,   43,   43,   44,   45,   46,   47,
-  48,     48,   49,   50,   51,   52,   53,   53,
-  54,     55,   56,   57,   57,   58,   59,   60,
-  61,     62,   62,   63,   64,   65,   66,   66,
-  67,     68,   69,   70,   70,   71,   72,   73,
-  74,     74,   75,   76,   77,   78,   78,   79,
-  80,     81,   81,   82,   83,   84,   85,   85,
-  87,     88,   90,   92,   93,   95,   96,   98,
-  99,    101,  102,  104,  105,  107,  108,  110,
-  111,   113,  114,  116,  117,  118,  120,  121,
-  123,   125,  127,  129,  131,  134,  136,  138,
-  140,   142,  144,  146,  148,  150,  152,  154,
-  156,   158,  161,  164,  166,  169,  172,  174,
-  177,   180,  182,  185,  187,  190,  192,  195,
-  199,   202,  205,  208,  211,  214,  217,  220,
-  223,   226,  230,  233,  237,  240,  243,  247,
-  250,   253,  257,  261,  265,  269,  272,  276,
-  280,   284,  288,  292,  296,  300,  304,  309,
-  313,   317,  322,  326,  330,  335,  340,  344,
-  349,   354,  359,  364,  369,  374,  379,  384,
-  389,   395,  400,  406,  411,  417,  423,  429,
-  435,   441,  447,  454,  461,  467,  475,  482,
-  489,   497,  505,  513,  522,  530,  539,  549,
-  559,   569,  579,  590,  602,  614,  626,  640,
-  654,   668,  684,  700,  717,  736,  755,  775,
-  796,   819,  843,  869,  896,  925,  955,  988,
-  1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336,
-};
-
-const int16_t kAcQLookup[QINDEX_RANGE] = {
-  4,       8,    9,   10,   11,   12,   13,   14,
-  15,     16,   17,   18,   19,   20,   21,   22,
-  23,     24,   25,   26,   27,   28,   29,   30,
-  31,     32,   33,   34,   35,   36,   37,   38,
-  39,     40,   41,   42,   43,   44,   45,   46,
-  47,     48,   49,   50,   51,   52,   53,   54,
-  55,     56,   57,   58,   59,   60,   61,   62,
-  63,     64,   65,   66,   67,   68,   69,   70,
-  71,     72,   73,   74,   75,   76,   77,   78,
-  79,     80,   81,   82,   83,   84,   85,   86,
-  87,     88,   89,   90,   91,   92,   93,   94,
-  95,     96,   97,   98,   99,  100,  101,  102,
-  104,   106,  108,  110,  112,  114,  116,  118,
-  120,   122,  124,  126,  128,  130,  132,  134,
-  136,   138,  140,  142,  144,  146,  148,  150,
-  152,   155,  158,  161,  164,  167,  170,  173,
-  176,   179,  182,  185,  188,  191,  194,  197,
-  200,   203,  207,  211,  215,  219,  223,  227,
-  231,   235,  239,  243,  247,  251,  255,  260,
-  265,   270,  275,  280,  285,  290,  295,  300,
-  305,   311,  317,  323,  329,  335,  341,  347,
-  353,   359,  366,  373,  380,  387,  394,  401,
-  408,   416,  424,  432,  440,  448,  456,  465,
-  474,   483,  492,  501,  510,  520,  530,  540,
-  550,   560,  571,  582,  593,  604,  615,  627,
-  639,   651,  663,  676,  689,  702,  715,  729,
-  743,   757,  771,  786,  801,  816,  832,  848,
-  864,   881,  898,  915,  933,  951,  969,  988,
-  1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151,
-  1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
-  1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567,
-  1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
-};
-
-static_assert(arraysize(kDcQLookup) == arraysize(kAcQLookup),
-              "quantizer lookup arrays of incorrect size");
-
-static size_t ClampQ(size_t q) {
-  return std::min(std::max(static_cast<size_t>(0), q),
-                  arraysize(kDcQLookup) - 1);
-}
-
 // 8.6.1 Dequantization functions
 size_t Vp9Parser::GetQIndex(const Vp9QuantizationParams& quant,
                             size_t segid) const {
@@ -497,40 +578,40 @@
 }
 
 // 8.6.1 Dequantization functions
-void Vp9Parser::SetupSegmentationDequant() {
+bool Vp9Parser::SetupSegmentationDequant() {
   const Vp9QuantizationParams& quant = curr_frame_header_.quant_params;
   Vp9SegmentationParams& segmentation = context_.segmentation_;
 
-  DLOG_IF(ERROR, curr_frame_header_.bit_depth > 8)
-      << "bit_depth > 8 is not supported "
-         "yet, kDcQLookup and kAcQLookup "
-         "need extended";
+  if (curr_frame_header_.bit_depth > 10) {
+    DLOG(ERROR) << "bit_depth > 10 is not supported yet, kDcQLookup and "
+                   "kAcQLookup need to be extended";
+    return false;
+  }
+  const size_t bit_depth_index = (curr_frame_header_.bit_depth == 8) ? 0 : 1;
+
   if (segmentation.enabled) {
     for (size_t i = 0; i < Vp9SegmentationParams::kNumSegments; ++i) {
       const size_t q_index = GetQIndex(quant, i);
       segmentation.y_dequant[i][0] =
-          kDcQLookup[ClampQ(q_index + quant.delta_q_y_dc)];
-      segmentation.y_dequant[i][1] = kAcQLookup[ClampQ(q_index)];
+          kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
+      segmentation.y_dequant[i][1] =
+          kAcQLookup[bit_depth_index][ClampQ(q_index)];
       segmentation.uv_dequant[i][0] =
-          kDcQLookup[ClampQ(q_index + quant.delta_q_uv_dc)];
+          kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_dc)];
       segmentation.uv_dequant[i][1] =
-          kAcQLookup[ClampQ(q_index + quant.delta_q_uv_ac)];
+          kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
     }
   } else {
     const size_t q_index = quant.base_q_idx;
     segmentation.y_dequant[0][0] =
-        kDcQLookup[ClampQ(q_index + quant.delta_q_y_dc)];
-    segmentation.y_dequant[0][1] = kAcQLookup[ClampQ(q_index)];
+        kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
+    segmentation.y_dequant[0][1] = kAcQLookup[bit_depth_index][ClampQ(q_index)];
     segmentation.uv_dequant[0][0] =
-        kDcQLookup[ClampQ(q_index + quant.delta_q_uv_dc)];
+        kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_dc)];
     segmentation.uv_dequant[0][1] =
-        kAcQLookup[ClampQ(q_index + quant.delta_q_uv_ac)];
+        kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
   }
-}
-
-static int ClampLf(int lf) {
-  const int kMaxLoopFilterLevel = 63;
-  return std::min(std::max(0, lf), kMaxLoopFilterLevel);
+  return true;
 }
 
 // 8.8.1 Loop filter frame init process
diff --git a/vda/vp9_parser.h b/vda/vp9_parser.h
index c6e1d9f..ab1fa57 100644
--- a/vda/vp9_parser.h
+++ b/vda/vp9_parser.h
@@ -9,6 +9,7 @@
 //
 // See media::VP9Decoder for example usage.
 //
+// Note: ported from Chromium commit head: ec6c6e0
 #ifndef VP9_PARSER_H_
 #define VP9_PARSER_H_
 
@@ -404,8 +405,22 @@
 
   std::deque<FrameInfo> ParseSuperframe();
 
+  // Returns true and populates |result| with the parsing result if parsing of
+  // current frame is finished (possibly unsuccessfully). |fhdr| will only be
+  // populated and valid if |result| is kOk. Otherwise return false, indicating
+  // that the compressed header must be parsed next.
+  bool ParseUncompressedHeader(const FrameInfo& frame_info,
+                               Vp9FrameHeader* fhdr,
+                               Result* result);
+
+  // Returns true if parsing of current frame is finished and |result| will be
+  // populated with value of parsing result. Otherwise, needs to continue setup
+  // current frame.
+  bool ParseCompressedHeader(const FrameInfo& frame_info, Result* result);
+
   size_t GetQIndex(const Vp9QuantizationParams& quant, size_t segid) const;
-  void SetupSegmentationDequant();
+  // Returns true if the setup succeeded.
+  bool SetupSegmentationDequant();
   void SetupLoopFilter();
   void UpdateSlots();
 
@@ -415,7 +430,7 @@
   // Remaining bytes in stream_.
   off_t bytes_left_;
 
-  bool parsing_compressed_header_;
+  const bool parsing_compressed_header_;
 
   // FrameInfo for the remaining frames in the current superframe to be parsed.
   std::deque<FrameInfo> frames_;
diff --git a/vda/vp9_picture.cc b/vda/vp9_picture.cc
index a99427f..df2c3b0 100644
--- a/vda/vp9_picture.cc
+++ b/vda/vp9_picture.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 6e70beb
 
 #include "vp9_picture.h"
 
diff --git a/vda/vp9_picture.h b/vda/vp9_picture.h
index 23e299b..efff37b 100644
--- a/vda/vp9_picture.h
+++ b/vda/vp9_picture.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 70340ce
 
 #ifndef VP9_PICTURE_H_
 #define VP9_PICTURE_H_
@@ -9,13 +10,14 @@
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "rect.h"
 #include "vp9_parser.h"
 
 namespace media {
 
 class V4L2VP9Picture;
 
-class VP9Picture : public base::RefCounted<VP9Picture> {
+class VP9Picture : public base::RefCountedThreadSafe<VP9Picture> {
  public:
   VP9Picture();
 
@@ -23,8 +25,13 @@
 
   std::unique_ptr<Vp9FrameHeader> frame_hdr;
 
+  // The visible size of picture. This could be either parsed from frame
+  // header, or set to Rect(0, 0) for indicating invalid values or
+  // not available.
+  Rect visible_rect;
+
  protected:
-  friend class base::RefCounted<VP9Picture>;
+  friend class base::RefCountedThreadSafe<VP9Picture>;
   virtual ~VP9Picture();
 
   DISALLOW_COPY_AND_ASSIGN(VP9Picture);
diff --git a/vda/vp9_raw_bits_reader.cc b/vda/vp9_raw_bits_reader.cc
index 7cad4d9..dea06e0 100644
--- a/vda/vp9_raw_bits_reader.cc
+++ b/vda/vp9_raw_bits_reader.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "vp9_raw_bits_reader.h"
 
@@ -13,7 +14,7 @@
 
 Vp9RawBitsReader::Vp9RawBitsReader() : valid_(true) {}
 
-Vp9RawBitsReader::~Vp9RawBitsReader() {}
+Vp9RawBitsReader::~Vp9RawBitsReader() = default;
 
 void Vp9RawBitsReader::Initialize(const uint8_t* data, size_t size) {
   DCHECK(data);
diff --git a/vda/vp9_raw_bits_reader.h b/vda/vp9_raw_bits_reader.h
index 9f112b8..04ad413 100644
--- a/vda/vp9_raw_bits_reader.h
+++ b/vda/vp9_raw_bits_reader.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_RAW_BITS_READER_H_
 #define VP9_RAW_BITS_READER_H_
diff --git a/vda/vp9_uncompressed_header_parser.cc b/vda/vp9_uncompressed_header_parser.cc
index 067b40c..f6dc2eb 100644
--- a/vda/vp9_uncompressed_header_parser.cc
+++ b/vda/vp9_uncompressed_header_parser.cc
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: f06caa0
 
 #include "vp9_uncompressed_header_parser.h"
 
@@ -789,7 +790,7 @@
       for (size_t i = 0; i < Vp9LoopFilterParams::kNumModeDeltas; i++) {
         loop_filter.update_mode_deltas[i] = reader_.ReadBool();
         if (loop_filter.update_mode_deltas[i])
-          loop_filter.mode_deltas[i] = reader_.ReadLiteral(6);
+          loop_filter.mode_deltas[i] = reader_.ReadSignedLiteral(6);
       }
     }
   }
diff --git a/vda/vp9_uncompressed_header_parser.h b/vda/vp9_uncompressed_header_parser.h
index 655ba38..6780d38 100644
--- a/vda/vp9_uncompressed_header_parser.h
+++ b/vda/vp9_uncompressed_header_parser.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_UNCOMPRESSED_HEADER_PARSER_H_
 #define VP9_UNCOMPRESSED_HEADER_PARSER_H_