msm: camera: Add Face Detection driver

Enable the FD (Face Detection) driver to expose FD HW
functionality. This driver allows the HW to process a
given frame and detect faces present in the FOV.

Change-Id: I5c1b985d5f65f5613dcd39fb3b51169ad3442b76
Signed-off-by: Pavan Kumar Chilamkurthi <pchilamk@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-fd.txt b/Documentation/devicetree/bindings/media/video/msm-cam-fd.txt
new file mode 100644
index 0000000..cf551f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-fd.txt
@@ -0,0 +1,149 @@
+* Qualcomm Technologies, Inc. MSM Camera FD
+
+The MSM camera Face Detection device provides dependency definitions
+for enabling Camera FD HW. MSM camera FD is implemented in multiple
+device nodes. The root FD device node has properties defined to inform
+the driver about the FD HW nodes available during the probe sequence.
+Each node has multiple properties defined for interrupts, clocks and
+regulators.
+
+=======================
+Required Node Structure
+=======================
+The FD root interface node handles the high-level Face Detection
+driver functionality and controls the underlying FD hardware present.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-fd".
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,fd".
+
+- num-fd
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported FD HW blocks.
+
+Example:
+	qcom,cam-fd {
+		compatible = "qcom,cam-fd";
+		compat-hw-name = "qcom,fd";
+		num-fd = <1>;
+	};
+
+=======================
+Required Node Structure
+=======================
+The FD node provides the Face Detection hardware driver with information
+about the device register map, interrupt map, clocks and regulators.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,fd41".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              Camera base register space.
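+              For example, assuming a camera block register base of
+              0xac00000, the "reg" addresses 0xac5a000 and 0xac5b000 in
+              the example below correspond to offsets 0x5a000 and
+              0x5b000 here.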
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt line associated with FD HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for FD HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for FD HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks required for FD HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Example:
+	cam_fd: qcom,fd@ac5a000 {
+		cell-index = <0>;
+		compatible = "qcom,fd41";
+		reg-names = "fd_core", "fd_wrapper";
+		reg = <0xac5a000 0x1000>,
+			<0xac5b000 0x400>;
+		reg-cam-base = <0x5a000 0x5b000>;
+		interrupt-names = "fd";
+		interrupts = <0 462 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"fd_core_clk_src",
+			"fd_core_clk",
+			"fd_core_uar_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_FD_CORE_CLK_SRC>,
+			<&clock_camcc CAM_CC_FD_CORE_CLK>,
+			<&clock_camcc CAM_CC_FD_CORE_UAR_CLK>;
+		src-clock-name = "fd_core_clk_src";
+		clock-cntl-level = "svs";
+		clock-rates = <0 0 0 0 0 400000000 0 0>;
+	};
+
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 800c9ea..48fa1c0 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
diff --git a/drivers/media/platform/msm/camera/cam_fd/Makefile b/drivers/media/platform/msm/camera/cam_fd/Makefile
new file mode 100644
index 0000000..f8177e8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_dev.o cam_fd_context.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
new file mode 100644
index 0000000..4c29ffd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -0,0 +1,236 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_fd_context.h"
+
+/* Functions in Available state */
+static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Acquire dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+/* Functions in Acquired state */
+static int __cam_fd_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_fd_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Start dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACTIVATED;
+
+	return rc;
+}
+
+/* Functions in Activated state */
+static int __cam_fd_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_fd_ctx_stop_dev_in_activated(ctx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	rc = __cam_fd_ctx_release_dev_in_acquired(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_config_dev_in_activated(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc;
+
+	rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in buf done, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* top state machine */
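+/*
+ * State flow implemented by the handlers above (summary):
+ * Available --acquire_dev--> Acquired --start_dev--> Activated;
+ * Activated --stop_dev--> Acquired --release_dev--> Available.
+ */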
+static struct cam_ctx_ops
+	cam_fd_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_fd_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_fd_ctx_release_dev_in_acquired,
+			.config_dev = __cam_fd_ctx_config_dev_in_acquired,
+			.start_dev = __cam_fd_ctx_start_dev_in_acquired,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = { },
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
+			.release_dev = __cam_fd_ctx_release_dev_in_activated,
+			.config_dev = __cam_fd_ctx_config_dev_in_activated,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
+	},
+};
+
+
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf)
+{
+	int rc;
+
+	if (!base_ctx || !fd_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Context %pK %pK", base_ctx, fd_ctx);
+		return -EINVAL;
+	}
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	rc = cam_context_init(base_ctx, NULL, hw_intf, fd_ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
+		return rc;
+	}
+
+	fd_ctx->base = base_ctx;
+	base_ctx->ctx_priv = fd_ctx;
+	base_ctx->state_machine = cam_fd_ctx_state_machine;
+
+	return rc;
+}
+
+int cam_fd_context_deinit(struct cam_fd_context *fd_ctx)
+{
+	int rc = 0;
+
+	if (!fd_ctx || !fd_ctx->base) {
+		CAM_ERR(CAM_FD, "Invalid inputs %pK", fd_ctx);
+		return -EINVAL;
+	}
+
+	rc = cam_context_deinit(fd_ctx->base);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in base deinit, rc=%d", rc);
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.h b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.h
new file mode 100644
index 0000000..6aa5edb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_CONTEXT_H_
+#define _CAM_FD_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+/**
+ * struct cam_fd_context - Face Detection context information
+ *
+ * @base     : Base context pointer for this FD context
+ * @req_base : List of base requests for this FD context
+ */
+struct cam_fd_context {
+	struct cam_context       *base;
+	struct cam_ctx_request    req_base[CAM_CTX_REQ_MAX];
+};
+
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf);
+int cam_fd_context_deinit(struct cam_fd_context *ctx);
+
+#endif /* _CAM_FD_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_dev.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_dev.c
new file mode 100644
index 0000000..27f5518
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_dev.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_fd_hw_mgr_intf.h"
+
+#define CAM_FD_DEV_NAME "cam-fd"
+
+/**
+ * struct cam_fd_dev - FD device information
+ *
+ * @sd:         Subdev information
+ * @base_ctx:   List of base contexts
+ * @fd_ctx:     List of FD contexts
+ * @lock:       Mutex handle
+ * @open_cnt:   FD subdev open count
+ * @probe_done: Whether FD probe is completed
+ */
+struct cam_fd_dev {
+	struct cam_subdev     sd;
+	struct cam_context    base_ctx[CAM_CTX_MAX];
+	struct cam_fd_context fd_ctx[CAM_CTX_MAX];
+	struct mutex          lock;
+	uint32_t              open_cnt;
+	bool                  probe_done;
+};
+
+static struct cam_fd_dev g_fd_dev;
+
+static int cam_fd_dev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt++;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	return 0;
+}
+
+static int cam_fd_dev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt--;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_fd_subdev_internal_ops = {
+	.open = cam_fd_dev_open,
+	.close = cam_fd_dev_close,
+};
+
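+/*
+ * Probe sequence: register the v4l2 subdev (which creates the cam_node),
+ * initialize the FD HW manager from the DT node, initialize all FD
+ * contexts, and finally initialize the cam_node with those contexts.
+ */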
+static int cam_fd_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	g_fd_dev.sd.internal_ops = &cam_fd_subdev_internal_ops;
+
+	/* Initialize the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_fd_dev.sd, pdev, CAM_FD_DEV_NAME,
+		CAM_FD_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD cam_subdev_probe failed, rc=%d", rc);
+		return rc;
+	}
+	node = (struct cam_node *) g_fd_dev.sd.token;
+
+	rc = cam_fd_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in initializing FD HW manager, rc=%d",
+			rc);
+		goto unregister_subdev;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_init(&g_fd_dev.fd_ctx[i],
+			&g_fd_dev.base_ctx[i], &node->hw_mgr_intf);
+		if (rc) {
+			CAM_ERR(CAM_FD, "FD context init failed i=%d, rc=%d",
+				i, rc);
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_fd_dev.base_ctx, CAM_CTX_MAX,
+		CAM_FD_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD node init failed, rc=%d", rc);
+		goto deinit_ctx;
+	}
+
+	mutex_init(&g_fd_dev.lock);
+	g_fd_dev.probe_done = true;
+
+	CAM_DBG(CAM_FD, "Camera FD probe complete");
+
+	return 0;
+
+deinit_ctx:
+	for (--i; i >= 0; i--) {
+		if (cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]))
+			CAM_ERR(CAM_FD, "FD context %d deinit failed", i);
+	}
+unregister_subdev:
+	if (cam_subdev_remove(&g_fd_dev.sd))
+		CAM_ERR(CAM_FD, "Failed in subdev remove");
+
+	return rc;
+}
+
+static int cam_fd_dev_remove(struct platform_device *pdev)
+{
+	int i, rc;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]);
+		if (rc)
+			CAM_ERR(CAM_FD, "FD context %d deinit failed, rc=%d",
+				i, rc);
+	}
+
+	rc = cam_fd_hw_mgr_deinit(pdev->dev.of_node);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in hw mgr deinit, rc=%d", rc);
+
+	rc = cam_subdev_remove(&g_fd_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister failed, rc=%d", rc);
+
+	mutex_destroy(&g_fd_dev.lock);
+	g_fd_dev.probe_done = false;
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_dt_match[] = {
+	{
+		.compatible = "qcom,cam-fd"
+	},
+	{}
+};
+
+static struct platform_driver cam_fd_driver = {
+	.probe = cam_fd_dev_probe,
+	.remove = cam_fd_dev_remove,
+	.driver = {
+		.name = "cam_fd",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_dt_match,
+	},
+};
+
+static int __init cam_fd_dev_init_module(void)
+{
+	return platform_driver_register(&cam_fd_driver);
+}
+
+static void __exit cam_fd_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_driver);
+}
+
+module_init(cam_fd_dev_init_module);
+module_exit(cam_fd_dev_exit_module);
+MODULE_DESCRIPTION("MSM FD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/Makefile
new file mode 100644
index 0000000..e7478cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
new file mode 100644
index 0000000..d226e17
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -0,0 +1,1663 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_mgr_intf.h"
+#include "cam_fd_hw_mgr.h"
+
+static struct cam_fd_hw_mgr g_fd_hw_mgr;
+
+static int cam_fd_mgr_util_packet_validate(struct cam_packet *packet)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_FD, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_FD,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_FD,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet)) {
+		CAM_ERR(CAM_FD, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	/* All buffers must come through io config, do not support patching */
+	if (packet->num_patches || !packet->num_io_configs) {
+		CAM_ERR(CAM_FD, "wrong number of cmd/patch info: %u %u",
+			packet->num_cmd_buf, packet->num_patches);
+		return -EINVAL;
+	}
+
+	/* KMD Buf index can never be greater than or equal to num cmd bufs */
+	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) {
+		CAM_ERR(CAM_FD, "Invalid kmd index %d (%d)",
+			packet->kmd_cmd_buf_index, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if ((packet->header.op_code & 0xff) !=
+		CAM_PACKET_OPCODES_FD_FRAME_UPDATE) {
+		CAM_ERR(CAM_FD, "Invalid op_code %u",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		/*
+		 * We can allow 0 length cmd buffer. This can happen in case
+		 * umd gives an empty cmd buffer as kmd buffer
+		 */
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_GENERIC) &&
+			(cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)) {
+			CAM_ERR(CAM_FD, "Invalid meta_data [%d] %u", i,
+				cmd_desc[i].meta_data);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_FD, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_put_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	ctx_ptr = *fd_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*fd_ctx = NULL;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_fd_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		CAM_ERR(CAM_FD, "No more free fd hw mgr ctx");
+		rc = -1;
+	}
+	*fd_ctx = ctx_ptr;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_put_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	req_ptr = *frame_req;
+	if (req_ptr)
+		list_add_tail(&req_ptr->list, src_list);
+	*frame_req = NULL;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_fd_mgr_frame_request, list);
+		list_del_init(&req_ptr->list);
+	} else {
+		CAM_DBG(CAM_FD, "Frame req not available");
+		rc = -EPERM;
+	}
+	*frame_req = req_ptr;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_fd_device **hw_device)
+{
+	if (!hw_mgr || !hw_ctx || !hw_device) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			hw_device);
+		return -EINVAL;
+	}
+
+	if ((hw_ctx->device_index < 0) ||
+		(hw_ctx->device_index >= CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid device indx %d", hw_ctx->device_index);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_release_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_release_args hw_release_args;
+	int rc;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.release) {
+		hw_release_args.hw_ctx = hw_ctx;
+		hw_release_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.release(
+			hw_device->hw_intf->hw_priv, &hw_release_args,
+			sizeof(hw_release_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW release %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid release function");
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_ctxts--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	hw_ctx->device_index = -1;
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx,
+	struct cam_fd_acquire_dev_info *fd_acquire_args)
+{
+	int i, rc;
+	struct cam_fd_hw_reserve_args hw_reserve_args;
+	struct cam_fd_device *hw_device = NULL;
+
+	if (!hw_mgr || !hw_ctx || !fd_acquire_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			fd_acquire_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	/* Check if a device is free which can satisfy the requirements */
+	for (i = 0; i < hw_mgr->num_devices; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		CAM_DBG(CAM_FD,
+			"[%d] : num_ctxts=%d, modes=%d, raw_results=%d",
+			i, hw_device->num_ctxts,
+			hw_device->hw_caps.supported_modes,
+			hw_device->hw_caps.raw_results_available);
+		if ((hw_device->num_ctxts == 0) &&
+			(fd_acquire_args->mode &
+			hw_device->hw_caps.supported_modes) &&
+			(!fd_acquire_args->get_raw_results ||
+			hw_device->hw_caps.raw_results_available)) {
+			CAM_DBG(CAM_FD, "Found dedicated HW Index=%d", i);
+			hw_device->num_ctxts++;
+			break;
+		}
+	}
+
+	/*
+	 * We couldn't find a free HW which meets requirement, now check if
+	 * there is a HW which meets acquire requirements
+	 */
+	if (i == hw_mgr->num_devices) {
+		for (i = 0; i < hw_mgr->num_devices; i++) {
+			hw_device = &hw_mgr->hw_device[i];
+			if ((fd_acquire_args->mode &
+				hw_device->hw_caps.supported_modes) &&
+				(!fd_acquire_args->get_raw_results ||
+				hw_device->hw_caps.raw_results_available)) {
+				hw_device->num_ctxts++;
+				CAM_DBG(CAM_FD, "Found sharing HW Index=%d", i);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if ((i == hw_mgr->num_devices) || !hw_device) {
+		CAM_ERR(CAM_FD, "Couldn't acquire HW %d %d",
+			fd_acquire_args->mode,
+			fd_acquire_args->get_raw_results);
+		return -EBUSY;
+	}
+
+	CAM_DBG(CAM_FD, "Device index %d selected for this acquire", i);
+
+	/* Check if we can reserve this HW */
+	if (hw_device->hw_intf->hw_ops.reserve) {
+		hw_reserve_args.hw_ctx = hw_ctx;
+		hw_reserve_args.mode = fd_acquire_args->mode;
+		rc = hw_device->hw_intf->hw_ops.reserve(
+			hw_device->hw_intf->hw_priv, &hw_reserve_args,
+			sizeof(hw_reserve_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW reserve %d", rc);
+			return rc;
+		}
+		hw_ctx->ctx_hw_private = hw_reserve_args.ctx_hw_private;
+	} else {
+		CAM_ERR(CAM_FD, "Invalid reserve function");
+		return -EPERM;
+	}
+
+	/* Update required info in hw context */
+	hw_ctx->device_index = i;
+
+	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_pdev_get_hw_intf(struct device_node *of_node,
+	int i, struct cam_hw_intf **device_hw_intf)
+{
+	struct device_node *device_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *hw_intf = NULL;
+	const char *name = NULL;
+	int rc;
+
+	rc = of_property_read_string_index(of_node, "compat-hw-name", i, &name);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Getting dev object name failed %d %d", i, rc);
+		goto put_node;
+	}
+
+	device_node = of_find_node_by_name(NULL, name);
+	if (!device_node) {
+		CAM_ERR(CAM_FD, "Cannot find node in dtsi %s", name);
+		return -ENODEV;
+	}
+
+	child_pdev = of_find_device_by_node(device_node);
+	if (!child_pdev) {
+		CAM_ERR(CAM_FD, "Failed to find device on bus %s",
+			device_node->name);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	hw_intf = (struct cam_hw_intf *)platform_get_drvdata(child_pdev);
+	if (!hw_intf) {
+		CAM_ERR(CAM_FD, "No driver data for child device");
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	CAM_DBG(CAM_FD, "child type %d index %d child_intf %pK",
+		hw_intf->hw_type, hw_intf->hw_idx, hw_intf);
+
+	if (hw_intf->hw_idx >= CAM_FD_HW_MAX) {
+		CAM_ERR(CAM_FD, "hw_idx invalid %d", hw_intf->hw_idx);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	rc = 0;
+	*device_hw_intf = hw_intf;
+
+put_node:
+	of_node_put(device_node);
+
+	return rc;
+}
+
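+/*
+ * Generic command buffer blob handler. The raw-results-required blob is
+ * recorded in the prestart args passed as user_data; the SOC
+ * clock/bandwidth request blob is validated and currently only logged.
+ */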
+static int cam_fd_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_fd_hw_cmd_prestart_args *prestart_args =
+		(struct cam_fd_hw_cmd_prestart_args *)user_data;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_FD, "Invalid blob info %pK %d", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	if (!prestart_args) {
+		CAM_ERR(CAM_FD, "Invalid user data");
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED: {
+		uint32_t *get_raw_results = (uint32_t *)blob_data;
+
+		if (sizeof(uint32_t) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %d %d",
+				sizeof(uint32_t), blob_size);
+			return -EINVAL;
+		}
+
+		prestart_args->get_raw_results = *get_raw_results;
+		break;
+	}
+	case CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST: {
+		struct cam_fd_soc_clock_bw_request *clk_req =
+			(struct cam_fd_soc_clock_bw_request *)blob_data;
+
+		if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %d %d",
+				sizeof(struct cam_fd_soc_clock_bw_request),
+				blob_size);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD, "SOC Clk Request clock=%lld, bw=%lld",
+			clk_req->clock_rate, clk_req->bandwidth);
+
+		break;
+	}
+	default:
+		CAM_WARN(CAM_FD, "Unknown blob type %d", blob_type);
+		break;
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_parse_generic_cmd_buffer(
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_packet *packet,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc = 0;
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data == CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_fd_packet_generic_blob_handler, prestart_args);
+		if (rc)
+			CAM_ERR(CAM_FD, "Failed in processing blobs %d", rc);
+
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_io_buffer *input_buf,
+	struct cam_fd_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t i, j, plane, num_out_buf, num_in_buf;
+	struct cam_buf_io_cfg *io_cfg;
+	uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+	uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+
+	/* Get IO Buf information */
+	num_out_buf = 0;
+	num_in_buf  = 0;
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&prepare->packet->payload + prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_FD,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_FD, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		memset(io_addr, 0x0, sizeof(io_addr));
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
+				iommu_hdl, &io_addr[plane], &size);
+			if ((rc) || (io_addr[plane] >> 32)) {
+				CAM_ERR(CAM_FD, "Invalid io addr for %d %d",
+					plane, rc);
+				return -ENOMEM;
+			}
+
+			/*
+			 * Buffers may be accessed by CPU as well, we do not
+			 * know at this point, so get both and send to HW layer
+			 */
+			rc = cam_mem_get_cpu_buf(io_cfg[i].mem_handle[plane],
+				&cpu_addr[plane], &size);
+			if (rc) {
+				CAM_ERR(CAM_FD, "unable to get buf address");
+				return rc;
+			}
+
+			io_addr[plane] += io_cfg[i].offsets[plane];
+			cpu_addr[plane] += io_cfg[i].offsets[plane];
+
+			CAM_DBG(CAM_FD, "IO Address[%d][%d] : %pK, %pK",
+				io_cfg[i].direction, plane, io_addr[plane],
+				cpu_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+				input_buf[num_in_buf].cpu_addr[j] = cpu_addr[j];
+			}
+			input_buf[num_in_buf].num_buf = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+				output_buf[num_out_buf].cpu_addr[j] =
+					cpu_addr[j];
+			}
+			output_buf[num_out_buf].num_buf = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+	}
+
+	prepare->num_in_map_entries  = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return 0;
+}
+
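+/*
+ * Builds the hw_update_entries for a request: an optional pre-config KMD
+ * buffer entry, followed by any CDM command buffers from the packet,
+ * followed by an optional post-config KMD buffer entry. The pre/post
+ * config buffer sizes are filled in by the HW layer through the
+ * CAM_FD_HW_CMD_PRESTART process_cmd call below.
+ */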
+static int cam_fd_mgr_util_prepare_hw_update_entries(
+	struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args,
+	struct cam_kmd_buf_info *kmd_buf_info)
+{
+	int i, rc;
+	struct cam_hw_update_entry *hw_entry;
+	uint32_t num_ent;
+	struct cam_fd_hw_mgr_ctx *hw_ctx =
+		(struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
+	struct cam_fd_device *hw_device;
+	uint32_t kmd_buf_max_size, kmd_buf_used_bytes = 0;
+	uint32_t *kmd_buf_addr;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+		kmd_buf_info->used_bytes);
+	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+	prestart_args->cmd_buf_addr = kmd_buf_addr;
+	prestart_args->size  = kmd_buf_max_size;
+	prestart_args->pre_config_buf_size = 0;
+	prestart_args->post_config_buf_size = 0;
+
+	if (hw_device->hw_intf->hw_ops.process_cmd) {
+		rc = hw_device->hw_intf->hw_ops.process_cmd(
+			hw_device->hw_intf->hw_priv, CAM_FD_HW_CMD_PRESTART,
+			prestart_args,
+			sizeof(struct cam_fd_hw_cmd_prestart_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", rc);
+			return rc;
+		}
+	}
+
+	kmd_buf_used_bytes += prestart_args->pre_config_buf_size;
+	kmd_buf_used_bytes += prestart_args->post_config_buf_size;
+
+	/* HW layer is expected to add commands */
+	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+		CAM_ERR(CAM_FD, "Invalid kmd used bytes %d (%d)",
+			kmd_buf_used_bytes, kmd_buf_max_size);
+		return -ENOMEM;
+	}
+
+	hw_entry = prepare->hw_update_entries;
+	num_ent = 0;
+
+	if (prestart_args->pre_config_buf_size) {
+		if ((num_ent + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = kmd_buf_info->handle;
+		hw_entry[num_ent].len  = prestart_args->pre_config_buf_size;
+		hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += prestart_args->pre_config_buf_size;
+		kmd_buf_info->offset += prestart_args->pre_config_buf_size;
+		num_ent++;
+	}
+
+	/*
+	 * set the cmd_desc to point the first command descriptor in the
+	 * packet and update hw entries with CDM command buffers
+	 */
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+		&prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = cmd_desc[i].mem_handle;
+		hw_entry[num_ent].len = cmd_desc[i].length;
+		hw_entry[num_ent].offset = cmd_desc[i].offset;
+		num_ent++;
+	}
+
+	if (prestart_args->post_config_buf_size) {
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = kmd_buf_info->handle;
+		hw_entry[num_ent].len    = prestart_args->post_config_buf_size;
+		hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += prestart_args->post_config_buf_size;
+		kmd_buf_info->offset     += prestart_args->post_config_buf_size;
+
+		num_ent++;
+	}
+
+	prepare->num_hw_update_entries = num_ent;
+
+	CAM_DBG(CAM_FD, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+		prepare->num_hw_update_entries, prepare->num_in_map_entries,
+		prepare->num_out_map_entries);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_mgr *hw_mgr;
+	struct cam_fd_mgr_frame_request *frame_req;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_hw_cmd_start_args start_args;
+	int rc;
+
+	if (!priv) {
+		CAM_ERR(CAM_FD, "Invalid data");
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_fd_hw_mgr *)priv;
+	mutex_lock(&hw_mgr->frame_req_mutex);
+
+	/* Check if we have any frames pending in high priority list */
+	if (!list_empty(&hw_mgr->frame_pending_list_high)) {
+		CAM_DBG(CAM_FD, "Pending frames in high priority list");
+		frame_req = list_first_entry(&hw_mgr->frame_pending_list_high,
+			struct cam_fd_mgr_frame_request, list);
+	} else if (!list_empty(&hw_mgr->frame_pending_list_normal)) {
+		CAM_DBG(CAM_FD, "Pending frames in normal priority list");
+		frame_req = list_first_entry(&hw_mgr->frame_pending_list_normal,
+			struct cam_fd_mgr_frame_request, list);
+	} else {
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		CAM_DBG(CAM_FD, "No pending frames");
+		return 0;
+	}
+
+	CAM_DBG(CAM_FD, "FrameSubmit : Frame[%lld]", frame_req->request_id);
+	hw_ctx = frame_req->hw_ctx;
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_device->lock);
+	if (hw_device->ready_to_process == false) {
+		mutex_unlock(&hw_device->lock);
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		return -EBUSY;
+	}
+
+	list_del_init(&frame_req->list);
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	if (hw_device->hw_intf->hw_ops.start) {
+		start_args.hw_ctx = hw_ctx;
+		start_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		start_args.hw_req_private = &frame_req->hw_req_private;
+		start_args.hw_update_entries = frame_req->hw_update_entries;
+		start_args.num_hw_update_entries =
+			frame_req->num_hw_update_entries;
+
+		rc = hw_device->hw_intf->hw_ops.start(
+			hw_device->hw_intf->hw_priv, &start_args,
+			sizeof(start_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Start %d", rc);
+			mutex_unlock(&hw_device->lock);
+			goto put_req_into_free_list;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid hw_ops.start");
+		mutex_unlock(&hw_device->lock);
+		rc = -EPERM;
+		goto put_req_into_free_list;
+	}
+
+	hw_device->ready_to_process = false;
+	mutex_unlock(&hw_device->lock);
+
+	rc = cam_fd_mgr_util_put_frame_req(
+		&hw_mgr->frame_processing_list, &frame_req);
+	if (rc) {
+		CAM_ERR(CAM_FD,
+			"Failed in putting frame req in processing list");
+		goto stop_unlock;
+	}
+
+	return rc;
+
+stop_unlock:
+	if (hw_device->hw_intf->hw_ops.stop) {
+		struct cam_fd_hw_stop_args stop_args;
+
+		stop_args.hw_ctx = hw_ctx;
+		stop_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		stop_args.hw_req_private = &frame_req->hw_req_private;
+		if (hw_device->hw_intf->hw_ops.stop(
+			hw_device->hw_intf->hw_priv, &stop_args,
+			sizeof(stop_args)))
+			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+	}
+put_req_into_free_list:
+	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, &frame_req);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_schedule_frame_worker_task(
+	struct cam_fd_hw_mgr *hw_mgr)
+{
+	int32_t rc = 0;
+	struct crm_workq_task *task;
+	struct cam_fd_mgr_work_data *work_data;
+
+	task = cam_req_mgr_workq_get_task(hw_mgr->work);
+	if (!task) {
+		CAM_ERR(CAM_FD, "no empty task available");
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_fd_mgr_work_data *)task->payload;
+	work_data->type = CAM_FD_WORK_FRAME;
+
+	task->process_cb = cam_fd_mgr_util_submit_frame;
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data)
+{
+	struct cam_fd_device *hw_device = NULL;
+	struct cam_fd_hw_mgr *hw_mgr;
+	struct cam_fd_mgr_work_data *work_data;
+	struct cam_fd_mgr_frame_request *frame_req = NULL;
+	enum cam_fd_hw_irq_type irq_type;
+	bool frame_abort = true;
+	int rc;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_FD, "Invalid data %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_fd_hw_mgr *)priv;
+	work_data = (struct cam_fd_mgr_work_data *)data;
+	irq_type = work_data->irq_type;
+
+	CAM_DBG(CAM_FD, "FD IRQ type=%d", irq_type);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/* HALT would be followed by a RESET, ignore this */
+		CAM_DBG(CAM_FD, "HALT IRQ callback");
+		return 0;
+	}
+
+	/* Get the frame from processing list */
+	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_processing_list,
+		&frame_req);
+	if (rc || !frame_req) {
+		/*
+		 * This can happen if reset is triggered while no frames
+		 * were pending, so not an error, just continue to check if
+		 * there are any pending frames and submit
+		 */
+		CAM_DBG(CAM_FD, "No Frame in processing list, rc=%d", rc);
+		goto submit_next_frame;
+	}
+
+	if (!frame_req->hw_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Frame request %lld",
+			frame_req->request_id);
+		goto put_req_in_free_list;
+	}
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, frame_req->hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		goto put_req_in_free_list;
+	}
+
+	/* Read frame results first */
+	if (irq_type == CAM_FD_IRQ_FRAME_DONE) {
+		struct cam_fd_hw_frame_done_args frame_done_args;
+
+		CAM_DBG(CAM_FD, "FrameDone : Frame[%lld]",
+			frame_req->request_id);
+
+		frame_done_args.hw_ctx = frame_req->hw_ctx;
+		frame_done_args.ctx_hw_private =
+			frame_req->hw_ctx->ctx_hw_private;
+		frame_done_args.request_id = frame_req->request_id;
+		frame_done_args.hw_req_private = &frame_req->hw_req_private;
+
+		if (hw_device->hw_intf->hw_ops.process_cmd) {
+			rc = hw_device->hw_intf->hw_ops.process_cmd(
+				hw_device->hw_intf->hw_priv,
+				CAM_FD_HW_CMD_FRAME_DONE,
+				&frame_done_args, sizeof(frame_done_args));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d",
+					rc);
+				frame_abort = true;
+				goto notify_context;
+			}
+		}
+
+		frame_abort = false;
+	}
+
+notify_context:
+	/* Do a callback to inform frame done or stop done */
+	if (frame_req->hw_ctx->event_cb) {
+		struct cam_hw_done_event_data buf_data;
+
+		CAM_DBG(CAM_FD, "FrameHALT : Frame[%lld]",
+			frame_req->request_id);
+
+		buf_data.num_handles = frame_req->num_hw_update_entries;
+		buf_data.request_id = frame_req->request_id;
+
+		rc = frame_req->hw_ctx->event_cb(frame_req->hw_ctx->cb_priv,
+			frame_abort, &buf_data);
+		if (rc)
+			CAM_ERR(CAM_FD, "Error in event cb handling %d", rc);
+	}
+
+	/*
+	 * Now we can mark the hw device as free to process further frames.
+	 * Note - Do not change the state to IDLE until the frame results are
+	 * read, otherwise another thread may schedule frame processing before
+	 * the current frame's results are read. Also, we need to move to the
+	 * IDLE state in case some error happens after getting this irq
+	 * callback.
+	 */
+	mutex_lock(&hw_device->lock);
+	hw_device->ready_to_process = true;
+	CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
+	mutex_unlock(&hw_device->lock);
+
+put_req_in_free_list:
+	rc = cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in putting frame req in free list");
+		/* continue */
+	}
+
+submit_next_frame:
+	/* Check if there are any frames pending for processing and submit */
+	rc = cam_fd_mgr_util_submit_frame(hw_mgr, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error while submit frame, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_irq_cb(void *data, enum cam_fd_hw_irq_type irq_type)
+{
+	struct cam_fd_hw_mgr *hw_mgr = &g_fd_hw_mgr;
+	int rc = 0;
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct cam_fd_mgr_work_data *work_data;
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_slock, flags);
+	task = cam_req_mgr_workq_get_task(hw_mgr->work);
+	if (!task) {
+		CAM_ERR(CAM_FD, "no empty task available");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_fd_mgr_work_data *)task->payload;
+	work_data->type = CAM_FD_WORK_IRQ;
+	work_data->irq_type = irq_type;
+
+	task->process_cb = cam_fd_mgr_workq_irq_cb;
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in enqueue work task, rc=%d", rc);
+
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query = hw_get_caps_args;
+	struct cam_fd_query_cap_cmd query_fd;
+
+	if (copy_from_user(&query_fd, (void __user *)query->caps_handle,
+		sizeof(struct cam_fd_query_cap_cmd))) {
+		CAM_ERR(CAM_FD, "Failed in copy from user, rc=%d", rc);
+		return -EFAULT;
+	}
+
+	query_fd  = hw_mgr->fd_caps;
+
+	CAM_DBG(CAM_FD,
+		"IOMMU device(%d, %d), CDM(%d, %d), versions %d.%d, %d.%d",
+		query_fd.device_iommu.secure, query_fd.device_iommu.non_secure,
+		query_fd.cdm_iommu.secure, query_fd.cdm_iommu.non_secure,
+		query_fd.hw_caps.core_version.major,
+		query_fd.hw_caps.core_version.minor,
+		query_fd.hw_caps.wrapper_version.major,
+		query_fd.hw_caps.wrapper_version.minor);
+
+	if (copy_to_user((void __user *)query->caps_handle, &query_fd,
+		sizeof(struct cam_fd_query_cap_cmd)))
+		rc = -EFAULT;
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args =
+		(struct cam_hw_acquire_args *)hw_acquire_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_acquire_dev_info fd_acquire_args;
+	int rc;
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_FD, "Invalid acquire args %pK", acquire_args);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&fd_acquire_args,
+		(void __user *)acquire_args->acquire_info,
+		sizeof(struct cam_fd_acquire_dev_info))) {
+		CAM_ERR(CAM_FD, "Copy from user failed");
+		return -EFAULT;
+	}
+
+	CAM_DBG(CAM_FD, "Acquire : mode=%d, get_raw_results=%d, priority=%d",
+		fd_acquire_args.mode, fd_acquire_args.get_raw_results,
+		fd_acquire_args.priority);
+
+	/* get a free fd hw mgr ctx */
+	rc = cam_fd_mgr_util_get_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+	if (rc || !hw_ctx) {
+		CAM_ERR(CAM_FD, "Get hw context failed, rc=%d, hw_ctx=%pK",
+			rc, hw_ctx);
+		return -EINVAL;
+	}
+
+	if (fd_acquire_args.get_raw_results && !hw_mgr->raw_results_available) {
+		CAM_ERR(CAM_FD, "HW cannot support raw results %d (%d)",
+			fd_acquire_args.get_raw_results,
+			hw_mgr->raw_results_available);
+		rc = -EPERM;
+		goto put_ctx;
+	}
+
+	if (!(fd_acquire_args.mode & hw_mgr->supported_modes)) {
+		CAM_ERR(CAM_FD, "HW cannot support requested mode 0x%x (0x%x)",
+			fd_acquire_args.mode, hw_mgr->supported_modes);
+		rc = -EPERM;
+		goto put_ctx;
+	}
+
+	rc = cam_fd_mgr_util_select_device(hw_mgr, hw_ctx, &fd_acquire_args);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in selecting device, rc=%d", rc);
+		goto put_ctx;
+	}
+
+	hw_ctx->ctx_in_use = true;
+	hw_ctx->hw_mgr = hw_mgr;
+	hw_ctx->get_raw_results = fd_acquire_args.get_raw_results;
+	hw_ctx->mode = fd_acquire_args.mode;
+
+	/* Save incoming cam core info into hw ctx*/
+	hw_ctx->cb_priv = acquire_args->context_data;
+	hw_ctx->event_cb = acquire_args->event_cb;
+
+	/* Update out args */
+	acquire_args->ctxt_to_hw_map = hw_ctx;
+
+	cam_fd_mgr_util_put_ctx(&hw_mgr->used_ctx_list, &hw_ctx);
+
+	return 0;
+put_ctx:
+	list_del_init(&hw_ctx->list);
+	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+	return rc;
+}
+
+static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_release_args *release_args = hw_release_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	int rc;
+
+	if (!hw_mgr_priv || !hw_release_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK, %pK",
+			hw_mgr_priv, hw_release_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	rc = cam_fd_mgr_util_release_device(hw_mgr, hw_ctx);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);
+
+	list_del_init(&hw_ctx->list);
+	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+
+	return 0;
+}
+
+static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_start_args *hw_mgr_start_args =
+		(struct cam_hw_start_args *)mgr_start_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_init_args hw_init_args;
+
+	if (!hw_mgr_priv || !hw_mgr_start_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_mgr_start_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_start_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.init) {
+		hw_init_args.hw_ctx = hw_ctx;
+		hw_init_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.init(
+			hw_device->hw_intf->hw_priv, &hw_init_args,
+			sizeof(hw_init_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Init %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid init function");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_stop_args *hw_mgr_stop_args =
+		(struct cam_hw_stop_args *)mgr_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_deinit_args hw_deinit_args;
+	int rc = 0;
+
+	if (!hw_mgr_priv || !hw_mgr_stop_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_mgr_stop_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_stop_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
+		hw_device->ready_to_process);
+
+	if ((hw_device->hw_intf->hw_ops.stop) &&
+		(hw_device->ready_to_process == false)) {
+		/*
+		 * Even if device is in processing state, we should submit
+		 * stop command only if this ctx is running on hw
+		 */
+		hw_stop_args.hw_ctx = hw_ctx;
+		rc = hw_device->hw_intf->hw_ops.stop(
+			hw_device->hw_intf->hw_priv, &hw_stop_args,
+			sizeof(hw_stop_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+			return rc;
+		}
+	}
+
+	if (hw_device->hw_intf->hw_ops.deinit) {
+		hw_deinit_args.hw_ctx = hw_ctx;
+		hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.deinit(
+			hw_device->hw_intf->hw_priv, &hw_deinit_args,
+			sizeof(hw_deinit_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW DeInit %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) hw_prepare_update_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	int rc;
+	struct cam_fd_hw_cmd_prestart_args prestart_args;
+	struct cam_fd_mgr_frame_request *frame_req;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_FD, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_packet_validate(prepare->packet);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting kmd buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_FD,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	/* We do not expect any patching, but just do it anyway */
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
+		return rc;
+	}
+
+	memset(&prestart_args, 0x0, sizeof(prestart_args));
+	prestart_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+	prestart_args.hw_ctx = hw_ctx;
+	prestart_args.request_id = prepare->packet->header.request_id;
+
+	rc = cam_fd_mgr_util_parse_generic_cmd_buffer(hw_ctx, prepare->packet,
+		&prestart_args);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in parsing generic cmd buffer %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_prepare_io_buf_info(
+		hw_mgr->device_iommu.non_secure, prepare,
+		prestart_args.input_buf, prestart_args.output_buf,
+		CAM_FD_MAX_IO_BUFFERS);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_prepare_hw_update_entries(hw_mgr, prepare,
+		&prestart_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	/* get a free frame req from free list */
+	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK",
+			rc, hw_ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup frame request info and queue to pending list */
+	frame_req->hw_ctx = hw_ctx;
+	frame_req->request_id = prepare->packet->header.request_id;
+	/* This has to be passed to HW while calling hw_ops->start */
+	frame_req->hw_req_private = prestart_args.hw_req_private;
+
+	/*
+	 * Save the current frame_req into priv,
+	 * this will come as priv while hw_config
+	 */
+	prepare->priv = frame_req;
+
+	CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id);
+
+	return 0;
+error:
+	return rc;
+}
+
+static int cam_fd_mgr_hw_config(void *hw_mgr_priv, void *hw_config_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_config_args *config =
+		(struct cam_hw_config_args *) hw_config_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_mgr_frame_request *frame_req;
+	int rc;
+	int i;
+
+	if (!hw_mgr || !config) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", hw_mgr, config);
+		return -EINVAL;
+	}
+
+	if (!config->num_hw_update_entries) {
+		CAM_ERR(CAM_FD, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)config->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
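+	/* frame_req was saved into prepare->priv during prepare_update */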
+	frame_req = config->priv;
+	CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id);
+
+	frame_req->num_hw_update_entries = config->num_hw_update_entries;
+	for (i = 0; i < config->num_hw_update_entries; i++) {
+		frame_req->hw_update_entries[i] = config->hw_update_entries[i];
+		CAM_DBG(CAM_FD, "PreStart HWEntry[%d] : %d %d %d %d %pK", i,
+			frame_req->hw_update_entries[i].handle,
+			frame_req->hw_update_entries[i].offset,
+			frame_req->hw_update_entries[i].len,
+			frame_req->hw_update_entries[i].flags,
+			frame_req->hw_update_entries[i].addr);
+	}
+
+	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+		CAM_DBG(CAM_FD, "Insert frame into prio0 queue");
+		rc = cam_fd_mgr_util_put_frame_req(
+			&hw_mgr->frame_pending_list_high, &frame_req);
+	} else {
+		CAM_DBG(CAM_FD, "Insert frame into prio1 queue");
+		rc = cam_fd_mgr_util_put_frame_req(
+			&hw_mgr->frame_pending_list_normal, &frame_req);
+	}
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in queuing frame req, rc=%d", rc);
+		goto put_free_list;
+	}
+
+	rc = cam_fd_mgr_util_schedule_frame_worker_task(hw_mgr);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Worker task scheduling failed %d", rc);
+		goto remove_and_put_free_list;
+	}
+
+	return 0;
+
+remove_and_put_free_list:
+
+	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+		CAM_DBG(CAM_FD, "Removing frame from prio0 queue");
+		cam_fd_mgr_util_get_frame_req(
+			&hw_mgr->frame_pending_list_high, &frame_req);
+	} else {
+		CAM_DBG(CAM_FD, "Removing frame from prio1 queue");
+		cam_fd_mgr_util_get_frame_req(
+			&hw_mgr->frame_pending_list_normal, &frame_req);
+	}
+put_free_list:
+	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+
+	return rc;
+}
+
+int cam_fd_hw_mgr_deinit(struct device_node *of_node)
+{
+	CAM_DBG(CAM_FD, "HW Mgr Deinit");
+
+	cam_req_mgr_workq_destroy(&g_fd_hw_mgr.work);
+
+	cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+
+	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+	return 0;
+}
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr_intf)
+{
+	int count, i, rc = 0;
+	struct cam_hw_intf *hw_intf = NULL;
+	struct cam_fd_hw_mgr_ctx *hw_mgr_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_mgr_frame_request *frame_req;
+
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_FD, "Invalid args of_node %pK hw_mgr_intf %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	memset(&g_fd_hw_mgr, 0x0, sizeof(g_fd_hw_mgr));
+	memset(hw_mgr_intf, 0x0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_fd_hw_mgr.ctx_mutex);
+	mutex_init(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_init(&g_fd_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_fd_hw_mgr.hw_mgr_slock);
+
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if ((count <= 0) || (count > CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid compat names in dev tree %d", count);
+		return -EINVAL;
+	}
+	g_fd_hw_mgr.num_devices = count;
+
+	g_fd_hw_mgr.raw_results_available = false;
+	g_fd_hw_mgr.supported_modes = 0;
+
+	for (i = 0; i < count; i++) {
+		hw_device = &g_fd_hw_mgr.hw_device[i];
+
+		rc = cam_fd_mgr_util_pdev_get_hw_intf(of_node, i, &hw_intf);
+		if (rc) {
+			CAM_ERR(CAM_FD, "hw intf from pdev failed, rc=%d", rc);
+			return rc;
+		}
+
+		mutex_init(&hw_device->lock);
+
+		hw_device->valid = true;
+		hw_device->hw_intf = hw_intf;
+		hw_device->ready_to_process = true;
+
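+		/* Register IRQ callback so HW layer can notify FD events */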
+		if (hw_device->hw_intf->hw_ops.process_cmd) {
+			struct cam_fd_hw_cmd_set_irq_cb irq_cb_args;
+
+			irq_cb_args.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb;
+			irq_cb_args.data = hw_device;
+
+			rc = hw_device->hw_intf->hw_ops.process_cmd(
+				hw_device->hw_intf->hw_priv,
+				CAM_FD_HW_CMD_REGISTER_CALLBACK,
+				&irq_cb_args, sizeof(irq_cb_args));
+			if (rc) {
+				CAM_ERR(CAM_FD,
+					"Failed in REGISTER_CALLBACK %d", rc);
+				return rc;
+			}
+		}
+
+		if (hw_device->hw_intf->hw_ops.get_hw_caps) {
+			rc = hw_device->hw_intf->hw_ops.get_hw_caps(
+				hw_intf->hw_priv, &hw_device->hw_caps,
+				sizeof(hw_device->hw_caps));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in get_hw_caps %d", rc);
+				return rc;
+			}
+
+			g_fd_hw_mgr.raw_results_available |=
+				hw_device->hw_caps.raw_results_available;
+			g_fd_hw_mgr.supported_modes |=
+				hw_device->hw_caps.supported_modes;
+
+			CAM_DBG(CAM_FD,
+				"Device[mode=%d, raw=%d], Mgr[mode=%d, raw=%d]",
+				hw_device->hw_caps.supported_modes,
+				hw_device->hw_caps.raw_results_available,
+				g_fd_hw_mgr.supported_modes,
+				g_fd_hw_mgr.raw_results_available);
+		}
+	}
+
+	INIT_LIST_HEAD(&g_fd_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.used_ctx_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_free_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_high);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_normal);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_processing_list);
+
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+	g_fd_hw_mgr.device_iommu.secure = -1;
+	g_fd_hw_mgr.cdm_iommu.non_secure = -1;
+	g_fd_hw_mgr.cdm_iommu.secure = -1;
+
+	rc = cam_smmu_get_handle("fd",
+		&g_fd_hw_mgr.device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Get iommu handle failed, rc=%d", rc);
+		goto destroy_mutex;
+	}
+
+	rc = cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD attach iommu handle failed, rc=%d", rc);
+		goto destroy_smmu;
+	}
+
+	rc = cam_cdm_get_iommu_handle("fd", &g_fd_hw_mgr.cdm_iommu);
+	if (rc)
+		CAM_DBG(CAM_FD, "Failed to acquire the CDM iommu handles");
+
+	CAM_DBG(CAM_FD, "iommu handles : device(%d, %d), cdm(%d, %d)",
+		g_fd_hw_mgr.device_iommu.non_secure,
+		g_fd_hw_mgr.device_iommu.secure,
+		g_fd_hw_mgr.cdm_iommu.non_secure,
+		g_fd_hw_mgr.cdm_iommu.secure);
+
+	/* Init hw mgr contexts and add to free list */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		hw_mgr_ctx = &g_fd_hw_mgr.ctx_pool[i];
+
+		memset(hw_mgr_ctx, 0x0, sizeof(*hw_mgr_ctx));
+		INIT_LIST_HEAD(&hw_mgr_ctx->list);
+
+		hw_mgr_ctx->ctx_index = i;
+		hw_mgr_ctx->device_index = -1;
+		hw_mgr_ctx->hw_mgr = &g_fd_hw_mgr;
+
+		list_add_tail(&hw_mgr_ctx->list, &g_fd_hw_mgr.free_ctx_list);
+	}
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		frame_req = &g_fd_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->list);
+
+		list_add_tail(&frame_req->list, &g_fd_hw_mgr.frame_free_list);
+	}
+
+	rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK,
+		&g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	for (i = 0; i < CAM_FD_WORKQ_NUM_TASK; i++)
+		g_fd_hw_mgr.work->task.pool[i].payload =
+			&g_fd_hw_mgr.work_data[i];
+
+	/* Setup hw cap so that we can just return the info when requested */
+	memset(&g_fd_hw_mgr.fd_caps, 0, sizeof(g_fd_hw_mgr.fd_caps));
+	g_fd_hw_mgr.fd_caps.device_iommu = g_fd_hw_mgr.device_iommu;
+	g_fd_hw_mgr.fd_caps.cdm_iommu = g_fd_hw_mgr.cdm_iommu;
+	g_fd_hw_mgr.fd_caps.hw_caps = g_fd_hw_mgr.hw_device[0].hw_caps;
+
+	CAM_DBG(CAM_FD,
+		"IOMMU device(%d, %d), CDM(%d, %d) versions core[%d.%d], wrapper[%d.%d]",
+		g_fd_hw_mgr.fd_caps.device_iommu.secure,
+		g_fd_hw_mgr.fd_caps.device_iommu.non_secure,
+		g_fd_hw_mgr.fd_caps.cdm_iommu.secure,
+		g_fd_hw_mgr.fd_caps.cdm_iommu.non_secure,
+		g_fd_hw_mgr.fd_caps.hw_caps.core_version.major,
+		g_fd_hw_mgr.fd_caps.hw_caps.core_version.minor,
+		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.major,
+		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.minor);
+
+	hw_mgr_intf->hw_mgr_priv = &g_fd_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_fd_mgr_hw_get_caps;
+	hw_mgr_intf->hw_acquire = cam_fd_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_fd_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_fd_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_fd_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_fd_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_fd_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+
+	return rc;
+
+detach_smmu:
+	cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+destroy_mutex:
+	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
new file mode 100644
index 0000000..135e006
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
@@ -0,0 +1,182 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_MGR_H_
+#define _CAM_FD_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_fd.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_fd_hw_intf.h"
+
+#define CAM_FD_HW_MAX            1
+#define CAM_FD_WORKQ_NUM_TASK    10
+
+struct cam_fd_hw_mgr;
+
+/**
+ * enum cam_fd_mgr_work_type - Type of worker task
+ *
+ * @CAM_FD_WORK_FRAME : Work type indicating frame task
+ * @CAM_FD_WORK_IRQ   : Work type indicating irq task
+ */
+enum cam_fd_mgr_work_type {
+	CAM_FD_WORK_FRAME,
+	CAM_FD_WORK_IRQ,
+};
+
+/**
+ * struct cam_fd_hw_mgr_ctx : FD HW Mgr context
+ *
+ * @list            : List pointer used to maintain this context
+ *                    in free, used list
+ * @ctx_index       : Index of this context
+ * @ctx_in_use      : Whether this context is in use
+ * @event_cb        : Event callback pointer to notify cam core context
+ * @cb_priv         : Event callback private pointer
+ * @hw_mgr          : Pointer to hw manager
+ * @get_raw_results : Whether this context needs raw results
+ * @mode            : Mode in which this context runs
+ * @device_index    : HW Device used by this context
+ * @ctx_hw_private  : HW layer's private context pointer for this context
+ * @priority        : Priority of this context
+ */
+struct cam_fd_hw_mgr_ctx {
+	struct list_head               list;
+	uint32_t                       ctx_index;
+	bool                           ctx_in_use;
+	cam_hw_event_cb_func           event_cb;
+	void                          *cb_priv;
+	struct cam_fd_hw_mgr          *hw_mgr;
+	bool                           get_raw_results;
+	enum cam_fd_hw_mode            mode;
+	int32_t                        device_index;
+	void                          *ctx_hw_private;
+	uint32_t                       priority;
+};
+
+/**
+ * struct cam_fd_device : FD HW Device
+ *
+ * @hw_caps          : This FD device's capabilities
+ * @hw_intf          : FD device's interface information
+ * @ready_to_process : Whether this device is ready to process next frame
+ * @num_ctxts        : Number of contexts currently running on this device
+ * @valid            : Whether this device is valid
+ * @lock             : Lock for protecting this device's state
+ */
+struct cam_fd_device {
+	struct cam_fd_hw_caps    hw_caps;
+	struct cam_hw_intf      *hw_intf;
+	bool                     ready_to_process;
+	uint32_t                 num_ctxts;
+	bool                     valid;
+	struct mutex             lock;
+};
+
+/**
+ * struct cam_fd_mgr_frame_request : Frame request information maintained
+ *                                   in HW Mgr layer
+ *
+ * @list                  : List pointer used to maintain this request in
+ *                          free, pending, processing request lists
+ * @request_id            : Request ID corresponding to this request
+ * @hw_ctx                : HW context from which this request is coming
+ * @hw_req_private        : HW layer's private information specific to
+ *                          this request
+ * @hw_update_entries     : HW update entries corresponding to this request
+ *                          which needs to be submitted to HW through CDM
+ * @num_hw_update_entries : Number of HW update entries
+ */
+struct cam_fd_mgr_frame_request {
+	struct list_head               list;
+	uint64_t                       request_id;
+	struct cam_fd_hw_mgr_ctx      *hw_ctx;
+	struct cam_fd_hw_req_private   hw_req_private;
+	struct cam_hw_update_entry     hw_update_entries[CAM_FD_MAX_HW_ENTRIES];
+	uint32_t                       num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_mgr_work_data : HW Mgr work data information
+ *
+ * @type     : Type of work
+ * @irq_type : IRQ type when this work is queued because of irq callback
+ */
+struct cam_fd_mgr_work_data {
+	enum cam_fd_mgr_work_type      type;
+	enum cam_fd_hw_irq_type        irq_type;
+};
+
+/**
+ * struct cam_fd_hw_mgr : FD HW Mgr information
+ *
+ * @free_ctx_list             : List of free contexts available for acquire
+ * @used_ctx_list             : List of contexts that are acquired
+ * @frame_free_list           : List of free frame requests available
+ * @frame_pending_list_high   : List of high priority frame requests pending
+ *                              for processing
+ * @frame_pending_list_normal : List of normal priority frame requests pending
+ *                              for processing
+ * @frame_processing_list     : List of frame requests currently being
+ *                              processed. Generally, at most one request
+ *                              would be present in this list
+ * @hw_mgr_mutex              : Mutex to protect hw mgr data when accessed
+ *                              from multiple threads
+ * @hw_mgr_slock              : Spin lock to protect hw mgr data when accessed
+ *                              from multiple threads
+ * @ctx_mutex                 : Mutex to protect context list
+ * @frame_req_mutex           : Mutex to protect frame request list
+ * @device_iommu              : Device IOMMU information
+ * @cdm_iommu                 : CDM IOMMU information
+ * @hw_device                 : Underlying HW device information
+ * @num_devices               : Number of HW devices available
+ * @raw_results_available     : Whether raw results are available
+ * @supported_modes           : Modes supported by this driver
+ * @ctx_pool                  : Pool of contexts
+ * @frame_req                 : Pool of frame requests
+ * @work                      : Worker handle
+ * @work_data                 : Worker data
+ * @fd_caps                   : FD driver capabilities
+ */
+struct cam_fd_hw_mgr {
+	struct list_head                   free_ctx_list;
+	struct list_head                   used_ctx_list;
+	struct list_head                   frame_free_list;
+	struct list_head                   frame_pending_list_high;
+	struct list_head                   frame_pending_list_normal;
+	struct list_head                   frame_processing_list;
+	struct mutex                       hw_mgr_mutex;
+	spinlock_t                         hw_mgr_slock;
+	struct mutex                       ctx_mutex;
+	struct mutex                       frame_req_mutex;
+	struct cam_iommu_handle            device_iommu;
+	struct cam_iommu_handle            cdm_iommu;
+	struct cam_fd_device               hw_device[CAM_FD_HW_MAX];
+	uint32_t                           num_devices;
+	bool                               raw_results_available;
+	uint32_t                           supported_modes;
+	struct cam_fd_hw_mgr_ctx           ctx_pool[CAM_CTX_MAX];
+	struct cam_fd_mgr_frame_request    frame_req[CAM_CTX_REQ_MAX];
+	struct cam_req_mgr_core_workq     *work;
+	struct cam_fd_mgr_work_data        work_data[CAM_FD_WORKQ_NUM_TASK];
+	struct cam_fd_query_cap_cmd        fd_caps;
+};
+
+#endif /* _CAM_FD_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h
new file mode 100644
index 0000000..58cba4f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_MGR_INTF_H_
+#define _CAM_FD_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr_intf);
+int cam_fd_hw_mgr_deinit(struct device_node *of_node);
+
+#endif /* _CAM_FD_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/Makefile b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/Makefile
new file mode 100644
index 0000000..c3e706d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_dev.o cam_fd_hw_core.o cam_fd_hw_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
new file mode 100644
index 0000000..51fcdcaa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -0,0 +1,1125 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+
+#define CAM_FD_REG_VAL_PAIR_SIZE 256
+
+static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer,
+	uint32_t index, uint32_t reg_offset, uint32_t reg_value)
+{
+	buffer[index++] = reg_offset;
+	buffer[index++] = reg_value;
+
+	CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%08x] Value[0x%08x]",
+		reg_offset, reg_value);
+
+	return index;
+}
+
+static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint32_t cookie)
+{
+	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
+		handle, userdata, status, cookie);
+}
+
+static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+
+	if (hw_static_info->enable_errata_wa.single_irq_only == false) {
+		/* Enable IRQs here */
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			hw_static_info->irq_mask);
+	}
+
+	/* QoS settings */
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.vbif_req_priority,
+		hw_static_info->qos_priority);
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.vbif_priority_level,
+		hw_static_info->qos_priority_level);
+}
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_caps *hw_caps)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+	uint32_t reg_value;
+
+	if (!hw_static_info) {
+		CAM_ERR(CAM_FD, "Invalid hw info data");
+		return -EINVAL;
+	}
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.version);
+	hw_caps->core_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8);
+	hw_caps->core_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4);
+	hw_caps->core_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0);
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.wrapper_version);
+	hw_caps->wrapper_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->wrapper_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->wrapper_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	hw_caps->raw_results_available =
+		hw_static_info->results.raw_results_available;
+	hw_caps->supported_modes = hw_static_info->supported_modes;
+
+	CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d raw:%d",
+		hw_caps->core_version.major, hw_caps->core_version.minor,
+		hw_caps->core_version.incr, hw_caps->wrapper_version.major,
+		hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr,
+		hw_caps->raw_results_available);
+
+	return 0;
+}
+
+static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	long time_left;
+
+	/* Before triggering reset to HW, clear the reset complete */
+	reinit_completion(&fd_core->reset_complete);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x1);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE));
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.sw_reset, 0x1);
+
+	time_left = wait_for_completion_timeout(&fd_core->reset_complete,
+		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+	if (time_left <= 0) {
+		CAM_ERR(CAM_FD, "HW reset wait failed time_left=%ld", time_left);
+		return -EPERM;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x0);
+
+	CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
+
+	return 0;
+}
+
+static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	long time_left;
+
+	/* Before triggering halt to HW, clear halt complete */
+	reinit_completion(&fd_core->halt_complete);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x1);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE));
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.hw_stop, 0x1);
+
+	time_left = wait_for_completion_timeout(&fd_core->halt_complete,
+		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+	if (time_left <= 0) {
+		CAM_ERR(CAM_FD, "HW halt wait failed time_left=%ld", time_left);
+		return -EPERM;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x0);
+
+	CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
+
+	return 0;
+}
+
+static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+	struct cam_fd_ctx_hw_private *ctx_hw_private =
+		prestart_args->ctx_hw_private;
+	uint32_t size, size_required = 0;
+	uint32_t mem_base;
+	uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr;
+	uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE];
+	uint32_t num_cmds = 0;
+	int i;
+	struct cam_fd_hw_io_buffer *io_buf;
+	struct cam_fd_hw_req_private *req_private;
+	uint32_t available_size = prestart_args->size;
+	bool work_buffer_configured = false;
+
+	if (!ctx_hw_private || !cmd_buf_addr) {
+		CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK",
+			ctx_hw_private, cmd_buf_addr);
+		return -EINVAL;
+	}
+
+	if (prestart_args->get_raw_results &&
+		!hw_static_info->results.raw_results_available) {
+		CAM_ERR(CAM_FD, "Raw results not supported %d %d",
+			prestart_args->get_raw_results,
+			hw_static_info->results.raw_results_available);
+		return -EINVAL;
+	}
+
+	req_private = &prestart_args->hw_req_private;
+	req_private->ctx_hw_private = prestart_args->ctx_hw_private;
+	req_private->request_id = prestart_args->request_id;
+	req_private->get_raw_results = prestart_args->get_raw_results;
+	req_private->fd_results = NULL;
+	req_private->raw_results = NULL;
+
+	/* Start preparing CDM register values that KMD has to insert */
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+		hw_static_info->core_regs.control, 0x1);
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+		hw_static_info->core_regs.control, 0x0);
+
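+	/* Program input (image) buffer addresses into FD core via CDM */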
+	for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+		io_buf = &prestart_args->input_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_FD_INPUT_PORT_ID_IMAGE: {
+			if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+				CAM_ERR(CAM_FD,
+					"Invalid reg_val pair size %d, %d",
+					num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+				return -EINVAL;
+			}
+
+			num_cmds = cam_fd_cdm_write_reg_val_pair(
+				reg_val_pair, num_cmds,
+				hw_static_info->core_regs.image_addr,
+				io_buf->io_addr[0]);
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Invalid resource type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
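+	/* Program output buffers: results, raw results, work buffer */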
+	for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+		io_buf = &prestart_args->output_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+			return -EINVAL;
+		}
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
+			uint32_t face_results_offset;
+
+			size_required = hw_static_info->results.max_faces *
+				hw_static_info->results.per_face_entries * 4;
+
+			if (io_buf->io_cfg->planes[0].plane_stride <
+				size_required) {
+				CAM_ERR(CAM_FD, "Invalid results size %d %d",
+					io_buf->io_cfg->planes[0].plane_stride,
+					size_required);
+				return -EINVAL;
+			}
+
+			req_private->fd_results =
+				(struct cam_fd_results *)io_buf->cpu_addr[0];
+
+			face_results_offset =
+				(uint8_t *)&req_private->fd_results->faces[0] -
+				(uint8_t *)req_private->fd_results;
+
+			if (hw_static_info->ro_mode_supported) {
+				if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
+					CAM_ERR(CAM_FD,
+						"Invalid reg_val size %d, %d",
+						num_cmds,
+						CAM_FD_REG_VAL_PAIR_SIZE);
+					return -EINVAL;
+				}
+				/*
+				 * Face data actually starts 16 bytes later in
+				 * the io buffer. Check cam_fd_results.
+				 */
+				num_cmds = cam_fd_cdm_write_reg_val_pair(
+					reg_val_pair, num_cmds,
+					hw_static_info->core_regs.result_addr,
+					io_buf->io_addr[0] +
+					face_results_offset);
+				num_cmds = cam_fd_cdm_write_reg_val_pair(
+					reg_val_pair, num_cmds,
+					hw_static_info->core_regs.ro_mode,
+					0x1);
+
+				req_private->ro_mode_enabled = true;
+			} else {
+				req_private->ro_mode_enabled = false;
+			}
+			break;
+		}
+		case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: {
+			size_required =
+				hw_static_info->results.raw_results_entries *
+				sizeof(uint32_t);
+
+			if (io_buf->io_cfg->planes[0].plane_stride <
+				size_required) {
+				CAM_ERR(CAM_FD, "Invalid results size %d %d",
+					io_buf->io_cfg->planes[0].plane_stride,
+					size_required);
+				return -EINVAL;
+			}
+
+			req_private->raw_results =
+				(uint32_t *)io_buf->cpu_addr[0];
+			break;
+		}
+		case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: {
+			if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+				CAM_ERR(CAM_FD,
+					"Invalid reg_val pair size %d, %d",
+					num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+				return -EINVAL;
+			}
+
+			num_cmds = cam_fd_cdm_write_reg_val_pair(
+				reg_val_pair, num_cmds,
+				hw_static_info->core_regs.work_addr,
+				io_buf->io_addr[0]);
+
+			work_buffer_configured = true;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Invalid resource type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	if (!req_private->fd_results || !work_buffer_configured) {
+		CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d",
+			req_private->fd_results, work_buffer_configured);
+		return -EINVAL;
+	}
+
+	/* First insert CHANGE_BASE command */
+	size = ctx_hw_private->cdm_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d",
+			prestart_args->size, size);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info,
+		((struct cam_fd_soc_private *)soc_info->soc_private)->
+		regbase_index[CAM_FD_REG_CORE]);
+
+	ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+		num_cmds/2);
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "Insufficient size:%d, expected size:%d",
+			available_size, size);
+		return -ENOMEM;
+	}
+	ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	/* Update pre_config_buf_size in bytes */
+	prestart_args->pre_config_buf_size =
+		prestart_args->size - available_size;
+
+	/*
+	 * Currently, no post config commands, we trigger HW start directly
+	 * from start(). Start trigger command can be inserted into CDM
+	 * as post config commands.
+	 */
+	prestart_args->post_config_buf_size = 0;
+
+	CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
+		prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
+		cmd_buf_addr, prestart_args->post_config_buf_size);
+
+	for (i = 0; i < (prestart_args->pre_config_buf_size +
+		prestart_args->post_config_buf_size) / 4; i++)
+		CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i,
+			&prestart_args->cmd_buf_addr[i],
+			prestart_args->cmd_buf_addr[i]);
+
+	return 0;
+}
+
+static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_frame_done_args *frame_done_args)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_fd_hw_req_private *req_private;
+	uint32_t base, face_cnt;
+	uint32_t *buffer;
+	int i;
+
+	spin_lock(&fd_core->spin_lock);
+	if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) ||
+		(fd_core->results_valid == false) ||
+		!fd_core->hw_req_private) {
+		CAM_ERR(CAM_FD,
+			"Invalid state for results state=%d, results=%d %pK",
+			fd_core->core_state, fd_core->results_valid,
+			fd_core->hw_req_private);
+		spin_unlock(&fd_core->spin_lock);
+		return -EINVAL;
+	}
+	fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
+	req_private = fd_core->hw_req_private;
+	spin_unlock(&fd_core->spin_lock);
+
+	/*
+	 * Copy the register value as is into output buffers.
+	 * Whether we copy the output data by reading registers or program
+	 * the output buffer directly to HW must be transparent to UMD.
+	 * In case HW supports writing face count value directly into
+	 * DDR memory in future, these values should match.
+	 */
+	req_private->fd_results->face_count =
+		cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.result_cnt);
+
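+	/* Face count is taken from the lower 6 bits of the result count */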
+	face_cnt = req_private->fd_results->face_count & 0x3F;
+
+	if (face_cnt > hw_static_info->results.max_faces) {
+		CAM_WARN(CAM_FD, "Face count greater than max %d %d",
+			face_cnt, hw_static_info->results.max_faces);
+		face_cnt = hw_static_info->results.max_faces;
+	}
+
+	CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
+		req_private->request_id, face_cnt);
+
+	/*
+	 * We need to read the face data information from registers only
+	 * if one of below is true
+	 * 1. RO mode is not set, i.e. FD HW doesn't write face data into
+	 *    DDR memory
+	 * 2. On the current chipset, results written into DDR memory by FD HW
+	 *    are not guaranteed to be correct
+	 */
+	if (!req_private->ro_mode_enabled ||
+		hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
+		buffer = (uint32_t *)&req_private->fd_results->faces[0];
+		base = hw_static_info->core_regs.results_reg_base;
+
+		/*
+		 * Write register values as is into the face data buffer.
+		 * It is the UMD driver's responsibility to interpret the
+		 * data and extract face properties from the output buffer.
+		 * If the output buffer were programmed directly to HW, KMD
+		 * would have no way to extract face properties and UMD would
+		 * have to extract them anyway, so we follow the same approach
+		 * and keep this transparent to UMD.
+		 */
+		for (i = 0;
+			i < (face_cnt *
+			hw_static_info->results.per_face_entries); i++) {
+			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+				CAM_FD_REG_CORE, base + (i * 0x4));
+			CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer);
+			buffer++;
+		}
+	}
+
+	if (req_private->get_raw_results &&
+		req_private->raw_results &&
+		hw_static_info->results.raw_results_available) {
+		buffer = req_private->raw_results;
+		base = hw_static_info->core_regs.raw_results_reg_base;
+
+		for (i = 0;
+			i < hw_static_info->results.raw_results_entries;
+			i++) {
+			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+				CAM_FD_REG_CORE, base + (i * 0x4));
+			CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer);
+			buffer++;
+		}
+	}
+
+	spin_lock(&fd_core->spin_lock);
+	fd_core->hw_req_private = NULL;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock(&fd_core->spin_lock);
+
+	return 0;
+}
+
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
+	struct cam_fd_core *fd_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_fd_hw_static_info *hw_static_info;
+	uint32_t reg_value;
+	enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE;
+	uint32_t num_irqs = 0;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	fd_core = (struct cam_fd_core *) fd_hw->core_info;
+	soc_info = &fd_hw->soc_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_status);
+
+	CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value);
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) {
+		complete_all(&fd_core->halt_complete);
+		irq_type = CAM_FD_IRQ_HALT_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) {
+		complete_all(&fd_core->reset_complete);
+		irq_type = CAM_FD_IRQ_RESET_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) {
+		complete_all(&fd_core->processing_complete);
+		irq_type = CAM_FD_IRQ_FRAME_DONE;
+		num_irqs++;
+	}
+
+	/*
+	 * We should never get an IRQ with zero or more than one status
+	 * bit set. Validate first to make sure nothing is going wrong.
+	 */
+	if (num_irqs != 1) {
+		CAM_ERR(CAM_FD,
+			"Invalid number of IRQs, value=0x%x, num_irqs=%d",
+			reg_value, num_irqs);
+		return IRQ_NONE;
+	}
+
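+	/* Clear the wrapper IRQ status bits before notifying HW Mgr */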
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_clear,
+		hw_static_info->irq_mask);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/*
+		 * Do not send HALT IRQ callback to Hw Mgr,
+		 * a reset would always follow
+		 */
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&fd_core->spin_lock);
+	/* Do not change state to IDLE on HALT IRQ. Reset must follow halt */
+	if ((irq_type == CAM_FD_IRQ_RESET_DONE) ||
+		(irq_type == CAM_FD_IRQ_FRAME_DONE)) {
+
+		fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+		if (irq_type == CAM_FD_IRQ_FRAME_DONE)
+			fd_core->results_valid = true;
+
+		CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d",
+			irq_type, fd_core->core_state);
+	}
+	spin_unlock(&fd_core->spin_lock);
+
+	if (fd_core->irq_cb.cam_fd_hw_mgr_cb)
+		fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data,
+			irq_type);
+
+	return IRQ_HANDLED;
+}
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_caps *fd_hw_caps =
+		(struct cam_fd_hw_caps *)get_hw_cap_args;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	*fd_hw_caps = fd_core->hw_caps;
+
+	CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d",
+		fd_hw_caps->core_version.major,
+		fd_hw_caps->core_version.minor,
+		fd_hw_caps->wrapper_version.major,
+		fd_hw_caps->wrapper_version.minor,
+		fd_hw_caps->supported_modes,
+		fd_hw_caps->raw_results_available);
+
+	return 0;
+}
+
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_init_args *init_args =
+		(struct cam_fd_hw_init_args *)init_hw_args;
+	int rc = 0;
+
+	if (!fd_hw || !init_args) {
+		CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_init_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	mutex_lock(&fd_hw->hw_mutex);
+	CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count);
+
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		mutex_unlock(&fd_hw->hw_mutex);
+		goto cdm_streamon;
+	}
+
+	rc = cam_fd_soc_enable_resources(&fd_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc);
+		goto unlock_return;
+	}
+
+	rc = cam_fd_hw_reset(hw_priv, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
+		goto disable_soc;
+	}
+
+	cam_fd_hw_util_enable_power_on_settings(fd_hw);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	fd_hw->open_count++;
+	CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
+
+	mutex_unlock(&fd_hw->hw_mutex);
+
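+	/*
+	 * CDM stream on is per context; do it even if the FD HW was
+	 * already powered up by an earlier init call.
+	 */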
+cdm_streamon:
+	if (init_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			init_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD, "CDM StreamOn fail: handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+
+disable_soc:
+	if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Error in disable soc resources");
+unlock_return:
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_deinit_args *deinit_args =
+		(struct cam_fd_hw_deinit_args *)deinit_hw_args;
+	int rc = 0;
+
+	if (!fd_hw || !deinit_hw_args) {
+		CAM_ERR(CAM_FD, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_deinit_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	if (deinit_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			deinit_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+			return rc;
+		}
+	}
+
+	mutex_lock(&fd_hw->hw_mutex);
+
+	if (fd_hw->open_count == 0) {
+		mutex_unlock(&fd_hw->hw_mutex);
+		CAM_ERR(CAM_FD, "Error Unbalanced deinit");
+		return -EFAULT;
+	}
+
+	fd_hw->open_count--;
+	CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
+
+	if (fd_hw->open_count) {
+		rc = 0;
+		goto unlock_return;
+	}
+
+	rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+
+unlock_return:
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	spin_lock(&fd_core->spin_lock);
+	if (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock(&fd_core->spin_lock);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock(&fd_core->spin_lock);
+
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	spin_lock(&fd_core->spin_lock);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock(&fd_core->spin_lock);
+
+	return rc;
+}
+
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_fd_hw_cmd_start_args *start_args =
+		(struct cam_fd_hw_cmd_start_args *)hw_start_args;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv,
+			start_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_cmd_start_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	spin_lock(&fd_core->spin_lock);
+	if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) {
+		CAM_ERR(CAM_FD, "Cannot start in %d state",
+			fd_core->core_state);
+		spin_unlock(&fd_core->spin_lock);
+		return -EINVAL;
+	}
+
+	/*
+	 * We are about to start FD HW processing, save the request
+	 * private data which is being processed by HW. Once the frame
+	 * processing is finished, process_cmd(FRAME_DONE) should be called
+	 * with same hw_req_private as input.
+	 */
+	fd_core->hw_req_private = start_args->hw_req_private;
+	fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING;
+	fd_core->results_valid = false;
+	spin_unlock(&fd_core->spin_lock);
+
+	ctx_hw_private = start_args->ctx_hw_private;
+
+	/* Before starting HW process, clear processing complete */
+	reinit_completion(&fd_core->processing_complete);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE));
+	}
+
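+	/* Submit the prepared hw update entries to CDM as BL commands */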
+	if (start_args->num_hw_update_entries > 0) {
+		struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd;
+		struct cam_hw_update_entry *cmd;
+		int i;
+
+		cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i < start_args->num_hw_update_entries; i++) {
+			cmd = (start_args->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed to submit cdm commands, rc=%d", rc);
+			goto error;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid number of hw update entries");
+		rc = -EINVAL;
+		goto error;
+	}
+
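+	/* Trigger FD core to start processing the programmed frame */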
+	cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x2);
+
+	return 0;
+error:
+	spin_lock(&fd_core->spin_lock);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock(&fd_core->spin_lock);
+
+	return rc;
+}
+
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	spin_lock(&fd_core->spin_lock);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock(&fd_core->spin_lock);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock(&fd_core->spin_lock);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	/* HALT must be followed by RESET */
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	spin_lock(&fd_core->spin_lock);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock(&fd_core->spin_lock);
+
+	return rc;
+}
+
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_reserve_args *reserve_args =
+		(struct cam_fd_hw_reserve_args *)hw_reserve_args;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_cdm_bl_request *cdm_cmd;
+	int i;
+
+	if (!fd_hw || !reserve_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_reserve_args));
+		return -EINVAL;
+	}
+
+	cdm_cmd = kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_FD_MAX_HW_ENTRIES - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!cdm_cmd)
+		return -ENOMEM;
+
+	ctx_hw_private = kzalloc(sizeof(struct cam_fd_ctx_hw_private),
+		GFP_KERNEL);
+	if (!ctx_hw_private) {
+		kfree(cdm_cmd);
+		return -ENOMEM;
+	}
+
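+	/* Acquire a virtual CDM for this context's register programming */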
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "fd", sizeof("fd"));
+	cdm_acquire.cell_index = fd_hw->soc_info.index;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ctx_hw_private;
+	cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map;
+	for (i = 0; i < fd_hw->soc_info.num_reg_map; i++)
+		cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to acquire the CDM HW");
+		goto error;
+	}
+
+	ctx_hw_private->hw_ctx = reserve_args->hw_ctx;
+	ctx_hw_private->fd_hw = fd_hw;
+	ctx_hw_private->mode = reserve_args->mode;
+	ctx_hw_private->cdm_handle = cdm_acquire.handle;
+	ctx_hw_private->cdm_ops = cdm_acquire.ops;
+	ctx_hw_private->cdm_cmd = cdm_cmd;
+
+	reserve_args->ctx_hw_private = ctx_hw_private;
+
+	CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x",
+		ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode,
+		ctx_hw_private->cdm_handle);
+
+	return 0;
+error:
+	kfree(ctx_hw_private);
+	kfree(cdm_cmd);
+	return rc;
+}
+
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_release_args *release_args =
+		(struct cam_fd_hw_release_args *)hw_release_args;
+
+	if (!fd_hw || !release_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_release_args));
+		return -EINVAL;
+	}
+
+	ctx_hw_private =
+		(struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
+
+	rc = cam_cdm_release(ctx_hw_private->cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
+			ctx_hw_private->cdm_handle, rc);
+
+	kfree(ctx_hw_private);
+	release_args->ctx_hw_private = NULL;
+
+	return 0;
+}
+
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_FD_HW_CMD_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
+			cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
+		struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
+		struct cam_fd_core *fd_core =
+			(struct cam_fd_core *)fd_hw->core_info;
+
+		if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
+		fd_core->irq_cb.cam_fd_hw_mgr_cb =
+			irq_cb_args->cam_fd_hw_mgr_cb;
+		fd_core->irq_cb.data = irq_cb_args->data;
+		rc = 0;
+		break;
+	}
+	case CAM_FD_HW_CMD_PRESTART: {
+		struct cam_fd_hw_cmd_prestart_args *prestart_args;
+
+		if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
+		break;
+	}
+	case CAM_FD_HW_CMD_FRAME_DONE: {
+		struct cam_fd_hw_frame_done_args *cmd_frame_results;
+
+		if (sizeof(struct cam_fd_hw_frame_done_args) !=
+			arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_frame_results =
+			(struct cam_fd_hw_frame_done_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_frame_done(fd_hw,
+			cmd_frame_results);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
new file mode 100644
index 0000000..35bf6b6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
@@ -0,0 +1,244 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_CORE_H_
+#define _CAM_FD_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_fd.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_soc.h"
+
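+/*
+ * Helpers to convert between a cam_fd_hw_irq_type value and its bit position
+ * in an IRQ mask/status value.
+ */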
+#define CAM_FD_IRQ_TO_MASK(irq)        (1 << (irq))
+#define CAM_FD_MASK_TO_IRQ(mask, irq)  ((mask) >> (irq))
+
+#define CAM_FD_HW_HALT_RESET_TIMEOUT   3000
+
+/**
+ * enum cam_fd_core_state - FD Core internal states
+ *
+ * @CAM_FD_CORE_STATE_POWERDOWN       : Indicates FD core is powered down
+ * @CAM_FD_CORE_STATE_IDLE            : Indicates FD HW is in idle state.
+ *                                      Core can be in this state when it is
+ *                                      ready to process frames or when
+ *                                      processing is finished and results are
+ *                                      available
+ * @CAM_FD_CORE_STATE_PROCESSING      : Indicates FD core is processing frame
+ * @CAM_FD_CORE_STATE_READING_RESULTS : Indicates results are being read from
+ *                                      FD core
+ * @CAM_FD_CORE_STATE_RESET_PROGRESS  : Indicates FD core is in reset state
+ */
+enum cam_fd_core_state {
+	CAM_FD_CORE_STATE_POWERDOWN,
+	CAM_FD_CORE_STATE_IDLE,
+	CAM_FD_CORE_STATE_PROCESSING,
+	CAM_FD_CORE_STATE_READING_RESULTS,
+	CAM_FD_CORE_STATE_RESET_PROGRESS,
+};
+
+/**
+ * struct cam_fd_ctx_hw_private : HW private information for a specific hw ctx.
+ *                                This information is populated by HW layer on
+ *                                reserve() and given back to HW Mgr as private
+ *                                data for the hw context. This private data
+ *                                has to be passed back by the HW Mgr layer
+ *                                in subsequent HW layer calls
+ *
+ * @hw_ctx           : Corresponding hw_ctx pointer
+ * @fd_hw            : FD HW info pointer
+ * @cdm_handle       : CDM Handle for this context
+ * @cdm_ops          : CDM Ops
+ * @cdm_cmd          : CDM command pointer
+ * @mode             : Mode in which this context is running
+ * @curr_req_private : Current Request information
+ *
+ */
+struct cam_fd_ctx_hw_private {
+	void                          *hw_ctx;
+	struct cam_hw_info            *fd_hw;
+	uint32_t                       cdm_handle;
+	struct cam_cdm_utils_ops      *cdm_ops;
+	struct cam_cdm_bl_request     *cdm_cmd;
+	enum cam_fd_hw_mode            mode;
+	struct cam_fd_hw_req_private  *curr_req_private;
+};
+
+/**
+ * struct cam_fd_core_regs : FD HW Core register offsets info
+ *
+ * @version              : Offset of version register
+ * @control              : Offset of control register
+ * @result_cnt           : Offset of result count register
+ * @result_addr          : Offset of results address register
+ * @image_addr           : Offset of image address register
+ * @work_addr            : Offset of work address register
+ * @ro_mode              : Offset of ro_mode register
+ * @results_reg_base     : Offset of results_reg_base register
+ * @raw_results_reg_base : Offset of raw_results_reg_base register
+ *
+ */
+struct cam_fd_core_regs {
+	uint32_t       version;
+	uint32_t       control;
+	uint32_t       result_cnt;
+	uint32_t       result_addr;
+	uint32_t       image_addr;
+	uint32_t       work_addr;
+	uint32_t       ro_mode;
+	uint32_t       results_reg_base;
+	uint32_t       raw_results_reg_base;
+};
+
+/**
+ * struct cam_fd_wrapper_regs : FD HW Wrapper register offsets info
+ *
+ * @wrapper_version     : Offset of wrapper_version register
+ * @cgc_disable         : Offset of cgc_disable register
+ * @hw_stop             : Offset of hw_stop register
+ * @sw_reset            : Offset of sw_reset register
+ * @vbif_req_priority   : Offset of vbif_req_priority register
+ * @vbif_priority_level : Offset of vbif_priority_level register
+ * @vbif_done_status    : Offset of vbif_done_status register
+ * @irq_mask            : Offset of irq mask register
+ * @irq_status          : Offset of irq status register
+ * @irq_clear           : Offset of irq clear register
+ *
+ */
+struct cam_fd_wrapper_regs {
+	uint32_t       wrapper_version;
+	uint32_t       cgc_disable;
+	uint32_t       hw_stop;
+	uint32_t       sw_reset;
+	uint32_t       vbif_req_priority;
+	uint32_t       vbif_priority_level;
+	uint32_t       vbif_done_status;
+	uint32_t       irq_mask;
+	uint32_t       irq_status;
+	uint32_t       irq_clear;
+};
+
+/**
+ * struct cam_fd_hw_errata_wa : FD HW Errata workaround enable/disable info
+ *
+ * @single_irq_only         : Whether to enable only one irq at any time
+ * @ro_mode_enable_always   : Whether to enable ro mode always
+ * @ro_mode_results_invalid : Whether results written directly into output
+ *                            memory by HW are to be treated as invalid
+ */
+struct cam_fd_hw_errata_wa {
+	bool   single_irq_only;
+	bool   ro_mode_enable_always;
+	bool   ro_mode_results_invalid;
+};
+
+/**
+ * struct cam_fd_hw_results_prop : FD HW Results properties
+ *
+ * @max_faces             : Maximum number of faces supported
+ * @per_face_entries      : Number of registers with properties for each face
+ * @raw_results_entries   : Number of raw results entries for the full search
+ * @raw_results_available : Whether raw results available on this HW
+ *
+ */
+struct cam_fd_hw_results_prop {
+	uint32_t       max_faces;
+	uint32_t       per_face_entries;
+	uint32_t       raw_results_entries;
+	bool           raw_results_available;
+};
+
+/**
+ * struct cam_fd_hw_static_info : FD HW information based on HW version
+ *
+ * @core_version       : Core version of FD HW
+ * @wrapper_version    : Wrapper version of FD HW
+ * @core_regs          : Register offset information for core registers
+ * @wrapper_regs       : Register offset information for wrapper registers
+ * @results            : Information about results available on this HW
+ * @enable_errata_wa   : Errata workaround information
+ * @irq_mask           : IRQ mask to enable
+ * @qos_priority       : QoS priority setting for this chipset
+ * @qos_priority_level : QoS priority level setting for this chipset
+ * @supported_modes    : Supported HW modes on this HW version
+ * @ro_mode_supported  : Whether RO mode is supported on this HW
+ *
+ */
+struct cam_fd_hw_static_info {
+	struct cam_hw_version          core_version;
+	struct cam_hw_version          wrapper_version;
+	struct cam_fd_core_regs        core_regs;
+	struct cam_fd_wrapper_regs     wrapper_regs;
+	struct cam_fd_hw_results_prop  results;
+	struct cam_fd_hw_errata_wa     enable_errata_wa;
+	uint32_t                       irq_mask;
+	uint32_t                       qos_priority;
+	uint32_t                       qos_priority_level;
+	uint32_t                       supported_modes;
+	bool                           ro_mode_supported;
+};
+
+/**
+ * struct cam_fd_core : FD HW core data structure
+ *
+ * @hw_static_info      : HW information specific to version
+ * @hw_caps             : HW capabilities
+ * @core_state          : Current HW state
+ * @processing_complete : Whether processing is complete
+ * @reset_complete      : Whether reset is complete
+ * @halt_complete       : Whether halt is complete
+ * @hw_req_private      : Request currently being processed by HW
+ * @results_valid       : Whether HW frame results are available to read
+ * @spin_lock           : Spinlock to protect shared data in the hw layer
+ * @irq_cb              : HW Manager callback information
+ *
+ */
+struct cam_fd_core {
+	struct cam_fd_hw_static_info   *hw_static_info;
+	struct cam_fd_hw_caps           hw_caps;
+	enum cam_fd_core_state          core_state;
+	struct completion               processing_complete;
+	struct completion               reset_complete;
+	struct completion               halt_complete;
+	struct cam_fd_hw_req_private   *hw_req_private;
+	bool                            results_valid;
+	spinlock_t                      spin_lock;
+	struct cam_fd_hw_cmd_set_irq_cb irq_cb;
+};
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_caps *hw_caps);
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data);
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size);
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size);
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size);
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size);
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size);
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_fd_hw_read(void *hw_priv, void *read_args, uint32_t arg_size);
+int cam_fd_hw_write(void *hw_priv, void *write_args, uint32_t arg_size);
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+#endif /* _CAM_FD_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c
new file mode 100644
index 0000000..803da76
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c
@@ -0,0 +1,223 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_v41.h"
+
+static int cam_fd_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *fd_hw;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_fd_core *fd_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_fd_hw_static_info *hw_static_info = NULL;
+	int rc = 0;
+	struct cam_fd_hw_init_args init_args;
+	struct cam_fd_hw_deinit_args deinit_args;
+
+	fd_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!fd_hw_intf)
+		return -ENOMEM;
+
+	fd_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!fd_hw) {
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+
+	fd_core = kzalloc(sizeof(struct cam_fd_core), GFP_KERNEL);
+	if (!fd_core) {
+		kfree(fd_hw);
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+
+	fd_hw_intf->hw_priv = fd_hw;
+	fd_hw->core_info = fd_core;
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_hw->soc_info.pdev = pdev;
+	fd_hw->soc_info.dev = &pdev->dev;
+	fd_hw->soc_info.dev_name = pdev->name;
+	fd_hw->open_count = 0;
+	mutex_init(&fd_hw->hw_mutex);
+	spin_lock_init(&fd_hw->hw_lock);
+	init_completion(&fd_hw->hw_complete);
+
+	spin_lock_init(&fd_core->spin_lock);
+	init_completion(&fd_core->processing_complete);
+	init_completion(&fd_core->halt_complete);
+	init_completion(&fd_core->reset_complete);
+
+	fd_hw_intf->hw_ops.get_hw_caps = cam_fd_hw_get_hw_caps;
+	fd_hw_intf->hw_ops.init = cam_fd_hw_init;
+	fd_hw_intf->hw_ops.deinit = cam_fd_hw_deinit;
+	fd_hw_intf->hw_ops.reset = cam_fd_hw_reset;
+	fd_hw_intf->hw_ops.reserve = cam_fd_hw_reserve;
+	fd_hw_intf->hw_ops.release = cam_fd_hw_release;
+	fd_hw_intf->hw_ops.start = cam_fd_hw_start;
+	fd_hw_intf->hw_ops.stop = cam_fd_hw_halt_reset;
+	fd_hw_intf->hw_ops.read = NULL;
+	fd_hw_intf->hw_ops.write = NULL;
+	fd_hw_intf->hw_ops.process_cmd = cam_fd_hw_process_cmd;
+	fd_hw_intf->hw_type = CAM_HW_FD;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_FD, "No of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto free_memory;
+	}
+	hw_static_info = (struct cam_fd_hw_static_info *)match_dev->data;
+	fd_core->hw_static_info = hw_static_info;
+
+	CAM_DBG(CAM_FD, "HW Static Info : version core[%d.%d] wrapper[%d.%d]",
+		hw_static_info->core_version.major,
+		hw_static_info->core_version.minor,
+		hw_static_info->wrapper_version.major,
+		hw_static_info->wrapper_version.minor);
+
+	rc = cam_fd_soc_init_resources(&fd_hw->soc_info, cam_fd_hw_irq, fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to init soc, rc=%d", rc);
+		goto free_memory;
+	}
+
+	fd_hw_intf->hw_idx = fd_hw->soc_info.index;
+
+	memset(&init_args, 0x0, sizeof(init_args));
+	memset(&deinit_args, 0x0, sizeof(deinit_args));
+	rc = cam_fd_hw_init(fd_hw, &init_args, sizeof(init_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to hw init, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_fd_hw_util_get_hw_caps(fd_hw, &fd_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to get hw caps, rc=%d", rc);
+		goto deinit_hw;
+	}
+
+	rc = cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to deinit hw, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	platform_set_drvdata(pdev, fd_hw_intf);
+	CAM_DBG(CAM_FD, "FD-%d probe successful", fd_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_hw:
+	if (cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args)))
+		CAM_ERR(CAM_FD, "Failed in hw deinit");
+deinit_platform_res:
+	if (cam_fd_soc_deinit_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Failed in soc deinit");
+	mutex_destroy(&fd_hw->hw_mutex);
+free_memory:
+	kfree(fd_hw);
+	kfree(fd_hw_intf);
+	kfree(fd_core);
+
+	return rc;
+}
+
+static int cam_fd_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_hw_info *fd_hw;
+	struct cam_fd_core *fd_core;
+
+	fd_hw_intf = platform_get_drvdata(pdev);
+	if (!fd_hw_intf) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw_intf from pdev");
+		return -EINVAL;
+	}
+
+	fd_hw = fd_hw_intf->hw_priv;
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw from fd_hw_intf");
+		rc = -ENODEV;
+		goto free_fd_hw_intf;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	if (!fd_core) {
+		CAM_ERR(CAM_FD, "Invalid fd_core from fd_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	kfree(fd_core);
+
+deinit_platform_res:
+	rc = cam_fd_soc_deinit_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in FD soc deinit, rc=%d", rc);
+
+	mutex_destroy(&fd_hw->hw_mutex);
+	kfree(fd_hw);
+
+free_fd_hw_intf:
+	kfree(fd_hw_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_hw_dt_match[] = {
+	{
+		.compatible = "qcom,fd41",
+		.data = &cam_fd_wrapper120_core410_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_fd_hw_dt_match);
+
+static struct platform_driver cam_fd_hw_driver = {
+	.probe = cam_fd_hw_dev_probe,
+	.remove = cam_fd_hw_dev_remove,
+	.driver = {
+		.name = "cam_fd_hw",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_hw_dt_match,
+	},
+};
+
+static int __init cam_fd_hw_init_module(void)
+{
+	return platform_driver_register(&cam_fd_hw_driver);
+}
+
+static void __exit cam_fd_hw_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_hw_driver);
+}
+
+module_init(cam_fd_hw_init_module);
+module_exit(cam_fd_hw_exit_module);
+MODULE_DESCRIPTION("CAM FD HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
new file mode 100644
index 0000000..aae7648
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
@@ -0,0 +1,289 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_INTF_H_
+#define _CAM_FD_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_fd.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+#define CAM_FD_MAX_IO_BUFFERS        5
+#define CAM_FD_MAX_HW_ENTRIES        5
+
+/**
+ * enum cam_fd_hw_type - Enum for FD HW type
+ *
+ * @CAM_HW_FD : FaceDetection HW type
+ */
+enum cam_fd_hw_type {
+	CAM_HW_FD,
+};
+
+/**
+ * enum cam_fd_hw_mode - Mode in which HW can run
+ *
+ * @CAM_FD_MODE_FACEDETECTION : Face Detection mode in which face search
+ *                              is done on the given frame
+ * @CAM_FD_MODE_PYRAMID       : Pyramid mode where a pyramid image is generated
+ *                              from an input image
+ */
+enum cam_fd_hw_mode {
+	CAM_FD_MODE_FACEDETECTION    = 0x1,
+	CAM_FD_MODE_PYRAMID          = 0x2,
+};
+
+/**
+ * enum cam_fd_priority - FD priority levels
+ *
+ * @CAM_FD_PRIORITY_HIGH   : Indicates a high priority client; the driver
+ *                           prioritizes frame requests from HIGH priority
+ *                           contexts over those with normal priority
+ * @CAM_FD_PRIORITY_NORMAL : Indicates normal priority client
+ */
+enum cam_fd_priority {
+	CAM_FD_PRIORITY_HIGH         = 0x0,
+	CAM_FD_PRIORITY_NORMAL,
+};
+
+/**
+ * enum cam_fd_hw_irq_type - FD HW IRQ types
+ *
+ * @CAM_FD_IRQ_FRAME_DONE : Indicates frame processing is finished
+ * @CAM_FD_IRQ_HALT_DONE  : Indicates HW halt is finished
+ * @CAM_FD_IRQ_RESET_DONE : Indicates HW reset is finished
+ */
+enum cam_fd_hw_irq_type {
+	CAM_FD_IRQ_FRAME_DONE,
+	CAM_FD_IRQ_HALT_DONE,
+	CAM_FD_IRQ_RESET_DONE,
+};
+
+/**
+ * enum cam_fd_hw_cmd_type - FD HW layer custom commands
+ *
+ * @CAM_FD_HW_CMD_PRESTART          : Command to process pre-start settings
+ * @CAM_FD_HW_CMD_FRAME_DONE        : Command to process frame done settings
+ * @CAM_FD_HW_CMD_UPDATE_SOC        : Command to process soc update
+ * @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback
+ * @CAM_FD_HW_CMD_MAX               : Indicates max cmd
+ */
+enum cam_fd_hw_cmd_type {
+	CAM_FD_HW_CMD_PRESTART,
+	CAM_FD_HW_CMD_FRAME_DONE,
+	CAM_FD_HW_CMD_UPDATE_SOC,
+	CAM_FD_HW_CMD_REGISTER_CALLBACK,
+	CAM_FD_HW_CMD_MAX,
+};
+
+/**
+ * struct cam_fd_hw_io_buffer : FD HW IO Buffer information
+ *
+ * @valid    : Whether this IO Buf configuration is valid
+ * @io_cfg   : IO Configuration information
+ * @num_buf  : Number of planes in the io_addr, cpu_addr arrays
+ * @io_addr  : Array of IO address information for planes
+ * @cpu_addr : Array of CPU address information for planes
+ */
+struct cam_fd_hw_io_buffer {
+	bool                   valid;
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t               num_buf;
+	uint64_t               io_addr[CAM_PACKET_MAX_PLANES];
+	uint64_t               cpu_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_fd_hw_req_private : FD HW layer's private information
+ *               specific to a request
+ *
+ * @ctx_hw_private  : FD HW layer's ctx specific private data
+ * @request_id      : Request ID corresponding to this private information
+ * @get_raw_results : Whether to get raw results for this request
+ * @ro_mode_enabled : Whether RO mode is enabled for this request
+ * @fd_results      : Pointer to save face detection results
+ * @raw_results     : Pointer to save face detection raw results
+ */
+struct cam_fd_hw_req_private {
+	void                  *ctx_hw_private;
+	uint64_t               request_id;
+	bool                   get_raw_results;
+	bool                   ro_mode_enabled;
+	struct cam_fd_results *fd_results;
+	uint32_t              *raw_results;
+};
+
+/**
+ * struct cam_fd_hw_reserve_args : Reserve args for this HW context
+ *
+ * @hw_ctx         : HW context for which reserve is requested
+ * @mode           : Mode for which this reserve is requested
+ * @ctx_hw_private : Pointer to save HW layer's private information specific
+ *                   to this hw context. This has to be passed back in
+ *                   subsequent HW layer calls
+ */
+struct cam_fd_hw_reserve_args {
+	void                  *hw_ctx;
+	enum cam_fd_hw_mode    mode;
+	void                  *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_release_args : Release args for this HW context
+ *
+ * @hw_ctx         : HW context for which release is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_release_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
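+
+/*
+ * Illustrative sketch only: the HW Manager reserves the FD HW for a context
+ * and later releases it, passing back the ctx_hw_private it was handed on
+ * reserve. The caller name and hw_ctx pointer below are hypothetical.
+ */
+#if 0
+static int example_fd_reserve_release(struct cam_hw_intf *hw_intf,
+	void *hw_ctx)
+{
+	struct cam_fd_hw_reserve_args reserve_args = {
+		.hw_ctx = hw_ctx,
+		.mode   = CAM_FD_MODE_FACEDETECTION,
+	};
+	struct cam_fd_hw_release_args release_args;
+	int rc;
+
+	rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &reserve_args,
+		sizeof(reserve_args));
+	if (rc)
+		return rc;
+
+	/* submit frames through prestart/start/frame done as needed */
+
+	release_args.hw_ctx = hw_ctx;
+	release_args.ctx_hw_private = reserve_args.ctx_hw_private;
+	return hw_intf->hw_ops.release(hw_intf->hw_priv, &release_args,
+		sizeof(release_args));
+}
+#endif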
+
+/**
+ * struct cam_fd_hw_init_args : Init args for this HW context
+ *
+ * @hw_ctx         : HW context for which init is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_init_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_deinit_args : Deinit args for this HW context
+ *
+ * @hw_ctx         : HW context for which deinit is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_deinit_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_prestart_args : Prestart command args
+ *
+ * @hw_ctx               : HW context which submitted this prestart
+ * @ctx_hw_private       : HW layer's private information specific to
+ *                         this hw context
+ * @request_id           : Request ID corresponding to this pre-start command
+ * @get_raw_results      : Whether to get raw results for this request
+ * @input_buf            : Input IO Buffer information for this request
+ * @output_buf           : Output IO Buffer information for this request
+ * @cmd_buf_addr         : Command buffer address to fill kmd commands
+ * @size                 : Size available in command buffer
+ * @pre_config_buf_size  : Size of the command buffer area filled by KMD with
+ *                         commands to be inserted before the UMD commands
+ * @post_config_buf_size : Size of the command buffer area filled by KMD with
+ *                         commands to be inserted after the UMD commands
+ * @hw_req_private       : HW layer's private information specific to
+ *                         this request
+ */
+struct cam_fd_hw_cmd_prestart_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	bool                          get_raw_results;
+	struct cam_fd_hw_io_buffer    input_buf[CAM_FD_MAX_IO_BUFFERS];
+	struct cam_fd_hw_io_buffer    output_buf[CAM_FD_MAX_IO_BUFFERS];
+	uint32_t                     *cmd_buf_addr;
+	uint32_t                      size;
+	uint32_t                      pre_config_buf_size;
+	uint32_t                      post_config_buf_size;
+	struct cam_fd_hw_req_private  hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_start_args : Start command args
+ *
+ * @hw_ctx                : HW context which is submitting the start command
+ * @ctx_hw_private        : HW layer's private information specific to
+ *                          this hw context
+ * @hw_req_private        : HW layer's private information specific to
+ *                          this request
+ * @hw_update_entries     : HW update entries corresponding to this request
+ * @num_hw_update_entries : Number of hw update entries
+ */
+struct cam_fd_hw_cmd_start_args {
+	void                          *hw_ctx;
+	void                          *ctx_hw_private;
+	struct cam_fd_hw_req_private  *hw_req_private;
+	struct cam_hw_update_entry    *hw_update_entries;
+	uint32_t                       num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_hw_stop_args : Stop command args
+ *
+ * @hw_ctx         : HW context which is submitting the stop command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id     : Request ID that needs to be stopped
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_stop_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_frame_done_args : Frame done command args
+ *
+ * @hw_ctx         : HW context which is submitting the frame done request
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id     : Request ID corresponding to the completed frame
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_frame_done_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_reset_args : Reset command args
+ *
+ * @hw_ctx         : HW context which is submitting the reset command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_reset_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_set_irq_cb : Set IRQ callback command args
+ *
+ * @cam_fd_hw_mgr_cb : HW Mgr's callback pointer
+ * @data             : HW Mgr's private data
+ */
+struct cam_fd_hw_cmd_set_irq_cb {
+	int (*cam_fd_hw_mgr_cb)(void *data, enum cam_fd_hw_irq_type irq_type);
+	void *data;
+};
+
+#endif /* _CAM_FD_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
new file mode 100644
index 0000000..9045dc1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
@@ -0,0 +1,285 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+
+static void cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+	enum cam_camnoc_irq_type event_type, uint32_t event_data)
+{
+	CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, event=%d, event_data=%d",
+		handle, userdata, event_type, event_data);
+}
+
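+/*
+ * Map the "fd_core" and "fd_wrapper" register blocks (as named in the SOC
+ * mem block list, typically taken from the DT reg-names entries) to the
+ * CAM_FD_REG_CORE/CAM_FD_REG_WRAPPER indices used by the register access
+ * helpers below.
+ */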
+static int cam_fd_hw_soc_util_setup_regbase_indices(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	uint32_t index;
+	int rc, i;
+
+	for (i = 0; i < CAM_FD_REG_MAX; i++)
+		soc_private->regbase_index[i] = -1;
+
+	if ((soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) ||
+		(soc_info->num_mem_block != CAM_FD_REG_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "fd_core", &index);
+	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+		soc_private->regbase_index[CAM_FD_REG_CORE] = index;
+	} else {
+		CAM_ERR(CAM_FD, "regbase not found for FD_CORE, rc=%d, %d %d",
+			rc, index, CAM_FD_REG_MAX);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "fd_wrapper", &index);
+	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+		soc_private->regbase_index[CAM_FD_REG_WRAPPER] = index;
+	} else {
+		CAM_ERR(CAM_FD, "regbase not found FD_WRAPPER, rc=%d, %d %d",
+			rc, index, CAM_FD_REG_MAX);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "Reg indices : CORE=%d, WRAPPER=%d",
+		soc_private->regbase_index[CAM_FD_REG_CORE],
+		soc_private->regbase_index[CAM_FD_REG_WRAPPER]);
+
+	return 0;
+}
+
+static int cam_fd_soc_set_clk_flags(struct cam_hw_soc_info *soc_info)
+{
+	int i, rc = 0;
+
+	if (soc_info->num_clk > CAM_SOC_MAX_CLK) {
+		CAM_ERR(CAM_FD, "Invalid num clk %d", soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	/* set memcore and mem periphery logic flags to 0 */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if ((strcmp(soc_info->clk_name[i], "fd_core_clk") == 0) ||
+			(strcmp(soc_info->clk_name[i], "fd_core_uar_clk") ==
+			0)) {
+			rc = cam_soc_util_set_clk_flags(soc_info, i,
+				CLKFLAG_NORETAIN_MEM);
+			if (rc)
+				CAM_ERR(CAM_FD,
+					"Failed in NORETAIN_MEM i=%d, rc=%d",
+					i, rc);
+
+			rc = cam_soc_util_set_clk_flags(soc_info, i,
+				CLKFLAG_NORETAIN_PERIPH);
+			if (rc)
+				CAM_ERR(CAM_FD,
+					"Failed in NORETAIN_PERIPH i=%d, rc=%d",
+					i, rc);
+		}
+	}
+
+	return rc;
+}
+
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int32_t reg_index = soc_private->regbase_index[reg_base];
+
+	CAM_DBG(CAM_FD, "FD_REG_WRITE: Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_offset, reg_value);
+
+	cam_io_w_mb(reg_value,
+		soc_info->reg_map[reg_index].mem_base + reg_offset);
+}
+
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int32_t reg_index = soc_private->regbase_index[reg_base];
+	uint32_t reg_value;
+
+	reg_value = cam_io_r_mb(
+		soc_info->reg_map[reg_index].mem_base + reg_offset);
+
+	CAM_DBG(CAM_FD, "FD_REG_READ: Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_offset, reg_value);
+
+	return reg_value;
+}
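+
+/*
+ * Illustrative sketch only: callers combine these helpers with the
+ * per-version register offsets in cam_fd_hw_static_info, e.g. to program the
+ * input image address on the FD core. The function name below is
+ * hypothetical.
+ */
+#if 0
+static void example_fd_program_image_addr(struct cam_hw_info *fd_hw,
+	uint64_t image_addr)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_CORE,
+		fd_core->hw_static_info->core_regs.image_addr,
+		(uint32_t)image_addr);
+}
+#endif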
+
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private = soc_info->soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = 7200000;
+	axi_vote.uncompressed_bw = 7200000;
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in CPAS START, rc=%d", rc);
+		return -EFAULT;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+		true);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Enable platform resources failed, rc=%d", rc);
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	if (cam_cpas_stop(soc_private->cpas_handle))
+		CAM_ERR(CAM_FD, "Error in CPAS STOP");
+
+	return rc;
+}
+
+
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private;
+	int rc = 0;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_FD, "Invalid soc_info param");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_FD, "disable platform resources failed, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in CPAS STOP, handle=0x%x, rc=%d",
+			soc_private->cpas_handle, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data)
+{
+	struct cam_fd_soc_private *soc_private;
+	struct cam_cpas_register_params cpas_register_param;
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		private_data);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in request_platform_resource rc=%d",
+			rc);
+		return rc;
+	}
+
+	rc = cam_fd_soc_set_clk_flags(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "failed in set_clk_flags rc=%d", rc);
+		goto release_res;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_fd_soc_private), GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+	soc_info->soc_private = soc_private;
+
+	rc = cam_fd_hw_soc_util_setup_regbase_indices(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in setup regbase, rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	cpas_register_param.userdata = private_data;
+	cpas_register_param.cam_cpas_client_cb = cam_fd_hw_util_cpas_callback;
+
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_FD, "CPAS registration failed");
+		goto free_soc_private;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+	CAM_DBG(CAM_FD, "CPAS handle=%d", soc_private->cpas_handle);
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int rc;
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister cpas failed, handle=%d, rc=%d",
+			soc_private->cpas_handle, rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h
new file mode 100644
index 0000000..4a22293
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_SOC_H_
+#define _CAM_FD_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+/**
+ * enum cam_fd_reg_base - Enum for FD register sets
+ *
+ * @CAM_FD_REG_CORE    : Indicates FD Core register space
+ * @CAM_FD_REG_WRAPPER : Indicates FD Wrapper register space
+ * @CAM_FD_REG_MAX     : Max number of register sets supported
+ */
+enum cam_fd_reg_base {
+	CAM_FD_REG_CORE,
+	CAM_FD_REG_WRAPPER,
+	CAM_FD_REG_MAX
+};
+
+/**
+ * struct cam_fd_soc_private : FD private SOC information
+ *
+ * @regbase_index : Mapping between Register base enum to register index in SOC
+ * @cpas_handle   : CPAS handle
+ *
+ */
+struct cam_fd_soc_private {
+	int32_t        regbase_index[CAM_FD_REG_MAX];
+	uint32_t       cpas_handle;
+};
+
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data);
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset);
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value);
+
+#endif /* _CAM_FD_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h
new file mode 100644
index 0000000..70448bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_V41_H_
+#define _CAM_FD_HW_V41_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper120_core410_info = {
+	.core_version = {
+		.major  = 4,
+		.minor  = 1,
+		.incr   = 0,
+	},
+	.wrapper_version = {
+		.major  = 1,
+		.minor  = 2,
+		.incr   = 0,
+	},
+	.core_regs = {
+		.version               = 0x38,
+		.control               = 0x0,
+		.result_cnt            = 0x4,
+		.result_addr           = 0x20,
+		.image_addr            = 0x24,
+		.work_addr             = 0x28,
+		.ro_mode               = 0x34,
+		.results_reg_base      = 0x400,
+		.raw_results_reg_base  = 0x800,
+	},
+	.wrapper_regs = {
+		.wrapper_version       = 0x0,
+		.cgc_disable           = 0x4,
+		.hw_stop               = 0x8,
+		.sw_reset              = 0x10,
+		.vbif_req_priority     = 0x20,
+		.vbif_priority_level   = 0x24,
+		.vbif_done_status      = 0x34,
+		.irq_mask              = 0x50,
+		.irq_status            = 0x54,
+		.irq_clear             = 0x58,
+	},
+	.results = {
+		.max_faces             = 35,
+		.per_face_entries      = 4,
+		.raw_results_available = true,
+		.raw_results_entries   = 512,
+	},
+	.enable_errata_wa = {
+		.single_irq_only         = true,
+		.ro_mode_enable_always   = true,
+		.ro_mode_results_invalid = true,
+	},
+	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+	.qos_priority       = 4,
+	.qos_priority_level = 4,
+	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+	.ro_mode_supported  = true,
+};
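+
+/*
+ * Illustrative sketch only: a future FD HW revision would get a similar
+ * static info table (per-version register offsets and capabilities) plus a
+ * matching compatible entry in cam_fd_hw_dt_match in cam_fd_hw_dev.c. The
+ * name and version numbers below are placeholders.
+ */
+#if 0
+static struct cam_fd_hw_static_info cam_fd_example_future_hw_info = {
+	.core_version    = { .major = 5, .minor = 0, .incr = 0 },
+	.wrapper_version = { .major = 2, .minor = 0, .incr = 0 },
+	/* remaining fields would mirror cam_fd_wrapper120_core410_info */
+};
+#endif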
+
+#endif /* _CAM_FD_HW_V41_H_ */