Merge "mmc: block: do not query the sd card if a fault is injected" into msm-3.4
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index f71f74c..edb57dd 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -600,6 +600,22 @@
 		qcom,memory-reservation-type = "EBI1";
 		qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
 	};
+
+        qcom,qcedev@fd440000 {
+		compatible = "qcom,qcedev";
+		reg = <0xfd440000 0x20000>,
+		      <0xfd444000 0x8000>;
+		interrupts = <0 235 0>;
+		qcom,bam-pipes = <0>;
+	};
+
+        qcom,qcrypto@fd444000 {
+		compatible = "qcom,qcrypto";
+		reg = <0xfd440000 0x20000>,
+		      <0xfd444000 0x8000>;
+		interrupts = <0 235 0>;
+		qcom,bam-pipes = <1>;
+	};
 };
 
 /include/ "msm-pm8x41-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index b02bd7c..26ee198 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -50,7 +50,6 @@
 CONFIG_MSM_PIL_PRONTO=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
-CONFIG_MSM_WATCHDOG_V2=y
 CONFIG_MSM_OCMEM=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -246,3 +245,4 @@
 CONFIG_CRYPTO_DEV_QCEDEV=m
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
+CONFIG_MSM_BUS_SCALING=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 7ca9f49..9094db7 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -50,6 +50,12 @@
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SUSPEND is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_MSM_NAND is not set
+CONFIG_MTD_MSM_QPIC_NAND=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 # CONFIG_ANDROID_PMEM is not set
@@ -74,7 +80,8 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
@@ -103,5 +110,4 @@
 CONFIG_CRYPTO_DEFLATE=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
 CONFIG_LIBCRC32C=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index ab55bb1..0aec27a 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -817,18 +817,6 @@
 	  The two TSIF cores share the same DM configuration
 	  so they cannot be used simultaneously.
 
-config MACH_APQ8064_SIM
-	depends on ARCH_APQ8064
-	bool "APQ8064 Simulator"
-	help
-	  Support for the Qualcomm APQ8064 simulator.
-
-config MACH_APQ8064_RUMI3
-	depends on ARCH_APQ8064
-	bool "APQ8064 RUMI3"
-	help
-	  Support for the Qualcomm APQ8064 RUMI3 emulator.
-
 config MACH_APQ8064_CDP
 	depends on ARCH_APQ8064
 	bool "APQ8064 CDP"
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 50841ae..c839a4a 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -279,8 +279,6 @@
 obj-$(CONFIG_MACH_MSM8930_MTP) += board-8930-all.o board-8930-regulator.o
 obj-$(CONFIG_MACH_MSM8930_FLUID) += board-8930-all.o board-8930-regulator.o
 obj-$(CONFIG_PM8921_BMS) += bms-batterydata.o bms-batterydata-desay.o
-obj-$(CONFIG_MACH_APQ8064_SIM) += board-8064-all.o board-8064-regulator.o
-obj-$(CONFIG_MACH_APQ8064_RUMI3) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_CDP) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_MTP) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_LIQUID) += board-8064-all.o board-8064-regulator.o
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 43a79b5..678eb9e 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -439,10 +439,7 @@
 	apq8064_pm8921_platform_data.num_regulators =
 					msm8064_pm8921_regulator_pdata_len;
 
-	if (machine_is_apq8064_rumi3()) {
-		apq8064_pm8921_irq_pdata.devirq = 0;
-		apq8064_pm8821_irq_pdata.devirq = 0;
-	} else if (machine_is_apq8064_mtp()) {
+	if (machine_is_apq8064_mtp()) {
 		apq8064_pm8921_bms_pdata.battery_type = BATT_PALLADIUM;
 	} else if (machine_is_apq8064_liquid()) {
 		apq8064_pm8921_bms_pdata.battery_type = BATT_DESAY;
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index 13d8b3b..c81a637 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -331,21 +331,6 @@
 
 void __init apq8064_init_mmc(void)
 {
-	if ((machine_is_apq8064_rumi3()) || machine_is_apq8064_sim()) {
-		if (apq8064_sdc1_pdata) {
-			if (machine_is_apq8064_sim())
-				apq8064_sdc1_pdata->disable_bam = true;
-			apq8064_sdc1_pdata->disable_runtime_pm = true;
-			apq8064_sdc1_pdata->disable_cmd23 = true;
-		}
-		if (apq8064_sdc3_pdata) {
-			if (machine_is_apq8064_sim())
-				apq8064_sdc3_pdata->disable_bam = true;
-			apq8064_sdc3_pdata->disable_runtime_pm = true;
-			apq8064_sdc3_pdata->disable_cmd23 = true;
-		}
-	}
-
 	if (apq8064_sdc1_pdata)
 		apq8064_add_sdcc(1, apq8064_sdc1_pdata);
 
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index ce1c829..15c7b52 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -2272,19 +2272,6 @@
 	&apq8064_cache_dump_device,
 };
 
-static struct platform_device *sim_devices[] __initdata = {
-	&apq8064_device_uart_gsbi3,
-	&msm_device_sps_apq8064,
-};
-
-static struct platform_device *rumi3_devices[] __initdata = {
-	&apq8064_device_uart_gsbi1,
-	&msm_device_sps_apq8064,
-#ifdef CONFIG_MSM_ROTATOR
-	&msm_rotator_device,
-#endif
-};
-
 static struct platform_device *cdp_devices[] __initdata = {
 	&apq8064_device_uart_gsbi1,
 	&apq8064_device_uart_gsbi7,
@@ -2846,10 +2833,6 @@
 		mach_mask = I2C_FFA;
 	else if (machine_is_apq8064_liquid())
 		mach_mask = I2C_LIQUID;
-	else if (machine_is_apq8064_rumi3())
-		mach_mask = I2C_RUMI;
-	else if (machine_is_apq8064_sim())
-		mach_mask = I2C_SIM;
 	else if (PLATFORM_IS_MPQ8064())
 		mach_mask = I2C_MPQ_CDP;
 	else
@@ -2966,25 +2949,6 @@
 	apq8064_allocate_fb_region();
 }
 
-static void __init apq8064_sim_init(void)
-{
-	struct msm_watchdog_pdata *wdog_pdata = (struct msm_watchdog_pdata *)
-		&msm8064_device_watchdog.dev.platform_data;
-
-	wdog_pdata->bark_time = 15000;
-	apq8064_common_init();
-	platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
-}
-
-static void __init apq8064_rumi3_init(void)
-{
-	apq8064_common_init();
-	ethernet_init();
-	msm_rotator_set_split_iommu_domain();
-	platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
-	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-}
-
 static void __init apq8064_cdp_init(void)
 {
 	if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
@@ -3024,27 +2988,6 @@
 	}
 }
 
-MACHINE_START(APQ8064_SIM, "QCT APQ8064 SIMULATOR")
-	.map_io = apq8064_map_io,
-	.reserve = apq8064_reserve,
-	.init_irq = apq8064_init_irq,
-	.handle_irq = gic_handle_irq,
-	.timer = &msm_timer,
-	.init_machine = apq8064_sim_init,
-	.restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(APQ8064_RUMI3, "QCT APQ8064 RUMI3")
-	.map_io = apq8064_map_io,
-	.reserve = apq8064_reserve,
-	.init_irq = apq8064_init_irq,
-	.handle_irq = gic_handle_irq,
-	.timer = &msm_timer,
-	.init_machine = apq8064_rumi3_init,
-	.init_early = apq8064_allocate_memory_regions,
-	.restart = msm_restart,
-MACHINE_END
-
 MACHINE_START(APQ8064_CDP, "QCT APQ8064 CDP")
 	.map_io = apq8064_map_io,
 	.reserve = apq8064_reserve,
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 74aa837..d65ebe1 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -656,6 +656,10 @@
 	OF_DEV_AUXDATA("qcom,mdss_mdp", 0xFD900000, "mdp.0", NULL),
 	OF_DEV_AUXDATA("qcom,msm-tsens", 0xFC4A8000, \
 			"msm-tsens", NULL),
+	OF_DEV_AUXDATA("qcom,qcedev", 0xFD440000, \
+			"qcedev.0", NULL),
+	OF_DEV_AUXDATA("qcom,qcrypto", 0xFD440000, \
+			"qcrypto.0", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/board-msm7627a-camera.c b/arch/arm/mach-msm/board-msm7627a-camera.c
index 57684f9..1e198a7 100644
--- a/arch/arm/mach-msm/board-msm7627a-camera.c
+++ b/arch/arm/mach-msm/board-msm7627a-camera.c
@@ -281,8 +281,15 @@
 	.gpio_no_mux = 1,
 };
 
+static struct msm_camera_sensor_flash_src msm_flash_src_ov8825 = {
+	.flash_sr_type = MSM_CAMERA_FLASH_SRC_LED1,
+	._fsrc.ext_driver_src.led_en = 13,
+	._fsrc.ext_driver_src.led_flash_en = 32,
+};
+
 static struct msm_camera_sensor_flash_data flash_ov8825 = {
-	.flash_type     = MSM_CAMERA_FLASH_NONE,
+	.flash_type     = MSM_CAMERA_FLASH_LED,
+	.flash_src      = &msm_flash_src_ov8825,
 };
 
 static struct msm_camera_sensor_platform_info sensor_board_info_ov8825 = {
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 5463924..82b7fd6 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -599,9 +599,9 @@
 
 static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);
 
-#define RPM_MISC_CLK_TYPE 0x306b6c63
-#define RPM_BUS_CLK_TYPE  0x316b6c63
-#define RPM_MEM_CLK_TYPE  0x326b6c63
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_BUS_CLK_TYPE	0x316b6c63
+#define RPM_MEM_CLK_TYPE	0x326b6c63
 
 #define CXO_ID		0x0
 #define QDSS_ID		0x1
@@ -614,6 +614,14 @@
 #define BIMC_ID		0x0
 #define OCMEM_ID	0x1
 
+enum {
+	D0_ID = 1,
+	D1_ID,
+	A0_ID,
+	A1_ID,
+	A2_ID,
+};
+
 DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
 DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
 DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
@@ -628,6 +636,18 @@
 				RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
 DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE, QDSS_ID);
 
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_d0, cxo_d0_a, D0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_d1, cxo_d1_a, D1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a0, cxo_a0_a, A0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a1, cxo_a1_a, A1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a2, cxo_a2_a, A2_ID);
+
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_d0_pin, cxo_d0_a_pin, D0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_d1_pin, cxo_d1_a_pin, D1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a0_pin, cxo_a0_a_pin, A0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a1_pin, cxo_a1_a_pin, A1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a2_pin, cxo_a2_a_pin, A2_ID);
+
 static struct pll_vote_clk gpll0_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
@@ -4719,6 +4739,16 @@
 	CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, ""),
 	CLK_LOOKUP("bus_clk", gcc_ce2_axi_clk.c, ""),
 
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,         "qcedev.0"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c,     "qcedev.0"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c,     "qcedev.0"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,         "qcedev.0"),
+
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "qcrypto.0"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "qcrypto.0"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "qcrypto.0"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "qcrypto.0"),
+
 	CLK_LOOKUP("core_clk", gcc_gp1_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 22691c5..e203028 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -21,6 +21,10 @@
 #define RPM_SMD_KEY_ENABLE	0x62616E45
 #define RPM_SMD_KEY_STATE	0x54415453
 
+#define RPM_CLK_BUFFER_A_REQ			0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE			0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
+
 struct clk_ops;
 struct clk_rpmrs_data;
 extern struct clk_ops clk_ops_rpm;
@@ -187,5 +191,17 @@
 #define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
 	__DEFINE_CLK_RPM_QDSS(name, active, type, r_id, \
 		0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control mode
+ * and software mode separately. Software enable takes priority over pin-
+ * control, and if the software-mode aggregation results in a 'disable', the
+ * buffer will be left in pin-control mode if a pin-control vote is in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+			1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
 
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+	1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
 #endif
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index b2e4208..a6f18ba 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/msm_rotator.h>
+#include <linux/gpio.h>
 #include <linux/clkdev.h>
 #include <linux/dma-mapping.h>
 #include <linux/coresight.h>
@@ -27,6 +28,7 @@
 #include <mach/msm_dsps.h>
 #include <sound/msm-dai-q6.h>
 #include <sound/apr_audio.h>
+#include <mach/msm_tsif.h>
 #include <mach/msm_bus_board.h>
 #include <mach/rpm.h>
 #include <mach/mdm2.h>
@@ -464,6 +466,112 @@
 	.id     = 0x4009,
 };
 
+#define MSM_TSIF0_PHYS       (0x18200000)
+#define MSM_TSIF1_PHYS       (0x18201000)
+#define MSM_TSIF_SIZE        (0x200)
+
+#define TSIF_0_CLK       GPIO_CFG(55, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_EN        GPIO_CFG(56, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_DATA      GPIO_CFG(57, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_SYNC      GPIO_CFG(62, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_CLK       GPIO_CFG(59, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_EN        GPIO_CFG(60, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_DATA      GPIO_CFG(61, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_SYNC      GPIO_CFG(58, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+
+static const struct msm_gpio tsif0_gpios[] = {
+	{ .gpio_cfg = TSIF_0_CLK,  .label =  "tsif_clk", },
+	{ .gpio_cfg = TSIF_0_EN,   .label =  "tsif_en", },
+	{ .gpio_cfg = TSIF_0_DATA, .label =  "tsif_data", },
+	{ .gpio_cfg = TSIF_0_SYNC, .label =  "tsif_sync", },
+};
+
+static const struct msm_gpio tsif1_gpios[] = {
+	{ .gpio_cfg = TSIF_1_CLK,  .label =  "tsif_clk", },
+	{ .gpio_cfg = TSIF_1_EN,   .label =  "tsif_en", },
+	{ .gpio_cfg = TSIF_1_DATA, .label =  "tsif_data", },
+	{ .gpio_cfg = TSIF_1_SYNC, .label =  "tsif_sync", },
+};
+
+struct msm_tsif_platform_data tsif1_8064_platform_data = {
+	.num_gpios = ARRAY_SIZE(tsif1_gpios),
+	.gpios = tsif1_gpios,
+	.tsif_pclk = "iface_clk",
+	.tsif_ref_clk = "ref_clk",
+};
+
+struct resource tsif1_8064_resources[] = {
+	[0] = {
+		.flags = IORESOURCE_IRQ,
+		.start = TSIF2_IRQ,
+		.end   = TSIF2_IRQ,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+		.start = MSM_TSIF1_PHYS,
+		.end   = MSM_TSIF1_PHYS + MSM_TSIF_SIZE - 1,
+	},
+	[2] = {
+		.flags = IORESOURCE_DMA,
+		.start = DMOV8064_TSIF_CHAN,
+		.end   = DMOV8064_TSIF_CRCI,
+	},
+};
+
+struct msm_tsif_platform_data tsif0_8064_platform_data = {
+	.num_gpios = ARRAY_SIZE(tsif0_gpios),
+	.gpios = tsif0_gpios,
+	.tsif_pclk = "iface_clk",
+	.tsif_ref_clk = "ref_clk",
+};
+
+struct resource tsif0_8064_resources[] = {
+	[0] = {
+		.flags = IORESOURCE_IRQ,
+		.start = TSIF1_IRQ,
+		.end   = TSIF1_IRQ,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+		.start = MSM_TSIF0_PHYS,
+		.end   = MSM_TSIF0_PHYS + MSM_TSIF_SIZE - 1,
+	},
+	[2] = {
+		.flags = IORESOURCE_DMA,
+		.start = DMOV_TSIF_CHAN,
+		.end   = DMOV_TSIF_CRCI,
+	},
+};
+
+struct platform_device msm_8064_device_tsif[2] = {
+	{
+		.name          = "msm_tsif",
+		.id            = 0,
+		.num_resources = ARRAY_SIZE(tsif0_8064_resources),
+		.resource      = tsif0_8064_resources,
+		.dev = {
+			.platform_data = &tsif0_8064_platform_data
+		},
+	},
+	{
+		.name          = "msm_tsif",
+		.id            = 1,
+		.num_resources = ARRAY_SIZE(tsif1_8064_resources),
+		.resource      = tsif1_8064_resources,
+		.dev = {
+			.platform_data = &tsif1_8064_platform_data
+		},
+	}
+};
+
 /*
  * Machine specific data for AUX PCM Interface
  * which the driver will  be unware of.
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index f21d296..1770b97 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -184,10 +184,11 @@
 	REG_LDO,
 	REG_VS,
 	REG_GPIO,
+	REG_MAX
 };
 
 struct camera_vreg_t {
-	char *reg_name;
+	const char *reg_name;
 	enum camera_vreg_type type;
 	int min_voltage;
 	int max_voltage;
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 43d707e..a51a6b5 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -85,13 +85,12 @@
 	VFE_MSG_SYNC_TIMER1,
 	VFE_MSG_SYNC_TIMER2,
 	VFE_MSG_COMMON,
-	VFE_MSG_V32_START,
-	VFE_MSG_V32_START_RECORDING, /* 20 */
-	VFE_MSG_V32_CAPTURE,
-	VFE_MSG_V32_JPEG_CAPTURE,
+	VFE_MSG_START,
+	VFE_MSG_START_RECORDING, /* 20 */
+	VFE_MSG_CAPTURE,
+	VFE_MSG_JPEG_CAPTURE,
 	VFE_MSG_OUTPUT_IRQ,
-	VFE_MSG_V2X_PREVIEW,
-	VFE_MSG_V2X_CAPTURE,
+	VFE_MSG_PREVIEW,
 	VFE_MSG_OUTPUT_PRIMARY,
 	VFE_MSG_OUTPUT_SECONDARY,
 	VFE_MSG_OUTPUT_TERTIARY1,
@@ -291,6 +290,109 @@
 	int (*strobe_flash_charge)(int32_t, int32_t, uint32_t);
 };
 
+enum cci_i2c_master_t {
+	MASTER_0,
+	MASTER_1,
+};
+
+enum cci_i2c_queue_t {
+	QUEUE_0,
+	QUEUE_1,
+};
+
+struct msm_camera_cci_client {
+	struct v4l2_subdev *cci_subdev;
+	uint32_t freq;
+	enum cci_i2c_master_t cci_i2c_master;
+	uint16_t sid;
+	uint16_t cid;
+	uint32_t timeout;
+	uint16_t retries;
+	uint16_t id_map;
+};
+
+enum msm_cci_cmd_type {
+	MSM_CCI_INIT,
+	MSM_CCI_RELEASE,
+	MSM_CCI_SET_SID,
+	MSM_CCI_SET_FREQ,
+	MSM_CCI_SET_SYNC_CID,
+	MSM_CCI_I2C_READ,
+	MSM_CCI_I2C_WRITE,
+	MSM_CCI_GPIO_WRITE,
+};
+
+struct msm_camera_cci_wait_sync_cfg {
+	uint16_t line;
+	uint16_t delay;
+};
+
+struct msm_camera_cci_gpio_cfg {
+	uint16_t gpio_queue;
+	uint16_t i2c_queue;
+};
+
+enum msm_camera_i2c_reg_addr_type {
+	MSM_CAMERA_I2C_BYTE_ADDR = 1,
+	MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+enum msm_camera_i2c_data_type {
+	MSM_CAMERA_I2C_BYTE_DATA = 1,
+	MSM_CAMERA_I2C_WORD_DATA,
+	MSM_CAMERA_I2C_SET_BYTE_MASK,
+	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+	MSM_CAMERA_I2C_SET_WORD_MASK,
+	MSM_CAMERA_I2C_UNSET_WORD_MASK,
+	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+};
+
+enum msm_camera_i2c_cmd_type {
+	MSM_CAMERA_I2C_CMD_WRITE,
+	MSM_CAMERA_I2C_CMD_POLL,
+};
+
+struct msm_camera_i2c_reg_conf {
+	uint16_t reg_addr;
+	uint16_t reg_data;
+	enum msm_camera_i2c_data_type dt;
+	enum msm_camera_i2c_cmd_type cmd_type;
+	int16_t mask;
+};
+
+struct msm_camera_cci_i2c_write_cfg {
+	struct msm_camera_i2c_reg_conf *reg_conf_tbl;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t size;
+};
+
+struct msm_camera_cci_i2c_read_cfg {
+	uint16_t addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	uint8_t *data;
+	uint16_t num_byte;
+};
+
+struct msm_camera_cci_i2c_queue_info {
+	uint32_t max_queue_size;
+	uint32_t report_id;
+	uint32_t irq_en;
+	uint32_t capture_rep_data;
+};
+
+struct msm_camera_cci_ctrl {
+	int32_t status;
+	struct msm_camera_cci_client *cci_info;
+	enum msm_cci_cmd_type cmd;
+	union {
+		struct msm_camera_cci_i2c_write_cfg cci_i2c_write_cfg;
+		struct msm_camera_cci_i2c_read_cfg cci_i2c_read_cfg;
+		struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+		struct msm_camera_cci_gpio_cfg gpio_cfg;
+	} cfg;
+};
+
 /* this structure is used in kernel */
 struct msm_queue_cmd {
 	struct list_head list_config;
diff --git a/arch/arm/mach-msm/include/mach/mpm.h b/arch/arm/mach-msm/include/mach/mpm.h
index 76f1ea6..fabaa09 100644
--- a/arch/arm/mach-msm/include/mach/mpm.h
+++ b/arch/arm/mach-msm/include/mach/mpm.h
@@ -109,13 +109,14 @@
 /**
  * msm_mpm_enter_sleep() -Called from PM code before entering low power mode
  *
+ * @sclk_count: wakeup time in sclk counts for programmed RPM wakeup
  * @from_idle: indicates if the sytem is entering low power mode as a part of
  *		suspend/idle task.
  *
  * Low power management code calls into this API to configure the MPM to
  * monitor the active irqs before going to sleep.
  */
-void msm_mpm_enter_sleep(bool from_idle);
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle);
 /**
  * msm_mpm_exit_sleep() -Called from PM code after resuming from low power mode
  *
@@ -158,11 +159,8 @@
 { return false; }
 static inline bool msm_mpm_gpio_irqs_detectable(bool from_idle)
 { return false; }
-static inline void msm_mpm_enter_sleep(bool from_idle) {}
+static inline void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle) {}
 static inline void msm_mpm_exit_sleep(bool from_idle) {}
 static inline void __init of_mpm_init(struct device_node *node) {}
 #endif
-
-
-
 #endif /* __ARCH_ARM_MACH_MSM_MPM_H */
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index dc633fb..97c03e7 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -223,6 +223,19 @@
  */
 void smd_disable_read_intr(smd_channel_t *ch);
 
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch:      open channel handle to use for the edge
+ * @mask:    1 = mask interrupts; 0 = unmask interrupts
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels.  As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask);
+
 /* Starts a packet transaction.  The size of the packet may exceed the total
  * size of the smd ring buffer.
  *
@@ -389,6 +402,11 @@
 {
 }
 
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+{
+	return -ENODEV;
+}
+
 static inline int smd_write_start(smd_channel_t *ch, int len)
 {
 	return -ENODEV;
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index ea368ae..f7456ef 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -20,6 +20,7 @@
 #include <mach/mpm.h>
 #include "lpm_resources.h"
 #include "pm.h"
+#include "rpm-notifier.h"
 
 static struct msm_rpmrs_level *msm_lpm_levels;
 static int msm_lpm_level_count;
@@ -40,15 +41,23 @@
 		bool from_idle, bool notify_rpm)
 {
 	int ret = 0;
+	struct msm_rpmrs_limits *l = (struct msm_rpmrs_limits *)limits;
 
-	ret = msm_lpmrs_enter_sleep((struct msm_rpmrs_limits *)limits,
-					from_idle, notify_rpm);
+	ret = msm_rpm_enter_sleep();
+	if (ret) {
+		pr_warn("%s(): RPM failed to enter sleep err:%d\n",
+				__func__, ret);
+		goto bail;
+	}
+	ret = msm_lpmrs_enter_sleep(sclk_count, l, from_idle, notify_rpm);
+bail:
 	return ret;
 }
 
 static void msm_lpm_exit_sleep(void *limits, bool from_idle,
 		bool notify_rpm, bool collapsed)
 {
+	msm_rpm_exit_sleep();
 	msm_lpmrs_exit_sleep((struct msm_rpmrs_limits *)limits,
 				from_idle, notify_rpm, collapsed);
 }
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index ebcbd26..0758651 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -619,12 +619,10 @@
 	msm_lpm_notify_common(rpm_notifier_cb, rs);
 }
 
-/* MPM
-static bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
+static inline bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
 {
-	return ((limits->pxo == MSM_LPM_PXO_OFF) ||
-		(limits->vdd_dig_lower_bound <= VDD_DIG_RET_HIGH));
-}*/
+	return (limits->pxo == MSM_LPM_PXO_OFF);
+}
 
 /* LPM levels interface */
 bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
@@ -647,7 +645,7 @@
 	return beyond_limit;
 }
 
-int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
 				bool from_idle, bool notify_rpm)
 {
 	int ret = 0;
@@ -668,9 +666,8 @@
 	}
 	msm_lpm_get_rpm_notif = true;
 
-	/* MPM Enter sleep
 	if (msm_lpm_use_mpm(limits))
-		msm_mpm_enter_sleep(from_idle);*/
+		msm_mpm_enter_sleep(sclk_count, from_idle);
 
 	return ret;
 }
diff --git a/arch/arm/mach-msm/lpm_resources.h b/arch/arm/mach-msm/lpm_resources.h
index bc06d7b..120832f 100644
--- a/arch/arm/mach-msm/lpm_resources.h
+++ b/arch/arm/mach-msm/lpm_resources.h
@@ -71,6 +71,7 @@
  * on the low power mode being entered. L2 low power mode is also set in
  * this function.
 
+ * @sclk_count: wakeup counter for RPM.
  * @limits: pointer to the resource limits of the low power mode being entered.
  * @from_idle: bool to determine if this call being made as a part of
  *             idle power collapse.
@@ -78,7 +79,7 @@
  *
  * returns 0 on success.
  */
-int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
 	bool from_idle, bool notify_rpm);
 
 /**
@@ -106,8 +107,8 @@
 	return true;
 }
 
-static inline int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
-	bool from_idle, bool notify_rpm)
+static inline int msm_lpmrs_enter_sleep(uint32_t sclk_count,
+	struct msm_rpmrs_limits *limits, bool from_idle, bool notify_rpm)
 {
 	return 0;
 }
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index f98c0f2..e4c0e4e 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <asm/hardware/gic.h>
+#include <asm/arch_timer.h>
 #include <mach/gpio.h>
 #include <mach/mpm.h>
 
@@ -70,7 +71,8 @@
 #define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)
 
 #define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
-
+#define SCLK_HZ (32768)
+#define ARCH_TIMER_HZ (19200000)
 static struct msm_mpm_device_data msm_mpm_dev_data;
 
 enum mpm_reg_offsets {
@@ -152,14 +154,20 @@
 	return IRQ_HANDLED;
 }
 
-static void msm_mpm_set(bool wakeset)
+static void msm_mpm_set(cycle_t wakeup, bool wakeset)
 {
 	uint32_t *irqs;
 	unsigned int reg;
 	int i;
+	uint32_t *expiry_timer;
+
+	expiry_timer = (uint32_t *)&wakeup;
 
 	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
 	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
+		reg = MSM_MPM_REG_WAKEUP;
+		msm_mpm_write(reg, i, expiry_timer[i]);
+
 		reg = MSM_MPM_REG_ENABLE;
 		msm_mpm_write(reg, i, irqs[i]);
 
@@ -448,14 +456,23 @@
 	return true;
 }
 
-void msm_mpm_enter_sleep(bool from_idle)
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
 {
+	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;
+
 	if (!msm_mpm_is_initialized()) {
 		pr_err("%s(): MPM not initialized\n", __func__);
 		return;
 	}
 
-	msm_mpm_set(!from_idle);
+	if (sclk_count) {
+		do_div(wakeup, SCLK_HZ);
+		wakeup += arch_counter_get_cntpct();
+	} else {
+		wakeup = (~0ULL);
+	}
+
+	msm_mpm_set(wakeup, !from_idle);
 }
 
 void msm_mpm_exit_sleep(bool from_idle)
diff --git a/arch/arm/mach-msm/mpm.c b/arch/arm/mach-msm/mpm.c
index b395b61..1c39415 100644
--- a/arch/arm/mach-msm/mpm.c
+++ b/arch/arm/mach-msm/mpm.c
@@ -421,7 +421,7 @@
 			MSM_MPM_NR_APPS_IRQS);
 }
 
-void msm_mpm_enter_sleep(bool from_idle)
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
 {
 	msm_mpm_set(!from_idle);
 }
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 131a74b..32cce1d 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -116,7 +116,7 @@
 	int err;
 	struct q6v4_data *drv = dev_get_drvdata(dev);
 
-	err = regulator_set_voltage(drv->vreg, 375000, 375000);
+	err = regulator_set_voltage(drv->vreg, 743750, 743750);
 	if (err) {
 		dev_err(dev, "Failed to set regulator's voltage step.\n");
 		return err;
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 8a16b43..2d1fa80 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -263,17 +263,17 @@
 static int pil_riva_init_image_trusted(struct pil_desc *pil,
 		const u8 *metadata, size_t size)
 {
-	return pas_init_image(PAS_RIVA, metadata, size);
+	return pas_init_image(PAS_WCNSS, metadata, size);
 }
 
 static int pil_riva_reset_trusted(struct pil_desc *pil)
 {
-	return pas_auth_and_reset(PAS_RIVA);
+	return pas_auth_and_reset(PAS_WCNSS);
 }
 
 static int pil_riva_shutdown_trusted(struct pil_desc *pil)
 {
-	return pas_shutdown(PAS_RIVA);
+	return pas_shutdown(PAS_WCNSS);
 }
 
 static struct pil_reset_ops pil_riva_ops_trusted = {
@@ -334,7 +334,7 @@
 	desc->owner = THIS_MODULE;
 	desc->proxy_timeout = 10000;
 
-	if (pas_supported(PAS_RIVA) > 0) {
+	if (pas_supported(PAS_WCNSS) > 0) {
 		desc->ops = &pil_riva_ops_trusted;
 		dev_info(&pdev->dev, "using secure boot\n");
 	} else {
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 08cbc34..a11ca95 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -83,9 +83,6 @@
 	if (!base_ptr)
 		return -ENODEV;
 
-	if (machine_is_apq8064_sim())
-		writel_relaxed(0xf0000, base_ptr+0x04);
-
 	if (machine_is_msm8974_sim()) {
 		writel_relaxed(0x800, base_ptr+0x04);
 		writel_relaxed(0x3FFF, base_ptr+0x14);
@@ -130,9 +127,6 @@
 	if (cpu_is_msm8x60())
 		return scorpion_release_secondary();
 
-	if (machine_is_apq8064_sim())
-		return krait_release_secondary_sim(0x02088000, cpu);
-
 	if (machine_is_msm8974_sim())
 		return krait_release_secondary_sim(0xf9088000, cpu);
 
diff --git a/arch/arm/mach-msm/qdsp5/adsp.c b/arch/arm/mach-msm/qdsp5/adsp.c
index eee7e37..11f6b28 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.c
+++ b/arch/arm/mach-msm/qdsp5/adsp.c
@@ -1072,12 +1072,28 @@
 int msm_adsp_enable(struct msm_adsp_module *module)
 {
 	int rc = 0;
+	struct msm_adsp_module *module_en = NULL;
 
 	if (!module)
 		return -EINVAL;
 
 	MM_INFO("enable '%s'state[%d] id[%d]\n",
 				module->name, module->state, module->id);
+	if (!strncmp(module->name, "JPEGTASK", sizeof(module->name)))
+		module_en = find_adsp_module_by_name(&adsp_info, "VIDEOTASK");
+	else if (!strncmp(module->name, "VIDEOTASK", sizeof(module->name)))
+		module_en = find_adsp_module_by_name(&adsp_info, "JPEGTASK");
+	if (module_en) {
+		mutex_lock(&module_en->lock);
+		if (module_en->state == ADSP_STATE_ENABLED ||
+			module_en->state == ADSP_STATE_ENABLING) {
+			MM_ERR("jpeg and video modules cannot be enabled"
+				" at the same time\n");
+			mutex_unlock(&module_en->lock);
+			return -EINVAL;
+		}
+		mutex_unlock(&module_en->lock);
+	}
 
 	mutex_lock(&module->lock);
 	switch (module->state) {
diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c
index 8eaf829..0c8034c 100644
--- a/arch/arm/mach-msm/qdsp5/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5/audio_out.c
@@ -111,7 +111,7 @@
 
 
 
-#define BUFSZ (960 * 5)
+#define BUFSZ (5248)
 #define DMASZ (BUFSZ * 2)
 
 #define COMMON_OBJ_ID 6
@@ -817,7 +817,7 @@
 		goto done;
 
 	audio->out_buffer_size = BUFSZ;
-	audio->out_sample_rate = 44100;
+	audio->out_sample_rate = 48000;
 	audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
 	audio->out_weight = 100;
 
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
index df8d9b3..33086c6 100644
--- a/arch/arm/mach-msm/rpm-notifier.h
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -20,8 +20,31 @@
 	uint32_t size;
 	uint8_t *value;
 };
-
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
 int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
 int msm_rpm_unregister_notifier(struct notifier_block *nb);
 
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ */
+int msm_rpm_enter_sleep(void);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
 #endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 697d504..0faafc8 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -886,6 +886,27 @@
 	return rc;
 }
 EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * During power collapse, the rpm driver disables the SMD interrupts to make
+ * sure that the interrupt does not wake us from sleep.
+ */
+int msm_rpm_enter_sleep(void)
+{
+	return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true);
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * When the system resumes from power collapse, the SMD interrupt disabled by
+ * the enter function has to be re-enabled to continue processing SMD messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+	smd_mask_receive_interrupt(msm_rpm_data.ch_info, false);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
 static bool msm_rpm_set_standalone(void)
 {
 	if (machine_is_msm8974()) {
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index a88e42e..9d794e7 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -967,7 +967,7 @@
 			return rc;
 
 		if (msm_rpmrs_use_mpm(limits))
-			msm_mpm_enter_sleep(from_idle);
+			msm_mpm_enter_sleep(sclk_count, from_idle);
 	}
 
 	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
diff --git a/arch/arm/mach-msm/scm-pas.h b/arch/arm/mach-msm/scm-pas.h
index 2fe71a9..dd24e20 100644
--- a/arch/arm/mach-msm/scm-pas.h
+++ b/arch/arm/mach-msm/scm-pas.h
@@ -19,7 +19,7 @@
 	PAS_TZAPPS,
 	PAS_MODEM_SW,
 	PAS_MODEM_FW,
-	PAS_RIVA,
+	PAS_WCNSS,
 	PAS_SECAPP,
 	PAS_GSS,
 	PAS_VIDC,
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 948dbbb..decee95 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -124,6 +124,7 @@
 	uint32_t out_bit_pos;
 	void __iomem *out_base;
 	uint32_t out_offset;
+	int irq_id;
 };
 
 struct interrupt_config {
@@ -2128,6 +2129,56 @@
 }
 EXPORT_SYMBOL(smd_disable_read_intr);
 
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch:      open channel handle to use for the edge
+ * @mask:    1 = mask interrupts; 0 = unmask interrupts
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels.  As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+{
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+	struct interrupt_config_item *int_cfg;
+
+	if (!ch)
+		return -EINVAL;
+
+	if (ch->type >= ARRAY_SIZE(edge_to_pids))
+		return -ENODEV;
+
+	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
+
+	if (int_cfg->irq_id < 0)
+		return -ENODEV;
+
+	irq_chip = irq_get_chip(int_cfg->irq_id);
+	if (!irq_chip)
+		return -ENODEV;
+
+	irq_data = irq_get_irq_data(int_cfg->irq_id);
+	if (!irq_data)
+		return -ENODEV;
+
+	if (mask) {
+		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_mask(irq_data);
+	} else {
+		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_unmask(irq_data);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_mask_receive_interrupt);
+
 int smd_wait_until_readable(smd_channel_t *ch, int bytes)
 {
 	return -1;
@@ -3246,8 +3297,10 @@
 			);
 	if (ret < 0) {
 		platform_irq->irq_id = ret;
+		private_irq->irq_id = ret;
 	} else {
 		platform_irq->irq_id = irq_id;
+		private_irq->irq_id = irq_id;
 		ret_wake = enable_irq_wake(irq_id);
 		if (ret_wake < 0) {
 			pr_err("smd: enable_irq_wake failed on %s",
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index bea567b..39fbba8 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -630,8 +630,6 @@
 {
 	if (machine_is_msm8960_cdp())
 		dummy_socinfo.id = 87;
-	else if (machine_is_apq8064_rumi3() || machine_is_apq8064_sim())
-		dummy_socinfo.id = 109;
 	else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp())
 		dummy_socinfo.id = 104;
 	else if (early_machine_is_msm8974()) {
@@ -756,8 +754,7 @@
 	if (!(read_cpuid_mpidr() & BIT(31)))
 		return 1;
 
-	if (read_cpuid_mpidr() & BIT(30) &&
-		!machine_is_apq8064_sim())
+	if (read_cpuid_mpidr() & BIT(30))
 		return 1;
 
 	/* 1 + the PART[1:0] field of MIDR */
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fb1ffd0..4657c37 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -288,6 +288,9 @@
 config CRYPTO_DEV_QCE40
 	bool
 
+config CRYPTO_DEV_QCE50
+	bool
+
 config CRYPTO_DEV_QCRYPTO
 	tristate "Qualcomm Crypto accelerator"
 	select CRYPTO_DES
@@ -303,20 +306,22 @@
 config CRYPTO_DEV_QCE
 	tristate "Qualcomm Crypto Engine (QCE) module"
 	select  CRYPTO_DEV_QCE40 if ARCH_MSM8960 || ARCH_MSM9615
+	select  CRYPTO_DEV_QCE50 if ARCH_MSM8974
 	default n
 	help
           This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
 	  MSM8x55, MSM8960 and MSM9615
 	  To compile this driver as a module, choose M here: the
 	  For MSM7x30 MSM8660 and MSM8x55 the module is called qce
-	  For MSM8960 and MSM9615 the module is called qce40
+	  For MSM8960, APQ8064 and MSM9615 the module is called qce40
+	  For MSM8974 the module is called qce50
 
 config CRYPTO_DEV_QCEDEV
 	tristate "QCEDEV Interface to CE module"
 	default n
 	help
           This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
-          MSM8960 and MSM9615.
+          MSM8960, MSM9615, APQ8064 and MSM8974.
           This exposes the interface to the QCE hardware accelerator via IOCTLs
 	  To compile this driver as a module, choose M here: the
 	  module will be called qcedev.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index 61406b9..df9acf2 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -1,8 +1,12 @@
 obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
-ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
-	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE50), y)
+	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce50.o
 else
-	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+	ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
+		obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+	else
+		obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+	endif
 endif
 obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
 obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 55cf651..4a9729c 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver.
  *
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2463,6 +2463,8 @@
 	ce_support->aes_xts  = false;
 	ce_support->aes_ccm  = false;
 	ce_support->ota = pce_dev->ota;
+	ce_support->aligned_only = false;
+	ce_support->bam = false;
 	return 0;
 }
 EXPORT_SYMBOL(qce_hw_support);
@@ -2703,7 +2705,5 @@
 EXPORT_SYMBOL(qce_f9_req);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("1.15");
 
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index edd2089..037861c 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver API
  *
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,7 @@
 	QCE_HASH_SHA1_HMAC   = 2,
 	QCE_HASH_SHA256_HMAC = 3,
 	QCE_HASH_AES_CMAC = 4,
+	QCE_AEAD_SHA1_HMAC = 5,
 	QCE_HASH_LAST
 };
 
@@ -110,6 +111,8 @@
 	bool aes_xts;
 	bool aes_ccm;
 	bool ota;
+	bool aligned_only;
+	bool bam;
 };
 
 /* Sha operation parameters */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index c203fc5..7a229a5 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2599,11 +2599,11 @@
 	ce_support->aes_xts = true;
 	ce_support->aes_ccm = true;
 	ce_support->ota = false;
+	ce_support->aligned_only = false;
+	ce_support->bam = false;
 	return 0;
 }
 EXPORT_SYMBOL(qce_hw_support);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("2.17");
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 0000000..4ccd89d
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,2739 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/qcedev.h>
+#include <linux/bitops.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+#include <mach/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+
+#define CRYPTO_CONFIG_RESET 0xE001F
+
+static DEFINE_MUTEX(bam_register_cnt);
+struct bam_registration_info {
+	uint32_t handle;
+	uint32_t cnt;
+};
+static struct bam_registration_info bam_registry;
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at a time. It is up to
+ * the software above to ensure single-threaded operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	int memsize;				/* Memory allocated */
+
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+
+	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
+	struct clk *ce_core_clk;	/* Handle to CE clk */
+	struct clk *ce_clk;		/* Handle to CE clk */
+
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+
+	int assoc_nents;
+	int ivsize;
+	int authsize;
+	int src_nents;
+	int dst_nents;
+
+	dma_addr_t phy_iv_in;
+
+	void *areq;
+	enum qce_cipher_mode_enum mode;
+	struct ce_sps_data ce_sps;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len  / sizeof(uint32_t) ;
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     ;
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   ;
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000) ;
+	}
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	mb();
+	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+	if ((maj_rev != 0x05) || (min_rev > 0x02) || (step_rev > 0x02)) {
+		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	};
+	if ((min_rev > 0)  && (step_rev != 0)) {
+		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	};
+	pce_dev->ce_sps.minor_version = min_rev;
+
+	dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
+			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+	pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_info(pce_dev->pdev,
+			"IO base, CE = 0x%x\n, "
+			"Consumer (IN) PIPE %d,    "
+			"Producer (OUT) PIPE %d\n"
+			"IO base BAM = 0x%x\n"
+			"BAM IRQ %d\n",
+			(uint32_t) pce_dev->iobase,
+			pce_dev->ce_sps.dest_pipe_index,
+			pce_dev->ce_sps.src_pipe_index,
+			(uint32_t)pce_dev->ce_sps.bam_iobase,
+			pce_dev->ce_sps.bam_irq);
+	return 0;
+};
+
+static int _ce_get_hash_cmdlistinfo(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info **cmdplistinfo)
+{
+	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		*cmdplistinfo = &cmdlistptr->auth_sha1;
+		break;
+
+	case QCE_HASH_SHA256:
+		*cmdplistinfo = &cmdlistptr->auth_sha256;
+		break;
+
+	case QCE_HASH_SHA1_HMAC:
+		*cmdplistinfo = &cmdlistptr->auth_sha1_hmac;
+			break;
+
+	case QCE_HASH_SHA256_HMAC:
+		*cmdplistinfo = &cmdlistptr->auth_sha256_hmac;
+		break;
+
+	case QCE_HASH_AES_CMAC:
+		if (sreq->authklen == AES128_KEY_SIZE)
+			*cmdplistinfo = &cmdlistptr->auth_aes_128_cmac;
+		else
+			*cmdplistinfo = &cmdlistptr->auth_aes_256_cmac;
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	bool sha1 = false;
+	struct sps_command_element *pce = NULL;
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+		uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+
+		_byte_stream_to_net_words(mackey32, sreq->authkey,
+						sreq->authklen);
+
+		/* check for null key. If null, use hw key*/
+		for (i = 0; i < authk_size_in_word; i++) {
+			if (mackey32[i] != 0)
+				break;
+		}
+
+		pce = cmdlistinfo->go_proc;
+		if (i == authk_size_in_word) {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+							pce_dev->phy_iobase);
+		} else {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+							pce_dev->phy_iobase);
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authk_size_in_word; i++, pce++)
+				pce->data = mackey32[i];
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = auth32[i];
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++, pce++)
+			pce->data = auth32[i];
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = sreq->auth_data[i];
+
+	/* Set/reset  last bit in CFG register  */
+	pce = cmdlistinfo->auth_seg_cfg;
+	if (sreq->last_blk)
+		pce->data |= 1 << CRYPTO_LAST;
+	else
+		pce->data &= ~(1 << CRYPTO_LAST);
+	if (sreq->first_blk)
+		pce->data |= 1 << CRYPTO_FIRST;
+	else
+		pce->data &= ~(1 << CRYPTO_FIRST);
+go_proc:
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = sreq->size;
+
+	/* write auth seg size start*/
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+	pce->data = sreq->size;
+
+	return 0;
+}
+
+static int _ce_get_cipher_cmdlistinfo(struct qce_device *pce_dev,
+				struct qce_req *creq,
+				struct qce_cmdlist_info **cmdlistinfo)
+{
+	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
+
+	if (creq->alg != CIPHER_ALG_AES) {
+		switch (creq->alg) {
+		case CIPHER_ALG_DES:
+			if (creq->mode ==  QCE_MODE_ECB)
+				*cmdlistinfo = &cmdlistptr->cipher_des_ecb;
+			else
+				*cmdlistinfo = &cmdlistptr->cipher_des_cbc;
+			break;
+
+		case CIPHER_ALG_3DES:
+			if (creq->mode ==  QCE_MODE_ECB)
+				*cmdlistinfo =
+					&cmdlistptr->cipher_3des_ecb;
+			else
+				*cmdlistinfo =
+					&cmdlistptr->cipher_3des_cbc;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (creq->mode) {
+		case QCE_MODE_ECB:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo = &cmdlistptr->cipher_aes_128_ecb;
+			else
+				*cmdlistinfo = &cmdlistptr->cipher_aes_256_ecb;
+			break;
+
+		case QCE_MODE_CBC:
+		case QCE_MODE_CTR:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo =
+					&cmdlistptr->cipher_aes_128_cbc_ctr;
+			else
+				*cmdlistinfo =
+					&cmdlistptr->cipher_aes_256_cbc_ctr;
+			break;
+
+		case QCE_MODE_XTS:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo = &cmdlistptr->cipher_aes_128_xts;
+			else
+				*cmdlistinfo = &cmdlistptr->cipher_aes_256_xts;
+			break;
+
+		case QCE_MODE_CCM:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo = &cmdlistptr->aead_aes_128_ccm;
+			else
+				*cmdlistinfo = &cmdlistptr->aead_aes_256_ccm;
+			break;
+
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+	struct sps_command_element *pce = NULL;
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+
+	/* check for null key. If null, use hw key*/
+	enck_size_in_word = key_size/sizeof(uint32_t);
+	for (i = 0; i < enck_size_in_word; i++) {
+		if (enckey32[i] != 0)
+			break;
+	}
+	pce = cmdlistinfo->go_proc;
+	if (i == enck_size_in_word) {
+		use_hw_key = true;
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+						pce_dev->phy_iobase);
+	} else {
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+						pce_dev->phy_iobase);
+	}
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		pce = cmdlistinfo->auth_nonce_info;
+		for (i = 0; i < noncelen32; i++, pce++)
+			pce->data = nonce32[i];
+
+		/* TBD  NEW FEATURE partial AES CCM  pkt support set last bit */
+		auth_cfg |= ((1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST));
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+						CRYPTO_AUTH_KEY_SIZE);
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+							CRYPTO_AUTH_KEY_SIZE);
+		}
+		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
+		auth_cfg |= ((MAX_NONCE/sizeof(uint32_t)) <<
+						CRYPTO_AUTH_NONCE_NUM_WORDS);
+
+		if (use_hw_key == true)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authklen32; i++, pce++)
+				pce->data = enckey32[i];
+		}
+
+		pce = cmdlistinfo->auth_seg_cfg;
+		pce->data = auth_cfg;
+
+		pce = cmdlistinfo->auth_seg_size;
+		pce->data = totallen_in;
+		pce = cmdlistinfo->auth_seg_start;
+		pce->data = 0;
+	}
+
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_CBC:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_XTS:
+		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_CCM:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_CTR:
+	default:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+		break;
+	}
+	pce_dev->mode = creq->mode;
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			pce = cmdlistinfo->encr_key;
+			pce->data = enckey32[0];
+			pce++;
+			pce->data = enckey32[1];
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			pce = cmdlistinfo->encr_key;
+			for (i = 0; i < 6; i++, pce++)
+				pce->data = enckey32[i];
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			_byte_stream_to_net_words(xtskey32, (creq->enckey +
+					creq->encklen/2), creq->encklen/2);
+			/* write xts encr key */
+			pce = cmdlistinfo->encr_xts_key;
+			for (i = 0; i < xtsklen; i++, pce++)
+				pce->data = xtskey32[i];
+
+			/* write xts du size */
+			pce = cmdlistinfo->encr_xts_du_size;
+			pce->data = creq->cryptlen;
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			/* write encr cntr iv */
+			pce = cmdlistinfo->encr_cntr_iv;
+			for (i = 0; i < 4; i++, pce++)
+				pce->data = enciv32[i];
+
+			if (creq->mode ==  QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				pce = cmdlistinfo->encr_ccm_cntr_iv;
+				for (i = 0; i < 4; i++, pce++)
+					pce->data = enciv32[i];
+				/* update cntr_iv[3] by one */
+				pce = cmdlistinfo->encr_cntr_iv;
+				pce += 3;
+				pce->data += 1;
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ);
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} else {
+			if (use_hw_key == false) {
+				/* write encr key */
+				pce = cmdlistinfo->encr_key;
+				for (i = 0; i < enck_size_in_word; i++, pce++)
+					pce->data = enckey32[i];
+				switch (key_size) {
+				case AES128_KEY_SIZE:
+					encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128
+							 << CRYPTO_ENCR_KEY_SZ);
+					break;
+				case AES256_KEY_SIZE:
+				default:
+					encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256
+							 << CRYPTO_ENCR_KEY_SZ);
+					break;
+				} /* end of switch (creq->encklen) */
+			}
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg) */
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data |= (1 << CRYPTO_ENCODE);
+		else
+			pce->data &= ~(1 << CRYPTO_ENCODE);
+		encr_cfg = pce->data;
+	} else {
+		encr_cfg |=
+			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	}
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	pce->data = encr_cfg;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		pce->data = (creq->cryptlen + creq->authsize);
+	else
+		pce->data = creq->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	return 0;
+};
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+	struct aead_request *areq;
+	unsigned char mac[SHA256_DIGEST_SIZE];
+
+	areq = (struct aead_request *) pce_dev->areq;
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+			DMA_TO_DEVICE);
+	/* check MAC */
+	memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	if (pce_dev->mode == QCE_MODE_CCM) {
+		uint32_t result_status;
+		result_status = pce_dev->ce_sps.result->status;
+		result_status &= (1 << CRYPTO_MAC_FAILED);
+		result_status |= (pce_dev->ce_sps.consumer_status |
+					pce_dev->ce_sps.producer_status);
+		pce_dev->qce_cb(areq, mac, NULL, result_status);
+	} else {
+		uint32_t ivsize = 0;
+		struct crypto_aead *aead;
+		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+		aead = crypto_aead_reqtfm(areq);
+		ivsize = crypto_aead_ivsize(aead);
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+		memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
+			sizeof(iv));
+		pce_dev->qce_cb(areq, mac, iv, pce_dev->ce_sps.consumer_status |
+			pce_dev->ce_sps.producer_status);
+
+	}
+	return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+	struct ahash_request *areq;
+	unsigned char digest[SHA256_DIGEST_SIZE];
+
+	areq = (struct ahash_request *) pce_dev->areq;
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				DMA_TO_DEVICE);
+	memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	pce_dev->qce_cb(areq, digest,
+			(char *)pce_dev->ce_sps.result->auth_byte_count,
+				pce_dev->ce_sps.consumer_status);
+};
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+			pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL,
+					pce_dev->ce_sps.consumer_status |
+					pce_dev->ce_sps.producer_status);
+	} else {
+		if (pce_dev->ce_sps.minor_version == 0) {
+			if (pce_dev->mode == QCE_MODE_CBC)
+				memcpy(iv, (char *)sg_virt(areq->src),
+							sizeof(iv));
+
+			if ((pce_dev->mode == QCE_MODE_CTR) ||
+				(pce_dev->mode == QCE_MODE_XTS)) {
+				uint32_t num_blk = 0;
+				uint32_t cntr_iv = 0;
+
+				memcpy(iv, areq->info, sizeof(iv));
+				if (pce_dev->mode == QCE_MODE_CTR)
+					num_blk = areq->nbytes/16;
+				cntr_iv = (u32)(((u32)(*(iv + 14))) << 8) |
+							(u32)(*(iv + 15));
+				*(iv + 14) = (char)((cntr_iv + num_blk) >> 8);
+				*(iv + 15) = (char)((cntr_iv + num_blk) & 0xFF);
+			}
+		} else {
+			memcpy(iv,
+				(char *)(pce_dev->ce_sps.result->encr_cntr_iv),
+				sizeof(iv));
+		}
+		pce_dev->qce_cb(areq, NULL, iv,
+			pce_dev->ce_sps.consumer_status |
+			pce_dev->ce_sps.producer_status);
+	}
+	return 0;
+};
+
+#ifdef QCE_DEBUG
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
+{
+	int i, j, ents;
+	struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
+	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD;
+
+	printk(KERN_INFO "==============================================\n");
+	printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+	printk(KERN_INFO "==============================================\n");
+	for (i = 0; i <  pce_dev->ce_sps.in_transfer.iovec_count; i++) {
+		printk(KERN_INFO " [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+					iovec->addr, iovec->size, iovec->flags);
+		if (iovec->flags & cmd_flags) {
+			struct sps_command_element *pced;
+
+			pced = (struct sps_command_element *)
+					(GET_VIRT_ADDR(iovec->addr));
+			ents = iovec->size/(sizeof(struct sps_command_element));
+			for (j = 0; j < ents; j++) {
+				printk(KERN_INFO "      [%d] [0x%x] 0x%x\n", j,
+					pced->addr, pced->data);
+				pced++;
+			}
+		}
+		iovec++;
+	}
+
+	printk(KERN_INFO "==============================================\n");
+	printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+	printk(KERN_INFO "==============================================\n");
+	iovec = pce_dev->ce_sps.out_transfer.iovec;
+	for (i = 0; i <  pce_dev->ce_sps.out_transfer.iovec_count; i++) {
+		printk(KERN_INFO " [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+				iovec->addr, iovec->size, iovec->flags);
+		iovec++;
+	}
+}
+
+#else
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
+{
+}
+#endif
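+
+/*
+ * Bring-up note (illustrative only; any equivalent build-time define
+ * works): the dump above is compiled in only when QCE_DEBUG is defined
+ * near the top of this file, e.g.:
+ *
+ *	#define QCE_DEBUG 1
+ *
+ * With it enabled, _qce_sps_transfer() prints the consumer and producer
+ * iovec chains, including the individual command elements of command
+ * descriptors, before they are submitted to the BAM.
+ */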
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev)
+{
+	pce_dev->ce_sps.in_transfer.iovec_count = 0;
+	pce_dev->ce_sps.out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_eot_flag(struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					(sps_bam_pipe->iovec_count - 1);
+	iovec->flags |= SPS_IOVEC_FLAG_EOT;
+}
+
+static void _qce_sps_add_data(uint32_t addr, uint32_t len,
+		struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	if (len) {
+		iovec->size = len;
+		iovec->addr = addr;
+		iovec->flags = 0;
+		sps_bam_pipe->iovec_count++;
+	}
+}
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t addr, data_cnt, len;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg_src));
+		nbytes -= len;
+		addr = sg_dma_address(sg_src);
+		if (pce_dev->ce_sps.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
+		while (len > 0) {
+			if (len > SPS_MAX_PKT_SIZE) {
+				data_cnt = SPS_MAX_PKT_SIZE;
+				iovec->size = data_cnt;
+				iovec->addr = addr;
+				iovec->flags = 0;
+			} else {
+				data_cnt = len;
+				iovec->size = data_cnt;
+				iovec->addr = addr;
+				iovec->flags = 0;
+			}
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		sg_src++;
+	}
+	return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+				struct qce_cmdlist_info *cmdptr,
+				struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	iovec->size = cmdptr->size;
+	iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist);
+	iovec->flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD | flag;
+	sps_bam_pipe->iovec_count++;
+
+	return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	_qce_dump_descr_fifos(pce_dev);
+	rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
+					  &pce_dev->ce_sps.in_transfer);
+	if (rc) {
+		pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d\n",
+				(u32)pce_dev->ce_sps.consumer.pipe, rc);
+		return rc;
+	}
+	rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
+					  &pce_dev->ce_sps.out_transfer);
+	if (rc) {
+		pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d\n",
+				(u32)pce_dev->ce_sps.producer.pipe, rc);
+		return rc;
+	}
+	return rc;
+}
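+
+/*
+ * The request functions below (qce_aead_req, qce_ablk_cipher_req and
+ * qce_process_sha_req) all build their BAM transfers with the same
+ * sequence; a condensed sketch (error handling omitted, src_sg/dst_sg
+ * stand in for the request's mapped scatterlists):
+ *
+ *	_qce_sps_iovec_count_init(pce_dev);
+ *	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+ *				&pce_dev->ce_sps.in_transfer);
+ *	_qce_sps_add_sg_data(pce_dev, src_sg, nbytes,
+ *				&pce_dev->ce_sps.in_transfer);
+ *	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+ *	_qce_sps_add_sg_data(pce_dev, dst_sg, nbytes,
+ *				&pce_dev->ce_sps.out_transfer);
+ *	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ *				CRYPTO_RESULT_DUMP_SIZE,
+ *				&pce_dev->ce_sps.out_transfer);
+ *	rc = _qce_sps_transfer(pce_dev);
+ */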
+
+/**
+ * Allocate and Connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it with a memory endpoint by calling the
+ * appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @is_producer - true means producer endpoint
+ *		  false means consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info = &ep->connect;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_info = sps_alloc_endpoint();
+	if (!sps_pipe_info) {
+		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+			   is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe = sps_pipe_info;
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_get_config() fail pipe_handle=0x%x, rc = %d\n",
+			(u32)sps_pipe_info, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For a CE producer transfer, the source should be
+		 * the CE peripheral, whereas the destination should
+		 * be system memory.
+		 */
+		sps_connect_info->source = pce_dev->ce_sps.bam_handle;
+		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_connect_info->mode = SPS_MODE_SRC;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	} else {
+		/*
+		 * For a CE consumer transfer, the source should be
+		 * system memory, whereas the destination should be
+		 * the CE peripheral.
+		 */
+		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+		sps_connect_info->destination = pce_dev->ce_sps.bam_handle;
+		sps_connect_info->mode = SPS_MODE_DEST;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	}
+
+	/* Producer pipe index */
+	sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index;
+	/* Consumer pipe index */
+	sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index;
+	sps_connect_info->event_thresh = 0x10;
+	/*
+	 * Max. number of scatter/gather buffers that can
+	 * be passed by the block layer = 32 (NR_SG).
+	 * Each BAM descriptor needs 64 bits (8 bytes).
+	 * One BAM descriptor is required per buffer transfer.
+	 * So in total 256 (32 * 8) bytes of descriptor FIFO are required.
+	 * Due to a HW limitation, at least one extra descriptor's worth
+	 * of memory must be allocated (256 bytes + 8 bytes). To keep the
+	 * size a power of 2, 512 bytes of memory are allocated.
+	 */
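+	/*
+	 * Equivalently (illustrative only, using the figures above):
+	 * roundup_pow_of_two((32 + 1) * 8) = roundup_pow_of_two(264) = 512.
+	 */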
+	sps_connect_info->desc.size = 512;
+	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+					sps_connect_info->desc.size,
+					&sps_connect_info->desc.phys_base,
+					GFP_KERNEL);
+	if (sps_connect_info->desc.base == NULL) {
+		rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto get_config_err;
+	}
+
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
+			(u32)sps_pipe_info, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->options = SPS_O_EOT;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)pce_dev;
+
+	pr_debug("success, %s : pipe_handle=0x%x, desc fifo base (phy) = 0x%x\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(u32)sps_pipe_info, sps_connect_info->desc.phys_base);
+	goto out;
+
+sps_connect_err:
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_info);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and Deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_info = ep->pipe;
+	struct sps_connect *sps_connect_info = &ep->connect;
+
+	sps_disconnect(sps_pipe_info);
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+	sps_free_endpoint(sps_pipe_info);
+}
+
+/**
+ * Initialize the SPS HW connected with the CE core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes the two SPS endpoints.
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	bool register_bam = false;
+
+	bam.phys_addr = pce_dev->ce_sps.bam_mem;
+	bam.virt_addr = pce_dev->ce_sps.bam_iobase;
+
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfers. It is ignored for BAM-to-system mode transfers.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * The SPS HW will only be used when the
+	 * data transfer size is > 64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* The SPS driver will handle the crypto BAM IRQ */
+	bam.irq = (u32)pce_dev->ce_sps.bam_irq;
+	bam.manage = SPS_BAM_MGR_LOCAL;
+
+	pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
+	pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
+
+	mutex_lock(&bam_register_cnt);
+	if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
+		/* Register CE Peripheral BAM device to SPS driver */
+		rc = sps_register_bam_device(&bam, &bam_registry.handle);
+		if (rc) {
+			pr_err("sps_register_bam_device() failed! err=%d", rc);
+			mutex_unlock(&bam_register_cnt);
+			return -EIO;
+		}
+		bam_registry.cnt++;
+		register_bam = true;
+	} else {
+		bam_registry.cnt++;
+	}
+	mutex_unlock(&bam_register_cnt);
+	pce_dev->ce_sps.bam_handle =  bam_registry.handle;
+	pr_debug("BAM device registered. bam_handle=0x%x",
+		pce_dev->ce_sps.bam_handle);
+
+	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true);
+	if (rc)
+		goto sps_connect_producer_err;
+	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false);
+	if (rc)
+		goto sps_connect_consumer_err;
+
+	pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
+	pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
+	pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
+		(unsigned long long)pce_dev->ce_sps.bam_mem,
+		(unsigned int)pce_dev->ce_sps.bam_irq);
+	return rc;
+
+sps_connect_consumer_err:
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
+sps_connect_producer_err:
+	if (register_bam)
+		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+
+	return rc;
+}
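+
+/*
+ * Usage sketch (the probe-time caller sits outside these functions, so
+ * the exact sequence is an assumption): the caller is expected to fill
+ * in ce_sps.bam_mem, ce_sps.bam_iobase, ce_sps.bam_irq and the
+ * pipe/pipe-pair indices, carve up the coherent memory via
+ * qce_setup_ce_sps_data() below, and then call qce_sps_init() once;
+ * qce_sps_exit() undoes this at remove time.
+ */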
+
+/**
+ * De-initialize SPS HW connected with CE core
+ *
+ * This function de-initializes the SPS endpoints and then
+ * deregisters the BAM resources from the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer);
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
+	mutex_lock(&bam_register_cnt);
+	if ((bam_registry.handle != 0) && (bam_registry.cnt == 1)) {
+		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+		bam_registry.cnt = 0;
+		bam_registry.handle = 0;
+	}
+	if ((bam_registry.handle != 0) && (bam_registry.cnt > 1))
+		bam_registry.cnt--;
+	mutex_unlock(&bam_register_cnt);
+
+	iounmap(pce_dev->ce_sps.bam_iobase);
+}
+
+static void _aead_sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _aead_sps_consumer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _sha_sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_sha_complete(pce_dev);
+	}
+};
+
+static void _sha_sps_consumer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_sha_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_sps_consumer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void qce_add_cmd_element(struct qce_device *pdev,
+			struct sps_command_element **cmd_ptr, u32 addr,
+			u32 data, struct sps_command_element **populate)
+{
+	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+	(*cmd_ptr)->data = data;
+	(*cmd_ptr)->mask = 0xFFFFFFFF;
+	if (populate != NULL)
+		*populate = *cmd_ptr;
+	(*cmd_ptr)++;
+}
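+
+/*
+ * Each element appended by qce_add_cmd_element() describes one register
+ * write: when the BAM executes the command descriptor, it writes 'data'
+ * to the physical address phy_iobase + addr with a full 32-bit mask.
+ * For example, the cipher and auth command lists below each start by
+ * seeding CRYPTO_CONFIG_REG with the crypto_cfg value computed for the
+ * selected pipe pair.
+ */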
+
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t xts_key_reg = 0;
+	uint32_t iv_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to AES cipher operations defined
+	 * in the qce_cmdlistptr_ops structure.
+	 */
+	switch (mode) {
+	case QCE_MODE_CBC:
+	case QCE_MODE_CTR:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+						CRYPTO_ENCR_ALG);
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+							CRYPTO_ENCR_ALG);
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_ECB:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_ecb.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_ecb.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+							CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_XTS:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_xts.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_XTS <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 4;
+		} else {
+			cmdlistptr->cipher_aes_256_xts.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+							CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_XTS <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 8;
+		}
+	break;
+	default:
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
+						&pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (xts_key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+					0, &pcl_info->encr_xts_key);
+		for (i = 1; i < xts_key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_XTS_KEY0_REG +
+						i * sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0, NULL);
+	}
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+	/* Add dummy writes to align the size to a burst-size multiple */
+	if (mode == QCE_MODE_XTS) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+						0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, NULL);
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+		bool mode_cbc)
+{
+
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t iv_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to cipher operations defined
+	 * in the qce_cmdlistptr_ops structure.
+	 */
+	switch (alg) {
+	case CIPHER_ALG_DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_des_cbc.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_CBC <<
+							CRYPTO_ENCR_MODE);
+			iv_reg = 2;
+			key_reg = 2;
+		} else {
+			cmdlistptr->cipher_des_ecb.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 2;
+		}
+	break;
+	case CIPHER_ALG_3DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_3des_cbc.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_CBC <<
+							CRYPTO_ENCR_MODE);
+			iv_reg = 2;
+			key_reg = 6;
+		} else {
+			cmdlistptr->cipher_3des_ecb.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 6;
+		}
+	break;
+	default:
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
+						&pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+		/* Add a dummy write to align the size to a burst-size multiple */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR2_IV2_REG, 0,
+								NULL);
+	}
+	/* Add dummy writes to align the size to a burst-size multiple */
+	if (!mode_cbc) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+						0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, NULL);
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr =
+			(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t key_reg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in the qce_cmdlistptr_ops structure.
+	 */
+	switch (alg) {
+	case QCE_HASH_SHA1:
+		cmdlistptr->auth_sha1.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		iv_reg = 5;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+							0, NULL);
+
+	break;
+	case QCE_HASH_SHA256:
+		cmdlistptr->auth_sha256.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		iv_reg = 8;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_SHA1_HMAC:
+		cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		key_reg = 16;
+		iv_reg = 5;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+							0, NULL);
+	break;
+	case QCE_AEAD_SHA1_HMAC:
+		cmdlistptr->aead_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_sha1_hmac);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS) |
+				(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+		key_reg = 16;
+		iv_reg = 5;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+							0, NULL);
+	break;
+	case QCE_HASH_SHA256_HMAC:
+		cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		key_reg = 16;
+		iv_reg = 8;
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_AES_CMAC:
+		if (key_128 == true) {
+			cmdlistptr->auth_aes_128_cmac.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+			auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+				(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+							CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_KEY_SZ_AES128 <<
+							CRYPTO_AUTH_KEY_SIZE) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+			key_reg = 4;
+		} else {
+			cmdlistptr->auth_aes_256_cmac.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+			auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST)|
+				(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+							CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_KEY_SZ_AES256 <<
+							CRYPTO_AUTH_KEY_SIZE) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+			key_reg = 8;
+		}
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+								0, NULL);
+	break;
+	default:
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	if (alg == QCE_HASH_AES_CMAC) {
+		/* reset auth iv, bytecount and key  registers */
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, NULL);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, &pcl_info->auth_bytecount);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	if (key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+		for (i = 1; i < key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	if (alg != QCE_AEAD_SHA1_HMAC)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+				unsigned char **pvaddr, bool key_128)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to aead operations
+	 * defined in the qce_cmdlistptr_ops structure.
+	 */
+	if (key_128 == true) {
+		cmdlistptr->aead_aes_128_ccm.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+		auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+			(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+			(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+			(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+		encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+			(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+			((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
+		key_reg = 4;
+	} else {
+
+		cmdlistptr->aead_aes_256_ccm.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+			(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+			(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+			(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+			((MAX_NONCE/sizeof(uint32_t)) <<
+						CRYPTO_AUTH_NONCE_NUM_WORDS);
+		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+		encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+			(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+			((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
+		key_reg = 8;
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+				crypto_cfg, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+									NULL);
+	/* add 1 dummy */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+					encr_cfg, &pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+	/* reset auth iv, bytecount and key  registers */
+	for (i = 0; i < 8; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+					0, NULL);
+	for (i = 0; i < 16; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set auth key */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+							&pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set NONCE info */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+						&pcl_info->auth_nonce_info);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_INFO_NONCE0_REG +
+				i * sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+						&pcl_info->encr_ccm_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+			(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr->unlock_all_pipes.cmdlist = (uint32_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate chunks of the allocated memory to command list
+	 * to unlock pipes.
+	 */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int qce_setup_cmdlistptrs(struct qce_device *pdev,
+					unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to operations defined
+	 * in the qce_cmdlistptr_ops structure.
+	 */
+	ce_vaddr =
+		(struct sps_command_element *) ALIGN(((unsigned int) ce_vaddr),
+									16);
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false);
+
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, true);
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false);
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true);
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false);
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false);
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true);
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_AEAD_SHA1_HMAC, false);
+
+	_setup_aead_cmdlistptrs(pdev, pvaddr, true);
+	_setup_aead_cmdlistptrs(pdev, pvaddr, false);
+	_setup_unlock_pipe_cmdlistptrs(pdev, pvaddr);
+
+	return 0;
+}
+
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr),  16);
+
+	/* Allow for 256 descriptor (cmd and data) entries per pipe */
+	pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr;
+	pce_dev->ce_sps.in_transfer.iovec_phys =
+					(uint32_t)GET_PHYS_ADDR(vaddr);
+	vaddr += MAX_BAM_DESCRIPTORS * 8;
+
+	pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr;
+	pce_dev->ce_sps.out_transfer.iovec_phys =
+					(uint32_t)GET_PHYS_ADDR(vaddr);
+	vaddr += MAX_BAM_DESCRIPTORS * 8;
+
+	qce_setup_cmdlistptrs(pce_dev, &vaddr);
+	pce_dev->ce_sps.result_dump = (uint32_t)vaddr;
+	pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr;
+	vaddr += 128;
+
+	return 0;
+}
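+
+/*
+ * Resulting layout of pce_dev->coh_vmem after qce_setup_ce_sps_data()
+ * (relative to its 16-byte aligned start):
+ *
+ *	[ in_transfer iovecs  : MAX_BAM_DESCRIPTORS * 8 bytes   ]
+ *	[ out_transfer iovecs : MAX_BAM_DESCRIPTORS * 8 bytes   ]
+ *	[ command lists built by qce_setup_cmdlistptrs()        ]
+ *	[ result dump (struct ce_result_dump_format), 128 bytes ]
+ */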
+
+int qce_aead_sha1_hmac_setup(struct qce_req *creq, struct crypto_aead *aead,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t authk_size_in_word = creq->authklen/sizeof(uint32_t);
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	struct sps_command_element *pce = NULL;
+	struct aead_request *areq = (struct aead_request *)creq->areq;
+	int i;
+
+	_byte_stream_to_net_words(mackey32, creq->authkey,
+					creq->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = _std_init_vector_sha1[i];
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = creq->cryptlen + areq->assoclen + crypto_aead_ivsize(aead);
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	return 0;
+}
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	uint32_t ivsize = 0;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int rc = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	struct qce_cmdlist_info *auth_cmdlistinfo = NULL;
+
+	if (q_req->mode != QCE_MODE_CCM)
+		ivsize = crypto_aead_ivsize(aead);
+
+	ce_burst_size = pce_dev->ce_sps.ce_burst_size;
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
+		if (q_req->mode == QCE_MODE_CCM) {
+			out_len = areq->cryptlen + authsize;
+			hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+		} else {
+			out_len = areq->cryptlen;
+		}
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		if (q_req->mode == QCE_MODE_CCM)
+			totallen_in = areq->cryptlen + areq->assoclen;
+		else
+			totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
+
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
+	pce_dev->ivsize = q_req->ivsize;
+	pce_dev->authsize = q_req->authsize;
+	pce_dev->phy_iv_in = 0;
+
+	/* associated data input */
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	/* cipher input */
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output for encryption */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, out_len);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		pce_dev->dst_nents = pce_dev->src_nents;
+	}
+
+	_ce_get_cipher_cmdlistinfo(pce_dev, q_req, &cmdlistinfo);
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+				areq->assoclen + ivsize, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	if (q_req->mode != QCE_MODE_CCM) {
+		rc = qce_aead_sha1_hmac_setup(q_req, aead, auth_cmdlistinfo);
+		if (rc < 0)
+			goto bad;
+		/* overwrite seg size */
+		cmdlistinfo->seg_size->data = totallen_in;
+		/* cipher iv for input */
+		pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+			ivsize, DMA_TO_DEVICE);
+	}
+
+	/* setup for callback, and issue command to bam */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	/* Register a callback for the EOT (end of transfer) event. */
+	pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register a callback for the EOT (end of transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback = _aead_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+
+	if (pce_dev->ce_sps.minor_version == 0) {
+		_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
+					&pce_dev->ce_sps.in_transfer);
+
+		_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
+					areq->assoclen + hw_pad_out,
+				&pce_dev->ce_sps.out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					&pce_dev->ce_sps.out_transfer);
+	} else {
+		_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+					 &pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
+					&pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+					&pce_dev->ce_sps.in_transfer);
+		_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+		/* Pass through to ignore associated (+iv, if applicable) data*/
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+				(ivsize + areq->assoclen),
+				&pce_dev->ce_sps.out_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+					&pce_dev->ce_sps.out_transfer);
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+				hw_pad_out, &pce_dev->ce_sps.out_transfer);
+
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer);
+	}
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	if (pce_dev->phy_iv_in) {
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	_ce_get_cipher_cmdlistinfo(pce_dev, c_req, &cmdlistinfo);
+
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	} else {
+		pce_dev->dst_nents = pce_dev->src_nents;
+	}
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for client callback, and issue command to BAM */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+
+	/* Register producer callback for the EOT (end of transfer) event. */
+	pce_dev->ce_sps.producer.event.callback =
+				_ablk_cipher_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register consumer callback for the EOT (end of transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback =
+			_ablk_cipher_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+	_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+					&pce_dev->ce_sps.out_transfer);
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+		pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src,
+				pce_dev->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	_ce_get_hash_cmdlistinfo(pce_dev, sreq, &cmdlistinfo);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+	rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+
+	/* Register producer callback for the EOT (end of transfer) event. */
+	pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register consumer callback for the EOT (end of transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback = _sha_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						 &pce_dev->ce_sps.in_transfer);
+	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+				&pce_dev->ce_sps.pipe_pair_index)) {
+		pr_err("Fail to get bam pipe pair information.\n");
+		return -EINVAL;
+	} else {
+		pr_warn("bam_pipe_pair=0x%x", pce_dev->ce_sps.pipe_pair_index);
+	}
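+	/* Pipe pair n maps to two consecutive BAM pipes: 2n and 2n + 1. */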
+	pce_dev->ce_sps.dest_pipe_index	= 2 * pce_dev->ce_sps.pipe_pair_index;
+	pce_dev->ce_sps.src_pipe_index	= pce_dev->ce_sps.dest_pipe_index + 1;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Can not map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+	pr_warn("ce_phy_reg_base=0x%x  ", pce_dev->phy_iobase);
+	pr_warn("ce_virt_reg_base=0x%x\n", (uint32_t)pce_dev->iobase);
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-bam-base");
+	if (resource) {
+		pce_dev->ce_sps.bam_mem = resource->start;
+		pce_dev->ce_sps.bam_iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->ce_sps.bam_iobase) {
+			rc = -ENOMEM;
+			pr_err("Can not map BAM io memory\n");
+			goto err_getting_bam_info;
+		}
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+	pr_warn("ce_bam_phy_reg_base=0x%x  ", pce_dev->ce_sps.bam_mem);
+	pr_warn("ce_bam_virt_reg_base=0x%x\n",
+				(uint32_t)pce_dev->ce_sps.bam_iobase);
+
+	resource  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		pce_dev->ce_sps.bam_irq = resource->start;
+		pr_warn("CRYPTO BAM IRQ = %d.\n", pce_dev->ce_sps.bam_irq);
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENODEV;
+		goto err_dev;
+	}
+	return rc;
+err_dev:
+	if (pce_dev->ce_sps.bam_iobase)
+		iounmap(pce_dev->ce_sps.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
+
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+
+	/* Get CE3 src core clk. */
+	ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(ce_core_src_clk)) {
+		pce_dev->ce_core_src_clk = ce_core_src_clk;
+
+		/* Set the core src clk to 100 MHz */
+		rc = clk_set_rate(pce_dev->ce_core_src_clk, 100000000);
+		if (rc) {
+			clk_put(pce_dev->ce_core_src_clk);
+			pr_err("Unable to set the core src clk @100Mhz.\n");
+			goto err_clk;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		pce_dev->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+	if (IS_ERR(ce_core_clk)) {
+		rc = PTR_ERR(ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		goto err_clk;
+	}
+	pce_dev->ce_core_clk = ce_core_clk;
+
+	/* Get CE Interface clk */
+	ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+	if (IS_ERR(ce_clk)) {
+		rc = PTR_ERR(ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		clk_put(pce_dev->ce_core_clk);
+		goto err_clk;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	/* Enable CE core clk */
+	rc = clk_prepare_enable(pce_dev->ce_core_clk);
+	if (rc) {
+		pr_err("Unable to enable/prepare CE core clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		clk_put(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_clk);
+		goto err_clk;
+	} else {
+		/* Enable CE clk */
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			clk_disable_unprepare(pce_dev->ce_core_clk);
+			if (pce_dev->ce_core_src_clk != NULL)
+				clk_put(pce_dev->ce_core_src_clk);
+			clk_put(pce_dev->ce_core_clk);
+			clk_put(pce_dev->ce_clk);
+			goto err_clk;
+		}
+	}
+err_clk:
+	if (rc)
+		pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		pr_err("Can not allocate memory: %d\n", *rc);
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	if (pdev->dev.of_node) {
+		*rc = __qce_get_device_tree_data(pdev, pce_dev);
+		if (*rc)
+			goto err_pce_dev;
+	} else {
+		*rc = -EINVAL;
+		pr_err("Device Node not found.\n");
+		goto err_pce_dev;
+	}
+
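+	/*
+	 * Coherent memory holding the SPS command lists, result dump and
+	 * ignore buffer.
+	 */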
+	pce_dev->memsize = 9 * PAGE_SIZE;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto err_iobase;
+	}
+
+	*rc = __qce_init_clk(pce_dev);
+	if (*rc)
+		goto err_mem;
+
+	if (_probe_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+	qce_setup_ce_sps_data(pce_dev);
+	qce_sps_init(pce_dev);
+
+	return pce_dev;
+err:
+	clk_disable_unprepare(pce_dev->ce_clk);
+	clk_disable_unprepare(pce_dev->ce_core_clk);
+
+	if (pce_dev->ce_core_src_clk != NULL)
+		clk_put(pce_dev->ce_core_src_clk);
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+err_mem:
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+			pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+	if (pce_dev->ce_sps.bam_iobase)
+		iounmap(pce_dev->ce_sps.bam_iobase);
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+err_pce_dev:
+	kfree(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+
+	clk_disable_unprepare(pce_dev->ce_clk);
+	clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk != NULL)
+		clk_put(pce_dev->ce_core_src_clk);
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+
+	qce_sps_exit(pce_dev);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	ce_support->ota = false;
+	ce_support->bam = true;
+	if (pce_dev->ce_sps.minor_version) {
+		ce_support->aligned_only = false;
+		ce_support->aes_ccm = true;
+	} else {
+		ce_support->aligned_only = true;
+		ce_support->aes_ccm = false;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+static int __init qce_init(void)
+{
+	bam_registry.handle = 0;
+	bam_registry.cnt = 0;
+	return 0;
+}
+
+static void __exit qce_exit(void)
+{
+	bam_registry.handle = 0;
+	bam_registry.cnt = 0;
+}
+
+module_init(qce_init);
+module_exit(qce_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 0000000..c9eba82
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <mach/sps.h>
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+#define MAX_BAM_DESCRIPTORS	(0x40 - 1)
+
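+/*
+ * Translate between the coherent buffer's kernel virtual address and its
+ * physical (DMA) address.
+ */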
+#define GET_VIRT_ADDR(x)  \
+		((uint32_t)pce_dev->coh_vmem +			\
+		((uint32_t)x - pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(pce_dev->coh_pmem + (x - (uint32_t)pce_dev->coh_vmem))
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptors in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE  (64 * 1024  - 1)
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE Result DUMP format*/
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	uint32_t status;
+	uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+
+	uint32_t cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_xts_du_size;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	uint32_t size;
+};
+
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_sha1_hmac;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+
+/* DM data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+
+	struct qce_sps_ep_conn_data	producer;
+	struct qce_sps_ep_conn_data	consumer;
+	struct sps_event_notify		notify;
+	struct scatterlist		*src;
+	struct scatterlist		*dst;
+	unsigned int			pipe_pair_index;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	uint32_t			bam_handle;
+
+	enum qce_pipe_st_enum consumer_state;	/* Consumer pipe state */
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+
+	int ce_burst_size;
+
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump;
+	uint32_t ignore_buffer;
+	struct ce_result_dump_format *result;
+	uint32_t minor_version;
+};
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index fecce3f..63f7fd9 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -2016,21 +2016,8 @@
 	struct qcedev_control *podev;
 	struct msm_ce_hw_support *platform_support;
 
-	if (pdev->id >= MAX_QCE_DEVICE) {
-		pr_err("%s: device id %d  exceeds allowed %d\n",
-			__func__, pdev->id, MAX_QCE_DEVICE);
-		return -ENOENT;
-	}
-	podev = &qce_dev[pdev->id];
+	podev = &qce_dev[0];
 
-	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
-	podev->platform_support.ce_shared = platform_support->ce_shared;
-	podev->platform_support.shared_ce_resource =
-				platform_support->shared_ce_resource;
-	podev->platform_support.hw_key_support =
-				platform_support->hw_key_support;
-	podev->platform_support.bus_scale_table =
-				platform_support->bus_scale_table;
 	podev->ce_lock_count = 0;
 	podev->high_bw_req_count = 0;
 	INIT_LIST_HEAD(&podev->ready_commands);
@@ -2050,7 +2037,6 @@
 	podev->qce = handle;
 	podev->pdev = pdev;
 	platform_set_drvdata(pdev, podev);
-	qce_hw_support(podev->qce, &podev->ce_support);
 
 	if (podev->platform_support.bus_scale_table != NULL) {
 		podev->bus_scale_handle =
@@ -2065,7 +2051,25 @@
 		}
 	}
 	rc = misc_register(&podev->miscdevice);
-
+	qce_hw_support(podev->qce, &podev->ce_support);
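+	/*
+	 * CE with a BAM back end does not rely on the legacy platform data;
+	 * use fixed defaults instead.
+	 */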
+	if (podev->ce_support.bam) {
+		podev->platform_support.ce_shared = 0;
+		podev->platform_support.shared_ce_resource = 0;
+		podev->platform_support.hw_key_support = 0;
+		podev->platform_support.bus_scale_table = NULL;
+		podev->platform_support.sha_hmac = 1;
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		podev->platform_support.ce_shared = platform_support->ce_shared;
+		podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		podev->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		podev->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
 	if (rc >= 0)
 		return 0;
 	else
@@ -2230,9 +2234,7 @@
 }
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
-MODULE_VERSION("1.27");
 
 module_init(qcedev_init);
 module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index c11c36e..7fc5cab 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -228,6 +228,13 @@
 	enum qce_cipher_alg_enum alg;
 	enum qce_cipher_dir_enum dir;
 	enum qce_cipher_mode_enum mode;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist *orig_dst;	/* Original dst sg ptr  */
+	struct scatterlist dsg;		/* Dest Data sg  */
+	struct scatterlist ssg;		/* Source Data sg  */
+	unsigned char *data;		/* Incoming data pointer*/
+
 };
 
 #define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
@@ -275,6 +282,11 @@
 	};
 	struct scatterlist *src;
 	uint32_t nbytes;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist dsg;		/* Data sg */
+	unsigned char *data;		/* Incoming data pointer*/
+	unsigned char *data2;		/* Updated data pointer*/
 };
 
 static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
@@ -497,9 +509,6 @@
 				&sha_ctx->ahash_req_complete);
 	crypto_ahash_clear_flags(ahash, ~0);
 
-	if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
-		qcrypto_ce_high_bw_req(sha_ctx->cp, true);
-
 	return 0;
 };
 
@@ -785,7 +794,6 @@
 	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
 				areq, ret);
 #endif
-
 	if (digest) {
 		memcpy(sha_ctx->digest, digest, diglen);
 		memcpy(areq->result, digest, diglen);
@@ -819,6 +827,10 @@
 		cp->res = 0;
 		pstat->sha_op_success++;
 	}
+	if (cp->ce_support.aligned_only)  {
+		areq->src = rctx->orig_src;
+		kfree(rctx->data);
+	}
 
 	if (cp->platform_support.ce_shared)
 		schedule_work(&cp->unlock_ce_ws);
@@ -850,6 +862,24 @@
 		cp->res = 0;
 		pstat->ablk_cipher_op_success++;
 	}
+
+	if (cp->ce_support.aligned_only)  {
+		struct qcrypto_cipher_req_ctx *rctx;
+		struct scatterlist *sg;
+		uint32_t bytes = 0;
+
+		rctx = ablkcipher_request_ctx(areq);
+		areq->src = rctx->orig_src;
+		areq->dst = rctx->orig_dst;
+
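+		/*
+		 * Copy the result from the bounce buffer back into the
+		 * caller's scatterlist.
+		 */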
+		for (sg = areq->dst; bytes != areq->nbytes; sg++) {
+			memcpy(sg_virt(sg), ((char *)rctx->data + bytes),
+								sg->length);
+			bytes += sg->length;
+		}
+		kfree(rctx->data);
+	}
+
 	if (cp->platform_support.ce_shared)
 		schedule_work(&cp->unlock_ce_ws);
 	tasklet_schedule(&cp->done_tasklet);
@@ -871,6 +901,30 @@
 	rctx = aead_request_ctx(areq);
 
 	if (rctx->mode == QCE_MODE_CCM) {
+		if (cp->ce_support.aligned_only)  {
+			struct qcrypto_cipher_req_ctx *rctx;
+			struct scatterlist *sg;
+			uint32_t bytes = 0;
+			uint32_t nbytes = 0;
+
+			rctx = aead_request_ctx(areq);
+			areq->src = rctx->orig_src;
+			areq->dst = rctx->orig_dst;
+			if (rctx->dir == QCE_ENCRYPT)
+				nbytes = areq->cryptlen +
+						crypto_aead_authsize(aead);
+			else
+				nbytes = areq->cryptlen -
+						crypto_aead_authsize(aead);
+
+			for (sg = areq->dst; bytes != nbytes; sg++) {
+				memcpy(sg_virt(sg),
+				((char *)rctx->data + rctx->assoclen + bytes),
+								sg->length);
+				bytes += sg->length;
+			}
+			kfree(rctx->data);
+		}
 		kzfree(rctx->assoc);
 		areq->assoc = rctx->assoc_sg;
 		areq->assoclen = rctx->assoclen;
@@ -997,17 +1051,222 @@
 	return 0;
 }
 
+static int _qcrypto_process_ablkcipher(struct crypto_priv *cp,
+				struct crypto_async_request *async_req)
+{
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+
+	req = container_of(async_req, struct ablkcipher_request, base);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx = ablkcipher_request_ctx(req);
+	tfm = crypto_ablkcipher_reqtfm(req);
+	if (cp->ce_support.aligned_only) {
+		uint32_t bytes = 0;
+		struct scatterlist *sg = req->src;
+
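+		/*
+		 * Aligned-only CE: save the original scatterlists and
+		 * linearize the request into a single bounce buffer.
+		 */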
+		rctx->orig_src = req->src;
+		rctx->orig_dst = req->dst;
+		rctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
+		for (sg = req->src; bytes != req->nbytes; sg++) {
+			memcpy(((char *)rctx->data + bytes),
+					sg_virt(sg), sg->length);
+			bytes += sg->length;
+		}
+		sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+		sg_mark_end(&rctx->dsg);
+		rctx->iv = req->info;
+
+		req->src = &rctx->dsg;
+		req->dst = &rctx->dsg;
+
+	}
+	qreq.op = QCE_REQ_ABLK_CIPHER;
+	qreq.qce_cb = _qce_ablk_cipher_complete;
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.iv = req->info;
+	qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+	qreq.cryptlen = req->nbytes;
+	qreq.use_pmem = 0;
+
+	if ((cipher_ctx->enc_key_len == 0) &&
+			(cp->platform_support.hw_key_support == 0))
+		ret = -EINVAL;
+	else
+		ret =  qce_ablk_cipher_req(cp->qce, &qreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_priv *cp,
+				struct crypto_async_request *async_req)
+{
+	struct ahash_request *req;
+	struct qce_sha_req sreq;
+	struct qcrypto_sha_ctx *sha_ctx;
+	int ret = 0;
+
+	req = container_of(async_req,
+				struct ahash_request, base);
+	sha_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	sreq.qce_cb = _qce_ahash_complete;
+	sreq.digest =  &sha_ctx->digest[0];
+	sreq.src = req->src;
+	sreq.auth_data[0] = sha_ctx->byte_count[0];
+	sreq.auth_data[1] = sha_ctx->byte_count[1];
+	sreq.auth_data[2] = sha_ctx->byte_count[2];
+	sreq.auth_data[3] = sha_ctx->byte_count[3];
+	sreq.first_blk = sha_ctx->first_blk;
+	sreq.last_blk = sha_ctx->last_blk;
+	sreq.size = req->nbytes;
+	sreq.areq = req;
+
+	switch (sha_ctx->alg) {
+	case QCE_HASH_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		sreq.alg = QCE_HASH_SHA1_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		sreq.alg = QCE_HASH_SHA256_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	default:
+		break;
+	};
+	ret =  qce_process_sha_req(cp->qce, &sreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_priv *cp,
+				struct crypto_async_request *async_req)
+{
+	struct qce_req qreq;
+	int ret = 0;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct aead_request *req = container_of(async_req,
+				struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	rctx = aead_request_ctx(req);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	qreq.op = QCE_REQ_AEAD;
+	qreq.qce_cb = _qce_aead_complete;
+
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.iv = rctx->iv;
+
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.authkey = cipher_ctx->auth_key;
+	qreq.authklen = cipher_ctx->auth_key_len;
+	qreq.authsize = crypto_aead_authsize(aead);
+	qreq.ivsize =  crypto_aead_ivsize(aead);
+	if (qreq.mode == QCE_MODE_CCM) {
+		if (qreq.dir == QCE_ENCRYPT)
+			qreq.cryptlen = req->cryptlen;
+		else
+			qreq.cryptlen = req->cryptlen -
+						qreq.authsize;
+		/* Get NONCE */
+		ret = qccrypto_set_aead_ccm_nonce(&qreq);
+		if (ret)
+			return ret;
+
+		/* Format Associated data    */
+		ret = qcrypto_aead_ccm_format_adata(&qreq,
+						req->assoclen,
+						req->assoc);
+		if (ret)
+			return ret;
+
+		if (cp->ce_support.aligned_only) {
+			uint32_t bytes = 0;
+			struct scatterlist *sg = req->src;
+
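+			/*
+			 * Aligned-only CE: stage the formatted assoc data
+			 * followed by the payload in one bounce buffer.
+			 */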
+			rctx->orig_src = req->src;
+			rctx->orig_dst = req->dst;
+			rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
+					qreq.authsize + 64*2), GFP_KERNEL);
+
+			memcpy((char *)rctx->data, qreq.assoc, qreq.assoclen);
+
+			for (sg = req->src; bytes != req->cryptlen; sg++) {
+				memcpy((rctx->data + bytes + qreq.assoclen),
+						sg_virt(sg), sg->length);
+				bytes += sg->length;
+			}
+			sg_set_buf(&rctx->ssg, rctx->data, req->cryptlen +
+							qreq.assoclen);
+			sg_mark_end(&rctx->ssg);
+
+			if (qreq.dir == QCE_ENCRYPT)
+				sg_set_buf(&rctx->dsg, rctx->data,
+					qreq.assoclen + qreq.cryptlen +
+					ALIGN(qreq.authsize, 64));
+			else
+				sg_set_buf(&rctx->dsg, rctx->data,
+						qreq.assoclen + req->cryptlen +
+						qreq.authsize);
+			sg_mark_end(&rctx->dsg);
+
+			req->src = &rctx->ssg;
+			req->dst = &rctx->dsg;
+		}
+		/*
+		 * Save the original associated data
+		 * length and sg
+		 */
+		rctx->assoc_sg  = req->assoc;
+		rctx->assoclen  = req->assoclen;
+		rctx->assoc  = qreq.assoc;
+		/*
+		 * update req with new formatted associated
+		 * data info
+		 */
+		req->assoc = &rctx->asg;
+		req->assoclen = qreq.assoclen;
+		sg_set_buf(req->assoc, qreq.assoc,
+					req->assoclen);
+		sg_mark_end(req->assoc);
+	}
+	ret =  qce_aead_req(cp->qce, &qreq);
+
+	return ret;
+}
+
 static void _start_qcrypto_process(struct crypto_priv *cp)
 {
 	struct crypto_async_request *async_req = NULL;
 	struct crypto_async_request *backlog = NULL;
 	unsigned long flags;
 	u32 type;
-	struct qce_req qreq;
 	int ret;
-	struct qcrypto_cipher_req_ctx *rctx;
-	struct qcrypto_cipher_ctx *cipher_ctx;
-	struct qcrypto_sha_ctx *sha_ctx;
 	struct crypto_stat *pstat;
 
 	pstat = &_qcrypto_stat[cp->pdev->id];
@@ -1026,139 +1285,21 @@
 		backlog->complete(backlog, -EINPROGRESS);
 	type = crypto_tfm_alg_type(async_req->tfm);
 
-	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
-		struct ablkcipher_request *req;
-		struct crypto_ablkcipher *tfm;
-
-		req = container_of(async_req, struct ablkcipher_request, base);
-		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
-		rctx = ablkcipher_request_ctx(req);
-		tfm = crypto_ablkcipher_reqtfm(req);
-
-		qreq.op = QCE_REQ_ABLK_CIPHER;
-		qreq.qce_cb = _qce_ablk_cipher_complete;
-		qreq.areq = req;
-		qreq.alg = rctx->alg;
-		qreq.dir = rctx->dir;
-		qreq.mode = rctx->mode;
-		qreq.enckey = cipher_ctx->enc_key;
-		qreq.encklen = cipher_ctx->enc_key_len;
-		qreq.iv = req->info;
-		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
-		qreq.cryptlen = req->nbytes;
-		qreq.use_pmem = 0;
-
-		if ((cipher_ctx->enc_key_len == 0) &&
-				(cp->platform_support.hw_key_support == 0))
-			ret = -EINVAL;
-		else
-			ret =  qce_ablk_cipher_req(cp->qce, &qreq);
-	} else {
-		if (type == CRYPTO_ALG_TYPE_AHASH) {
-
-			struct ahash_request *req;
-			struct qce_sha_req sreq;
-
-			req = container_of(async_req,
-						struct ahash_request, base);
-			sha_ctx = crypto_tfm_ctx(async_req->tfm);
-
-			sreq.qce_cb = _qce_ahash_complete;
-			sreq.digest =  &sha_ctx->digest[0];
-			sreq.src = req->src;
-			sreq.auth_data[0] = sha_ctx->byte_count[0];
-			sreq.auth_data[1] = sha_ctx->byte_count[1];
-			sreq.auth_data[2] = sha_ctx->byte_count[2];
-			sreq.auth_data[3] = sha_ctx->byte_count[3];
-			sreq.first_blk = sha_ctx->first_blk;
-			sreq.last_blk = sha_ctx->last_blk;
-			sreq.size = req->nbytes;
-			sreq.areq = req;
-
-			switch (sha_ctx->alg) {
-			case QCE_HASH_SHA1:
-				sreq.alg = QCE_HASH_SHA1;
-				sreq.authkey = NULL;
-				break;
-			case QCE_HASH_SHA256:
-				sreq.alg = QCE_HASH_SHA256;
-				sreq.authkey = NULL;
-				break;
-			case QCE_HASH_SHA1_HMAC:
-				sreq.alg = QCE_HASH_SHA1_HMAC;
-				sreq.authkey = &sha_ctx->authkey[0];
-				break;
-			case QCE_HASH_SHA256_HMAC:
-				sreq.alg = QCE_HASH_SHA256_HMAC;
-				sreq.authkey = &sha_ctx->authkey[0];
-				break;
-			default:
-				break;
-			};
-			ret =  qce_process_sha_req(cp->qce, &sreq);
-
-		} else {
-			struct aead_request *req = container_of(async_req,
-						struct aead_request, base);
-			struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
-			rctx = aead_request_ctx(req);
-			cipher_ctx = crypto_tfm_ctx(async_req->tfm);
-
-			qreq.op = QCE_REQ_AEAD;
-			qreq.qce_cb = _qce_aead_complete;
-
-			qreq.areq = req;
-			qreq.alg = rctx->alg;
-			qreq.dir = rctx->dir;
-			qreq.mode = rctx->mode;
-			qreq.iv = rctx->iv;
-
-			qreq.enckey = cipher_ctx->enc_key;
-			qreq.encklen = cipher_ctx->enc_key_len;
-			qreq.authkey = cipher_ctx->auth_key;
-			qreq.authklen = cipher_ctx->auth_key_len;
-			qreq.authsize = crypto_aead_authsize(aead);
-			qreq.ivsize =  crypto_aead_ivsize(aead);
-			if (qreq.mode == QCE_MODE_CCM) {
-				if (qreq.dir == QCE_ENCRYPT)
-					qreq.cryptlen = req->cryptlen;
-				else
-					qreq.cryptlen = req->cryptlen -
-								qreq.authsize;
-				/* Get NONCE */
-				ret = qccrypto_set_aead_ccm_nonce(&qreq);
-				if (ret)
-					goto done;
-				/* Format Associated data    */
-				ret = qcrypto_aead_ccm_format_adata(&qreq,
-								req->assoclen,
-								req->assoc);
-				if (ret)
-					goto done;
-				/*
-				 * Save the original associated data
-				 * length and sg
-				 */
-				rctx->assoc_sg  = req->assoc;
-				rctx->assoclen  = req->assoclen;
-				rctx->assoc  = qreq.assoc;
-				/*
-				 * update req with new formatted associated
-				 * data info
-				 */
-				req->assoc = &rctx->asg;
-				req->assoclen = qreq.assoclen;
-				sg_set_buf(req->assoc, qreq.assoc,
-							req->assoclen);
-				sg_mark_end(req->assoc);
-			}
-			ret =  qce_aead_req(cp->qce, &qreq);
-		}
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ret = _qcrypto_process_ablkcipher(cp, async_req);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		ret = _qcrypto_process_ahash(cp, async_req);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		ret = _qcrypto_process_aead(cp, async_req);
+		break;
+	default:
+		ret = -EINVAL;
 	};
-done:
-	if (ret) {
 
+	if (ret) {
 		spin_lock_irqsave(&cp->lock, flags);
 		cp->req = NULL;
 		spin_unlock_irqrestore(&cp->lock, flags);
@@ -2118,6 +2259,26 @@
 	return 0;
 }
 
+static void _copy_source(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *srctx = NULL;
+	uint32_t bytes = 0;
+	struct scatterlist *sg = req->src;
+
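+	/* Linearize the scattered source into one contiguous buffer. */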
+	srctx = ahash_request_ctx(req);
+	srctx->orig_src = req->src;
+	srctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
+	for (sg = req->src; bytes != req->nbytes;
+						sg++) {
+		memcpy(((char *)srctx->data + bytes),
+			sg_virt(sg), sg->length);
+		bytes += sg->length;
+	}
+	sg_set_buf(&srctx->dsg, srctx->data,
+				req->nbytes);
+	sg_mark_end(&srctx->dsg);
+	req->src = &srctx->dsg;
+}
 
 static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
 {
@@ -2198,23 +2359,55 @@
 	}
 
 	if (sha_ctx->trailing_buf_len) {
-		num_sg = end_src + 2;
-		sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
+		if (cp->ce_support.aligned_only)  {
+			sha_ctx->sg = kzalloc(sizeof(struct scatterlist),
 								GFP_KERNEL);
-		if (sha_ctx->sg == NULL) {
-			pr_err("qcrypto Can't Allocate mem: sha_ctx->sg, error %ld\n",
-				PTR_ERR(sha_ctx->sg));
-			return -ENOMEM;
-		}
-
-		sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+			if (sha_ctx->sg == NULL) {
+				pr_err("MemAlloc fail sha_ctx->sg, error %ld\n",
+						PTR_ERR(sha_ctx->sg));
+				return -ENOMEM;
+			}
+			rctx->data2 = kzalloc((req->nbytes + 64), GFP_KERNEL);
+			if (rctx->data2 == NULL) {
+				pr_err("Mem Alloc fail srctx->data2, err %ld\n",
+							PTR_ERR(rctx->data2));
+				kfree(sha_ctx->sg);
+				return -ENOMEM;
+			}
+			memcpy(rctx->data2, sha_ctx->tmp_tbuf,
 						sha_ctx->trailing_buf_len);
-		for (i = 1; i < num_sg; i++)
-			sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
-							req->src[i-1].length);
+			memcpy((rctx->data2 + sha_ctx->trailing_buf_len),
+					rctx->data, req->src[i-1].length);
+			kfree(rctx->data);
+			rctx->data = rctx->data2;
+			sg_set_buf(&sha_ctx->sg[0], rctx->data,
+					(sha_ctx->trailing_buf_len +
+							req->src[i-1].length));
+			req->src = sha_ctx->sg;
+			sg_mark_end(&sha_ctx->sg[0]);
 
-		req->src = sha_ctx->sg;
-		sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+		} else {
+			num_sg = end_src + 2;
+
+			sha_ctx->sg = kzalloc(num_sg *
+				(sizeof(struct scatterlist)), GFP_KERNEL);
+			if (sha_ctx->sg == NULL) {
+				pr_err("MEMalloc fail sha_ctx->sg, error %ld\n",
+							PTR_ERR(sha_ctx->sg));
+				return -ENOMEM;
+			}
+
+			sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+						sha_ctx->trailing_buf_len);
+			for (i = 1; i < num_sg; i++)
+				sg_set_buf(&sha_ctx->sg[i],
+						sg_virt(&req->src[i-1]),
+						req->src[i-1].length);
+
+			req->src = sha_ctx->sg;
+			sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+
+		}
 	} else
 		sg_mark_end(&req->src[end_src]);
 
@@ -2232,6 +2425,11 @@
 {
 	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
 
 	sha_state_ctx->count += req->nbytes;
 	return _sha_update(req, SHA1_BLOCK_SIZE);
@@ -2241,6 +2439,11 @@
 {
 	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
 
 	sha_state_ctx->count += req->nbytes;
 	return _sha_update(req, SHA256_BLOCK_SIZE);
@@ -2253,6 +2456,9 @@
 	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
 	int ret = 0;
 
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
+
 	sha_ctx->last_blk = 1;
 
 	/* save the original req structure fields*/
@@ -2288,10 +2494,13 @@
 	struct crypto_priv *cp = sha_ctx->cp;
 	int ret = 0;
 
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
+
 	/* save the original req structure fields*/
 	rctx->src = req->src;
 	rctx->nbytes = req->nbytes;
-
+	sha_ctx->first_blk = 1;
 	sha_ctx->last_blk = 1;
 	ret =  _qcrypto_queue_req(cp, &req->base);
 
@@ -2326,7 +2535,7 @@
 	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
 	int ret = 0;
 
-	sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
+	sha_ctx->in_buf = kzalloc(len + 64, GFP_KERNEL);
 	if (sha_ctx->in_buf == NULL) {
 		pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n",
 		PTR_ERR(sha_ctx->in_buf));
@@ -3079,17 +3288,27 @@
 	cp->qce = handle;
 	cp->pdev = pdev;
 	qce_hw_support(cp->qce, &cp->ce_support);
-	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
-	cp->platform_support.ce_shared = platform_support->ce_shared;
-	cp->platform_support.shared_ce_resource =
+	if (cp->ce_support.bam)	 {
+		cp->platform_support.ce_shared = 0;
+		cp->platform_support.shared_ce_resource = 0;
+		cp->platform_support.hw_key_support = 0;
+		cp->platform_support.bus_scale_table =	NULL;
+		cp->platform_support.sha_hmac = 1;
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		cp->platform_support.ce_shared = platform_support->ce_shared;
+		cp->platform_support.shared_ce_resource =
 				platform_support->shared_ce_resource;
-	cp->platform_support.hw_key_support =
+		cp->platform_support.hw_key_support =
 				platform_support->hw_key_support;
-	cp->platform_support.bus_scale_table =
+		cp->platform_support.bus_scale_table =
 				platform_support->bus_scale_table;
+		cp->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
 	cp->high_bw_req_count = 0;
 	cp->ce_lock_count = 0;
-	cp->platform_support.sha_hmac = platform_support->sha_hmac;
+
 
 	if (cp->platform_support.ce_shared)
 		INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
@@ -3370,6 +3589,4 @@
 module_exit(_qcrypto_exit);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Qualcomm Crypto driver");
-MODULE_VERSION("1.22");
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 0000000..898109e
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,501 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x1A000
+
+#define CRYPTO_DATA_IN0_REG			0x1A010
+#define CRYPTO_DATA_IN1_REG			0x1A014
+#define CRYPTO_DATA_IN2_REG			0x1A018
+#define CRYPTO_DATA_IN3_REG			0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG			0x1A020
+#define CRYPTO_DATA_OUT1_REG			0x1A024
+#define CRYPTO_DATA_OUT2_REG			0x1A028
+#define CRYPTO_DATA_OUT3_REG			0x1A02C
+
+#define CRYPTO_STATUS_REG			0x1A100
+#define CRYPTO_STATUS2_REG			0x1A100
+#define CRYPTO_ENGINES_AVAIL			0x1A108
+#define CRYPTO_FIFO_SIZES_REG			0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG			0x1A110
+#define CRYPTO_GOPROC_REG			0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
+#define CRYPTO_ENCR_SEG_START_REG		0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG			0x1D000
+#define CRYPTO_ENCR_KEY1_REG			0x1D004
+#define CRYPTO_ENCR_KEY2_REG			0x1D008
+#define CRYPTO_ENCR_KEY3_REG			0x1D00C
+#define CRYPTO_ENCR_KEY4_REG			0x1D010
+#define CRYPTO_ENCR_KEY5_REG			0x1D014
+#define CRYPTO_ENCR_KEY6_REG			0x1D018
+#define CRYPTO_ENCR_KEY7_REG			0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C
+
+#define CRYPTO_ENCR_PIP0_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIP0_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIP0_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIP0_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIP0_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIP0_KEY5_REG		0x1E004
+#define CRYPTO_ENCR_PIP0_KEY6_REG		0x1E008
+#define CRYPTO_ENCR_PIP0_KEY7_REG		0x1E00C
+
+#define CRYPTO_ENCR_PIP1_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIP1_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIP1_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIP1_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIP1_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIP1_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIP1_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIP1_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIP2_KEY0_REG		0x1E020
+#define CRYPTO_ENCR_PIP2_KEY1_REG		0x1E024
+#define CRYPTO_ENCR_PIP2_KEY2_REG		0x1E028
+#define CRYPTO_ENCR_PIP2_KEY3_REG		0x1E02C
+#define CRYPTO_ENCR_PIP2_KEY4_REG		0x1E030
+#define CRYPTO_ENCR_PIP2_KEY5_REG		0x1E034
+#define CRYPTO_ENCR_PIP2_KEY6_REG		0x1E038
+#define CRYPTO_ENCR_PIP2_KEY7_REG		0x1E03C
+
+#define CRYPTO_ENCR_PIP3_KEY0_REG		0x1E040
+#define CRYPTO_ENCR_PIP3_KEY1_REG		0x1E044
+#define CRYPTO_ENCR_PIP3_KEY2_REG		0x1E048
+#define CRYPTO_ENCR_PIP3_KEY3_REG		0x1E04C
+#define CRYPTO_ENCR_PIP3_KEY4_REG		0x1E050
+#define CRYPTO_ENCR_PIP3_KEY5_REG		0x1E054
+#define CRYPTO_ENCR_PIP3_KEY6_REG		0x1E058
+#define CRYPTO_ENCR_PIP3_KEY7_REG		0x1E05C
+
+#define CRYPTO_ENCR_PIP0_XTS_KEY0_REG		0x1E200
+#define CRYPTO_ENCR_PIP0_XTS_KEY1_REG		0x1E204
+#define CRYPTO_ENCR_PIP0_XTS_KEY2_REG		0x1E208
+#define CRYPTO_ENCR_PIP0_XTS_KEY3_REG		0x1E20C
+#define CRYPTO_ENCR_PIP0_XTS_KEY4_REG		0x1E210
+#define CRYPTO_ENCR_PIP0_XTS_KEY5_REG		0x1E214
+#define CRYPTO_ENCR_PIP0_XTS_KEY6_REG		0x1E218
+#define CRYPTO_ENCR_PIP0_XTS_KEY7_REG		0x1E21C
+
+#define CRYPTO_ENCR_PIP1_XTS_KEY0_REG		0x1E220
+#define CRYPTO_ENCR_PIP1_XTS_KEY1_REG		0x1E224
+#define CRYPTO_ENCR_PIP1_XTS_KEY2_REG		0x1E228
+#define CRYPTO_ENCR_PIP1_XTS_KEY3_REG		0x1E22C
+#define CRYPTO_ENCR_PIP1_XTS_KEY4_REG		0x1E230
+#define CRYPTO_ENCR_PIP1_XTS_KEY5_REG		0x1E234
+#define CRYPTO_ENCR_PIP1_XTS_KEY6_REG		0x1E238
+#define CRYPTO_ENCR_PIP1_XTS_KEY7_REG		0x1E23C
+
+#define CRYPTO_ENCR_PIP2_XTS_KEY0_REG		0x1E240
+#define CRYPTO_ENCR_PIP2_XTS_KEY1_REG		0x1E244
+#define CRYPTO_ENCR_PIP2_XTS_KEY2_REG		0x1E248
+#define CRYPTO_ENCR_PIP2_XTS_KEY3_REG		0x1E24C
+#define CRYPTO_ENCR_PIP2_XTS_KEY4_REG		0x1E250
+#define CRYPTO_ENCR_PIP2_XTS_KEY5_REG		0x1E254
+#define CRYPTO_ENCR_PIP2_XTS_KEY6_REG		0x1E258
+#define CRYPTO_ENCR_PIP2_XTS_KEY7_REG		0x1E25C
+
+#define CRYPTO_ENCR_PIP3_XTS_KEY0_REG		0x1E260
+#define CRYPTO_ENCR_PIP3_XTS_KEY1_REG		0x1E264
+#define CRYPTO_ENCR_PIP3_XTS_KEY2_REG		0x1E268
+#define CRYPTO_ENCR_PIP3_XTS_KEY3_REG		0x1E26C
+#define CRYPTO_ENCR_PIP3_XTS_KEY4_REG		0x1E270
+#define CRYPTO_ENCR_PIP3_XTS_KEY5_REG		0x1E274
+#define CRYPTO_ENCR_PIP3_XTS_KEY6_REG		0x1E278
+#define CRYPTO_ENCR_PIP3_XTS_KEY7_REG		0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG			0x1A20C
+#define CRYPTO_CNTR1_IV1_REG			0x1A210
+#define CRYPTO_CNTR2_IV2_REG			0x1A214
+#define CRYPTO_CNTR3_IV3_REG			0x1A218
+
+#define CRYPTO_CNTR_MASK_REG			0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
+#define CRYPTO_AUTH_SEG_START_REG		0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG			0x1D040
+#define CRYPTO_AUTH_KEY1_REG			0x1D044
+#define CRYPTO_AUTH_KEY2_REG			0x1D048
+#define CRYPTO_AUTH_KEY3_REG			0x1D04C
+#define CRYPTO_AUTH_KEY4_REG			0x1D050
+#define CRYPTO_AUTH_KEY5_REG			0x1D054
+#define CRYPTO_AUTH_KEY6_REG			0x1D058
+#define CRYPTO_AUTH_KEY7_REG			0x1D05C
+#define CRYPTO_AUTH_KEY8_REG			0x1D060
+#define CRYPTO_AUTH_KEY9_REG			0x1D064
+#define CRYPTO_AUTH_KEY10_REG			0x1D068
+#define CRYPTO_AUTH_KEY11_REG			0x1D06C
+#define CRYPTO_AUTH_KEY12_REG			0x1D070
+#define CRYPTO_AUTH_KEY13_REG			0x1D074
+#define CRYPTO_AUTH_KEY14_REG			0x1D078
+#define CRYPTO_AUTH_KEY15_REG			0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E840
+#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E844
+#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E848
+#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E84C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E850
+#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E854
+#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E858
+#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E85C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E860
+#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E864
+#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E868
+#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E86C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E870
+#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E874
+#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E878
+#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E87C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E880
+#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E884
+#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E888
+#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E88C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E890
+#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E894
+#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E898
+#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E89C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E8A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E8A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E8A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E8AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E8B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E8B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E8B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E8BC
+
+
+#define CRYPTO_AUTH_IV0_REG			0x1A310
+#define CRYPTO_AUTH_IV1_REG			0x1A314
+#define CRYPTO_AUTH_IV2_REG			0x1A318
+#define CRYPTO_AUTH_IV3_REG			0x1A31C
+#define CRYPTO_AUTH_IV4_REG			0x1A320
+#define CRYPTO_AUTH_IV5_REG			0x1A324
+#define CRYPTO_AUTH_IV6_REG			0x1A328
+#define CRYPTO_AUTH_IV7_REG			0x1A32C
+#define CRYPTO_AUTH_IV8_REG			0x1A330
+#define CRYPTO_AUTH_IV9_REG			0x1A334
+#define CRYPTO_AUTH_IV10_REG			0x1A338
+#define CRYPTO_AUTH_IV11_REG			0x1A33C
+#define CRYPTO_AUTH_IV12_REG			0x1A340
+#define CRYPTO_AUTH_IV13_REG			0x1A344
+#define CRYPTO_AUTH_IV14_REG			0x1A348
+#define CRYPTO_AUTH_IV15_REG			0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC
+
+#define CRYPTO_CONFIG_REG			0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
+#define CRYPTO_DEBUG_REG			0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
+#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
+#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
+#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			31
+#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 21-25 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL			19
+#define CRYPTO_PIPE_ACTIVE_ERR			18
+#define CRYPTO_CFG_CHNG_ERR			17
+#define CRYPTO_DOUT_ERR				16
+#define CRYPTO_DIN_ERR				15
+#define CRYPTO_AXI_ERR				14
+#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* status2 reg  */
+#define CRYPTO_AXI_EXTRA			1
+#define CRYPTO_LOCKED				2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT	0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT	1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT	2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT	3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT	4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT	5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT	6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT	7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT	8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT	9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT	10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT	11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT	12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT	13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT	14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT	15
+
+#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS	0
+#define CRYPTO_ENUM_2_QUEUED_REQS	1
+#define CRYPTO_ENUM_3_QUEUED_REQS	2
+
+#define CRYPTO_IRQ_ENABLES			10	/* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE		9
+#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N			4
+
+#define CRYPTO_MASK_DOUT_INTR			3
+#define CRYPTO_MASK_DIN_INTR			2
+#define CRYPTO_MASK_OP_DONE_INTR		1
+#define CRYPTO_MASK_ERR_INTR			0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			24
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			23
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH		19
+#define CRYPTO_USE_HW_KEY_AUTH			18
+#define CRYPTO_FIRST				17
+#define CRYPTO_LAST				16
+
+#define CRYPTO_AUTH_POS				15 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1		0
+#define CRYPTO_AUTH_SIZE_SHA256		1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES	0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES	1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES	2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES	3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES	4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES	5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES	6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES	7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES	8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES	9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES	10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES	11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES	12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES	13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES	14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES	15
+
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH	0
+#define CRYPTO_AUTH_MODE_HMAC	1
+#define CRYPTO_AUTH_MODE_CCM	0
+#define CRYPTO_AUTH_MODE_CMAC	1
+
+#define CRYPTO_AUTH_KEY_SIZE			3  /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128	0
+#define CRYPTO_AUTH_KEY_SZ_AES256	2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE	0
+#define CRYPTO_AUTH_ALG_SHA	1
+#define CRYPTO_AUTH_ALG_AES	2
+#define CRYPTO_AUTH_ALG_KASUMI	3
+#define CRYPTO_AUTH_ALG_SNOW3G	4
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17/* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED	0
+#define CRYPTO_F8_KEYSTREAM_ENABLED	1
+
+#define CRYPTO_F8_DIRECTION			16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK	0
+#define CRYPTO_F8_DIRECTION_DOWNLINK	1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
+#define CRYPTO_USE_KEY_REGISTERS		0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR			14
+#define CRYPTO_USE_KEY_REG	0
+#define CRYPTO_USE_HW_KEY	1
+
+#define CRYPTO_LAST_CCM				13
+#define CRYPTO_LAST_CCM_XFR	1
+#define CRYPTO_INTERM_CCM_XFR	0
+
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST	0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB	0
+#define CRYPTO_ENCR_MODE_CBC	1
+#define CRYPTO_ENCR_MODE_CTR	2
+#define CRYPTO_ENCR_MODE_XTS	3
+#define CRYPTO_ENCR_MODE_CCM	4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES		0
+#define CRYPTO_ENCR_KEY_SZ_3DES		1
+#define CRYPTO_ENCR_KEY_SZ_AES128	0
+#define CRYPTO_ENCR_KEY_SZ_AES256	2
+#define CRYPTO_ENCR_KEY_SZ_UEA1		0
+#define CRYPTO_ENCR_KEY_SZ_UEA2		1
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE		0
+#define CRYPTO_ENCR_ALG_DES		1
+#define CRYPTO_ENCR_ALG_AES		2
+#define CRYPTO_ENCR_ALG_KASUMI		3
+#define CRYPTO_ENCR_ALG_SNOW_3G		5
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+#define CRYPTO_RESULTS_DUMP			2
+
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				3
+#define CRYPTO_ENCR_SNOW3G_SEL			4
+#define CRYPTO_ENCR_KASUMI_SEL			5
+#define CRYPTO_SHA_SEL				6
+#define CRYPTO_SHA512_SEL			7
+#define CRYPTO_AUTH_AES_SEL			8
+#define CRYPTO_AUTH_SNOW3G_SEL			9
+#define CRYPTO_AUTH_KASUMI_SEL			10
+#define CRYPTO_BAM_SEL				11
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 0bdf6cb..790cc10 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -137,7 +137,12 @@
 
 	/*
 	cc = buf[3] & 0x0f;
-	ccok = ((feed->cc + 1) & 0x0f) == cc;
+	if (feed->first_cc)
+		ccok = 1;
+	else
+		ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+	feed->first_cc = 0;
 	feed->cc = cc;
 	if (!ccok)
 		printk("missed packet!\n");
@@ -351,7 +356,12 @@
 	p = 188 - count;	/* payload start */
 
 	cc = buf[3] & 0x0f;
-	ccok = ((feed->cc + 1) & 0x0f) == cc;
+	if (feed->first_cc)
+		ccok = 1;
+	else
+		ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+	feed->first_cc = 0;
 	feed->cc = cc;
 
 	if (buf[3] & 0x20) {
@@ -996,6 +1006,8 @@
 		return -ENODEV;
 	}
 
+	feed->first_cc = 1;
+
 	if ((ret = demux->start_feed(feed)) < 0) {
 		mutex_unlock(&demux->mutex);
 		return ret;
@@ -1299,6 +1311,7 @@
 	dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
 	dvbdmxfeed->feed.sec.secbufp = 0;
 	dvbdmxfeed->feed.sec.seclen = 0;
+	dvbdmxfeed->first_cc = 1;
 
 	if (!dvbdmx->start_feed) {
 		mutex_unlock(&dvbdmx->mutex);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 3970a6c..a663191 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -98,6 +98,7 @@
 	enum dmx_ts_pes pes_type;
 
 	int cc;
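+	/* set by start_feed; makes the continuity-counter check accept
+	 * the first packet seen after the feed is (re)started */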
+	int first_cc;
 	int pusi_seen;		/* prevents feeding of garbage from previous section */
 
 	u32 peslen;
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index fb5aea0..c5abd76 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -280,6 +280,16 @@
           and cropping image. The driver support V4L2 subdev
           APIs.
 
+config MSM_CCI
+        bool "Qualcomm MSM Camera Control Interface support"
+        depends on MSM_CAMERA
+        ---help---
+          Enable support for the Camera Control Interface driver,
+          only for platforms that have the hardware. The driver is
+          responsible for handling I2C reads and writes on the
+          I2C bus, and for synchronization with GPIO and data
+          frames.
+
 config QUP_EXCLUSIVE_TO_CAMERA
 	bool "QUP exclusive to camera"
 	depends on MSM_CAMERA
@@ -339,6 +349,15 @@
           of CSIPHY, CSID and ISPIF subdevices to receive data
           from sensor.
 
+config MSM_ISPIF
+        bool "Qualcomm MSM Image Signal Processing interface support"
+        depends on MSM_CAMERA
+        ---help---
+          Enable support for the Image Signal Processing interface
+          module, which acts as a crossbar between CSID and VFE. The
+          output of any CID of the CSID can be routed to any of the
+          pixel or raw data interfaces in the VFE.
+
 config S5K3L1YX
 	bool "Sensor S5K3L1YX (BAYER 12M)"
 	depends on MSM_CAMERA
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index eb45271..3ca4198 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -1,42 +1,35 @@
 GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
-ifeq ($(GCC_VERSION),0404)
-CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
-endif
 
-EXTRA_CFLAGS += -Idrivers/media/video/msm/io
+ccflags-y += -Idrivers/media/video/msm/io
+ccflags-y += -Idrivers/media/video/msm/vfe
 obj-$(CONFIG_MSM_CAMERA) += io/
 ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+  EXTRA_CFLAGS += -Idrivers/media/video/msm/cci
   EXTRA_CFLAGS += -Idrivers/media/video/msm/csi
   EXTRA_CFLAGS += -Idrivers/media/video/msm/eeprom
   EXTRA_CFLAGS += -Idrivers/media/video/msm/sensors
   EXTRA_CFLAGS += -Idrivers/media/video/msm/actuators
   EXTRA_CFLAGS += -Idrivers/media/video/msm/server
-  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o msm_vfe_stats_buf.o
+  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
   obj-$(CONFIG_MSM_CAMERA) += server/
   obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
-  obj-$(CONFIG_MSM_CAMERA) += eeprom/ sensors/ actuators/ csi/
+  obj-$(CONFIG_MSM_CAMERA) += cci/ eeprom/ sensors/ actuators/ csi/
   obj-$(CONFIG_MSM_CPP) += cpp/
   obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
 else
   obj-$(CONFIG_MSM_CAMERA) += msm_camera.o
 endif
+obj-$(CONFIG_MSM_CAMERA) += vfe/
 obj-$(CONFIG_MSM_CAMERA) += msm_axi_qos.o gemini/ mercury/
 obj-$(CONFIG_MSM_CAMERA_FLASH) += flash.o
-obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
 ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
-  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a_v4l2.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vpe.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vpe.o msm_axi_qos.o
 else
-  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vpe1.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vpe1.o
 endif
-obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
-ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
-  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31_v4l2.o msm_vpe.o
-  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31_v4l2.o msm_vpe.o msm_axi_qos.o
-else
-  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o msm_vpe1.o
-  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o msm_vpe1.o
-endif
-obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o msm_vpe.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_vpe.o
 obj-$(CONFIG_MT9T013) += mt9t013.o mt9t013_reg.o
 obj-$(CONFIG_SN12M0PZ) += sn12m0pz.o sn12m0pz_reg.o
 obj-$(CONFIG_MT9P012) += mt9p012_reg.o
diff --git a/drivers/media/video/msm/cci/Makefile b/drivers/media/video/msm/cci/Makefile
new file mode 100644
index 0000000..195a1b2
--- /dev/null
+++ b/drivers/media/video/msm/cci/Makefile
@@ -0,0 +1,3 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm -Idrivers/media/video/msm/server
+obj-$(CONFIG_MSM_CCI) += msm_cci.o
diff --git a/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h b/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h
new file mode 100644
index 0000000..68c78d5
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAM_CCI_HWREG__
+#define __MSM_CAM_CCI_HWREG__
+
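+/*
+ * Register offsets are given for I2C master 0, queue 0. msm_cci.c
+ * derives the other banks from these: the queue registers are strided
+ * by (master * 0x200) + (queue * 0x100), the per-master M0_* control
+ * and read-data registers by (master * 0x100).
+ */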
+#define CCI_HW_VERSION_ADDR                                         0x00000000
+#define CCI_RESET_CMD_ADDR                                          0x00000004
+#define CCI_RESET_CMD_RMSK                                          0xcf73f3f7
+#define CCI_M0_RESET_RMSK                                                0x3F1
+#define CCI_M1_RESET_RMSK                                              0x3F001
+#define CCI_QUEUE_START_ADDR                                        0x00000008
+#define CCI_SET_CID_SYNC_TIMER_0_ADDR                               0x00000010
+#define CCI_I2C_M0_SCL_CTL_ADDR                                     0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR                                   0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR                                   0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR                                   0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR                                   0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR                                    0x00000110
+#define CCI_HALT_REQ_ADDR                                           0x00000034
+#define CCI_M0_HALT_REQ_RMSK                                               0x1
+#define CCI_M1_HALT_REQ_RMSK                                              0x01
+#define CCI_HALT_REQ_ADDR                                           0x00000034
+#define CCI_I2C_M1_SCL_CTL_ADDR                                     0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR                                   0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR                                   0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR                                   0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR                                    0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR                             0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR                                  0x00000308
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR                            0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR                                0x00000310
+#define CCI_IRQ_MASK_0_ADDR                                         0x00000c04
+#define CCI_IRQ_CLEAR_0_ADDR                                        0x00000c08
+#define CCI_IRQ_STATUS_0_ADDR                                       0x00000c0c
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR_BMSK                    0x40000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR_BMSK                    0x20000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR_BMSK                    0x10000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR_BMSK                     0x8000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK                   0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK                   0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK                           0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK                        0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK                         0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK                            0x1000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK                           0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK                            0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK                               0x1
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR                               0x00000c00
+#endif /* __MSM_CAM_CCI_HWREG__ */
diff --git a/drivers/media/video/msm/cci/msm_cci.c b/drivers/media/video/msm/cci/msm_cci.c
new file mode 100644
index 0000000..ad3cc6a
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cci.c
@@ -0,0 +1,748 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <media/msm_isp.h>
+#include "msm_cci.h"
+#include "msm.h"
+#include "msm_cam_server.h"
+#include "msm_cam_cci_hwreg.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 2
+#define CCI_I2C_QUEUE_1_SIZE 16
+
+#undef CDBG
+#define CDBG pr_debug
+
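+/*
+ * Program SCL/SDA timing, clock-stretch, hold-time and glitch-filter
+ * parameters for both I2C masters (M0 and M1 register banks).
+ */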
+static void msm_cci_set_clk_param(struct cci_device *cci_dev)
+{
+	uint16_t THIGH = 78;
+	uint16_t TLOW = 114;
+	uint16_t TSUSTO = 28;
+	uint16_t TSUSTA = 28;
+	uint16_t THDDAT = 10;
+	uint16_t THDSTA = 77;
+	uint16_t TBUF = 118;
+	uint8_t HW_SCL_STRETCH_EN = 0; /*enable or disable SCL clock
+					* stretching */
+	uint8_t HW_RDHLD = 6; /* internal hold time 1-6 cycles of SDA to bridge
+			       * undefined falling SCL region */
+	uint8_t HW_TSP = 1; /* glitch filter 1-3 cycles */
+
+	msm_camera_io_w(THIGH << 16 | TLOW, cci_dev->base +
+		CCI_I2C_M0_SCL_CTL_ADDR);
+	msm_camera_io_w(TSUSTO << 16 | TSUSTA, cci_dev->base +
+		CCI_I2C_M0_SDA_CTL_0_ADDR);
+	msm_camera_io_w(THDDAT << 16 | THDSTA, cci_dev->base +
+		CCI_I2C_M0_SDA_CTL_1_ADDR);
+	msm_camera_io_w(TBUF, cci_dev->base +
+		CCI_I2C_M0_SDA_CTL_2_ADDR);
+	msm_camera_io_w(HW_SCL_STRETCH_EN << 8 | HW_RDHLD << 4 | HW_TSP,
+		cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+	msm_camera_io_w(THIGH << 16 | TLOW, cci_dev->base +
+		CCI_I2C_M1_SCL_CTL_ADDR);
+	msm_camera_io_w(TSUSTO << 16 | TSUSTA, cci_dev->base +
+		CCI_I2C_M1_SDA_CTL_0_ADDR);
+	msm_camera_io_w(THDDAT << 16 | THDSTA, cci_dev->base +
+		CCI_I2C_M1_SDA_CTL_1_ADDR);
+	msm_camera_io_w(TBUF, cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+	msm_camera_io_w(HW_SCL_STRETCH_EN << 8 | HW_RDHLD << 4 | HW_TSP,
+		cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+}
+
+static int32_t msm_cci_i2c_config_sync_timer(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+	msm_camera_io_w(c_ctrl->cci_info->cid, cci_dev->base +
+		CCI_SET_CID_SYNC_TIMER_0_ADDR + (c_ctrl->cci_info->cid * 0x4));
+	return 0;
+}
+
+static int32_t msm_cci_i2c_set_freq(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev;
+	uint32_t val;
+	cci_dev = v4l2_get_subdevdata(sd);
+	val = c_ctrl->cci_info->freq;
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	return 0;
+}
+
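+/*
+ * Check that the command queue has room for 'len' more words. If not,
+ * append a REPORT command, start the queue and wait for the report
+ * interrupt so the queue drains before the caller loads more commands.
+ */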
+static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
+	uint32_t len,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t read_val = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	read_val = msm_camera_io_r(cci_dev->base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
+		__func__, __LINE__, read_val, len,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	if ((read_val + len + 1) > cci_dev->
+		cci_i2c_queue_info[master][queue].max_queue_size) {
+		uint32_t reg_val = 0;
+		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+		msm_camera_io_w(report_val,
+			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+		read_val++;
+		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
+			__func__, __LINE__, read_val);
+		msm_camera_io_w(read_val, cci_dev->base +
+			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		reg_val = 1 << ((master * 2) + queue);
+		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+		msm_camera_io_w(reg_val, cci_dev->base + CCI_QUEUE_START_ADDR);
+		CDBG("%s line %d wait_for_completion_interruptible\n",
+			__func__, __LINE__);
+		wait_for_completion_interruptible(&cci_dev->
+			cci_master_info[master].reset_complete);
+		rc = cci_dev->cci_master_info[master].status;
+		if (rc < 0)
+			pr_err("%s failed rc %d\n", __func__, rc);
+	}
+	return rc;
+}
+
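+/*
+ * Pack register writes into CCI WRITE commands. The first byte of each
+ * command carries the opcode and the byte count; the slave register
+ * address and data bytes follow, and the buffer is loaded into the
+ * queue four bytes per write to the Q0 LOAD_DATA register.
+ */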
+static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
+	struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue)
+{
+	uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+	uint32_t cmd = 0;
+	uint8_t data[10];
+	uint16_t reg_addr = 0;
+	struct msm_camera_cci_i2c_write_cfg *i2c_msg =
+		&c_ctrl->cfg.cci_i2c_write_cfg;
+	uint16_t cmd_size = i2c_msg->size;
+	struct msm_camera_i2c_reg_conf *i2c_cmd = i2c_msg->reg_conf_tbl;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s addr type %d data type %d\n", __func__,
+		i2c_msg->addr_type, i2c_msg->data_type);
+	/* assume total size within the max queue */
+	while (cmd_size) {
+		CDBG("%s cmd_size %d addr 0x%x data 0x%x", __func__,
+			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+		data[i++] = CCI_I2C_WRITE_CMD;
+		if (i2c_cmd->reg_addr)
+			reg_addr = i2c_cmd->reg_addr;
+		/* either byte or word addr */
+		if (i2c_msg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+			data[i++] = reg_addr;
+		else {
+			data[i++] = (reg_addr & 0xFF00) >> 8;
+			data[i++] = reg_addr & 0x00FF;
+		}
+		/* max of 10 data bytes */
+		do {
+			if (i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+				data[i++] = i2c_cmd->reg_data;
+				reg_addr++;
+			} else {
+				if ((i + 1) <= 10) {
+					data[i++] = (i2c_cmd->reg_data &
+						0xFF00) >> 8; /* MSB */
+					data[i++] = i2c_cmd->reg_data &
+						0x00FF; /* LSB */
+					reg_addr += 2;
+				} else
+					break;
+			}
+			i2c_cmd++;
+		} while (--cmd_size && !i2c_cmd->reg_addr && (i <= 10));
+		data[0] |= ((i-1) << 4);
+		len = ((i-1)/4) + 1;
+		msm_cci_validate_queue(cci_dev, len, master, queue);
+		for (h = 0, k = 0; h < len; h++) {
+			cmd = 0;
+			for (j = 0; (j < 4 && k < i); j++)
+				cmd |= (data[k++] << (j * 8));
+			CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
+				__func__, cmd);
+			msm_camera_io_w(cmd, cci_dev->base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+		}
+		i = 0;
+	}
+	return 0;
+}
+
+static int32_t msm_cci_write_i2c_queue(struct cci_device *cci_dev,
+	uint32_t val,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	CDBG("%s:%d called\n", __func__, __LINE__);
+	msm_cci_validate_queue(cci_dev, 1, master, queue);
+	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val %x:%x\n",
+		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	return rc;
+}
+
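+/*
+ * Queue a read transaction on QUEUE_1: SET_PARAM (slave id, retries,
+ * id map), LOCK, a WRITE of the register address, READ of num_byte
+ * bytes, then UNLOCK. Once the queue completes, the read FIFO is
+ * drained a word at a time; the first byte returned is the slave id
+ * echo and is skipped.
+ */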
+static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	int32_t read_bytes = 0;
+	int32_t index = 0, first_byte = 0;
+	uint32_t i = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex);
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	if (read_cfg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+		val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
+			((read_cfg->addr & 0xFF) << 8);
+	if (read_cfg->addr_type == MSM_CAMERA_I2C_WORD_ADDR)
+		val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
+			(((read_cfg->addr & 0xFF00) >> 8) << 8) |
+			((read_cfg->addr & 0xFF) << 16);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = msm_camera_io_r(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CDBG("%s cur word cnt %x\n", __func__, val);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+
+	wait_for_completion_interruptible(&cci_dev->
+		cci_master_info[master].reset_complete);
+
+	read_bytes = (read_cfg->num_byte / 4) + 1;
+	index = 0;
+	CDBG("%s index %d num_byte %d\n", __func__, index,
+		read_cfg->num_byte);
+	first_byte = 0;
+	do {
+		val = msm_camera_io_r(cci_dev->base +
+			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+		CDBG("%s read val %x\n", __func__, val);
+		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+			CDBG("%s i %d index %d\n", __func__, i, index);
+			if (!first_byte) {
+				CDBG("%s sid %x\n", __func__, val & 0xFF);
+				first_byte++;
+			} else {
+				read_cfg->data[index] =
+					(val  >> (i * 8)) & 0xFF;
+				CDBG("%s data[%d] %x\n", __func__, index,
+					read_cfg->data[index]);
+				index++;
+			}
+		}
+	} while (--read_bytes > 0);
+ERROR:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+	return rc;
+}
+
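+/*
+ * Queue a write transaction on QUEUE_0: SET_PARAM, LOCK, the packed
+ * WRITE commands from msm_cci_data_queue(), UNLOCK and a REPORT, then
+ * program the exec word count, start the queue and wait for the
+ * completion signalled by the IRQ handler.
+ */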
+static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	uint32_t val;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_0;
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	mutex_lock(&cci_dev->cci_master_info[master].mutex);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	CDBG("%s:%d CCI_I2C_SET_PARAM_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	CDBG("%s:%d CCI_I2C_LOCK_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	msm_cci_data_queue(cci_dev, c_ctrl, queue);
+	val = CCI_I2C_UNLOCK_CMD;
+	CDBG("%s:%d CCI_I2C_UNLOCK_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_REPORT_CMD | (1 << 8);
+	CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = msm_camera_io_r(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CDBG("%s line %d size of queue %d\n", __func__, __LINE__, val);
+	CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR\n", __func__, __LINE__);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+	msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+
+	CDBG("%s line %d wait_for_completion_interruptible\n",
+		__func__, __LINE__);
+	wait_for_completion_interruptible(&cci_dev->
+		cci_master_info[master].reset_complete);
+ERROR:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+	return rc;
+}
+
+static int msm_cci_subdev_g_chip_ident(struct v4l2_subdev *sd,
+			struct v4l2_dbg_chip_ident *chip)
+{
+	BUG_ON(!chip);
+	chip->ident = V4L2_IDENT_CCI;
+	chip->revision = 0;
+	return 0;
+}
+
+static struct msm_cam_clk_info cci_clk_info[] = {
+	{"cci_clk_src", 19200000},
+};
+
+static int32_t msm_cci_init(struct v4l2_subdev *sd)
+{
+	int rc = 0;
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+	CDBG("%s line %d\n", __func__, __LINE__);
+	if (cci_dev == NULL) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	rc = msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
+		cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 1);
+	if (rc < 0) {
+		CDBG("%s: clk enable failed\n", __func__);
+		goto clk_enable_failed;
+	}
+
+	enable_irq(cci_dev->irq->start);
+	cci_dev->hw_version = msm_camera_io_r(cci_dev->base +
+		CCI_HW_VERSION_ADDR);
+	cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_RESET_CMD_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+	wait_for_completion_interruptible(&cci_dev->cci_master_info[MASTER_0].
+		reset_complete);
+	msm_cci_set_clk_param(cci_dev);
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	return 0;
+
+clk_enable_failed:
+	return rc;
+}
+
+static int32_t msm_cci_release(struct v4l2_subdev *sd)
+{
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	disable_irq(cci_dev->irq->start);
+
+	msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
+		cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 0);
+
+	return 0;
+}
+
+static int32_t msm_cci_config(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *cci_ctrl)
+{
+	int32_t rc = 0;
+	CDBG("%s line %d cmd %d\n", __func__, __LINE__,
+		cci_ctrl->cmd);
+	switch (cci_ctrl->cmd) {
+	case MSM_CCI_INIT:
+		rc = msm_cci_init(sd);
+		break;
+	case MSM_CCI_RELEASE:
+		rc = msm_cci_release(sd);
+		break;
+	case MSM_CCI_SET_SID:
+		break;
+	case MSM_CCI_SET_FREQ:
+		rc = msm_cci_i2c_set_freq(sd, cci_ctrl);
+		break;
+	case MSM_CCI_SET_SYNC_CID:
+		rc = msm_cci_i2c_config_sync_timer(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_READ:
+		rc = msm_cci_i2c_read(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_WRITE:
+		rc = msm_cci_i2c_write(sd, cci_ctrl);
+		break;
+	case MSM_CCI_GPIO_WRITE:
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+	CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+	cci_ctrl->status = rc;
+	return rc;
+}
+
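+/*
+ * IRQ handler: reset-done acks complete any pending master reset;
+ * read-done/report interrupts complete the current transaction; a NACK
+ * marks the master status as -EINVAL and requests a halt, and the halt
+ * ack in turn triggers a reset of that master.
+ */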
+static irqreturn_t msm_cci_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	struct cci_device *cci_dev = data;
+	irq = msm_camera_io_r(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
+	msm_camera_io_w(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	CDBG("%s CCI_IRQ_STATUS_0 = 0x%x\n", __func__, irq);
+	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_0].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_0].
+				reset_complete);
+		}
+		if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_1].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_1].
+				reset_complete);
+		}
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR_BMSK)) {
+		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+		msm_camera_io_w(CCI_M0_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR_BMSK)) {
+		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+		msm_camera_io_w(CCI_M1_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	} else if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+		msm_camera_io_w(CCI_M0_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	} else if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+		msm_camera_io_w(CCI_M1_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	}
+	return IRQ_HANDLED;
+}
+
+int msm_cci_irq_routine(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+	irqreturn_t ret;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	ret = msm_cci_irq(cci_dev->irq->start, cci_dev);
+	*handled = TRUE;
+	return 0;
+}
+
+static long msm_cci_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc = 0;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	switch (cmd) {
+	case VIDIOC_MSM_CCI_CFG:
+		rc = msm_cci_config(sd, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+	CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+	return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_cci_subdev_core_ops = {
+	.g_chip_ident = &msm_cci_subdev_g_chip_ident,
+	.ioctl = &msm_cci_subdev_ioctl,
+	.interrupt_service_routine = msm_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_cci_subdev_ops = {
+	.core = &msm_cci_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_cci_internal_ops;
+
+static void msm_cci_initialize_cci_params(struct cci_device *new_cci_dev)
+{
+	uint8_t i = 0, j = 0;
+	for (i = 0; i < NUM_MASTERS; i++) {
+		new_cci_dev->cci_master_info[i].status = 0;
+		mutex_init(&new_cci_dev->cci_master_info[i].mutex);
+		init_completion(&new_cci_dev->
+			cci_master_info[i].reset_complete);
+		for (j = 0; j < NUM_QUEUES; j++) {
+			if (j == QUEUE_0)
+				new_cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size = CCI_I2C_QUEUE_0_SIZE;
+			else
+				new_cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size = CCI_I2C_QUEUE_1_SIZE;
+		}
+	}
+	return;
+}
+
+static int __devinit msm_cci_probe(struct platform_device *pdev)
+{
+	struct cci_device *new_cci_dev;
+	int rc = 0;
+	struct msm_cam_subdev_info sd_info;
+	struct intr_table_entry irq_req;
+	CDBG("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
+	new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
+	if (!new_cci_dev) {
+		CDBG("%s: not enough memory\n", __func__);
+		return -ENOMEM;
+	}
+	v4l2_subdev_init(&new_cci_dev->subdev, &msm_cci_subdev_ops);
+	new_cci_dev->subdev.internal_ops = &msm_cci_internal_ops;
+	new_cci_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(new_cci_dev->subdev.name,
+			ARRAY_SIZE(new_cci_dev->subdev.name), "msm_cci");
+	v4l2_set_subdevdata(&new_cci_dev->subdev, new_cci_dev);
+	platform_set_drvdata(pdev, &new_cci_dev->subdev);
+	CDBG("%s sd %p\n", __func__, &new_cci_dev->subdev);
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
+	new_cci_dev->mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "cci");
+	if (!new_cci_dev->mem) {
+		CDBG("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	new_cci_dev->irq = platform_get_resource_byname(pdev,
+					IORESOURCE_IRQ, "cci");
+	if (!new_cci_dev->irq) {
+		CDBG("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		new_cci_dev->irq->start,
+		new_cci_dev->irq->end);
+	new_cci_dev->io = request_mem_region(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem), pdev->name);
+	if (!new_cci_dev->io) {
+		CDBG("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto cci_no_resource;
+	}
+
+	new_cci_dev->base = ioremap(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem));
+	if (!new_cci_dev->base) {
+		rc = -ENOMEM;
+		goto cci_release_mem;
+	}
+
+	sd_info.sdev_type = CCI_DEV;
+	sd_info.sd_index = pdev->id;
+	sd_info.irq_num = new_cci_dev->irq->start;
+	msm_cam_register_subdev_node(&new_cci_dev->subdev, &sd_info);
+
+	irq_req.cam_hw_idx       = MSM_CAM_HW_CCI;
+	irq_req.dev_name         = "msm_cci";
+	irq_req.irq_idx          = CAMERA_SS_IRQ_1;
+	irq_req.irq_num          = new_cci_dev->irq->start;
+	irq_req.is_composite     = 0;
+	irq_req.irq_trigger_type = IRQF_TRIGGER_RISING;
+	irq_req.num_hwcore       = 1;
+	irq_req.subdev_list[0]   = &new_cci_dev->subdev;
+	irq_req.data             = (void *)new_cci_dev;
+	rc = msm_cam_server_request_irq(&irq_req);
+	if (rc == -ENXIO) {
+		/* IRQ Router hardware is not present on this hardware.
+		 * Request for the IRQ and register the interrupt handler. */
+		rc = request_irq(new_cci_dev->irq->start, msm_cci_irq,
+			IRQF_TRIGGER_RISING, "cci", new_cci_dev);
+		if (rc < 0) {
+			CDBG("%s: irq request fail\n", __func__);
+			rc = -EBUSY;
+			goto cci_release_mem;
+		}
+		disable_irq(new_cci_dev->irq->start);
+	} else if (rc < 0) {
+		CDBG("%s Error registering irq ", __func__);
+		rc = -EBUSY;
+		goto cci_release_mem;
+	}
+
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		new_cci_dev->irq->start,
+		new_cci_dev->irq->end);
+
+	new_cci_dev->pdev = pdev;
+	msm_cci_initialize_cci_params(new_cci_dev);
+
+	CDBG("%s line %d\n", __func__, __LINE__);
+	return 0;
+
+cci_release_mem:
+	release_mem_region(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem));
+cci_no_resource:
+	kfree(new_cci_dev);
+	return rc;
+}
+
+static int __exit msm_cci_exit(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	struct cci_device *cci_dev =
+		v4l2_get_subdevdata(subdev);
+	release_mem_region(cci_dev->mem->start, resource_size(cci_dev->mem));
+	kfree(cci_dev);
+	return 0;
+}
+
+static const struct of_device_id msm_cci_dt_match[] = {
+	{.compatible = "qcom,cci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+	.probe = msm_cci_probe,
+	.remove = msm_cci_exit,
+	.driver = {
+		.name = MSM_CCI_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cci_dt_match,
+	},
+};
+
+static int __init msm_cci_init_module(void)
+{
+	return platform_driver_register(&cci_driver);
+}
+
+static void __exit msm_cci_exit_module(void)
+{
+	platform_driver_unregister(&cci_driver);
+}
+
+module_init(msm_cci_init_module);
+module_exit(msm_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/cci/msm_cci.h b/drivers/media/video/msm/cci/msm_cci.h
new file mode 100644
index 0000000..6955576
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cci.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CCI_H
+#define MSM_CCI_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <mach/camera.h>
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+struct msm_camera_cci_master_info {
+	uint32_t status;
+	uint8_t reset_pending;
+	struct mutex mutex;
+	struct completion reset_complete;
+};
+
+struct cci_device {
+	struct platform_device *pdev;
+	struct v4l2_subdev subdev;
+	struct resource *mem;
+	struct resource *irq;
+	struct resource *io;
+	void __iomem *base;
+	uint32_t hw_version;
+	struct clk *cci_clk[5];
+	struct msm_camera_cci_i2c_queue_info
+		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+	struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
+};
+
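+/*
+ * CCI queue command opcodes. msm_cci.c places the opcode in the low
+ * nibble of the first command byte/word, with command-specific fields
+ * (byte count, slave id, retries, report flag) in the upper bits.
+ */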
+enum msm_cci_i2c_cmd_type {
+	CCI_I2C_SET_PARAM_CMD = 1,
+	CCI_I2C_WAIT_CMD,
+	CCI_I2C_WAIT_SYNC_CMD,
+	CCI_I2C_WAIT_GPIO_EVENT_CMD,
+	CCI_I2C_TRIG_I2C_EVENT_CMD,
+	CCI_I2C_LOCK_CMD,
+	CCI_I2C_UNLOCK_CMD,
+	CCI_I2C_REPORT_CMD,
+	CCI_I2C_WRITE_CMD,
+	CCI_I2C_READ_CMD,
+	CCI_I2C_WRITE_DISABLE_P_CMD,
+	CCI_I2C_READ_DISABLE_P_CMD,
+	CCI_I2C_WRITE_CMD2,
+	CCI_I2C_WRITE_CMD3,
+	CCI_I2C_REPEAT_CMD,
+	CCI_I2C_INVALID_CMD,
+};
+
+enum msm_cci_gpio_cmd_type {
+	CCI_GPIO_SET_PARAM_CMD = 1,
+	CCI_GPIO_WAIT_CMD,
+	CCI_GPIO_WAIT_SYNC_CMD,
+	CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+	CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+	CCI_GPIO_OUT_CMD,
+	CCI_GPIO_TRIG_EVENT_CMD,
+	CCI_GPIO_REPORT_CMD,
+	CCI_GPIO_REPEAT_CMD,
+	CCI_GPIO_CONTINUE_CMD,
+	CCI_GPIO_INVALID_CMD,
+};
+
+#define VIDIOC_MSM_CCI_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct msm_camera_cci_ctrl *)
+
+#endif
+
diff --git a/drivers/media/video/msm/csi/Makefile b/drivers/media/video/msm/csi/Makefile
index d11e2d2..547eb13 100644
--- a/drivers/media/video/msm/csi/Makefile
+++ b/drivers/media/video/msm/csi/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_MSM_CSI2_REGISTER) += msm_csi2_register.o
 obj-$(CONFIG_MSM_CSIPHY) += msm_csiphy.o
 obj-$(CONFIG_MSM_CSID) += msm_csid.o
+obj-$(CONFIG_MSM_ISPIF) += msm_ispif.o
 obj-$(CONFIG_ARCH_MSM8960) += msm_csi2_register.o msm_csiphy.o msm_csid.o msm_ispif.o
 obj-$(CONFIG_ARCH_MSM7X27A) += msm_csic_register.o msm_csic.o
 obj-$(CONFIG_ARCH_MSM8X60) += msm_csic_register.o msm_csic.o
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
new file mode 100644
index 0000000..ecc4fea
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_HWREG_H
+#define MSM_ISPIF_HWREG_H
+
+
+/* ISPIF registers */
+
+#define ISPIF_RST_CMD_ADDR                        0x00
+#define ISPIF_RST_CMD_1_ADDR                      0x00
+#define ISPIF_INTF_CMD_ADDR                       0x04
+#define ISPIF_INTF_CMD_1_ADDR                     0x30
+#define ISPIF_CTRL_ADDR                           0x08
+#define ISPIF_INPUT_SEL_ADDR                      0x0C
+#define ISPIF_PIX_0_INTF_CID_MASK_ADDR            0x10
+#define ISPIF_RDI_0_INTF_CID_MASK_ADDR            0x14
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR            0x38
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR            0x3C
+#define ISPIF_RDI_2_INTF_CID_MASK_ADDR            0x44
+#define ISPIF_PIX_0_STATUS_ADDR                   0x24
+#define ISPIF_RDI_0_STATUS_ADDR                   0x28
+#define ISPIF_PIX_1_STATUS_ADDR                   0x60
+#define ISPIF_RDI_1_STATUS_ADDR                   0x64
+#define ISPIF_RDI_2_STATUS_ADDR                   0x6C
+#define ISPIF_IRQ_MASK_ADDR                     0x0100
+#define ISPIF_IRQ_CLEAR_ADDR                    0x0104
+#define ISPIF_IRQ_STATUS_ADDR                   0x0108
+#define ISPIF_IRQ_MASK_1_ADDR                   0x010C
+#define ISPIF_IRQ_CLEAR_1_ADDR                  0x0110
+#define ISPIF_IRQ_STATUS_1_ADDR                 0x0114
+#define ISPIF_IRQ_MASK_2_ADDR                   0x0118
+#define ISPIF_IRQ_CLEAR_2_ADDR                  0x011C
+#define ISPIF_IRQ_STATUS_2_ADDR                 0x0120
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR         0x0124
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST           31
+#define RDI_CLK_DOMAIN_RST           30
+#define PIX_CLK_DOMAIN_RST           29
+#define AHB_CLK_DOMAIN_RST           28
+#define RDI_1_CLK_DOMAIN_RST         27
+#define RDI_2_VFE_RST_STB            19
+#define RDI_2_CSID_RST_STB           18
+#define RDI_1_VFE_RST_STB            13
+#define RDI_1_CSID_RST_STB           12
+#define RDI_0_VFE_RST_STB            7
+#define RDI_0_CSID_RST_STB           6
+#define PIX_1_VFE_RST_STB            10
+#define PIX_1_CSID_RST_STB           9
+#define PIX_0_VFE_RST_STB            4
+#define PIX_0_CSID_RST_STB           3
+#define SW_REG_RST_STB               2
+#define MISC_LOGIC_RST_STB           1
+#define STROBED_RST_EN               0
+
+#define PIX_INTF_0_OVERFLOW_IRQ      12
+#define RAW_INTF_0_OVERFLOW_IRQ      25
+#define RAW_INTF_1_OVERFLOW_IRQ      25
+#define RESET_DONE_IRQ               27
+
+#define ISPIF_IRQ_STATUS_MASK        0xA493000
+#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
new file mode 100644
index 0000000..820adf4
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
@@ -0,0 +1,91 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_HWREG_H
+#define MSM_ISPIF_HWREG_H
+
+
+/* ISPIF registers */
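+/*
+ * Offsets below are for the VFE0 register bank; msm_ispif.c adds
+ * (0x200 * vfe_intf) to reach the VFE1 bank on this version.
+ */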
+
+#define ISPIF_RST_CMD_ADDR                        0x08
+#define ISPIF_RST_CMD_1_ADDR                      0x0C
+#define ISPIF_INTF_CMD_ADDR                      0x248
+#define ISPIF_INTF_CMD_1_ADDR                    0x24C
+#define ISPIF_CTRL_ADDR                           0x08
+#define ISPIF_INPUT_SEL_ADDR                     0x244
+#define ISPIF_PIX_0_INTF_CID_MASK_ADDR           0x254
+#define ISPIF_RDI_0_INTF_CID_MASK_ADDR           0x264
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR           0x258
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR           0x268
+#define ISPIF_RDI_2_INTF_CID_MASK_ADDR           0x26C
+#define ISPIF_PIX_0_STATUS_ADDR                  0x2C0
+#define ISPIF_RDI_0_STATUS_ADDR                  0x2D0
+#define ISPIF_PIX_1_STATUS_ADDR                  0x2C4
+#define ISPIF_RDI_1_STATUS_ADDR                  0x2D4
+#define ISPIF_RDI_2_STATUS_ADDR                  0x2D8
+#define ISPIF_IRQ_MASK_ADDR                      0x208
+#define ISPIF_IRQ_CLEAR_ADDR                     0x230
+#define ISPIF_IRQ_STATUS_ADDR                    0x21C
+#define ISPIF_IRQ_MASK_1_ADDR                    0x20C
+#define ISPIF_IRQ_CLEAR_1_ADDR                   0x234
+#define ISPIF_IRQ_STATUS_1_ADDR                  0x220
+#define ISPIF_IRQ_MASK_2_ADDR                    0x210
+#define ISPIF_IRQ_CLEAR_2_ADDR                   0x238
+#define ISPIF_IRQ_STATUS_2_ADDR                  0x224
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR           0x1C
+
+/* new */
+#define ISPIF_VFE_m_CTRL_0_ADDR                  0x200
+#define ISPIF_VFE_m_IRQ_MASK_0                   0x208
+#define ISPIF_VFE_m_IRQ_MASK_1                   0x20C
+#define ISPIF_VFE_m_IRQ_MASK_2                   0x210
+#define ISPIF_VFE_m_IRQ_STATUS_0                 0x21C
+#define ISPIF_VFE_m_IRQ_STATUS_1                 0x220
+#define ISPIF_VFE_m_IRQ_STATUS_2                 0x224
+#define ISPIF_VFE_m_IRQ_CLEAR_0                  0x230
+#define ISPIF_VFE_m_IRQ_CLEAR_1                  0x234
+#define ISPIF_VFE_m_IRQ_CLEAR_2                  0x238
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST           31
+#define RDI_CLK_DOMAIN_RST           26
+#define RDI_1_CLK_DOMAIN_RST         27
+#define RDI_2_CLK_DOMAIN_RST         28
+#define PIX_CLK_DOMAIN_RST           29
+#define PIX_1_CLK_DOMAIN_RST         30
+#define AHB_CLK_DOMAIN_RST           25
+#define RDI_2_VFE_RST_STB            12
+#define RDI_2_CSID_RST_STB           11
+#define RDI_1_VFE_RST_STB            10
+#define RDI_1_CSID_RST_STB           9
+#define RDI_0_VFE_RST_STB            8
+#define RDI_0_CSID_RST_STB           7
+#define PIX_1_VFE_RST_STB            6
+#define PIX_1_CSID_RST_STB           5
+#define PIX_0_VFE_RST_STB            4
+#define PIX_0_CSID_RST_STB           3
+#define SW_REG_RST_STB               2
+#define MISC_LOGIC_RST_STB           1
+#define STROBED_RST_EN               0
+
+#define PIX_INTF_0_OVERFLOW_IRQ      12
+#define RAW_INTF_0_OVERFLOW_IRQ      25
+#define RAW_INTF_1_OVERFLOW_IRQ      25
+#define RESET_DONE_IRQ               27
+
+#define ISPIF_IRQ_STATUS_MASK        0xA493000
+#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
+
+#endif
diff --git a/drivers/media/video/msm/csi/msm_ispif.c b/drivers/media/video/msm/csi/msm_ispif.c
index 1494644..b23efb5 100644
--- a/drivers/media/video/msm/csi/msm_ispif.c
+++ b/drivers/media/video/msm/csi/msm_ispif.c
@@ -14,78 +14,30 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <mach/gpio.h>
 #include <mach/camera.h>
 #include "msm_ispif.h"
 #include "msm.h"
+#include "msm_ispif_hwreg.h"
 
-#define V4L2_IDENT_ISPIF			50001
-#define CSID_VERSION_V2                      0x2000011
-
-/* ISPIF registers */
-
-#define ISPIF_RST_CMD_ADDR                        0X00
-#define ISPIF_INTF_CMD_ADDR                       0X04
-#define ISPIF_CTRL_ADDR                           0X08
-#define ISPIF_INPUT_SEL_ADDR                      0X0C
-#define ISPIF_PIX_INTF_CID_MASK_ADDR              0X10
-#define ISPIF_RDI_INTF_CID_MASK_ADDR              0X14
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR            0X38
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR            0X3C
-#define ISPIF_PIX_STATUS_ADDR                     0X24
-#define ISPIF_RDI_STATUS_ADDR                     0X28
-#define ISPIF_RDI_1_STATUS_ADDR                   0X64
-#define ISPIF_IRQ_MASK_ADDR                     0X0100
-#define ISPIF_IRQ_CLEAR_ADDR                    0X0104
-#define ISPIF_IRQ_STATUS_ADDR                   0X0108
-#define ISPIF_IRQ_MASK_1_ADDR                   0X010C
-#define ISPIF_IRQ_CLEAR_1_ADDR                  0X0110
-#define ISPIF_IRQ_STATUS_1_ADDR                 0X0114
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR         0x0124
-
-/*ISPIF RESET BITS*/
-
-#define VFE_CLK_DOMAIN_RST           31
-#define RDI_CLK_DOMAIN_RST           30
-#define PIX_CLK_DOMAIN_RST           29
-#define AHB_CLK_DOMAIN_RST           28
-#define RDI_1_CLK_DOMAIN_RST         27
-#define RDI_1_VFE_RST_STB            13
-#define RDI_1_CSID_RST_STB           12
-#define RDI_VFE_RST_STB              7
-#define RDI_CSID_RST_STB             6
-#define PIX_VFE_RST_STB              4
-#define PIX_CSID_RST_STB             3
-#define SW_REG_RST_STB               2
-#define MISC_LOGIC_RST_STB           1
-#define STROBED_RST_EN               0
-
-#define PIX_INTF_0_OVERFLOW_IRQ      12
-#define RAW_INTF_0_OVERFLOW_IRQ      25
-#define RAW_INTF_1_OVERFLOW_IRQ      25
-#define RESET_DONE_IRQ               27
-
-#define ISPIF_IRQ_STATUS_MASK        0xA493000
-#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
-#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
-
+#define V4L2_IDENT_ISPIF                     50001
+#define CSID_VERSION_V2                      0x02000011
+#define CSID_VERSION_V3                      0x30000000
 #define MAX_CID 15
 
+static atomic_t ispif_irq_cnt;
+static spinlock_t ispif_tasklet_lock;
+static struct list_head ispif_tasklet_q;
 
-static struct ispif_device *ispif;
-atomic_t ispif_irq_cnt;
-spinlock_t  ispif_tasklet_lock;
-struct list_head ispif_tasklet_q;
-
-static uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
-
-
-static int msm_ispif_intf_reset(uint8_t intfmask)
+static int msm_ispif_intf_reset(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
 	int rc = 0;
-	uint32_t data = 0x1;
-	uint8_t intfnum = 0, mask = intfmask;
+	uint32_t data = (0x1 << STROBED_RST_EN);
+	uint32_t data1 = (0x1 << STROBED_RST_EN);
+	uint16_t intfnum = 0, mask = intfmask;
+
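+	/* data collects the reset strobes for VFE0, data1 those for VFE1;
+	 * data1 is written to ISPIF_RST_CMD_1_ADDR on CSID v3 parts. */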
 	while (mask != 0) {
 		if (!(intfmask & (0x1 << intfnum))) {
 			mask >>= 1;
@@ -94,21 +46,48 @@
 		}
 		switch (intfnum) {
 		case PIX0:
-			data = (0x1 << STROBED_RST_EN) +
-				(0x1 << PIX_VFE_RST_STB) +
-				(0x1 << PIX_CSID_RST_STB);
+			if (vfe_intf == VFE0)
+				data |= (0x1 << PIX_0_VFE_RST_STB) |
+					(0x1 << PIX_0_CSID_RST_STB);
+			else
+				data1 |= (0x1 << PIX_0_VFE_RST_STB) |
+					(0x1 << PIX_0_CSID_RST_STB);
 			break;
 
 		case RDI0:
-			data = (0x1 << STROBED_RST_EN) +
-				(0x1 << RDI_VFE_RST_STB)  +
-				(0x1 << RDI_CSID_RST_STB);
+			if (vfe_intf == VFE0)
+				data |= (0x1 << RDI_0_VFE_RST_STB) |
+					(0x1 << RDI_0_CSID_RST_STB);
+			else
+				data1 |= (0x1 << RDI_0_VFE_RST_STB) |
+					(0x1 << RDI_0_CSID_RST_STB);
+			break;
+
+		case PIX1:
+			if (vfe_intf == VFE0)
+				data |= (0x1 << PIX_1_VFE_RST_STB) |
+					(0x1 << PIX_1_CSID_RST_STB);
+			else
+				data1 |= (0x1 << PIX_1_VFE_RST_STB) |
+					(0x1 << PIX_1_CSID_RST_STB);
 			break;
 
 		case RDI1:
-			data = (0x1 << STROBED_RST_EN) +
-				(0x1 << RDI_1_VFE_RST_STB) +
-				(0x1 << RDI_1_CSID_RST_STB);
+			if (vfe_intf == VFE0)
+				data |= (0x1 << RDI_1_VFE_RST_STB) |
+					(0x1 << RDI_1_CSID_RST_STB);
+			else
+				data1 |= (0x1 << RDI_1_VFE_RST_STB) |
+					(0x1 << RDI_1_CSID_RST_STB);
+			break;
+
+		case RDI2:
+			if (vfe_intf == VFE0)
+				data |= (0x1 << RDI_2_VFE_RST_STB) |
+					(0x1 << RDI_2_CSID_RST_STB);
+			else
+				data1 |= (0x1 << RDI_2_VFE_RST_STB) |
+					(0x1 << RDI_2_CSID_RST_STB);
 			break;
 
 		default:
@@ -118,27 +97,44 @@
 		mask >>= 1;
 		intfnum++;
 	}	/*end while */
-	if (rc >= 0) {
-		msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
-		rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
+	rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	if (ispif->csid_version >= CSID_VERSION_V3 && data1 > 0x1) {
+		msm_camera_io_w(data1,
+			ispif->base + ISPIF_RST_CMD_1_ADDR);
+		rc = wait_for_completion_interruptible(&ispif->
+			reset_complete);
 	}
 
 	return rc;
 }
 
-static int msm_ispif_reset(void)
+static int msm_ispif_reset(struct ispif_device *ispif)
 {
-	uint32_t data = (0x1 << STROBED_RST_EN) +
-		(0x1 << SW_REG_RST_STB) +
-		(0x1 << MISC_LOGIC_RST_STB) +
-		(0x1 << PIX_VFE_RST_STB) +
-		(0x1 << PIX_CSID_RST_STB) +
-		(0x1 << RDI_VFE_RST_STB) +
-		(0x1 << RDI_CSID_RST_STB) +
-		(0x1 << RDI_1_VFE_RST_STB) +
+	int rc = 0;
+	uint32_t data = (0x1 << STROBED_RST_EN) |
+		(0x1 << SW_REG_RST_STB) |
+		(0x1 << MISC_LOGIC_RST_STB) |
+		(0x1 << PIX_0_VFE_RST_STB) |
+		(0x1 << PIX_0_CSID_RST_STB) |
+		(0x1 << RDI_0_VFE_RST_STB) |
+		(0x1 << RDI_0_CSID_RST_STB) |
+		(0x1 << RDI_1_VFE_RST_STB) |
 		(0x1 << RDI_1_CSID_RST_STB);
+
+	if (ispif->csid_version >= CSID_VERSION_V2)
+		data |= (0x1 << PIX_1_VFE_RST_STB) |
+			(0x1 << PIX_1_CSID_RST_STB) |
+			(0x1 << RDI_2_VFE_RST_STB) |
+			(0x1 << RDI_2_CSID_RST_STB);
 	msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
-	return wait_for_completion_interruptible(&ispif->reset_complete);
+	rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	if (ispif->csid_version >= CSID_VERSION_V3) {
+		msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_1_ADDR);
+		rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	}
+	return rc;
 }
 
 static int msm_ispif_subdev_g_chip_ident(struct v4l2_subdev *sd,
@@ -150,10 +146,12 @@
 	return 0;
 }
 
-static void msm_ispif_sel_csid_core(uint8_t intftype, uint8_t csid)
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
 {
 	int rc = 0;
 	uint32_t data;
+
 	if (ispif->ispif_clk[intftype] == NULL) {
 		pr_err("%s: ispif NULL clk\n", __func__);
 		return;
@@ -163,62 +161,156 @@
 	if (rc < 0)
 		pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
 
-	data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR);
-	data |= csid<<(intftype*4);
-	msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR);
+	data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR +
+		(0x200 * vfe_intf));
+	switch (intftype) {
+	case PIX0:
+		data |= csid;
+		break;
+
+	case RDI0:
+		data |= (csid << 4);
+		break;
+
+	case PIX1:
+		data |= (csid << 8);
+		break;
+
+	case RDI1:
+		data |= (csid << 12);
+		break;
+
+	case RDI2:
+		data |= (csid << 20);
+		break;
+	}
+	msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR +
+		(0x200 * vfe_intf));
 }
 
-static void msm_ispif_enable_intf_cids(uint8_t intftype, uint16_t cid_mask)
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+	uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf)
 {
 	uint32_t data;
 	mutex_lock(&ispif->mutex);
 	switch (intftype) {
 	case PIX0:
 		data = msm_camera_io_r(ispif->base +
-				ISPIF_PIX_INTF_CID_MASK_ADDR);
+			ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		data |= cid_mask;
 		msm_camera_io_w(data, ispif->base +
-				ISPIF_PIX_INTF_CID_MASK_ADDR);
+			ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI0:
 		data = msm_camera_io_r(ispif->base +
-				ISPIF_RDI_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		data |= cid_mask;
 		msm_camera_io_w(data, ispif->base +
-				ISPIF_RDI_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		data |= cid_mask;
+		msm_camera_io_w(data, ispif->base +
+			ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI1:
 		data = msm_camera_io_r(ispif->base +
-				ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		data |= cid_mask;
 		msm_camera_io_w(data, ispif->base +
-				ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		data |= cid_mask;
+		msm_camera_io_w(data, ispif->base +
+			ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 	}
 	mutex_unlock(&ispif->mutex);
 }
 
-static int msm_ispif_config(struct msm_ispif_params_list *params_list)
+static int32_t msm_ispif_validate_intf_status(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf)
+{
+	int32_t rc = 0;
+	uint32_t data;
+	mutex_lock(&ispif->mutex);
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI0:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI1:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+	}
+	if ((data & 0xf) != 0xf)
+		rc = -EBUSY;
+	mutex_unlock(&ispif->mutex);
+	return rc;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+	struct msm_ispif_params_list *params_list)
 {
 	uint32_t params_len;
 	struct msm_ispif_params *ispif_params;
-	uint32_t data, data1;
 	int rc = 0, i = 0;
+	uint8_t intftype;
+	uint8_t vfe_intf;
 	params_len = params_list->len;
 	ispif_params = params_list->params;
 	CDBG("Enable interface\n");
-	data = msm_camera_io_r(ispif->base + ISPIF_PIX_STATUS_ADDR);
-	data1 = msm_camera_io_r(ispif->base + ISPIF_RDI_STATUS_ADDR);
-	if (((data & 0xf) != 0xf) || ((data1 & 0xf) != 0xf))
-		return -EBUSY;
 	msm_camera_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_ADDR);
 	for (i = 0; i < params_len; i++) {
-		msm_ispif_sel_csid_core(ispif_params[i].intftype,
-			ispif_params[i].csid);
-		msm_ispif_enable_intf_cids(ispif_params[i].intftype,
-			ispif_params[i].cid_mask);
+		intftype = ispif_params[i].intftype;
+		vfe_intf = ispif_params[i].vfe_intf;
+		CDBG("%s intftype %x, vfe_intf %d\n", __func__, intftype,
+			vfe_intf);
+		if ((intftype >= INTF_MAX) ||
+			(ispif->csid_version <= CSID_VERSION_V2 &&
+			vfe_intf > VFE0) ||
+			(ispif->csid_version == CSID_VERSION_V3 &&
+			vfe_intf >= VFE_MAX)) {
+			pr_err("%s: intftype / vfe intf not valid\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+		if (rc < 0) {
+			pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+		msm_ispif_sel_csid_core(ispif, intftype, ispif_params[i].csid,
+			vfe_intf);
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			ispif_params[i].cid_mask, vfe_intf);
 	}
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
@@ -230,23 +322,34 @@
 	return rc;
 }
 
-static uint32_t msm_ispif_get_cid_mask(uint8_t intftype)
+static uint32_t msm_ispif_get_cid_mask(struct ispif_device *ispif,
+	uint16_t intftype, uint8_t vfe_intf)
 {
 	uint32_t mask = 0;
 	switch (intftype) {
 	case PIX0:
 		mask = msm_camera_io_r(ispif->base +
-			ISPIF_PIX_INTF_CID_MASK_ADDR);
+			ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI0:
 		mask = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case PIX1:
+		mask = msm_camera_io_r(ispif->base +
+			ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI1:
 		mask = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI2:
+		mask = msm_camera_io_r(ispif->base +
+			ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	default:
@@ -255,12 +358,14 @@
 	return mask;
 }
 
-static void
-msm_ispif_intf_cmd(uint8_t intfmask, uint8_t intf_cmd_mask)
+static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint16_t intfmask,
+	uint8_t intf_cmd_mask, uint8_t vfe_intf)
 {
-	uint8_t vc = 0, val = 0;
-	uint8_t mask = intfmask, intfnum = 0;
+	uint8_t vc = 0;
+	uint16_t mask = intfmask, intfnum = 0;
 	uint32_t cid_mask = 0;
+	uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
+	uint32_t global_intf_cmd_mask1 = 0xFFFFFFFF;
 	while (mask != 0) {
 		if (!(intfmask & (0x1 << intfnum))) {
 			mask >>= 1;
@@ -268,17 +373,20 @@
 			continue;
 		}
 
-		cid_mask = msm_ispif_get_cid_mask(intfnum);
+		cid_mask = msm_ispif_get_cid_mask(ispif, intfnum, vfe_intf);
 		vc = 0;
 
 		while (cid_mask != 0) {
 			if ((cid_mask & 0xf) != 0x0) {
-				val = (intf_cmd_mask>>(vc*2)) & 0x3;
-				global_intf_cmd_mask |=
-					(0x3 << ((vc * 2) + (intfnum * 8)));
-				global_intf_cmd_mask &= ~((0x3 & ~val)
-					<< ((vc * 2) +
-					(intfnum * 8)));
+				if (intfnum != RDI2)
+					global_intf_cmd_mask &=
+						~((0x3 & ~intf_cmd_mask)
+						<< ((vc * 2) +
+						(intfnum * 8)));
+				else
+					global_intf_cmd_mask1 &=
+						~((0x3 & ~intf_cmd_mask)
+						<< ((vc * 2) + 8));
 			}
 			vc++;
 			cid_mask >>= 4;
@@ -287,50 +395,57 @@
 		intfnum++;
 	}
 	msm_camera_io_w(global_intf_cmd_mask,
-		ispif->base + ISPIF_INTF_CMD_ADDR);
+		ispif->base + ISPIF_INTF_CMD_ADDR + (0x200 * vfe_intf));
+	if (global_intf_cmd_mask1 != 0xFFFFFFFF)
+		msm_camera_io_w(global_intf_cmd_mask1,
+			ispif->base + ISPIF_INTF_CMD_1_ADDR +
+			(0x200 * vfe_intf));
 }
 
-static int msm_ispif_abort_intf_transfer(uint8_t intfmask)
+static int msm_ispif_abort_intf_transfer(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
 	int rc = 0;
-	uint8_t intf_cmd_mask = 0xAA;
-	uint8_t intfnum = 0, mask = intfmask;
+	uint8_t intf_cmd_mask = 0x02;
 	mutex_lock(&ispif->mutex);
-	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
-	while (mask != 0) {
-		if (intfmask & (0x1 << intfnum))
-			global_intf_cmd_mask |= (0xFF << (intfnum * 8));
-		mask >>= 1;
-		intfnum++;
-	}
+	CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+		intf_cmd_mask);
+	msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
 	mutex_unlock(&ispif->mutex);
 	return rc;
 }
 
-static int msm_ispif_start_intf_transfer(uint8_t intfmask)
+static int msm_ispif_start_intf_transfer(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
-	uint8_t intf_cmd_mask = 0x55;
+	uint8_t intf_cmd_mask = 0x01;
 	int rc = 0;
 	mutex_lock(&ispif->mutex);
-	rc = msm_ispif_intf_reset(intfmask);
-	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
+	rc = msm_ispif_intf_reset(ispif, intfmask, vfe_intf);
+	CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+		intf_cmd_mask);
+	msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
 	mutex_unlock(&ispif->mutex);
 	return rc;
 }
 
-static int msm_ispif_stop_intf_transfer(uint8_t intfmask)
+static int msm_ispif_stop_intf_transfer(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
 	int rc = 0;
 	uint8_t intf_cmd_mask = 0x00;
-	uint8_t intfnum = 0, mask = intfmask;
+	uint16_t intfnum = 0, mask = intfmask;
 	mutex_lock(&ispif->mutex);
-	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
+	CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+		intf_cmd_mask);
+	msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
 	while (mask != 0) {
 		if (intfmask & (0x1 << intfnum)) {
 			switch (intfnum) {
 			case PIX0:
 				while ((msm_camera_io_r(ispif->base +
-					ISPIF_PIX_STATUS_ADDR)
+					ISPIF_PIX_0_STATUS_ADDR +
+					(0x200 * vfe_intf))
 					& 0xf) != 0xf) {
 					CDBG("Wait for pix0 Idle\n");
 				}
@@ -338,24 +453,43 @@
 
 			case RDI0:
 				while ((msm_camera_io_r(ispif->base +
-					ISPIF_RDI_STATUS_ADDR)
+					ISPIF_RDI_0_STATUS_ADDR +
+					(0x200 * vfe_intf))
 					& 0xf) != 0xf) {
 					CDBG("Wait for rdi0 Idle\n");
 				}
 				break;
 
+			case PIX1:
+				while ((msm_camera_io_r(ispif->base +
+					ISPIF_PIX_1_STATUS_ADDR +
+					(0x200 * vfe_intf))
+					& 0xf) != 0xf) {
+					CDBG("Wait for pix1 Idle\n");
+				}
+				break;
+
 			case RDI1:
 				while ((msm_camera_io_r(ispif->base +
-					ISPIF_RDI_1_STATUS_ADDR)
+					ISPIF_RDI_1_STATUS_ADDR +
+					(0x200 * vfe_intf))
 					& 0xf) != 0xf) {
 					CDBG("Wait for rdi1 Idle\n");
 				}
 				break;
 
+			case RDI2:
+				while ((msm_camera_io_r(ispif->base +
+					ISPIF_RDI_2_STATUS_ADDR +
+					(0x200 * vfe_intf))
+					& 0xf) != 0xf) {
+					CDBG("Wait for rdi2 Idle\n");
+				}
+				break;
+
 			default:
 				break;
 			}
-			global_intf_cmd_mask |= (0xFF << (intfnum * 8));
 		}
 		mask >>= 1;
 		intfnum++;
@@ -364,24 +498,33 @@
 	return rc;
 }
 
-static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
+static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd,
+	int enable)
 {
 	struct ispif_device *ispif =
 			(struct ispif_device *)v4l2_get_subdevdata(sd);
-	int32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
-	enum msm_ispif_intftype intf = enable >> ISPIF_S_STREAM_SHIFT;
+	uint32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
+	uint16_t intf = enable >> ISPIF_S_STREAM_SHIFT;
+	uint8_t vfe_intf = enable >> ISPIF_VFE_INTF_SHIFT;
 	int rc = -EINVAL;
-
+	CDBG("%s enable %x, cmd %x, intf %x\n", __func__, enable, cmd, intf);
 	BUG_ON(!ispif);
+	if ((ispif->csid_version <= CSID_VERSION_V2 && vfe_intf > VFE0) ||
+		(ispif->csid_version == CSID_VERSION_V3 &&
+		vfe_intf >= VFE_MAX)) {
+		pr_err("%s invalid csid version %x && vfe intf %d\n", __func__,
+			ispif->csid_version, vfe_intf);
+		return rc;
+	}
 	switch (cmd) {
 	case ISPIF_ON_FRAME_BOUNDARY:
-		rc = msm_ispif_start_intf_transfer(intf);
+		rc = msm_ispif_start_intf_transfer(ispif, intf, vfe_intf);
 		break;
 	case ISPIF_OFF_FRAME_BOUNDARY:
-		rc = msm_ispif_stop_intf_transfer(intf);
+		rc = msm_ispif_stop_intf_transfer(ispif, intf, vfe_intf);
 		break;
 	case ISPIF_OFF_IMMEDIATELY:
-		rc = msm_ispif_abort_intf_transfer(intf);
+		rc = msm_ispif_abort_intf_transfer(ispif, intf, vfe_intf);
 		break;
 	default:
 		break;
@@ -449,8 +592,10 @@
 	return;
 }
 
-static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out)
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+	void *data)
 {
+	struct ispif_device *ispif = (struct ispif_device *)data;
 	out->ispifIrqStatus0 = msm_camera_io_r(ispif->base +
 		ISPIF_IRQ_STATUS_ADDR);
 	out->ispifIrqStatus1 = msm_camera_io_r(ispif->base +
@@ -482,7 +627,7 @@
 static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
 {
 	struct ispif_irq_status irq;
-	msm_ispif_read_irq_status(&irq);
+	msm_ispif_read_irq_status(&irq, data);
 	return IRQ_HANDLED;
 }
 
@@ -494,19 +639,20 @@
 	{"csi_rdi2_clk", 0},
 };
 
-static int msm_ispif_init(const uint32_t *csid_version)
+static int msm_ispif_init(struct ispif_device *ispif,
+	const uint32_t *csid_version)
 {
 	int rc = 0;
+	CDBG("%s called %d\n", __func__, __LINE__);
 	spin_lock_init(&ispif_tasklet_lock);
 	INIT_LIST_HEAD(&ispif_tasklet_q);
 	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
-		IRQF_TRIGGER_RISING, "ispif", 0);
+		IRQF_TRIGGER_RISING, "ispif", ispif);
 
-	global_intf_cmd_mask = 0xFFFFFFFF;
 	init_completion(&ispif->reset_complete);
 
 	ispif->csid_version = *csid_version;
-	if (ispif->csid_version == CSID_VERSION_V2) {
+	if (ispif->csid_version >= CSID_VERSION_V2) {
 		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
 			ispif->ispif_clk, ARRAY_SIZE(ispif_clk_info), 1);
 		if (rc < 0)
@@ -517,15 +663,12 @@
 		if (rc < 0)
 			return rc;
 	}
-	rc = msm_ispif_reset();
+	rc = msm_ispif_reset(ispif);
 	return rc;
 }
 
-static void msm_ispif_release(struct v4l2_subdev *sd)
+static void msm_ispif_release(struct ispif_device *ispif)
 {
-	struct ispif_device *ispif =
-			(struct ispif_device *)v4l2_get_subdevdata(sd);
-
 	CDBG("%s, free_irq\n", __func__);
 	free_irq(ispif->irq->start, 0);
 	tasklet_kill(&ispif_tasklet);
@@ -538,43 +681,12 @@
 			ispif->ispif_clk, 2, 0);
 }
 
-void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num)
-{
-	uint32_t data = 0;
-	int i = 0, j = 0;
-	switch (intftype) {
-	case PIX0:
-		data = msm_camera_io_r(ispif->base +
-			ISPIF_PIX_INTF_CID_MASK_ADDR);
-		break;
-
-	case RDI0:
-		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_INTF_CID_MASK_ADDR);
-		break;
-
-	case RDI1:
-		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
-		break;
-
-	default:
-		break;
-	}
-	for (i = 0; i <= MAX_CID; i++) {
-		if ((data & 0x1) == 0x1) {
-			cids[j++] = i;
-			(*num)++;
-		}
-		data >>= 1;
-	}
-}
-
 static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
 {
 	long rc = 0;
 	struct ispif_cfg_data cdata;
-
+	struct ispif_device *ispif =
+		(struct ispif_device *)v4l2_get_subdevdata(sd);
 	if (copy_from_user(&cdata, (void *)arg, sizeof(struct ispif_cfg_data)))
 		return -EFAULT;
 	CDBG("%s cfgtype = %d\n", __func__, cdata.cfgtype);
@@ -582,7 +694,7 @@
 	case ISPIF_INIT:
 		CDBG("%s csid_version = %x\n", __func__,
 			cdata.cfg.csid_version);
-		rc = msm_ispif_init(&cdata.cfg.csid_version);
+		rc = msm_ispif_init(ispif, &cdata.cfg.csid_version);
 		break;
 	case ISPIF_SET_CFG:
 		CDBG("%s len = %d, intftype = %d,.cid_mask = %d, csid = %d\n",
@@ -591,7 +703,7 @@
 			cdata.cfg.ispif_params.params[0].intftype,
 			cdata.cfg.ispif_params.params[0].cid_mask,
 			cdata.cfg.ispif_params.params[0].csid);
-		rc = msm_ispif_config(&cdata.cfg.ispif_params);
+		rc = msm_ispif_config(ispif, &cdata.cfg.ispif_params);
 		break;
 
 	case ISPIF_SET_ON_FRAME_BOUNDARY:
@@ -600,7 +712,7 @@
 		rc = msm_ispif_subdev_video_s_stream(sd, cdata.cfg.cmd);
 		break;
 	case ISPIF_RELEASE:
-		msm_ispif_release(sd);
+		msm_ispif_release(ispif);
 		break;
 	default:
 		break;
@@ -640,6 +752,7 @@
 {
 	int rc = 0;
 	struct msm_cam_subdev_info sd_info;
+	struct ispif_device *ispif;
 
 	CDBG("%s\n", __func__);
 	ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
@@ -659,6 +772,10 @@
 								"ispif");
 	mutex_init(&ispif->mutex);
 
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
 	ispif->mem = platform_get_resource_byname(pdev,
 					IORESOURCE_MEM, "ispif");
 	if (!ispif->mem) {
@@ -709,11 +826,18 @@
 	return rc;
 }
 
+static const struct of_device_id msm_ispif_dt_match[] = {
+	{.compatible = "qcom,ispif"},
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
 static struct platform_driver ispif_driver = {
 	.probe = ispif_probe,
 	.driver = {
 		.name = MSM_ISPIF_DRV_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = msm_ispif_dt_match,
 	},
 };
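
Note on the ISPIF changes above: every register access gains a (0x200 * vfe_intf) term because each VFE now owns its own bank of PIX/RDI registers, and the s_stream argument carries the command bits, interface mask and VFE id in a single word. A small self-contained sketch of that addressing and packing scheme (the shift values and the 0x10 offset are assumptions for illustration, not the driver's real constants):

#include <stdint.h>
#include <stdio.h>

#define ISPIF_VFE_BANK_STRIDE	0x200

/* Shift values below are assumptions for this sketch; the real ones are the
 * ISPIF_S_STREAM_SHIFT / ISPIF_VFE_INTF_SHIFT constants in the ispif headers.
 */
#define SKETCH_S_STREAM_SHIFT	2
#define SKETCH_VFE_INTF_SHIFT	8

/* Each VFE owns a 0x200-byte bank of ISPIF registers, so a per-interface
 * register address is base + reg + 0x200 * vfe_intf. */
static uint32_t ispif_reg_off(uint32_t reg, uint8_t vfe_intf)
{
	return reg + ISPIF_VFE_BANK_STRIDE * vfe_intf;
}

/* Pack the stream command, interface mask and VFE id the way the new
 * msm_ispif_subdev_video_s_stream() unpacks them. */
static uint32_t pack_s_stream_arg(uint8_t cmd, uint16_t intf_mask,
	uint8_t vfe_intf)
{
	return (uint32_t)cmd |
		((uint32_t)intf_mask << SKETCH_S_STREAM_SHIFT) |
		((uint32_t)vfe_intf << SKETCH_VFE_INTF_SHIFT);
}

int main(void)
{
	/* 0x10 stands in for an arbitrary per-interface register offset. */
	printf("reg for VFE1: 0x%x\n", (unsigned)ispif_reg_off(0x10, 1));
	printf("packed arg:   0x%x\n",
		(unsigned)pack_s_stream_arg(1, 1 << 2, 0));
	return 0;
}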
 
diff --git a/drivers/media/video/msm/csi/msm_ispif.h b/drivers/media/video/msm/csi/msm_ispif.h
index 7b301ba..df93a44 100644
--- a/drivers/media/video/msm/csi/msm_ispif.h
+++ b/drivers/media/video/msm/csi/msm_ispif.h
@@ -45,6 +45,4 @@
 #define VIDIOC_MSM_ISPIF_CFG \
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 18, struct ispif_cfg_data*)
 
-void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num);
-
 #endif
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index 188a176..99c2762 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,39 +25,12 @@
 #define S_I2C_DBG(fmt, args...) CDBG(fmt, ##args)
 #endif
 
-enum msm_camera_i2c_reg_addr_type {
-	MSM_CAMERA_I2C_BYTE_ADDR = 1,
-	MSM_CAMERA_I2C_WORD_ADDR,
-};
-
 struct msm_camera_i2c_client {
 	struct i2c_client *client;
+	struct msm_camera_cci_client *cci_client;
 	enum msm_camera_i2c_reg_addr_type addr_type;
 };
 
-enum msm_camera_i2c_data_type {
-	MSM_CAMERA_I2C_BYTE_DATA = 1,
-	MSM_CAMERA_I2C_WORD_DATA,
-	MSM_CAMERA_I2C_SET_BYTE_MASK,
-	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
-	MSM_CAMERA_I2C_SET_WORD_MASK,
-	MSM_CAMERA_I2C_UNSET_WORD_MASK,
-	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
-};
-
-enum msm_camera_i2c_cmd_type {
-	MSM_CAMERA_I2C_CMD_WRITE,
-	MSM_CAMERA_I2C_CMD_POLL,
-};
-
-struct msm_camera_i2c_reg_conf {
-	uint16_t reg_addr;
-	uint16_t reg_data;
-	enum msm_camera_i2c_data_type dt;
-	enum msm_camera_i2c_cmd_type cmd_type;
-	int16_t mask;
-};
-
 struct msm_camera_i2c_reg_tbl {
 	uint16_t reg_addr;
 	uint16_t reg_data;
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index d77defd..687ed74 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -60,6 +60,7 @@
 #define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
 #define MSM_IRQ_ROUTER_DRV_NAME "msm_cam_irq_router"
 #define MSM_CPP_DRV_NAME "msm_cpp"
+#define MSM_CCI_DRV_NAME "msm_cci"
 
 #define MAX_NUM_SENSOR_DEV 3
 #define MAX_NUM_CSIPHY_DEV 3
@@ -71,6 +72,7 @@
 #define MAX_NUM_VPE_DEV 1
 #define MAX_NUM_JPEG_DEV 3
 #define MAX_NUM_CPP_DEV 1
+#define MAX_NUM_CCI_DEV 1
 
 /* msm queue management APIs*/
 
@@ -536,6 +538,7 @@
 	struct v4l2_subdev *gesture_device;
 	struct v4l2_subdev *cpp_device[MAX_NUM_CPP_DEV];
 	struct v4l2_subdev *irqr_device;
+	struct v4l2_subdev *cci_device;
 
 	spinlock_t  intr_table_lock;
 	struct irqmgr_intr_lkup_table irq_lkup_table;
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 9ddde15..935ce75 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -188,9 +188,9 @@
 	}
 
 	switch (vdata->type) {
-	case VFE_MSG_V32_START:
-	case VFE_MSG_V32_START_RECORDING:
-	case VFE_MSG_V2X_PREVIEW:
+	case VFE_MSG_START:
+	case VFE_MSG_START_RECORDING:
+	case VFE_MSG_PREVIEW:
 		D("%s Got V32_START_*: Getting ping addr id = %d",
 						__func__, vfe_id);
 		msm_mctl_reserve_free_buf(pmctl, NULL,
@@ -208,8 +208,7 @@
 		vfe_params.data = (void *)&free_buf;
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		break;
-	case VFE_MSG_V32_CAPTURE:
-	case VFE_MSG_V2X_CAPTURE:
+	case VFE_MSG_CAPTURE:
 		pr_debug("%s Got V32_CAPTURE: getting buffer for id = %d",
 						__func__, vfe_id);
 		msm_mctl_reserve_free_buf(pmctl, NULL,
@@ -231,8 +230,8 @@
 		vfe_params.data = (void *)&free_buf;
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		break;
-	case VFE_MSG_V32_JPEG_CAPTURE:
-		D("%s:VFE_MSG_V32_JPEG_CAPTURE vdata->type %d\n", __func__,
+	case VFE_MSG_JPEG_CAPTURE:
+		D("%s:VFE_MSG_JPEG_CAPTURE vdata->type %d\n", __func__,
 			vdata->type);
 		free_buf.num_planes = 2;
 		free_buf.ch_paddr[0] = pmctl->ping_imem_y;
@@ -241,7 +240,7 @@
 		cfgcmd.value = &vfe_id;
 		vfe_params.vfe_cfg = &cfgcmd;
 		vfe_params.data = (void *)&free_buf;
-		D("%s:VFE_MSG_V32_JPEG_CAPTURE y_ping=%x cbcr_ping=%x\n",
+		D("%s:VFE_MSG_JPEG_CAPTURE y_ping=%x cbcr_ping=%x\n",
 			__func__, free_buf.ch_paddr[0], free_buf.ch_paddr[1]);
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		/* Write the same buffer into PONG */
@@ -251,7 +250,7 @@
 		cfgcmd.value = &vfe_id;
 		vfe_params.vfe_cfg = &cfgcmd;
 		vfe_params.data = (void *)&free_buf;
-		D("%s:VFE_MSG_V32_JPEG_CAPTURE y_pong=%x cbcr_pong=%x\n",
+		D("%s:VFE_MSG_JPEG_CAPTURE y_pong=%x cbcr_pong=%x\n",
 			__func__, free_buf.ch_paddr[0], free_buf.ch_paddr[1]);
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		break;
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index deab77b..36b9576 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -18,7 +18,6 @@
 #include "msm_ispif.h"
 #include "msm_sensor.h"
 #include "msm_actuator.h"
-#include "msm_vfe32.h"
 #include "msm_csi_register.h"
 
 #ifdef CONFIG_MSM_CAMERA_DEBUG
@@ -2034,6 +2033,21 @@
 		}
 		g_server_dev.cpp_device[index] = sd;
 		break;
+	case CCI_DEV:
+		g_server_dev.cci_device = sd;
+		if (g_server_dev.irqr_device) {
+			if (index >= MAX_NUM_CCI_DEV) {
+				pr_err("%s Invalid CCI idx %d", __func__,
+					index);
+				err = -EINVAL;
+				break;
+			}
+			cam_hw_idx = MSM_CAM_HW_CCI + index;
+			g_server_dev.subdev_table[cam_hw_idx] = sd;
+			err = msm_cam_server_fill_sdev_irqnum(MSM_CAM_HW_CCI,
+				sd_info->irq_num);
+		}
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/media/video/msm/vfe/Makefile b/drivers/media/video/msm/vfe/Makefile
new file mode 100644
index 0000000..8068e4f
--- /dev/null
+++ b/drivers/media/video/msm/vfe/Makefile
@@ -0,0 +1,19 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm/server
+ifeq ($(GCC_VERSION),0404)
+CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
+endif
+ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a_v4l2.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31_v4l2.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31_v4l2.o
+else
+  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o
+endif
+obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
+obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o
+obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_vfe_stats_buf.o
diff --git a/drivers/media/video/msm/msm_vfe31.c b/drivers/media/video/msm/vfe/msm_vfe31.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe31.c
rename to drivers/media/video/msm/vfe/msm_vfe31.c
diff --git a/drivers/media/video/msm/msm_vfe31.h b/drivers/media/video/msm/vfe/msm_vfe31.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe31.h
rename to drivers/media/video/msm/vfe/msm_vfe31.h
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
similarity index 99%
rename from drivers/media/video/msm/msm_vfe31_v4l2.c
rename to drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
index a22a09f..b6e01a59 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
@@ -63,12 +63,6 @@
 	uint32_t		vfeInterruptStatus1;
 };
 
-/*TODO: Why is V32 reference in arch/arm/mach-msm/include/mach/camera.h?*/
-#define VFE_MSG_V31_START VFE_MSG_V32_START
-#define VFE_MSG_V31_CAPTURE VFE_MSG_V32_CAPTURE
-#define VFE_MSG_V31_JPEG_CAPTURE VFE_MSG_V32_JPEG_CAPTURE
-#define VFE_MSG_V31_START_RECORDING VFE_MSG_V32_START_RECORDING
-
 static struct vfe31_cmd_type vfe31_cmd[] = {
 /* 0*/	{VFE_CMD_DUMMY_0},
 		{VFE_CMD_SET_CLK},
@@ -1420,11 +1414,11 @@
 				VFE_OUTPUTS_PREVIEW))
 			/* Configure primary channel */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START, VFE_MSG_OUTPUT_PRIMARY);
+				VFE_MSG_START, VFE_MSG_OUTPUT_PRIMARY);
 		else
 			/* Configure secondary channel */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START, VFE_MSG_OUTPUT_SECONDARY);
+				VFE_MSG_START, VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
 				" for preview", __func__);
@@ -1443,7 +1437,7 @@
 			rc = -EFAULT;
 			goto proc_general_done;
 		}
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 			VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1469,12 +1463,12 @@
 			}
 			/* Configure primary channel for JPEG */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_JPEG_CAPTURE,
+				VFE_MSG_JPEG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY);
 		} else {
 			/* Configure primary channel */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_CAPTURE,
+				VFE_MSG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY);
 		}
 		if (rc < 0) {
@@ -1484,7 +1478,7 @@
 			goto proc_general_done;
 		}
 		/* Configure secondary channel */
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 			VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1508,13 +1502,13 @@
 			VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
 			vfe31_ctrl->outpath.out1.inst_handle = temp1;
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_SECONDARY);
 		} else if (vfe31_ctrl->operation_mode ==
 			VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
 			vfe31_ctrl->outpath.out0.inst_handle = temp1;
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_PRIMARY);
 		}
 		if (rc < 0) {
@@ -1955,7 +1949,7 @@
 		}
 		vfe31_ctrl->outpath.out0.inst_handle = temp1;
 		/* Configure primary channel */
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 			VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -2261,11 +2255,11 @@
 		break;
 
 	case VFE_CMD_ZSL:
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_START,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0)
 			goto proc_general_done;
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_START,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0)
 			goto proc_general_done;
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe31_v4l2.h
rename to drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
similarity index 98%
rename from drivers/media/video/msm/msm_vfe32.c
rename to drivers/media/video/msm/vfe/msm_vfe32.c
index c4bdad2..284db08 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -379,12 +379,27 @@
 	"GET_RGB_G_TABLE",
 	"GET_LA_TABLE",
 	"DEMOSAICV3_UPDATE",
+	"DUMMY_11",
+	"DUMMY_12", /*130*/
+	"DUMMY_13",
+	"DUMMY_14",
+	"DUMMY_15",
+	"DUMMY_16",
+	"DUMMY_17", /*135*/
+	"DUMMY_18",
+	"DUMMY_19",
+	"DUMMY_20",
+	"STATS_REQBUF",
+	"STATS_ENQUEUEBUF", /*140*/
+	"STATS_FLUSH_BUFQ",
+	"STATS_UNREGBUF",
 	"STATS_BG_START",
 	"STATS_BG_STOP",
-	"STATS_BF_START",
+	"STATS_BF_START", /*145*/
 	"STATS_BF_STOP",
 	"STATS_BHIST_START",
 	"STATS_BHIST_STOP",
+	"RESET_2",
 };
 
 uint8_t vfe32_use_bayer_stats(struct vfe32_ctrl_type *vfe32_ctrl)
@@ -548,7 +563,7 @@
 	vfe32_ctrl->share_ctrl->stop_ack_pending  = FALSE;
 	spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
 
-	vfe32_ctrl->reset_ack_pending  = FALSE;
+	init_completion(&vfe32_ctrl->reset_complete);
 
 	spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
 	vfe32_ctrl->update_ack_pending = FALSE;
@@ -649,7 +664,7 @@
 	vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
 }
 
-static void vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
+static int vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	vfe32_reset_internal_variables(vfe32_ctrl);
 	/* disable all interrupts.  vfeImaskLocal is also reset to 0
@@ -684,6 +699,12 @@
 	to the command register using the barrier */
 	msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+
+	if (vfe32_ctrl->is_reset_blocking)
+		return wait_for_completion_interruptible(
+				&vfe32_ctrl->reset_complete);
+	else
+		return 0;
 }
 
 static int vfe32_operation_config(uint32_t *cmd,
@@ -1740,6 +1761,7 @@
 	case VFE_CMD_RESET:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
+		vfe32_ctrl->is_reset_blocking = false;
 		vfe32_reset(vfe32_ctrl);
 		break;
 	case VFE_CMD_START:
@@ -1754,25 +1776,25 @@
 				VFE_OUTPUTS_PREVIEW))
 				/* Configure primary channel */
 				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_V32_START,
+					VFE_MSG_START,
 					VFE_MSG_OUTPUT_PRIMARY,
 					vfe32_ctrl);
 			else
 			/* Configure secondary channel */
 				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_V32_START,
+					VFE_MSG_START,
 					VFE_MSG_OUTPUT_SECONDARY,
 					vfe32_ctrl);
 		}
 		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_RDI0)
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY1,
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
 				vfe32_ctrl);
 		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_RDI1)
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY2,
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
 				vfe32_ctrl);
 
 		if (rc < 0) {
@@ -1794,7 +1816,7 @@
 			goto proc_general_done;
 		}
 		rc = vfe32_configure_pingpong_buffers(
-			VFE_MSG_V32_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
 			vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1822,13 +1844,13 @@
 			}
 			/* Configure primary channel for JPEG */
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_JPEG_CAPTURE,
+				VFE_MSG_JPEG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
 		} else {
 			/* Configure primary channel */
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_CAPTURE,
+				VFE_MSG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
 		}
@@ -1840,7 +1862,7 @@
 		}
 		/* Configure secondary channel */
 		rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
 				vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1865,7 +1887,7 @@
 			vfe32_ctrl->share_ctrl->outpath.out1.inst_handle =
 				temp1;
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_SECONDARY,
 				vfe32_ctrl);
 		} else if (vfe32_ctrl->share_ctrl->operation_mode &
@@ -1873,7 +1895,7 @@
 			vfe32_ctrl->share_ctrl->outpath.out0.inst_handle =
 				temp1;
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
 		}
@@ -2433,7 +2455,7 @@
 		}
 		vfe32_ctrl->share_ctrl->outpath.out0.inst_handle = temp1;
 		/* Configure primary channel */
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_CAPTURE,
+		rc = vfe32_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 					VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -2964,11 +2986,11 @@
 		break;
 
 	case VFE_CMD_ZSL:
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_START,
+		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
 		if (rc < 0)
 			goto proc_general_done;
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_START,
+		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_SECONDARY, vfe32_ctrl);
 		if (rc < 0)
 			goto proc_general_done;
@@ -3236,6 +3258,12 @@
 		CDBG("%s Stopping liveshot ", __func__);
 		vfe32_stop_liveshot(pmctl, vfe32_ctrl);
 		break;
+	case VFE_CMD_RESET_2:
+		CDBG("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		vfe32_ctrl->is_reset_blocking = true;
+		vfe32_reset(vfe32_ctrl);
+		break;
 	default:
 		if (cmd->length != vfe32_cmd[cmd->id].length)
 			return -EINVAL;
@@ -3629,8 +3657,12 @@
 		/* reload all write masters. (frame & line)*/
 		msm_camera_io_w(0x7FFF,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
-		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
-			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_RESET_ACK);
+		if (vfe32_ctrl->is_reset_blocking)
+			complete(&vfe32_ctrl->reset_complete);
+		else
+			vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+				vfe32_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_RESET_ACK);
 	}
 }
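
The vfe32 change above swaps the reset_ack_pending flag for a completion so the new VFE_CMD_RESET_2 command can block until the reset-ack interrupt fires, while plain VFE_CMD_RESET keeps sending the asynchronous MSG_ID_RESET_ACK. A skeleton of that pattern, with names shortened and the register write omitted (not a drop-in for the driver):

#include <linux/completion.h>
#include <linux/types.h>

struct vfe_reset_ctx {
	bool is_reset_blocking;
	struct completion reset_complete;
};

/* Command path: arm the completion, trigger the reset, optionally block. */
static int vfe_issue_reset(struct vfe_reset_ctx *ctx, bool blocking)
{
	ctx->is_reset_blocking = blocking;
	init_completion(&ctx->reset_complete);
	/* ... write VFE_GLOBAL_RESET to the hardware here ... */
	if (blocking)
		return wait_for_completion_interruptible(&ctx->reset_complete);
	return 0;
}

/* Reset-IRQ path: wake the blocked caller, or fall back to the async ack. */
static void vfe_reset_irq_done(struct vfe_reset_ctx *ctx)
{
	if (ctx->is_reset_blocking)
		complete(&ctx->reset_complete);
	/* else: send MSG_ID_RESET_ACK to userspace, as before */
}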
 
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/vfe/msm_vfe32.h
similarity index 99%
rename from drivers/media/video/msm/msm_vfe32.h
rename to drivers/media/video/msm/vfe/msm_vfe32.h
index 0b685e1..1795aa2 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/vfe/msm_vfe32.h
@@ -986,8 +986,9 @@
 	void *extdata;
 
 	int8_t start_ack_pending;
-	int8_t reset_ack_pending;
 	int8_t update_ack_pending;
+	bool is_reset_blocking;
+	struct completion reset_complete;
 	enum vfe_output_state recording_state;
 	int8_t update_linear;
 	int8_t update_rolloff;
diff --git a/drivers/media/video/msm/msm_vfe7x.c b/drivers/media/video/msm/vfe/msm_vfe7x.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x.c
rename to drivers/media/video/msm/vfe/msm_vfe7x.c
diff --git a/drivers/media/video/msm/msm_vfe7x.h b/drivers/media/video/msm/vfe/msm_vfe7x.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x.h
rename to drivers/media/video/msm/vfe/msm_vfe7x.h
diff --git a/drivers/media/video/msm/msm_vfe7x27a.c b/drivers/media/video/msm/vfe/msm_vfe7x27a.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a.c
rename to drivers/media/video/msm/vfe/msm_vfe7x27a.c
diff --git a/drivers/media/video/msm/msm_vfe7x27a.h b/drivers/media/video/msm/vfe/msm_vfe7x27a.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a.h
rename to drivers/media/video/msm/vfe/msm_vfe7x27a.h
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
similarity index 99%
rename from drivers/media/video/msm/msm_vfe7x27a_v4l2.c
rename to drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
index 32b33e2..b6daa5f 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
@@ -1786,11 +1786,11 @@
 		}
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_SECONDARY);
 		else
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1840,11 +1840,11 @@
 		if (!vfe2x_ctrl->reconfig_vfe) {
 			if (op_mode & SNAPSHOT_MASK_MODE)
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_PRIMARY);
 			else
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_PRIMARY);
 			if (rc < 0) {
 				pr_err("%s error configuring pingpong buffers"
@@ -1905,10 +1905,10 @@
 		}
 		if (!vfe2x_ctrl->reconfig_vfe) {
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_PRIMARY);
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_SECONDARY);
 			if (rc < 0) {
 				pr_err("%s error configuring pingpong buffers"
@@ -1969,11 +1969,11 @@
 		}
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_SECONDARY);
 		else
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -2011,11 +2011,11 @@
 
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_PRIMARY);
 		else
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -2066,7 +2066,7 @@
 		}
 		header = cmds_map[vfecmd.id].vfe_id;
 		queue = cmds_map[vfecmd.id].queue;
-		rc = vfe2x_configure_pingpong_buffers(VFE_MSG_V2X_CAPTURE,
+		rc = vfe2x_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a_v4l2.h
rename to drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
diff --git a/drivers/media/video/msm/msm_vfe8x.c b/drivers/media/video/msm/vfe/msm_vfe8x.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x.c
rename to drivers/media/video/msm/vfe/msm_vfe8x.c
diff --git a/drivers/media/video/msm/msm_vfe8x.h b/drivers/media/video/msm/vfe/msm_vfe8x.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x.h
rename to drivers/media/video/msm/vfe/msm_vfe8x.h
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.c b/drivers/media/video/msm/vfe/msm_vfe8x_proc.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x_proc.c
rename to drivers/media/video/msm/vfe/msm_vfe8x_proc.c
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.h b/drivers/media/video/msm/vfe/msm_vfe8x_proc.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x_proc.h
rename to drivers/media/video/msm/vfe/msm_vfe8x_proc.h
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.c b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe_stats_buf.c
rename to drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.h b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe_stats_buf.h
rename to drivers/media/video/msm/vfe/msm_vfe_stats_buf.h
diff --git a/drivers/media/video/msm/wfd/enc-subdev.c b/drivers/media/video/msm/wfd/enc-subdev.c
index a5f2634..8b83a98 100644
--- a/drivers/media/video/msm/wfd/enc-subdev.c
+++ b/drivers/media/video/msm/wfd/enc-subdev.c
@@ -24,6 +24,8 @@
 #define VID_ENC_MAX_ENCODER_CLIENTS 1
 #define MAX_NUM_CTRLS 20
 
+static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg);
+
 struct venc_inst {
 	struct video_client_ctx venc_client;
 	void *cbdata;
@@ -34,6 +36,8 @@
 	u32 width;
 	u32 height;
 	int secure;
+	struct mem_region unqueued_op_bufs;
+	bool streaming;
 };
 
 struct venc {
@@ -287,8 +291,10 @@
 	init_waitqueue_head(&client_ctx->msg_wait);
 	inst->op_buffer_done = vmops->op_buffer_done;
 	inst->ip_buffer_done = vmops->ip_buffer_done;
+	INIT_LIST_HEAD(&inst->unqueued_op_bufs.list);
 	inst->cbdata = vmops->cbdata;
 	inst->secure = vmops->secure;
+	inst->streaming = false;
 	if (vmops->secure) {
 		WFD_MSG_ERR("OPENING SECURE SESSION\n");
 		flags |= VCD_CP_SESSION;
@@ -423,11 +429,13 @@
 {
 	struct venc_inst *inst = sd->dev_priv;
 	struct video_client_ctx *client_ctx = &inst->venc_client;
+	struct mem_region *curr = NULL, *temp = NULL;
 	int rc;
 	if (!client_ctx) {
 		WFD_MSG_ERR("Client context is NULL");
 		return -EINVAL;
 	}
+
 	rc = vcd_encode_start(client_ctx->vcd_handle);
 	if (rc) {
 		WFD_MSG_ERR("vcd_encode_start failed, rc = %d\n", rc);
@@ -437,6 +445,15 @@
 	if (client_ctx->event_status)
 		WFD_MSG_ERR("callback for vcd_encode_start returned error: %u",
 				client_ctx->event_status);
+
+	inst->streaming = true;
+	/* Push any buffers that we have held back */
+	list_for_each_entry_safe(curr, temp,
+			&inst->unqueued_op_bufs.list, list) {
+		venc_fill_outbuf(sd, curr);
+		list_del(&curr->list);
+		kfree(curr);
+	}
 err:
 	return rc;
 }
@@ -445,6 +462,7 @@
 {
 	struct venc_inst *inst = sd->dev_priv;
 	struct video_client_ctx *client_ctx = &inst->venc_client;
+	struct mem_region *curr = NULL, *temp = NULL;
 	int rc;
 	if (!client_ctx) {
 		WFD_MSG_ERR("Client context is NULL");
@@ -452,6 +470,15 @@
 	}
 	rc = vcd_stop(client_ctx->vcd_handle);
 	wait_for_completion(&client_ctx->event);
+	inst->streaming = false;
+	/* Drop whatever frames we haven't queued */
+	list_for_each_entry_safe(curr, temp,
+			&inst->unqueued_op_bufs.list, list) {
+		inst->op_buffer_done(inst->cbdata, 0,
+				(struct vb2_buffer *)curr->cookie);
+		list_del(&curr->list);
+		kfree(curr);
+	}
 	return rc;
 }
 
@@ -1841,22 +1868,29 @@
 	struct file *file;
 	s32 buffer_index = -1;
 
-	user_vaddr = mregion->cookie;
-	rc = vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
-			true, &user_vaddr,
-			&kernel_vaddr, &phy_addr, &pmem_fd, &file,
-			&buffer_index);
-	if (!rc) {
-		WFD_MSG_ERR("Address lookup failed\n");
-		goto err;
-	}
-	vcd_frame.virtual = (u8 *) kernel_vaddr;
-	vcd_frame.frm_clnt_data = mregion->cookie;
-	vcd_frame.alloc_len = mregion->size;
+	if (inst->streaming) {
+		user_vaddr = mregion->cookie;
+		rc = vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+				true, &user_vaddr,
+				&kernel_vaddr, &phy_addr, &pmem_fd, &file,
+				&buffer_index);
+		if (!rc) {
+			WFD_MSG_ERR("Address lookup failed\n");
+			goto err;
+		}
+		vcd_frame.virtual = (u8 *) kernel_vaddr;
+		vcd_frame.frm_clnt_data = mregion->cookie;
+		vcd_frame.alloc_len = mregion->size;
 
-	rc = vcd_fill_output_buffer(client_ctx->vcd_handle,	&vcd_frame);
-	if (rc)
-		WFD_MSG_ERR("Failed to fill output buffer on encoder");
+		rc = vcd_fill_output_buffer(client_ctx->vcd_handle, &vcd_frame);
+		if (rc)
+			WFD_MSG_ERR("Failed to fill output buffer on encoder");
+	} else {
+		struct mem_region *temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+		if (!temp) {
+			WFD_MSG_ERR("Failed to allocate memory\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+		*temp = *mregion;
+		INIT_LIST_HEAD(&temp->list);
+		list_add_tail(&temp->list, &inst->unqueued_op_bufs.list);
+	}
 err:
 	return rc;
 }
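
The enc-subdev change above defers any output buffer handed in before vcd_encode_start() onto a per-instance list and replays it once streaming starts (or drops it on stop). A condensed sketch of that defer-and-replay pattern with the kernel list API, using assumed names and omitting the vcd calls of the real driver:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

struct pending_buf {
	struct list_head list;
	/* payload: copy of the mem_region in the real driver */
};

struct venc_state {
	bool streaming;
	struct list_head pending;	/* INIT_LIST_HEAD() at open time */
};

/* Queue immediately when streaming, otherwise keep a copy for later. */
static int queue_or_defer(struct venc_state *st, const struct pending_buf *src,
	int (*queue_now)(const struct pending_buf *))
{
	struct pending_buf *copy;

	if (st->streaming)
		return queue_now(src);

	copy = kmemdup(src, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	list_add_tail(&copy->list, &st->pending);
	return 0;
}

/* Called once the encoder has started: push everything that was held back. */
static void replay_deferred(struct venc_state *st,
	int (*queue_now)(const struct pending_buf *))
{
	struct pending_buf *cur, *tmp;

	st->streaming = true;
	list_for_each_entry_safe(cur, tmp, &st->pending, list) {
		queue_now(cur);
		list_del(&cur->list);
		kfree(cur);
	}
}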
diff --git a/drivers/media/video/msm/wfd/mdp-subdev.c b/drivers/media/video/msm/wfd/mdp-subdev.c
index a6d244f..886b0ba 100644
--- a/drivers/media/video/msm/wfd/mdp-subdev.c
+++ b/drivers/media/video/msm/wfd/mdp-subdev.c
@@ -45,6 +45,13 @@
 		goto exit;
 	}
 
+	/*Tell HDMI daemon to open fb2*/
+	rc = kobject_uevent(&fbi->dev->kobj, KOBJ_ADD);
+	if (rc) {
+		WFD_MSG_ERR("Failed to send ADD uevent\n");
+		goto exit;
+	}
+
 	msm_fb_writeback_init(fbi);
 	inst->mdp = fbi;
 	*cookie = inst;
@@ -71,6 +78,9 @@
 			rc = -ENODEV;
 			goto exit;
 		}
+		rc = kobject_uevent(&fbi->dev->kobj, KOBJ_ONLINE);
+		if (rc)
+			WFD_MSG_ERR("Failed to send ONLINE event\n");
 	}
 exit:
 	return rc;
@@ -79,12 +89,19 @@
 {
 	struct mdp_instance *inst = arg;
 	int rc = 0;
+	struct fb_info *fbi = NULL;
 	if (inst) {
 		rc = msm_fb_writeback_stop(inst->mdp);
 		if (rc) {
 			WFD_MSG_ERR("Failed to stop writeback mode\n");
 			return rc;
 		}
+		fbi = (struct fb_info *)inst->mdp;
+		rc = kobject_uevent(&fbi->dev->kobj, KOBJ_OFFLINE);
+		if (rc) {
+			WFD_MSG_ERR("Failed to send offline event\n");
+			return -EIO;
+		}
 	}
 	return 0;
 }
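
The mdp-subdev change above uses uevents to drive the userspace HDMI daemon: KOBJ_ADD when fb2 is opened for writeback, KOBJ_ONLINE when writeback starts and KOBJ_OFFLINE when it stops. A minimal sketch of that signalling (the helper name is assumed; the driver calls kobject_uevent() inline at each step):

#include <linux/kobject.h>
#include <linux/fb.h>

static int wfd_mdp_notify_daemon(struct fb_info *fbi,
	enum kobject_action action)
{
	/* KOBJ_ADD at open, KOBJ_ONLINE at start, KOBJ_OFFLINE at stop */
	return kobject_uevent(&fbi->dev->kobj, action);
}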
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 5430e11..54c486e 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -71,6 +71,8 @@
 #include "u_ether.c"
 #include "u_bam_data.c"
 #include "f_mbim.c"
+#include "f_qc_ecm.c"
+#include "u_qc_ether.c"
 #ifdef CONFIG_TARGET_CORE
 #include "f_tcm.c"
 #endif
@@ -552,6 +554,95 @@
 	.attributes	= rmnet_function_attributes,
 };
 
+struct ecm_function_config {
+	u8      ethaddr[ETH_ALEN];
+};
+
+static int ecm_function_init(struct android_usb_function *f,
+				struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct ecm_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ecm_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int ecm_qc_function_bind_config(struct android_usb_function *f,
+					struct usb_configuration *c)
+{
+	int ret;
+	struct ecm_function_config *ecm = f->config;
+
+	if (!ecm) {
+		pr_err("%s: ecm config is null\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
+		ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
+
+	ret = gether_qc_setup_name(c->cdev->gadget, ecm->ethaddr, "ecm");
+	if (ret) {
+		pr_err("%s: gether_setup failed\n", __func__);
+		return ret;
+	}
+
+	return ecm_qc_bind_config(c, ecm->ethaddr);
+}
+
+static void ecm_qc_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	gether_qc_cleanup();
+}
+
+static ssize_t ecm_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct ecm_function_config *ecm = f->config;
+	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
+		ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
+}
+
+static ssize_t ecm_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct ecm_function_config *ecm = f->config;
+
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		    (int *)&ecm->ethaddr[0], (int *)&ecm->ethaddr[1],
+		    (int *)&ecm->ethaddr[2], (int *)&ecm->ethaddr[3],
+		    (int *)&ecm->ethaddr[4], (int *)&ecm->ethaddr[5]) == 6)
+		return size;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
+					       ecm_ethaddr_store);
+
+static struct device_attribute *ecm_function_attributes[] = {
+	&dev_attr_ecm_ethaddr,
+	NULL
+};
+
+static struct android_usb_function ecm_qc_function = {
+	.name		= "ecm_qc",
+	.init		= ecm_function_init,
+	.cleanup	= ecm_function_cleanup,
+	.bind_config	= ecm_qc_function_bind_config,
+	.unbind_config	= ecm_qc_function_unbind_config,
+	.attributes	= ecm_function_attributes,
+};
 
 /* MBIM - used with BAM */
 #define MAX_MBIM_INSTANCES 1
@@ -1252,6 +1343,7 @@
 
 static struct android_usb_function *supported_functions[] = {
 	&mbim_function,
+	&ecm_qc_function,
 	&rmnet_smd_function,
 	&rmnet_sdio_function,
 	&rmnet_smd_sdio_function,
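
A note on the new ecm_ethaddr store handler above: it scans with "%02x" through (int *) casts, so every conversion writes an int-sized value into the u8 ethaddr array. A standalone userspace illustration of the same parsing with the hh length modifier, which keeps each write byte-sized (sketch only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

static int parse_mac(const char *buf, uint8_t mac[6])
{
	return sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		&mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) == 6;
}

int main(void)
{
	uint8_t mac[6];

	if (parse_mac("02:00:86:12:34:56", mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
			mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}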
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
new file mode 100644
index 0000000..98a29ae
--- /dev/null
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -0,0 +1,869 @@
+/*
+ * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link.  The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support.  It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.)  A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface.  This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ *
+ * This function is based on USB CDC Ethernet link function driver and
+ * contains MSM specific implementation.
+ */
+
+
+enum ecm_qc_notify_state {
+	ECM_QC_NOTIFY_NONE,		/* don't notify */
+	ECM_QC_NOTIFY_CONNECT,		/* issue CONNECT next */
+	ECM_QC_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
+};
+
+struct f_ecm_qc {
+	struct qc_gether			port;
+	u8				ctrl_id, data_id;
+
+	char				ethaddr[14];
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	u8				notify_state;
+	bool				is_open;
+};
+
+static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_ecm_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
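+/*
+ * High speed: 13 bulk packets of 512 bytes per microframe, 8 microframes
+ * per frame, 1000 frames per second, times 8 bits per byte.
+ * Full speed: 19 bulk packets of 64 bytes per frame, 1000 frames per
+ * second, times 8 bits per byte.
+ */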
+static inline unsigned ecm_qc_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 *  64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide.  More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define ECM_QC_STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+/* currently only one std ecm instance is supported */
+#define ECM_QC_NO_PORTS						1
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ecm_qc_control_intf = {
+	.bLength =		sizeof ecm_qc_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_qc_header_desc = {
+	.bLength =		sizeof ecm_qc_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_qc_union_desc = {
+	.bLength =		sizeof(ecm_qc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_qc_desc = {
+	.bLength =		sizeof ecm_qc_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_qc_data_nop_intf = {
+	.bLength =		sizeof ecm_qc_data_nop_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_qc_data_intf = {
+	.bLength =		sizeof ecm_qc_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_qc_fs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_fs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_qc_hs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_hs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_hs_out_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string ecm_qc_string_defs[] = {
+	[0].s = "CDC Ethernet Control Model (ECM)",
+	[1].s = NULL /* DYNAMIC */,
+	[2].s = "CDC Ethernet Data",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_qc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ecm_qc_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_qc_strings[] = {
+	&ecm_qc_string_table,
+	NULL,
+};
+
+static struct data_port ecm_qc_bam_port;
+
+static int ecm_qc_bam_setup(void)
+{
+	int ret;
+
+	ret = bam_data_setup(ECM_QC_NO_PORTS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
+{
+	int ret;
+
+	ecm_qc_bam_port.func = dev->port.func;
+	ecm_qc_bam_port.in = dev->port.in_ep;
+	ecm_qc_bam_port.out = dev->port.out_ep;
+
+	/* currently we use the first connection */
+	ret = bam_data_connect(&ecm_qc_bam_port, 0, 0);
+	if (ret) {
+		pr_err("bam_data_connect failed: err:%d\n",
+				ret);
+		return ret;
+	} else {
+		pr_info("ecm bam connected\n");
+	}
+
+	return 0;
+}
+
+static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
+{
+	pr_debug("dev:%p. %s Do nothing.\n",
+			 dev, __func__);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
+{
+	struct usb_request		*req = ecm->notify_req;
+	struct usb_cdc_notification	*event;
+	struct usb_composite_dev	*cdev = ecm->port.func.config->cdev;
+	__le32				*data;
+	int				status;
+
+	/* notification already in flight? */
+	if (!req)
+		return;
+
+	event = req->buf;
+	switch (ecm->notify_state) {
+	case ECM_QC_NOTIFY_NONE:
+		return;
+
+	case ECM_QC_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		if (ecm->is_open)
+			event->wValue = cpu_to_le16(1);
+		else
+			event->wValue = cpu_to_le16(0);
+		event->wLength = 0;
+		req->length = sizeof *event;
+
+		DBG(cdev, "notify connect %s\n",
+				ecm->is_open ? "true" : "false");
+		ecm->notify_state = ECM_QC_NOTIFY_SPEED;
+		break;
+
+	case ECM_QC_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+		req->length = ECM_QC_STATUS_BYTECOUNT;
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof *event;
+		data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget));
+		ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		break;
+	}
+	event->bmRequestType = 0xA1;
+	event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+	ecm->notify_req = NULL;
+	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+	if (status < 0) {
+		ecm->notify_req = req;
+		DBG(cdev, "notify --> %d\n", status);
+	}
+}
+
+static void ecm_qc_notify(struct f_ecm_qc *ecm)
+{
+	/* NOTE on most versions of Linux, host side cdc-ethernet
+	 * won't listen for notifications until its netdevice opens.
+	 * The first notification then sits in the FIFO for a long
+	 * time, and the second one is queued.
+	 */
+	ecm->notify_state = ECM_QC_NOTIFY_CONNECT;
+	ecm_qc_do_notify(ecm);
+}
+
+static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_ecm_qc			*ecm = req->context;
+	struct usb_cdc_notification	*event = req->buf;
+	struct usb_composite_dev	*cdev = ecm->port.func.config->cdev;
+
+	switch (req->status) {
+	case 0:
+		/* no fault */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		break;
+	default:
+		DBG(cdev, "event %02x --> %d\n",
+			event->bNotificationType, req->status);
+		break;
+	}
+	ecm->notify_req = req;
+	ecm_qc_do_notify(ecm);
+}
+
+static int ecm_qc_setup(struct usb_function *f,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (w_length != 0 || w_index != ecm->ctrl_id)
+			goto invalid;
+		DBG(cdev, "packet filter %02x\n", w_value);
+		/* REVISIT locking of cdc_filter.  This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		ecm->port.cdc_filter = w_value;
+		value = 0;
+		break;
+
+	/* and optionally:
+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("ecm req %02x.%02x response err %d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
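+/*
+ * The control interface has only altsetting 0; the data interface uses
+ * altsetting 0 for "inactive" and altsetting 1 for "active", at which
+ * point the ethernet link and the BAM data port are brought up and a
+ * connect/speed notification pair is queued.
+ */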
+static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* Control interface has only altsetting 0 */
+	if (intf == ecm->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+
+		if (ecm->notify->driver_data) {
+			VDBG(cdev, "reset ecm control %d\n", intf);
+			usb_ep_disable(ecm->notify);
+		}
+		if (!(ecm->notify->desc)) {
+			VDBG(cdev, "init ecm ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+				goto fail;
+		}
+		usb_ep_enable(ecm->notify);
+		ecm->notify->driver_data = ecm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ecm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (ecm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ecm\n");
+			gether_qc_disconnect(&ecm->port);
+			ecm_qc_bam_disconnect(ecm);
+		}
+
+		if (!ecm->port.in_ep->desc ||
+		    !ecm->port.out_ep->desc) {
+			DBG(cdev, "init ecm\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.out_ep)) {
+				ecm->port.in_ep->desc = NULL;
+				ecm->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* CDC Ethernet only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device	*net;
+
+			/* Enable zlps by default for ECM conformance;
+			 * override for musb_hdrc (avoids txdma ovhead).
+			 */
+			ecm->port.is_zlp_ok =
+				!gadget_is_musbhdrc(cdev->gadget);
+			ecm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ecm\n");
+			net = gether_qc_connect(&ecm->port);
+			if (IS_ERR(net))
+				return PTR_ERR(net);
+
+			if (ecm_qc_bam_connect(ecm))
+				goto fail;
+		}
+
+		/* NOTE this can be a minor disagreement with the ECM spec,
+		 * which says speed notifications will "always" follow
+		 * connection notifications.  But we allow one connect to
+		 * follow another (if the first is in flight), and instead
+		 * just guarantee that a speed notification is always sent.
+		 */
+		ecm_qc_notify(ecm);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+static int ecm_qc_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+
+	if (intf == ecm->ctrl_id)
+		return 0;
+	return ecm->port.in_ep->driver_data ? 1 : 0;
+}
+
+static void ecm_qc_disable(struct usb_function *f)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+
+	DBG(cdev, "ecm deactivated\n");
+
+	if (ecm->port.in_ep->driver_data) {
+		gether_qc_disconnect(&ecm->port);
+		ecm_qc_bam_disconnect(ecm);
+	}
+
+	if (ecm->notify->driver_data) {
+		usb_ep_disable(ecm->notify);
+		ecm->notify->driver_data = NULL;
+		ecm->notify->desc = NULL;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ *   - disconnected/unconfigured
+ *   - configured but inactive (data alt 0)
+ *   - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting).  Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+static void ecm_qc_open(struct qc_gether *geth)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(&geth->func);
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = true;
+	ecm_qc_notify(ecm);
+}
+
+static void ecm_qc_close(struct qc_gether *geth)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = false;
+	ecm_qc_notify(ecm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+ecm_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->ctrl_id = status;
+
+	ecm_qc_control_intf.bInterfaceNumber = status;
+	ecm_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->data_id = status;
+
+	ecm_qc_data_nop_intf.bInterfaceNumber = status;
+	ecm_qc_data_intf.bInterfaceNumber = status;
+	ecm_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+
+	ecm->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+
+	ecm->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is *OPTIONAL* but we
+	 * don't treat it that way.  It's simpler, and some newer CDC
+	 * profiles (wireless handsets) no longer treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	ecm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!ecm->notify_req)
+		goto fail;
+	ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!ecm->notify_req->buf)
+		goto fail;
+	ecm->notify_req->context = ecm;
+	ecm->notify_req->complete = ecm_qc_notify_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(ecm_qc_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		ecm_qc_hs_in_desc.bEndpointAddress =
+				ecm_qc_fs_in_desc.bEndpointAddress;
+		ecm_qc_hs_out_desc.bEndpointAddress =
+				ecm_qc_fs_out_desc.bEndpointAddress;
+		ecm_qc_hs_notify_desc.bEndpointAddress =
+				ecm_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	ecm->port.open = ecm_qc_open;
+	ecm->port.close = ecm_qc_close;
+
+	DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			ecm->port.in_ep->name, ecm->port.out_ep->name,
+			ecm->notify->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (ecm->notify_req) {
+		kfree(ecm->notify_req->buf);
+		usb_ep_free_request(ecm->notify, ecm->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (ecm->notify)
+		ecm->notify->driver_data = NULL;
+	if (ecm->port.out_ep->desc)
+		ecm->port.out_ep->driver_data = NULL;
+	if (ecm->port.in_ep->desc)
+		ecm->port.in_ep->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+
+	DBG(c->cdev, "ecm unbind\n");
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(ecm->notify_req->buf);
+	usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+	ecm_qc_string_defs[1].s = NULL;
+	kfree(ecm);
+}
+
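+/*
+ * Illustrative usage sketch (not part of this patch; everything except
+ * the two exported helpers is hypothetical): a gadget driver calls
+ * gether_qc_setup() once while binding and then adds this function to
+ * a configuration 'c', roughly
+ *
+ *	static u8 host_ethaddr[ETH_ALEN];
+ *
+ *	status = gether_qc_setup(c->cdev->gadget, host_ethaddr);
+ *	if (!status)
+ *		status = ecm_qc_bind_config(c, host_ethaddr);
+ *
+ * which satisfies the caller contract documented below.
+ */
+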
+/**
+ * ecm_qc_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_qc_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	struct f_ecm_qc		*ecm;
+	int		status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+
+	status = ecm_qc_bam_setup();
+	if (status) {
+		pr_err("bam setup failed");
+		return status;
+	}
+
+	/* maybe allocate device-global string IDs */
+	if (ecm_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[0].id = status;
+		ecm_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[2].id = status;
+		ecm_qc_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[1].id = status;
+		ecm_qc_desc.iMACAddress = status;
+	}
+
+	/* allocate and initialize one new instance */
+	ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
+	if (!ecm)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format */
+	snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	ecm_qc_string_defs[1].s = ecm->ethaddr;
+
+	ecm->port.cdc_filter = DEFAULT_FILTER;
+
+	ecm->port.func.name = "cdc_ethernet";
+	ecm->port.func.strings = ecm_qc_strings;
+	/* descriptors are per-instance copies */
+	ecm->port.func.bind = ecm_qc_bind;
+	ecm->port.func.unbind = ecm_qc_unbind;
+	ecm->port.func.set_alt = ecm_qc_set_alt;
+	ecm->port.func.get_alt = ecm_qc_get_alt;
+	ecm->port.func.setup = ecm_qc_setup;
+	ecm->port.func.disable = ecm_qc_disable;
+
+	status = usb_add_function(c, &ecm->port.func);
+	if (status) {
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+	}
+	return status;
+}
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index 9f301fe..46ef7b4 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -1981,8 +1981,8 @@
 	struct fb_var_screeninfo *var = &mfd->fbi->var;
 	bool changed = TRUE;
 
-	if (var->reserved[2]) {
-		format = var->reserved[2]-1;
+	if (var->reserved[3]) {
+		format = var->reserved[3]-1;
 		DEV_DBG("reserved format is %d\n", format);
 	} else if (hdmi_prim_resolution) {
 		format = hdmi_prim_resolution - 1;
diff --git a/drivers/video/msm/lvds.c b/drivers/video/msm/lvds.c
index f5d8201..2987e2f 100644
--- a/drivers/video/msm/lvds.c
+++ b/drivers/video/msm/lvds.c
@@ -92,11 +92,11 @@
 		MDP_OUTP(MDP_BASE + 0xc3064, 0x05);
 		MDP_OUTP(MDP_BASE + 0xc3050, 0x20);
 	} else {
-		MDP_OUTP(MDP_BASE + 0xc3004, 0x62);
+		MDP_OUTP(MDP_BASE + 0xc3004, 0x8f);
 		MDP_OUTP(MDP_BASE + 0xc3008, 0x30);
-		MDP_OUTP(MDP_BASE + 0xc300c, 0xc4);
+		MDP_OUTP(MDP_BASE + 0xc300c, 0xc6);
 		MDP_OUTP(MDP_BASE + 0xc3014, 0x10);
-		MDP_OUTP(MDP_BASE + 0xc3018, 0x05);
+		MDP_OUTP(MDP_BASE + 0xc3018, 0x07);
 		MDP_OUTP(MDP_BASE + 0xc301c, 0x62);
 		MDP_OUTP(MDP_BASE + 0xc3020, 0x41);
 		MDP_OUTP(MDP_BASE + 0xc3024, 0x0d);
diff --git a/drivers/video/msm/lvds_chimei_wxga.c b/drivers/video/msm/lvds_chimei_wxga.c
index 9a385b9..39aa852 100644
--- a/drivers/video/msm/lvds_chimei_wxga.c
+++ b/drivers/video/msm/lvds_chimei_wxga.c
@@ -134,7 +134,7 @@
 	pinfo->wait_cycle = 0;
 	pinfo->bpp = 24;
 	pinfo->fb_num = 2;
-	pinfo->clk_rate = 75000000;
+	pinfo->clk_rate = 79400000;
 	pinfo->bl_max = 255;
 	pinfo->bl_min = 1;
 
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index e91209b..2411dca 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -283,10 +283,10 @@
 #define MDP_PRIM_RDPTR_TERM 0x400
 #endif
 #define MDP_OVERLAY2_TERM 0x80
-#define MDP_HISTOGRAM_TERM_DMA_P 0x100
-#define MDP_HISTOGRAM_TERM_DMA_S 0x200
-#define MDP_HISTOGRAM_TERM_VG_1 0x400
-#define MDP_HISTOGRAM_TERM_VG_2 0x800
+#define MDP_HISTOGRAM_TERM_DMA_P 0x10000
+#define MDP_HISTOGRAM_TERM_DMA_S 0x20000
+#define MDP_HISTOGRAM_TERM_VG_1 0x40000
+#define MDP_HISTOGRAM_TERM_VG_2 0x80000
 
 #define ACTIVE_START_X_EN BIT(31)
 #define ACTIVE_START_Y_EN BIT(31)
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 71315e6..eee114a 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -2064,7 +2064,7 @@
 	iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
 	iom_pipe_info->mark_unmap = 1;
 
-	mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
+	mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
 
 	memset(pipe, 0, sizeof(*pipe));
 
@@ -2886,16 +2886,11 @@
 
 		mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
 		mdp4_overlay_update_blt_mode(mfd);
-		if (!mfd->use_ov0_blt)
-			mdp4_free_writeback_buf(mfd, MDP4_MIXER0);
 	} else {	/* mixer1, DTV, ATV */
 		if (ctrl->panel_mode & MDP4_PANEL_DTV) {
 			mdp4_overlay_dtv_unset(mfd, pipe);
 			mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
 			mdp4_overlay1_update_blt_mode(mfd);
-			if (!mfd->use_ov1_blt)
-				mdp4_free_writeback_buf(mfd,
-								MDP4_MIXER1);
 		}
 	}
 
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 28b5cd5..398b1e6 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -49,16 +49,19 @@
 	struct device *dev;
 	int inited;
 	int update_ndx;
-	uint32 dmap_intr_cnt;
+	int ov_koff;
+	int ov_done;
 	atomic_t suspend;
-	int dmap_wait_cnt;
 	int wait_vsync_cnt;
 	int blt_change;
+	int blt_free;
 	int fake_vsync;
 	struct mutex update_lock;
+	struct completion ov_comp;
 	struct completion dmap_comp;
 	struct completion vsync_comp;
 	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
 	struct mdp4_overlay_pipe *base_pipe;
 	struct vsync_update vlist[2];
 	int vsync_irq_enabled;
@@ -145,8 +148,9 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-
 static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_dsi_video_wait4dmap(int cndx);
+static void mdp4_dsi_video_wait4ov(int cndx);
 
 int mdp4_dsi_video_pipe_commit(void)
 {
@@ -175,11 +179,37 @@
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
 	vp->update_cnt = 0;     /* reset */
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
 	mutex_unlock(&vctrl->update_lock);
 
 	/* free previous committed iommu back to pool */
 	mdp4_overlay_iommu_unmap_freelist(mixer);
 
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->ov_koff != vctrl->ov_done) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		pr_err("%s: Error, frame dropped %d %d\n", __func__,
+				vctrl->ov_koff, vctrl->ov_done);
+		return 0;
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (vctrl->blt_change) {
+		pipe = vctrl->base_pipe;
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		INIT_COMPLETION(vctrl->dmap_comp);
+		INIT_COMPLETION(vctrl->ov_comp);
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		mdp4_dsi_video_wait4dmap(0);
+		if (pipe->ov_blt_addr)
+			mdp4_dsi_video_wait4ov(0);
+	}
+
 	pipe = vp->plist;
 	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
 		if (pipe->pipe_used) {
@@ -200,16 +230,17 @@
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	if (pipe->ov_blt_addr) {
 		mdp4_dsi_video_blt_ov_update(pipe);
-		pipe->blt_ov_done++;
+		pipe->ov_cnt++;
+		INIT_COMPLETION(vctrl->ov_comp);
 		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
 		mb();
-		pipe->blt_ov_koff++;
+		vctrl->ov_koff++;
 		/* kickoff overlay engine */
 		mdp4_stat.kickoff_ov0++;
 		outpdw(MDP_BASE + 0x0004, 0);
-	} else if (vctrl->dmap_intr_cnt == 0) {
+	} else {
 		/* schedule second phase update  at dmap */
-		vctrl->dmap_intr_cnt++;
+		INIT_COMPLETION(vctrl->dmap_comp);
 		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
 	}
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
@@ -285,7 +316,6 @@
 
 static void mdp4_dsi_video_wait4dmap(int cndx)
 {
-	unsigned long flags;
 	struct vsycn_ctrl *vctrl;
 
 	if (cndx >= MAX_CONTROLLER) {
@@ -298,23 +328,26 @@
 	if (atomic_read(&vctrl->suspend) > 0)
 		return;
 
-	/* start timing generator & mmu if they are not started yet */
-	mdp4_overlay_dsi_video_start();
-
-	spin_lock_irqsave(&vctrl->spin_lock, flags);
-	if (vctrl->dmap_wait_cnt == 0) {
-		INIT_COMPLETION(vctrl->dmap_comp);
-		if (vctrl->dmap_intr_cnt == 0) {
-			vctrl->dmap_intr_cnt++;
-			vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
-		}
-	}
-	vctrl->dmap_wait_cnt++;
-	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-
 	wait_for_completion(&vctrl->dmap_comp);
 }
 
+static void mdp4_dsi_video_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
+}
+
 static void send_vsync_work(struct work_struct *work)
 {
 	struct vsycn_ctrl *vctrl =
@@ -349,6 +382,7 @@
 	mutex_init(&vctrl->update_lock);
 	init_completion(&vctrl->vsync_comp);
 	init_completion(&vctrl->dmap_comp);
+	init_completion(&vctrl->ov_comp);
 	atomic_set(&vctrl->suspend, 0);
 	spin_lock_init(&vctrl->spin_lock);
 	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
@@ -413,14 +447,15 @@
 	vctrl = &vsync_ctrl_db[cndx];
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-	vctrl->dev = mfd->fbi->dev;
-
 	if (!mfd)
 		return -ENODEV;
 
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+	vctrl->mfd = mfd;
+	vctrl->dev = mfd->fbi->dev;
+
 	/* mdp clock on */
 	mdp_clk_ctrl(1);
 
@@ -728,18 +763,16 @@
 	int bpp;
 	char *overlay_base;
 
-
 	if (pipe->ov_blt_addr == 0)
 		return;
 
-
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
 #else
 	bpp = 3; /* overlay ouput is RGB888 */
 #endif
 	off = 0;
-	if (pipe->blt_ov_done & 0x01)
+	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
 	addr = pipe->ov_blt_addr + off;
 
@@ -764,7 +797,7 @@
 	bpp = 3; /* overlay ouput is RGB888 */
 #endif
 	off = 0;
-	if (pipe->blt_dmap_done & 0x01)
+	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
 	addr = pipe->dma_blt_addr + off;
 
@@ -774,7 +807,6 @@
 
 void mdp4_overlay_dsi_video_set_perf(struct msm_fb_data_type *mfd)
 {
-	mdp4_dsi_video_wait4dmap(0);
 	/* change mdp clk while mdp is idle */
 	mdp4_set_perf_level();
 }
@@ -819,30 +851,26 @@
 	pipe = vctrl->base_pipe;
 
 	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
 	if (vctrl->blt_change) {
 		mdp4_overlayproc_cfg(pipe);
 		mdp4_overlay_dmap_xy(pipe);
 		if (pipe->ov_blt_addr) {
 			mdp4_dsi_video_blt_ov_update(pipe);
-			pipe->blt_ov_done++;
-
+			pipe->ov_cnt++;
 			/* Prefill one frame */
-			vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+			vsync_irq_enable(INTR_OVERLAY0_DONE,
+						MDP_OVERLAY0_TERM);
 			/* kickoff overlay0 engine */
 			mdp4_stat.kickoff_ov0++;
+			vctrl->ov_koff++;	/* make up for prefill */
 			outpdw(MDP_BASE + 0x0004, 0);
 		}
 		vctrl->blt_change = 0;
 	}
 
-	vctrl->dmap_intr_cnt--;
-	if (vctrl->dmap_wait_cnt) {
-		complete_all(&vctrl->dmap_comp);
-		vctrl->dmap_wait_cnt = 0; /* reset */
-	} else  {
-		mdp4_overlay_dma_commit(cndx);
-	}
-	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	complete_all(&vctrl->dmap_comp);
+	mdp4_overlay_dma_commit(cndx);
 	spin_unlock(&vctrl->spin_lock);
 }
 
@@ -858,14 +886,16 @@
 	pipe = vctrl->base_pipe;
 
 	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	vctrl->ov_done++;
+	complete_all(&vctrl->ov_comp);
 	if (pipe->ov_blt_addr == 0) {
 		spin_unlock(&vctrl->spin_lock);
 		return;
 	}
 
 	mdp4_dsi_video_blt_dmap_update(pipe);
-	pipe->blt_dmap_done++;
-	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	pipe->dmap_cnt++;
 	spin_unlock(&vctrl->spin_lock);
 }
 
@@ -876,7 +906,6 @@
 static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
 {
 	unsigned long flag;
-	int data;
 	int cndx = 0;
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
@@ -895,16 +924,17 @@
 	if (enable && pipe->ov_blt_addr == 0) {
 		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
 		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
-		pipe->blt_cnt = 0;
 		pipe->ov_cnt = 0;
-		pipe->blt_dmap_done = 0;
-		pipe->blt_ov_koff = 0;
-		pipe->blt_ov_done = 0;
+		pipe->dmap_cnt = 0;
+		vctrl->ov_koff = 0;
+		vctrl->ov_done = 0;
+		vctrl->blt_free = 0;
 		mdp4_stat.blt_dsi_video++;
 		vctrl->blt_change++;
 	} else if (enable == 0 && pipe->ov_blt_addr) {
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr =  0;
+		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
 		vctrl->blt_change++;
 	}
 
@@ -917,11 +947,6 @@
 	}
 
 	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
-
-	data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
-	data &= 0x01;
-	if (data)	/* timing generator enabled */
-		mdp4_dsi_video_wait4dmap(0);
 }
 
 void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
@@ -972,6 +997,10 @@
 	}
 
 	mdp4_dsi_video_pipe_commit();
-	mdp4_dsi_video_wait4dmap(0);
+
+	if (pipe->ov_blt_addr)
+		mdp4_dsi_video_wait4ov(0);
+	else
+		mdp4_dsi_video_wait4dmap(0);
 }
 
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 57793fc..2da2052 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -50,16 +50,19 @@
 	struct device *dev;
 	int inited;
 	int update_ndx;
-	uint32 dmap_intr_cnt;
+	int ov_koff;
+	int ov_done;
 	atomic_t suspend;
-	int dmap_wait_cnt;
 	int wait_vsync_cnt;
 	int blt_change;
+	int blt_free;
 	int fake_vsync;
 	struct mutex update_lock;
+	struct completion ov_comp;
 	struct completion dmap_comp;
 	struct completion vsync_comp;
 	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
 	struct mdp4_overlay_pipe *base_pipe;
 	struct vsync_update vlist[2];
 	int vsync_irq_enabled;
@@ -150,8 +153,9 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-
 static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_lcdc_wait4dmap(int cndx);
+static void mdp4_lcdc_wait4ov(int cndx);
 
 int mdp4_lcdc_pipe_commit(void)
 {
@@ -180,11 +184,37 @@
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
 	vp->update_cnt = 0;     /* reset */
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
 	mutex_unlock(&vctrl->update_lock);
 
 	/* free previous committed iommu back to pool */
 	mdp4_overlay_iommu_unmap_freelist(mixer);
 
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->ov_koff != vctrl->ov_done) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		pr_err("%s: Error, frame dropped %d %d\n", __func__,
+			vctrl->ov_koff, vctrl->ov_done);
+		return 0;
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (vctrl->blt_change) {
+		pipe = vctrl->base_pipe;
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		INIT_COMPLETION(vctrl->dmap_comp);
+		INIT_COMPLETION(vctrl->ov_comp);
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		mdp4_lcdc_wait4dmap(0);
+		if (pipe->ov_blt_addr)
+			mdp4_lcdc_wait4ov(0);
+	}
+
 	pipe = vp->plist;
 	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
 		if (pipe->pipe_used) {
@@ -205,15 +235,17 @@
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	if (pipe->ov_blt_addr) {
 		mdp4_lcdc_blt_ov_update(pipe);
-		pipe->blt_ov_done++;
+		pipe->ov_cnt++;
+		INIT_COMPLETION(vctrl->ov_comp);
 		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
 		mb();
-		pipe->blt_ov_koff++;
+		vctrl->ov_koff++;
 		/* kickoff overlay engine */
+		mdp4_stat.kickoff_ov0++;
 		outpdw(MDP_BASE + 0x0004, 0);
-	} else if (vctrl->dmap_intr_cnt == 0) {
+	} else {
 		/* schedule second phase update  at dmap */
-		vctrl->dmap_intr_cnt++;
+		INIT_COMPLETION(vctrl->dmap_comp);
 		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
 	}
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
@@ -289,7 +321,6 @@
 
 static void mdp4_lcdc_wait4dmap(int cndx)
 {
-	unsigned long flags;
 	struct vsycn_ctrl *vctrl;
 
 	if (cndx >= MAX_CONTROLLER) {
@@ -302,22 +333,24 @@
 	if (atomic_read(&vctrl->suspend) > 0)
 		return;
 
-	/* start timing generator & mmu if they are not started yet */
-	mdp4_overlay_lcdc_start();
-
-	spin_lock_irqsave(&vctrl->spin_lock, flags);
-	if (vctrl->dmap_wait_cnt == 0) {
-		INIT_COMPLETION(vctrl->dmap_comp);
-		if (vctrl->dmap_intr_cnt == 0) {
-			vctrl->dmap_intr_cnt++;
-			vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
-		}
-	}
-	vctrl->dmap_wait_cnt++;
-	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-
 	wait_for_completion(&vctrl->dmap_comp);
-	pr_debug("%s: pid=%d after wait\n", __func__, current->pid);
+}
+
+static void mdp4_lcdc_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
 }
 
 static void send_vsync_work(struct work_struct *work)
@@ -354,6 +387,7 @@
 	mutex_init(&vctrl->update_lock);
 	init_completion(&vctrl->vsync_comp);
 	init_completion(&vctrl->dmap_comp);
+	init_completion(&vctrl->ov_comp);
 	atomic_set(&vctrl->suspend, 0);
 	spin_lock_init(&vctrl->spin_lock);
 	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
@@ -424,6 +458,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
 
 	/* mdp clock on */
@@ -637,18 +672,16 @@
 	int bpp;
 	char *overlay_base;
 
-
 	if (pipe->ov_blt_addr == 0)
 		return;
 
-
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
 #else
 	bpp = 3; /* overlay ouput is RGB888 */
 #endif
 	off = 0;
-	if (pipe->blt_ov_done & 0x01)
+	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
 	addr = pipe->ov_blt_addr + off;
 
@@ -666,14 +699,13 @@
 	if (pipe->ov_blt_addr == 0)
 		return;
 
-
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
 #else
 	bpp = 3; /* overlay ouput is RGB888 */
 #endif
 	off = 0;
-	if (pipe->blt_dmap_done & 0x01)
+	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
 	addr = pipe->dma_blt_addr + off;
 
@@ -683,7 +715,6 @@
 
 void mdp4_overlay_lcdc_set_perf(struct msm_fb_data_type *mfd)
 {
-	mdp4_lcdc_wait4dmap(0);
 	/* change mdp clk while mdp is idle */
 	mdp4_set_perf_level();
 }
@@ -727,29 +758,25 @@
 	pipe = vctrl->base_pipe;
 
 	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
 	if (vctrl->blt_change) {
 		mdp4_overlayproc_cfg(pipe);
 		mdp4_overlay_dmap_xy(pipe);
 		if (pipe->ov_blt_addr) {
 			mdp4_lcdc_blt_ov_update(pipe);
-			pipe->blt_ov_done++;
-
+			pipe->ov_cnt++;
 			/* Prefill one frame */
 			vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
 			/* kickoff overlay0 engine */
+			mdp4_stat.kickoff_ov0++;
+			vctrl->ov_koff++;       /* make up for prefill */
 			outpdw(MDP_BASE + 0x0004, 0);
 		}
 		vctrl->blt_change = 0;
 	}
 
-	vctrl->dmap_intr_cnt--;
-	if (vctrl->dmap_wait_cnt) {
-		complete_all(&vctrl->dmap_comp);
-		vctrl->dmap_wait_cnt = 0; /* reset */
-	} else  {
-		mdp4_overlay_dma_commit(cndx);
-	}
-	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	complete_all(&vctrl->dmap_comp);
+	mdp4_overlay_dma_commit(cndx);
 	spin_unlock(&vctrl->spin_lock);
 }
 
@@ -765,21 +792,22 @@
 	pipe = vctrl->base_pipe;
 
 	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	vctrl->ov_done++;
+	complete_all(&vctrl->ov_comp);
 	if (pipe->ov_blt_addr == 0) {
 		spin_unlock(&vctrl->spin_lock);
 		return;
 	}
 
 	mdp4_lcdc_blt_dmap_update(pipe);
-	pipe->blt_dmap_done++;
-	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
+	pipe->dmap_cnt++;
 	spin_unlock(&vctrl->spin_lock);
 }
 
 static void mdp4_lcdc_do_blt(struct msm_fb_data_type *mfd, int enable)
 {
 	unsigned long flag;
-	int data;
 	int cndx = 0;
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
@@ -798,16 +826,17 @@
 	if (enable && pipe->ov_blt_addr == 0) {
 		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
 		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
-		pipe->blt_cnt = 0;
 		pipe->ov_cnt = 0;
-		pipe->blt_dmap_done = 0;
-		pipe->blt_ov_koff = 0;
-		pipe->blt_ov_done = 0;
+		pipe->dmap_cnt = 0;
+		vctrl->ov_koff = 0;
+		vctrl->ov_done = 0;
+		vctrl->blt_free = 0;
 		mdp4_stat.blt_lcdc++;
 		vctrl->blt_change++;
 	} else if (enable == 0 && pipe->ov_blt_addr) {
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr = 0;
+		vctrl->blt_free = 4;    /* 4 commits to free wb buf */
 		vctrl->blt_change++;
 	}
 
@@ -820,11 +849,6 @@
 	}
 
 	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
-
-	data = inpdw(MDP_BASE + LCDC_BASE);
-	data &= 0x01;
-	if (data)       /* timing generator enabled */
-		mdp4_lcdc_wait4dmap(0);
 }
 
 void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
@@ -876,5 +900,9 @@
 	}
 
 	mdp4_lcdc_pipe_commit();
-	mdp4_lcdc_wait4dmap(0);
+
+	if (pipe->ov_blt_addr)
+		mdp4_lcdc_wait4ov(0);
+	else
+		mdp4_lcdc_wait4dmap(0);
 }
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 573e317..e76b8ba 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -462,11 +462,6 @@
 		dma = &dma2_data;
 		if (panel & (MDP4_PANEL_LCDC | MDP4_PANEL_DSI_VIDEO)) {
 			/* disable LCDC interrupt */
-			spin_lock(&mdp_spin_lock);
-			mdp_intr_mask &= ~INTR_OVERLAY0_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-			dma->waiting = FALSE;
-			spin_unlock(&mdp_spin_lock);
 			if (panel & MDP4_PANEL_LCDC)
 				mdp4_overlay0_done_lcdc(0);
 #ifdef CONFIG_FB_MSM_MIPI_DSI
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index a96bf3a..ee086ad 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -774,17 +774,23 @@
 	fix->type = panel_info->is_3d_panel;
 	fix->line_length = mdss_fb_line_length(mfd->index, panel_info->xres,
 					       bpp);
-	mfd->var_xres = panel_info->xres;
-	mfd->var_yres = panel_info->yres;
-
-	var->pixclock = mfd->panel_info.clk_rate;
-	mfd->var_pixclock = var->pixclock;
 
 	var->xres = panel_info->xres;
 	var->yres = panel_info->yres;
 	var->xres_virtual = panel_info->xres;
 	var->yres_virtual = panel_info->yres * mfd->fb_page;
 	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
+	var->upper_margin = panel_info->lcdc.v_front_porch;
+	var->lower_margin = panel_info->lcdc.v_back_porch;
+	var->vsync_len = panel_info->lcdc.v_pulse_width;
+	var->left_margin = panel_info->lcdc.h_front_porch;
+	var->right_margin = panel_info->lcdc.h_back_porch;
+	var->hsync_len = panel_info->lcdc.h_pulse_width;
+	var->pixclock = panel_info->clk_rate / 1000;
+
+	mfd->var_xres = var->xres;
+	mfd->var_yres = var->yres;
+	mfd->var_pixclock = var->pixclock;
 
 	/* id field for fb app  */
 
@@ -796,6 +802,7 @@
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->pseudo_palette = mdss_fb_pseudo_palette;
 
+	panel_info->fbi = fbi;
 	mfd->ref_cnt = 0;
 	mfd->panel_power_on = false;
 
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 3fd943d..0411d8e 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -161,6 +161,7 @@
 	struct lcdc_panel_info lcdc;
 	struct mipi_panel_info mipi;
 	struct lvds_panel_info lvds;
+	struct fb_info *fbi;
 };
 
 struct mdss_panel_data {
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 7bf516d..72e3600 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1310,14 +1310,14 @@
 	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
 	if (mfd->dest == DISPLAY_LCD) {
 		if (panel_info->type == MDDI_PANEL && panel_info->mddi.is_type1)
-			var->reserved[3] = panel_info->lcd.refx100 / (100 * 2);
+			var->reserved[4] = panel_info->lcd.refx100 / (100 * 2);
 		else
-			var->reserved[3] = panel_info->lcd.refx100 / 100;
+			var->reserved[4] = panel_info->lcd.refx100 / 100;
 	} else {
 		if (panel_info->type == MIPI_VIDEO_PANEL) {
-			var->reserved[3] = panel_info->mipi.frame_rate;
+			var->reserved[4] = panel_info->mipi.frame_rate;
 		} else {
-			var->reserved[3] = panel_info->clk_rate /
+			var->reserved[4] = panel_info->clk_rate /
 				((panel_info->lcdc.h_back_porch +
 				  panel_info->lcdc.h_front_porch +
 				  panel_info->lcdc.h_pulse_width +
@@ -1328,7 +1328,7 @@
 				  panel_info->yres));
 		}
 	}
-	pr_debug("reserved[3] %u\n", var->reserved[3]);
+	pr_debug("reserved[4] %u\n", var->reserved[4]);
 
 		/*
 		 * id field for fb app
@@ -1407,6 +1407,14 @@
 	fbi->fix.smem_start = (unsigned long)fbram_phys;
 
 	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
+					DISPLAY_WRITE_DOMAIN,
+					GEN_POOL,
+					fbi->fix.smem_len,
+					SZ_4K,
+					0,
+					&(mfd->display_iova));
+
+	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
 					DISPLAY_READ_DOMAIN,
 					GEN_POOL,
 					fbi->fix.smem_len,
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb68..f6a2923 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -279,7 +279,7 @@
 	__u32 vmode;			/* see FB_VMODE_*		*/
 	__u32 rotate;			/* angle we rotate counter clockwise */
 	__u32 colorspace;		/* colorspace for FOURCC-based modes */
-	__u32 reserved[4];		/* Reserved for future compatibility */
+	__u32 reserved[5];		/* Reserved for future compatibility */
 };
 
 struct fb_cmap {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 57ce7c0..e400375 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1253,27 +1253,31 @@
 #define CSI_DECODE_10BIT 2
 #define CSI_DECODE_DPCM_10_8_10 5
 
-#define ISPIF_STREAM(intf, action) (((intf)<<ISPIF_S_STREAM_SHIFT)+(action))
-#define ISPIF_ON_FRAME_BOUNDARY	(0x01 << 0)
-#define ISPIF_OFF_FRAME_BOUNDARY    (0x01 << 1)
-#define ISPIF_OFF_IMMEDIATELY       (0x01 << 2)
-#define ISPIF_S_STREAM_SHIFT	4
-
+#define ISPIF_STREAM(intf, action, vfe) (((intf)<<ISPIF_S_STREAM_SHIFT)+\
+	(action)+((vfe)<<ISPIF_VFE_INTF_SHIFT))
+#define ISPIF_ON_FRAME_BOUNDARY   (0x01 << 0)
+#define ISPIF_OFF_FRAME_BOUNDARY  (0x01 << 1)
+#define ISPIF_OFF_IMMEDIATELY     (0x01 << 2)
+#define ISPIF_S_STREAM_SHIFT      4
+#define ISPIF_VFE_INTF_SHIFT      12
 
 #define PIX_0 (0x01 << 0)
 #define RDI_0 (0x01 << 1)
 #define PIX_1 (0x01 << 2)
 #define RDI_1 (0x01 << 3)
-#define PIX_2 (0x01 << 4)
-#define RDI_2 (0x01 << 5)
+#define RDI_2 (0x01 << 4)
 
+enum msm_ispif_vfe_intf {
+	VFE0,
+	VFE1,
+	VFE_MAX,
+};
 
 enum msm_ispif_intftype {
 	PIX0,
 	RDI0,
 	PIX1,
 	RDI1,
-	PIX2,
 	RDI2,
 	INTF_MAX,
 };
@@ -1308,6 +1312,7 @@
 	uint8_t intftype;
 	uint16_t cid_mask;
 	uint8_t csid;
+	uint8_t vfe_intf;
 };
 
 struct msm_ispif_params_list {
@@ -1673,6 +1678,7 @@
 	GESTURE_DEV,
 	IRQ_ROUTER_DEV,
 	CPP_DEV,
+	CCI_DEV,
 };
 
 struct msm_mctl_set_sdev_data {
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 0ee7417..ab9e70c 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -217,6 +217,7 @@
 #define VFE_CMD_STATS_BF_STOP                           146
 #define VFE_CMD_STATS_BHIST_START                       147
 #define VFE_CMD_STATS_BHIST_STOP                        148
+#define VFE_CMD_RESET_2                                 149
 
 struct msm_isp_cmd {
 	int32_t  id;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index da8b2dc..56bdd46 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2124,6 +2124,55 @@
 EXPORT_SYMBOL(hci_send_sco);
 
 /* ---- HCI TX task (outgoing data) ---- */
+/* HCI ACL Connection scheduler */
+static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev,
+								int *quote)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *conn = NULL;
+	int num = 0, min = ~0, conn_num = 0;
+	struct list_head *p;
+
+	/* We don't have to lock device here. Connections are always
+	 * added and removed with TX task disabled. */
+	list_for_each(p, &h->list) {
+		struct hci_conn *c;
+		c = list_entry(p, struct hci_conn, list);
+		if (c->type == ACL_LINK)
+			conn_num++;
+
+		if (skb_queue_empty(&c->data_q))
+			continue;
+
+		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
+			continue;
+
+		num++;
+
+		if (c->sent < min) {
+			min  = c->sent;
+			conn = c;
+		}
+	}
+
+	if (conn) {
+		int cnt, q;
+		cnt = hdev->acl_cnt;
+		q = cnt / num;
+		*quote = q ? q : 1;
+	} else
+		*quote = 0;
+
+	if (conn && (*quote == hdev->acl_cnt) &&
+		(conn->sent == (hdev->acl_pkts - 1)) &&
+		(conn_num > 1)) {
+			*quote = 0;
+			conn = NULL;
+	}
+
+	BT_DBG("conn %p quote %d", conn, *quote);
+	return conn;
+}
 
 /* HCI Connection scheduler */
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
@@ -2217,8 +2266,10 @@
 	}
 
 	while (hdev->acl_cnt > 0 &&
-		(conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
+		((conn = hci_low_sent_acl(hdev, &quote)) != NULL)) {
+
+		while (quote > 0 &&
+			  (skb = skb_dequeue(&conn->data_q))) {
 			int count = 1;
 
 			BT_DBG("skb %p len %d", skb, skb->len);
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index fb74c0a..89b709f 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -1005,6 +1005,7 @@
 		case VOICE_RECORD_TX:
 		case VOICE_RECORD_RX:
 			rc = afe_start_pseudo_port(dai->id);
+			break;
 		default:
 			rc = afe_port_start(dai->id, &dai_data->port_config,
 					    dai_data->rate);
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index c0c679d..da3d335 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -2499,6 +2499,9 @@
 	{"SEC_AUX_PCM_TX", NULL, "BE_IN"},
 	{"BE_OUT", NULL, "AUX_PCM_RX"},
 	{"AUX_PCM_TX", NULL, "BE_IN"},
+	{"INCALL_RECORD_TX", NULL, "BE_IN"},
+	{"INCALL_RECORD_RX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
 };
 
 static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,