Merge "ASoC: msm: Add fix for Unsupported Proxy backend sample rates." into msm-3.4
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 9809bdf..7f198d2 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -1235,10 +1235,8 @@
 	unsigned long flags;
 	int rc = 0;
 
-	if (cpu > num_possible_cpus()) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (cpu > num_possible_cpus())
+		return -EINVAL;
 
 	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
 		mutex_lock(&driver_lock);
@@ -1615,7 +1613,7 @@
 		acpu_freq_tbl = acpu_freq_tbl_8627;
 		l2_freq_tbl = l2_freq_tbl_8627;
 		l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8627);
-	} else if (cpu_is_msm8930()) {
+	} else if (cpu_is_msm8930() || cpu_is_msm8930aa()) {
 		enum pvs pvs_id = get_pvs();
 
 		scalable = scalable_8930;
diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c
index ef34b3c..f94f0b2 100644
--- a/arch/arm/mach-msm/acpuclock-8x60.c
+++ b/arch/arm/mach-msm/acpuclock-8x60.c
@@ -712,10 +712,8 @@
 	unsigned long flags;
 	int rc = 0;
 
-	if (cpu > num_possible_cpus()) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (cpu > num_possible_cpus())
+		return -EINVAL;
 
 	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
 		mutex_lock(&drv_state.lock);
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 84253b8..8bd54e3 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -385,10 +385,8 @@
 	unsigned long flags;
 	int rc = 0;
 
-	if (cpu > num_possible_cpus()) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (cpu > num_possible_cpus())
+		return -EINVAL;
 
 	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
 		mutex_lock(&driver_lock);
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index 30a2683..0f9c939 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -224,7 +224,6 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/10,
 	.nap_allowed = true,
-	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index fc886ed..43a79b5 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -347,6 +347,7 @@
 	.update_time		= 60000,
 	.max_voltage		= MAX_VOLTAGE_MV,
 	.min_voltage		= 3200,
+	.uvd_thresh_voltage	= 4050,
 	.resume_voltage_delta	= 100,
 	.term_current		= 100,
 	.cool_temp		= 10,
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index a33b62b..13d8b3b 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -383,6 +383,16 @@
 				apq8064_sdc3_pdata->status_irq = 0;
 			}
 		}
+		if (machine_is_apq8064_cdp()) {
+			int i;
+
+			for (i = 0;
+			     i < apq8064_sdc3_pdata->pin_data->pad_data->\
+				 drv->size;
+			     i++)
+				apq8064_sdc3_pdata->pin_data->pad_data->\
+					drv->on[i].val = GPIO_CFG_10MA;
+		}
 		apq8064_add_sdcc(3, apq8064_sdc3_pdata);
 	}
 
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index f884631..08c3408 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -124,6 +124,10 @@
 #define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB     (MSM_QFPROM_BASE + 0x23c)
 #define QFPROM_RAW_OEM_CONFIG_ROW0_LSB      (MSM_QFPROM_BASE + 0x220)
 
+/* PCIE AXI address space */
+#define PCIE_AXI_BAR_PHYS   0x08000000
+#define PCIE_AXI_BAR_SIZE   SZ_128M
+
 /* PCIe power enable pmic gpio */
 #define PCIE_PWR_EN_PMIC_GPIO 13
 #define PCIE_RST_N_PMIC_MPP 1
@@ -2063,6 +2067,8 @@
 
 static struct msm_pcie_platform msm_pcie_platform_data = {
 	.gpio = msm_pcie_gpio_info,
+	.axi_addr = PCIE_AXI_BAR_PHYS,
+	.axi_size = PCIE_AXI_BAR_SIZE,
 };
 
 static int __init mpq8064_pcie_enabled(void)
diff --git a/arch/arm/mach-msm/board-8930-gpiomux.c b/arch/arm/mach-msm/board-8930-gpiomux.c
index 000f080..e0f012a 100644
--- a/arch/arm/mach-msm/board-8930-gpiomux.c
+++ b/arch/arm/mach-msm/board-8930-gpiomux.c
@@ -250,6 +250,28 @@
 	.drv = GPIOMUX_DRV_2MA,
 	.pull = GPIOMUX_PULL_DOWN,
 };
+
+static struct gpiomux_setting hdmi_active_3_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+	.dir = GPIOMUX_IN,
+};
+
+static struct gpiomux_setting hdmi_active_4_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+	.dir = GPIOMUX_OUT_HIGH,
+};
+
+static struct gpiomux_setting hdmi_active_5_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+	.dir = GPIOMUX_OUT_HIGH,
+};
+
 #endif
 
 #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
@@ -593,6 +615,32 @@
 			[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
 		},
 	},
+
+};
+
+static struct msm_gpiomux_config msm8930_mhl_configs[] __initdata = {
+	{
+		.gpio = 72,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &hdmi_active_3_cfg,
+			[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 71,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &hdmi_active_4_cfg,
+			[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 73,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &hdmi_active_5_cfg,
+			[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
+		},
+	},
+
 };
 #endif
 
@@ -699,6 +747,9 @@
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
 	msm_gpiomux_install(msm8960_hdmi_configs,
 			ARRAY_SIZE(msm8960_hdmi_configs));
+	if (machine_is_msm8930_fluid())
+		msm_gpiomux_install(msm8930_mhl_configs,
+				ARRAY_SIZE(msm8930_mhl_configs));
 #endif
 
 	msm_gpiomux_install(msm8960_mdp_vsync_configs,
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 0c5ae5f..632fc9a 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -116,7 +116,7 @@
 static struct kgsl_device_platform_data kgsl_3d0_pdata = {
 	.pwrlevel = {
 		{
-			.gpu_freq = 450000000,
+			.gpu_freq = 400000000,
 			.bus_freq = 3,
 			.io_fraction = 0,
 		},
@@ -140,7 +140,6 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/12,
 	.nap_allowed = true,
-	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index e6a13b1..a1a4b7c 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -95,13 +95,12 @@
 	PM8XXX_GPIO_INPUT(11, PM_GPIO_PULL_UP_30),
 	/* haptics gpio */
 	PM8XXX_GPIO_OUTPUT_FUNC(7, 0, PM_GPIO_FUNC_1),
+	/* MHL PWR EN */
+	PM8XXX_GPIO_OUTPUT_VIN(5, 1, PM_GPIO_VIN_VPH),
 };
 
 /* Initial pm8038 MPP configurations */
-static struct pm8xxx_mpp_init pm8038_mpps[] __initdata = {
-	/* External 5V regulator enable; shared by HDMI and USB_OTG switches. */
-	PM8XXX_MPP_INIT(3, D_INPUT, PM8038_MPP_DIG_LEVEL_VPH, DIN_TO_INT),
-};
+static struct pm8xxx_mpp_init pm8038_mpps[] __initdata = {};
 
 void __init msm8930_pm8038_gpio_mpp_init(void)
 {
@@ -213,6 +212,7 @@
 	.update_time		= 60000,
 	.max_voltage		= MAX_VOLTAGE_MV,
 	.min_voltage		= 3200,
+	.uvd_thresh_voltage	= 4050,
 	.resume_voltage_delta	= 100,
 	.term_current		= 100,
 	.cool_temp		= 10,
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index bc370ba..f06a1b7 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -90,6 +90,7 @@
 	REGULATOR_SUPPLY("CDC_VDDA_TX",		"sitar1p1-slim"),
 	REGULATOR_SUPPLY("CDC_VDDA_RX",		"sitar1p1-slim"),
 	REGULATOR_SUPPLY("vddp",		"0-0048"),
+	REGULATOR_SUPPLY("mhl_iovcc18",		"0-0039"),
 };
 VREG_CONSUMERS(L12) = {
 	REGULATOR_SUPPLY("8038_l12",		NULL),
@@ -125,6 +126,7 @@
 	REGULATOR_SUPPLY("CDC_VDDA_A_1P2V",	"sitar-slim"),
 	REGULATOR_SUPPLY("VDDD_CDC_D",		"sitar1p1-slim"),
 	REGULATOR_SUPPLY("CDC_VDDA_A_1P2V",	"sitar1p1-slim"),
+	REGULATOR_SUPPLY("mhl_avcc12",		"0-0039"),
 };
 VREG_CONSUMERS(L21) = {
 	REGULATOR_SUPPLY("8038_l21",		NULL),
@@ -194,6 +196,7 @@
 VREG_CONSUMERS(EXT_5V) = {
 	REGULATOR_SUPPLY("ext_5v",		NULL),
 	REGULATOR_SUPPLY("hdmi_mvs",		"hdmi_msm.0"),
+	REGULATOR_SUPPLY("mhl_usb_hs_switch",	"msm_otg"),
 };
 VREG_CONSUMERS(EXT_OTG_SW) = {
 	REGULATOR_SUPPLY("ext_otg_sw",		NULL),
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 1a61dbb..e695241 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -106,6 +106,11 @@
 #define KS8851_IRQ_GPIO		90
 #define HAP_SHIFT_LVL_OE_GPIO	47
 
+#define HDMI_MHL_MUX_GPIO       73
+#define MHL_GPIO_INT            72
+#define MHL_GPIO_RESET          71
+#define MHL_GPIO_PWR_EN         5
+
 #if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
 
 struct sx150x_platform_data msm8930_sx150x_data[] = {
@@ -776,6 +781,8 @@
 		.cfilt2_mv = 1800,
 		.bias1_cfilt_sel = SITAR_CFILT1_SEL,
 		.bias2_cfilt_sel = SITAR_CFILT2_SEL,
+		.bias1_cap_mode = MICBIAS_EXT_BYP_CAP,
+		.bias2_cap_mode = MICBIAS_NO_EXT_BYP_CAP,
 	},
 	.regulator = {
 	{
@@ -840,6 +847,8 @@
 		.cfilt2_mv = 1800,
 		.bias1_cfilt_sel = SITAR_CFILT1_SEL,
 		.bias2_cfilt_sel = SITAR_CFILT2_SEL,
+		.bias1_cap_mode = MICBIAS_EXT_BYP_CAP,
+		.bias2_cap_mode = MICBIAS_NO_EXT_BYP_CAP,
 	},
 	.regulator = {
 	{
@@ -1732,7 +1741,7 @@
 #define MXT_TS_GPIO_IRQ			11
 #define MXT_TS_RESET_GPIO		52
 
-static const u8 mxt_config_data_8930[] = {
+static const u8 mxt_config_data_8930_v1[] = {
 	/* T6 Object */
 	 0, 0, 0, 0, 0, 0,
 	/* T38 Object */
@@ -1777,6 +1786,43 @@
 	 0, 0, 0, 0,
 };
 
+static const u8 mxt_config_data_8930_v2[] = {
+	/* T6 Object */
+	 0, 0, 0, 0, 0, 0,
+	/* T38 Object */
+	 15, 4, 0, 9, 7, 12, 0, 0,
+	/* T7 Object */
+	32, 16, 50,
+	/* T8 Object */
+	 30, 0, 5, 10, 0, 0, 10, 10, 0, 0,
+	/* T9 Object */
+	 131, 0, 0, 19, 11, 0, 16, 50, 1, 3,
+	 12, 7, 2, 0, 4, 5, 2, 10, 43, 4,
+	 54, 2, -25, 29, 38, 18, 143, 40, 207, 80,
+	 17, 5, 50, 50, 0,
+	/* T18 Object */
+	 0, 0,
+	/* T19 Object */
+	 0, 0, 0, 0, 0, 0,
+	/* T25 Object */
+	 0, 0, 0, 0, 0, 0,
+	/* T42 Object */
+	 3, 60, 20, 20, 150, 0, 0, 0,
+	/* T46 Object */
+	 0, 3, 28, 28, 0, 0, 1, 0, 0,
+	/* T47 Object */
+	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	/* T48 Object */
+	 1, 3, 82, 0, 0, 0, 0, 0, 0, 0,
+	 16, 30, 0, 6, 6, 0, 0, 124, 4, 100,
+	 0, 0, 0, 5, 0, 42, 0, 1, 0, 40,
+	 52, 20, 0, 0, 0, 50, 1, 5, 2, 1,
+	 4, 5, 3, -25, 29, 38, 18, 143, 40, 207,
+	 80, 10, 5, 2,
+	/* T55 Object */
+	0, 0, 0, 0,
+};
+
 static ssize_t mxt224e_vkeys_show(struct kobject *kobj,
 			struct kobj_attribute *attr, char *buf)
 {
@@ -1824,12 +1870,33 @@
 
 static struct mxt_config_info mxt_config_array[] = {
 	{
-		.config			= mxt_config_data_8930,
-		.config_length		= ARRAY_SIZE(mxt_config_data_8930),
+		.config			= mxt_config_data_8930_v1,
+		.config_length		= ARRAY_SIZE(mxt_config_data_8930_v1),
 		.family_id		= 0x81,
 		.variant_id		= 0x01,
 		.version		= 0x10,
 		.build			= 0xAA,
+		.bootldr_id		= MXT_BOOTLOADER_ID_224E,
+		.fw_name		= "atmel_8930_fluid_v2_0_AB.hex",
+	},
+	{
+		.config			= mxt_config_data_8930_v2,
+		.config_length		= ARRAY_SIZE(mxt_config_data_8930_v2),
+		.family_id		= 0x81,
+		.variant_id		= 0x15,
+		.version		= 0x11,
+		.build			= 0xAA,
+		.bootldr_id		= MXT_BOOTLOADER_ID_224E,
+		.fw_name		= "atmel_8930_fluid_v2_0_AB.hex",
+	},
+	{
+		.config			= mxt_config_data_8930_v2,
+		.config_length		= ARRAY_SIZE(mxt_config_data_8930_v2),
+		.family_id		= 0x81,
+		.variant_id		= 0x01,
+		.version		= 0x20,
+		.build			= 0xAB,
+		.bootldr_id		= MXT_BOOTLOADER_ID_224E,
 	},
 };
 
@@ -1861,6 +1928,28 @@
 	},
 };
 
+#define MHL_POWER_GPIO       PM8038_GPIO_PM_TO_SYS(MHL_GPIO_PWR_EN)
+static struct msm_mhl_platform_data mhl_platform_data = {
+	.irq = MSM_GPIO_TO_INT(MHL_GPIO_INT),
+	.gpio_mhl_int = MHL_GPIO_INT,
+	.gpio_mhl_reset = MHL_GPIO_RESET,
+	.gpio_mhl_power = MHL_POWER_GPIO,
+	.gpio_hdmi_mhl_mux = HDMI_MHL_MUX_GPIO,
+};
+
+static struct i2c_board_info sii_device_info[] __initdata = {
+	{
+		/*
+		 * keeps SI 8334 as the default
+		 * MHL TX
+		 */
+		I2C_BOARD_INFO("sii8334", 0x39),
+		.platform_data = &mhl_platform_data,
+		.flags = I2C_CLIENT_WAKE,
+	},
+};
+
+
 #ifdef MSM8930_PHASE_2
 
 #define GPIO_VOLUME_UP		PM8038_GPIO_PM_TO_SYS(3)
@@ -2395,6 +2484,12 @@
 		mxt_device_info_8930,
 		ARRAY_SIZE(mxt_device_info_8930),
 	},
+	{
+		I2C_SURF | I2C_FFA | I2C_LIQUID | I2C_FLUID,
+		MSM_8930_GSBI9_QUP_I2C_BUS_ID,
+		sii_device_info,
+		ARRAY_SIZE(sii_device_info),
+	},
 };
 #endif /* CONFIG_I2C */
 
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 19564e9..17b0b6f 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -418,6 +418,7 @@
 	.update_time		= 60000,
 	.max_voltage		= MAX_VOLTAGE_MV,
 	.min_voltage		= 3200,
+	.uvd_thresh_voltage	= 4050,
 	.resume_voltage_delta	= 100,
 	.term_current		= 100,
 	.cool_temp		= 10,
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 251c1de..4721c94 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2280,60 +2280,13 @@
 	},
 };
 
-#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
-static void mhl_sii_reset_gpio(int on)
-{
-	gpio_set_value(MHL_GPIO_RESET, on);
-	return;
-}
-
-/*
- * Request for GPIO allocations
- * Set appropriate GPIO directions
- */
-static int mhl_sii_gpio_setup(int on)
-{
-	int ret;
-
-	if (on) {
-		ret = gpio_request(MHL_GPIO_RESET, "W_RST#");
-		if (ret < 0) {
-			pr_err("GPIO RESET request failed: %d\n", ret);
-			return -EBUSY;
-		}
-		ret = gpio_direction_output(MHL_GPIO_RESET, 1);
-		if (ret < 0) {
-			pr_err("SET GPIO RESET direction failed: %d\n", ret);
-			gpio_free(MHL_GPIO_RESET);
-			return -EBUSY;
-		}
-		ret = gpio_request(MHL_GPIO_INT, "W_INT");
-		if (ret < 0) {
-			pr_err("GPIO INT request failed: %d\n", ret);
-			gpio_free(MHL_GPIO_RESET);
-			return -EBUSY;
-		}
-		ret = gpio_direction_input(MHL_GPIO_INT);
-		if (ret < 0) {
-			pr_err("SET GPIO INTR direction failed: %d\n", ret);
-			gpio_free(MHL_GPIO_RESET);
-			gpio_free(MHL_GPIO_INT);
-			return -EBUSY;
-		}
-	} else {
-		gpio_free(MHL_GPIO_RESET);
-		gpio_free(MHL_GPIO_INT);
-	}
-
-	return 0;
-}
-
 static struct msm_mhl_platform_data mhl_platform_data = {
 	.irq = MSM_GPIO_TO_INT(4),
-	.gpio_setup = mhl_sii_gpio_setup,
-	.reset_pin = mhl_sii_reset_gpio,
+	.gpio_mhl_int = MHL_GPIO_INT,
+	.gpio_mhl_reset = MHL_GPIO_RESET,
+	.gpio_mhl_power = 0,
+	.gpio_hdmi_mhl_mux = 0,
 };
-#endif
 
 static struct i2c_board_info sii_device_info[] __initdata = {
 	{
@@ -3040,6 +2993,10 @@
 						msm8960_i2c_devices[i].info,
 						msm8960_i2c_devices[i].len);
 	}
+
+	if (!mhl_platform_data.gpio_mhl_power)
+		pr_debug("mhl device configured for ext debug board\n");
+
 #ifdef CONFIG_MSM_CAMERA
 	if (msm8960_camera_i2c_devices.machs & mach_mask)
 		i2c_register_board_info(msm8960_camera_i2c_devices.bus,
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 909c736..f99e5de8 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -6117,7 +6117,7 @@
 
 	if (cpu_is_apq8064())
 		rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
-	if (cpu_is_msm8930() || cpu_is_msm8627())
+	if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627())
 		rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
 	if (cpu_is_msm8627())
 		rmwreg(0x000003C7, SAXI_EN_REG,  0x00003FFF);
@@ -6153,7 +6153,8 @@
 		rmwreg(0x80FF0000, DSI2_PIXEL_CC_REG, 0xE0FF0010);
 		rmwreg(0x80FF0000, JPEGD_CC_REG,      0xE0FF0010);
 	}
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8627())
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_msm8627())
 		rmwreg(0x80FF0000, TV_CC_REG,         0xE1FFC010);
 
 	if (cpu_is_msm8960()) {
@@ -6245,7 +6246,7 @@
 	 * Program PLL15 to 900MHz with ref clk = 27MHz and
 	 * only enable PLL main output.
 	 */
-	if (cpu_is_msm8930() || cpu_is_msm8627()) {
+	if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
 		pll15_config.l = 0x21 | BVAL(31, 7, 0x600);
 		pll15_config.m = 0x1;
 		pll15_config.n = 0x3;
@@ -6259,7 +6260,7 @@
 {
 	if (cpu_is_apq8064()) {
 		vdd_sr2_pll.set_vdd = set_vdd_sr2_pll_8064;
-	} else if (cpu_is_msm8930() || cpu_is_msm8627()) {
+	} else if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
 		vdd_dig.set_vdd = set_vdd_dig_8930;
 		vdd_sr2_pll.set_vdd = set_vdd_sr2_pll_8930;
 	}
@@ -6289,7 +6290,7 @@
 	 * Change the freq tables and voltage requirements for
 	 * clocks which differ between 8960 and 8930.
 	 */
-	if (cpu_is_msm8930() || cpu_is_msm8627()) {
+	if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
 		gfx3d_clk.freq_tbl = clk_tbl_gfx3d_8930;
 
 		memcpy(gfx3d_clk.c.fmax, fmax_gfx3d_8930,
@@ -6334,7 +6335,8 @@
 		clk_set_rate(&usb_hs4_xcvr_clk.c, 60000000);
 	}
 	clk_set_rate(&usb_fs1_src_clk.c, 60000000);
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8627())
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_msm8627())
 		clk_set_rate(&usb_fs2_src_clk.c, 60000000);
 	clk_set_rate(&usb_hsic_xcvr_fs_clk.c, 60000000);
 	clk_set_rate(&usb_hsic_hsic_src_clk.c, 480000000);
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index a81343a..6c9a566 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -3775,6 +3775,18 @@
 	},
 };
 
+static struct branch_clk oxilicx_axi_clk = {
+	.cbcr_reg = OXILICX_AXI_CBCR,
+	.parent = &axi_clk_src.c,
+	.has_sibling = 1,
+	.base = &virt_bases[MMSS_BASE],
+	.c = {
+		.dbg_name = "oxilicx_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(oxilicx_axi_clk.c),
+	},
+};
+
 static struct branch_clk oxili_gfx3d_clk = {
 	.cbcr_reg = OXILI_GFX3D_CBCR,
 	.has_sibling = 1,
@@ -3783,6 +3795,7 @@
 		.dbg_name = "oxili_gfx3d_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(oxili_gfx3d_clk.c),
+		.depends = &oxilicx_axi_clk.c,
 	},
 };
 
@@ -3797,18 +3810,6 @@
 	},
 };
 
-static struct branch_clk oxilicx_axi_clk = {
-	.cbcr_reg = OXILICX_AXI_CBCR,
-	.parent = &axi_clk_src.c,
-	.has_sibling = 1,
-	.base = &virt_bases[MMSS_BASE],
-	.c = {
-		.dbg_name = "oxilicx_axi_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(oxilicx_axi_clk.c),
-	},
-};
-
 static struct clk_freq_tbl ftbl_audio_core_slimbus_core_clock[] = {
 	F_LPASS(28800000, lpapll0, 1, 15, 256),
 	F_END
@@ -4744,9 +4745,10 @@
 	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd928000.qcom,iommu"),
 	CLK_LOOKUP("core_clk", mdss_axi_clk.c, "fd928000.qcom,iommu"),
 	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "mdp.0"),
-	CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, ""),
-	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, ""),
-	CLK_LOOKUP("bus_clk", oxilicx_axi_clk.c, ""),
+	CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, "fdb00000.qcom,kgsl-3d0"),
+	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb00000.qcom,kgsl-3d0"),
+	CLK_LOOKUP("core_clk", oxilicx_axi_clk.c, "fdb10000.qcom,iommu"),
+	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb10000.qcom,iommu"),
 	CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc84000.qcom,iommu"),
 	CLK_LOOKUP("core_clk", venus0_axi_clk.c, "fdc84000.qcom,iommu"),
 	CLK_LOOKUP("bus_clk", venus0_axi_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index f5ce5a7..3c9bd36 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -154,16 +154,16 @@
 #define GCC_APCS_CLK_DIAG			REG_GCC(0x001C)
 
 /* MUX source input identifiers. */
-#define cxo_to_bb_mux		0
-#define pll8_to_bb_mux		3
-#define pll8_acpu_to_bb_mux	3
-#define pll14_to_bb_mux		4
-#define gnd_to_bb_mux		6
-#define cxo_to_xo_mux		0
-#define gnd_to_xo_mux		3
-#define cxo_to_lpa_mux		1
-#define pll4_to_lpa_mux		2
-#define gnd_to_lpa_mux		6
+#define cxo_to_bb_mux		  0
+#define pll8_to_bb_mux		  3
+#define pll8_activeonly_to_bb_mux 3
+#define pll14_to_bb_mux		  4
+#define gnd_to_bb_mux		  6
+#define cxo_to_xo_mux		  0
+#define gnd_to_xo_mux		  3
+#define cxo_to_lpa_mux		  1
+#define pll4_to_lpa_mux		  2
+#define gnd_to_lpa_mux		  6
 
 /* Test Vector Macros */
 #define TEST_TYPE_PER_LS	1
@@ -276,7 +276,7 @@
 	},
 };
 
-static struct pll_vote_clk pll0_acpu_clk = {
+static struct pll_vote_clk pll0_activeonly_clk = {
 	.en_reg = BB_PLL_ENA_SC0_REG,
 	.en_mask = BIT(0),
 	.status_reg = BB_PLL0_STATUS_REG,
@@ -284,10 +284,10 @@
 	.soft_vote = &soft_vote_pll0,
 	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
 	.c = {
-		.dbg_name = "pll0_acpu_clk",
+		.dbg_name = "pll0_activeonly_clk",
 		.rate = 276000000,
 		.ops = &clk_ops_pll_acpu_vote,
-		CLK_INIT(pll0_acpu_clk.c),
+		CLK_INIT(pll0_activeonly_clk.c),
 		.warned = true,
 	},
 };
@@ -326,7 +326,7 @@
 	},
 };
 
-static struct pll_vote_clk pll8_acpu_clk = {
+static struct pll_vote_clk pll8_activeonly_clk = {
 	.en_reg = BB_PLL_ENA_SC0_REG,
 	.en_mask = BIT(8),
 	.status_reg = BB_PLL8_STATUS_REG,
@@ -334,21 +334,21 @@
 	.soft_vote = &soft_vote_pll8,
 	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
 	.c = {
-		.dbg_name = "pll8_acpu_clk",
+		.dbg_name = "pll8_activeonly_clk",
 		.rate = 384000000,
 		.ops = &clk_ops_pll_acpu_vote,
-		CLK_INIT(pll8_acpu_clk.c),
+		CLK_INIT(pll8_activeonly_clk.c),
 		.warned = true,
 	},
 };
 
-static struct pll_clk pll9_acpu_clk = {
+static struct pll_clk pll9_activeonly_clk = {
 	.mode_reg = SC_PLL0_MODE_REG,
 	.c = {
-		.dbg_name = "pll9_acpu_clk",
+		.dbg_name = "pll9_activeonly_clk",
 		.rate = 440000000,
 		.ops = &clk_ops_local_pll,
-		CLK_INIT(pll9_acpu_clk.c),
+		CLK_INIT(pll9_activeonly_clk.c),
 		.warned = true,
 	},
 };
@@ -657,14 +657,20 @@
 		.ns_val = NS(23, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
 	}
 static struct clk_freq_tbl clk_tbl_usb[] = {
-	F_USB(       0, gnd,  1, 0,  0),
+	F_USB(       0,  gnd, 1, 0,  0),
 	F_USB(60000000, pll8, 1, 5, 32),
 	F_END
 };
 
+static struct clk_freq_tbl clk_tbl_usb_hs1_sys[] = {
+	F_USB(       0,		    gnd, 1, 0,  0),
+	F_USB(60000000, pll8_activeonly, 1, 5, 32),
+	F_END
+};
+
 static struct clk_freq_tbl clk_tbl_usb_hsic_sys[] = {
-	F_USB(       0,       gnd, 1, 0, 0),
-	F_USB(64000000, pll8_acpu, 1, 1, 6),
+	F_USB(       0,		    gnd, 1, 0, 0),
+	F_USB(64000000, pll8_activeonly, 1, 1, 6),
 	F_END
 };
 
@@ -708,7 +714,7 @@
 	.ns_mask = (BM(23, 16) | BM(6, 0)),
 	.mnd_en_mask = BIT(8),
 	.set_rate = set_rate_mnd,
-	.freq_tbl = clk_tbl_usb,
+	.freq_tbl = clk_tbl_usb_hs1_sys,
 	.current_freq = &rcg_dummy_freq,
 	.c = {
 		.dbg_name = "usb_hs1_sys_clk",
@@ -1617,9 +1623,9 @@
 	CLK_LOOKUP("pll8",	pll8_clk.c,	NULL),
 	CLK_LOOKUP("pll14",	pll14_clk.c,	NULL),
 
-	CLK_LOOKUP("pll0", pll0_acpu_clk.c, "acpu"),
-	CLK_LOOKUP("pll8", pll8_acpu_clk.c, "acpu"),
-	CLK_LOOKUP("pll9", pll9_acpu_clk.c, "acpu"),
+	CLK_LOOKUP("pll0", pll0_activeonly_clk.c, "acpu"),
+	CLK_LOOKUP("pll8", pll8_activeonly_clk.c, "acpu"),
+	CLK_LOOKUP("pll9", pll9_activeonly_clk.c, "acpu"),
 
 	CLK_LOOKUP("measure",	measure_clk.c,	"debug"),
 
@@ -1822,7 +1828,7 @@
 	pll9_lval = readl_relaxed(SC_PLL0_L_VAL_REG);
 
 	if (pll9_lval == 0x1C)
-		pll9_acpu_clk.c.rate = 550000000;
+		pll9_activeonly_clk.c.rate = 550000000;
 
 	/* Enable PLL4 source on the LPASS Primary PLL Mux */
 	regval = readl_relaxed(LCC_PRI_PLL_CLK_CTL_REG);
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 069d738..8a41a7c 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -95,14 +95,6 @@
 #define PCIE20_PHYS   0x1b500000
 #define PCIE20_SIZE   SZ_4K
 
-/* AXI address for PCIE device BAR resources */
-#define PCIE_AXI_BAR_PHYS   0x08000000
-#define PCIE_AXI_BAR_SIZE   SZ_8M
-
-/* AXI address for PCIE device config space */
-#define PCIE_AXI_CONF_PHYS   0x08c00000
-#define PCIE_AXI_CONF_SIZE   SZ_4K
-
 static struct msm_watchdog_pdata msm_watchdog_pdata = {
 	.pet_time = 10000,
 	.bark_time = 11000,
@@ -1639,13 +1631,13 @@
 
 static struct resource resources_msm_pcie[] = {
 	{
-		.name   = "parf",
+		.name   = "pcie_parf",
 		.start  = PCIE20_PARF_PHYS,
 		.end    = PCIE20_PARF_PHYS + PCIE20_PARF_SIZE - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 	{
-		.name   = "elbi",
+		.name   = "pcie_elbi",
 		.start  = PCIE20_ELBI_PHYS,
 		.end    = PCIE20_ELBI_PHYS + PCIE20_ELBI_SIZE - 1,
 		.flags  = IORESOURCE_MEM,
@@ -1656,18 +1648,6 @@
 		.end    = PCIE20_PHYS + PCIE20_SIZE - 1,
 		.flags  = IORESOURCE_MEM,
 	},
-	{
-		.name   = "axi_bar",
-		.start  = PCIE_AXI_BAR_PHYS,
-		.end    = PCIE_AXI_BAR_PHYS + PCIE_AXI_BAR_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	{
-		.name   = "axi_conf",
-		.start  = PCIE_AXI_CONF_PHYS,
-		.end    = PCIE_AXI_CONF_PHYS + PCIE_AXI_CONF_SIZE - 1,
-		.flags  = IORESOURCE_MEM,
-	},
 };
 
 struct platform_device msm_device_pcie = {
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 198cd38..8607177 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -491,12 +491,23 @@
 	int (*gpio_config)(int on);
 	int (*init_irq)(void);
 	bool (*check_hdcp_hw_support)(void);
+	bool is_mhl_enabled;
 };
 
 struct msm_mhl_platform_data {
 	int irq;
-	int (*gpio_setup)(int on);
-	void (*reset_pin)(int on);
+	/* GPIO no. for mhl intr */
+	uint32_t gpio_mhl_int;
+	/* GPIO no. for mhl block reset */
+	uint32_t gpio_mhl_reset;
+	/*
+	 * below gpios are specific to targets
+	 * that have the integrated MHL soln.
+	 */
+	/* GPIO no. for mhl block power */
+	uint32_t gpio_mhl_power;
+	/* GPIO no. for hdmi-mhl mux */
+	uint32_t gpio_hdmi_mhl_mux;
 };
 
 struct msm_i2c_platform_data {
diff --git a/arch/arm/mach-msm/include/mach/msm_pcie.h b/arch/arm/mach-msm/include/mach/msm_pcie.h
index 008c984..8bc4317 100644
--- a/arch/arm/mach-msm/include/mach/msm_pcie.h
+++ b/arch/arm/mach-msm/include/mach/msm_pcie.h
@@ -32,6 +32,8 @@
 /* msm pcie platform data */
 struct msm_pcie_platform {
 	struct msm_pcie_gpio_info_t  *gpio;
+	uint32_t                      axi_addr;
+	uint32_t                      axi_size;
 };
 
 #endif
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 6b4ce2a..15aebcd 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -73,6 +73,7 @@
 	MSM_CPU_7X25AB,
 	MSM_CPU_8064,
 	MSM_CPU_8930,
+	MSM_CPU_8930AA,
 	MSM_CPU_7X27AA,
 	MSM_CPU_9615,
 	MSM_CPU_8974,
@@ -262,6 +263,15 @@
 #endif
 }
 
+static inline int cpu_is_msm8930aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+	return read_msm_cpu_type() == MSM_CPU_8930AA;
+#else
+	return 0;
+#endif
+}
+
 static inline int cpu_is_msm8627(void)
 {
 /* 8930 and 8627 will share the same CONFIG_ARCH type unless otherwise needed */
diff --git a/arch/arm/mach-msm/modem-8960.c b/arch/arm/mach-msm/modem-8960.c
index 1132be2..fd7b7b5 100644
--- a/arch/arm/mach-msm/modem-8960.c
+++ b/arch/arm/mach-msm/modem-8960.c
@@ -281,8 +281,8 @@
 {
 	int ret;
 
-	if (!cpu_is_msm8960() && !cpu_is_msm8930() && !cpu_is_msm9615() &&
-	    !cpu_is_msm8627())
+	if (!cpu_is_msm8960() && !cpu_is_msm8930() && !cpu_is_msm8930aa() &&
+	    !cpu_is_msm9615() && !cpu_is_msm8627())
 		return -ENODEV;
 
 	ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
diff --git a/arch/arm/mach-msm/msm_watchdog.c b/arch/arm/mach-msm/msm_watchdog.c
index 7ac3f74..b471426 100644
--- a/arch/arm/mach-msm/msm_watchdog.c
+++ b/arch/arm/mach-msm/msm_watchdog.c
@@ -27,6 +27,7 @@
 #include <asm/hardware/gic.h>
 #include <mach/msm_iomap.h>
 #include <asm/mach-types.h>
+#include <asm/cacheflush.h>
 #include <mach/scm.h>
 #include <mach/socinfo.h>
 #include "msm_watchdog.h"
@@ -105,6 +106,7 @@
 /* Called from the FIQ bark handler */
 void msm_wdog_bark_fin(void)
 {
+	flush_cache_all();
 	pr_crit("\nApps Watchdog bark received - Calling Panic\n");
 	panic("Apps Watchdog Bark received\n");
 }
diff --git a/arch/arm/mach-msm/msm_xo.c b/arch/arm/mach-msm/msm_xo.c
index fb01427..9825ce5 100644
--- a/arch/arm/mach-msm/msm_xo.c
+++ b/arch/arm/mach-msm/msm_xo.c
@@ -234,8 +234,8 @@
 	struct msm_xo *xo = xo_voter->xo;
 	int is_d0 = xo == &msm_xo_sources[MSM_XO_TCXO_D0];
 	int needs_workaround = cpu_is_msm8960() || cpu_is_apq8064() ||
-			       cpu_is_msm8930() || cpu_is_msm9615() ||
-			       cpu_is_msm8627();
+			       cpu_is_msm8930() || cpu_is_msm8930aa() ||
+			       cpu_is_msm9615() || cpu_is_msm8627();
 
 	if (xo_voter->mode == mode)
 		return 0;
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index 5818bef..f105356 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/mach/pci.h>
 #include <mach/gpiomux.h>
@@ -72,6 +73,9 @@
 #define RD 0
 #define WR 1
 
+/* PCIE AXI address space */
+#define PCIE_AXI_CONF_SIZE   SZ_1M
+
 /* debug mask sys interface */
 static int msm_pcie_debug_mask;
 module_param_named(debug_mask, msm_pcie_debug_mask,
@@ -79,12 +83,15 @@
 
 /* resources from device file */
 enum msm_pcie_res {
+	/* platform defined resources */
 	MSM_PCIE_RES_PARF,
 	MSM_PCIE_RES_ELBI,
 	MSM_PCIE_RES_PCIE20,
-	MSM_PCIE_RES_AXI_BAR,
-	MSM_PCIE_RES_AXI_CONF,
-	MSM_PCIE_MAX_RES
+	MSM_PCIE_MAX_PLATFORM_RES,
+
+	/* other resources */
+	MSM_PCIE_RES_AXI_CONF = MSM_PCIE_MAX_PLATFORM_RES,
+	MSM_PCIE_MAX_RES,
 };
 
 /* msm pcie device data */
@@ -107,11 +114,10 @@
 
 /* resources */
 static struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
-	{"parf",     0, 0, 0},
-	{"elbi",     0, 0, 0},
-	{"pcie20",   0, 0, 0},
-	{"axi_bar",  0, 0, 0},
-	{"axi_conf", 0, 0, 0},
+	{"pcie_parf",     0, 0},
+	{"pcie_elbi",     0, 0},
+	{"pcie20",        0, 0},
+	{"pcie_axi_conf", 0, 0},
 };
 
 int msm_pcie_get_debug_mask(void)
@@ -350,8 +356,7 @@
 static void __init msm_pcie_config_controller(void)
 {
 	struct msm_pcie_dev_t *dev = &msm_pcie_dev;
-	struct msm_pcie_res_info_t *axi_bar = &dev->res[MSM_PCIE_RES_AXI_BAR];
-	struct msm_pcie_res_info_t *axi_conf = &dev->res[MSM_PCIE_RES_AXI_CONF];
+	struct resource *axi_conf = dev->res[MSM_PCIE_RES_AXI_CONF].resource;
 
 	/*
 	 * program and enable address translation region 0 (device config
@@ -384,9 +389,9 @@
 
 	writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_CTRL1);
 	writel_relaxed(BIT(31), dev->pcie20 + PCIE20_PLR_IATU_CTRL2);
-	writel_relaxed(axi_bar->start, dev->pcie20 + PCIE20_PLR_IATU_LBAR);
+	writel_relaxed(dev->axi_bar_start, dev->pcie20 + PCIE20_PLR_IATU_LBAR);
 	writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UBAR);
-	writel_relaxed(axi_bar->end, dev->pcie20 + PCIE20_PLR_IATU_LAR);
+	writel_relaxed(dev->axi_bar_end, dev->pcie20 + PCIE20_PLR_IATU_LAR);
 	writel_relaxed(MSM_PCIE_DEV_BAR_ADDR,
 		       dev->pcie20 + PCIE20_PLR_IATU_LTAR);
 	writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UTAR);
@@ -404,8 +409,15 @@
 	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
 		info = &dev->res[i];
 
-		res = platform_get_resource_byname(pdev,
-						   IORESOURCE_MEM, info->name);
+		if (i < MSM_PCIE_MAX_PLATFORM_RES) {
+			res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							   info->name);
+		} else {
+			res = dev->res[i].resource;
+			if (request_resource(&iomem_resource, res))
+				res = NULL;
+		}
+
 		if (!res) {
 			pr_err("can't get %s resource\n", info->name);
 			rc = -ENOMEM;
@@ -419,14 +431,15 @@
 			break;
 		}
 
-		info->start = res->start;
-		info->end = res->end;
+		info->resource = res;
 	}
 
 	if (rc) {
 		while (i--) {
 			iounmap(dev->res[i].base);
 			dev->res[i].base = NULL;
+			if (i >= MSM_PCIE_MAX_PLATFORM_RES)
+				release_resource(dev->res[i].resource);
 		}
 	} else {
 		dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
@@ -445,6 +458,8 @@
 	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
 		iounmap(msm_pcie_dev.res[i].base);
 		msm_pcie_dev.res[i].base = NULL;
+		if (i >= MSM_PCIE_MAX_PLATFORM_RES)
+			release_resource(msm_pcie_dev.res[i].resource);
 	}
 
 	msm_pcie_dev.parf = NULL;
@@ -463,6 +478,13 @@
 	if (nr != 0)
 		return 0;
 
+	/*
+	 * specify linux PCI framework to allocate device memory (BARs)
+	 * from msm_pcie_dev.dev_mem_res resource.
+	 */
+	sys->mem_offset = 0;
+	pci_add_resource(&sys->resources, &msm_pcie_dev.dev_mem_res);
+
 	/* assert PCIe reset link to keep EP in reset */
 	gpio_set_value_cansleep(dev->gpio[MSM_PCIE_GPIO_RST_N].num,
 				dev->gpio[MSM_PCIE_GPIO_RST_N].on);
@@ -556,7 +578,8 @@
 
 	PCIE_DBG("bus %d\n", nr);
 	if (nr == 0)
-		bus = pci_scan_bus(sys->busnr, &msm_pcie_ops, sys);
+		bus = pci_scan_root_bus(NULL, sys->busnr, &msm_pcie_ops, sys,
+					&sys->resources);
 
 	return bus;
 }
@@ -578,6 +601,7 @@
 static int __init msm_pcie_probe(struct platform_device *pdev)
 {
 	const struct msm_pcie_platform *pdata;
+	struct resource *res;
 	int rc;
 
 	PCIE_DBG("\n");
@@ -589,6 +613,31 @@
 	msm_pcie_dev.clk = msm_pcie_clk_info;
 	msm_pcie_dev.res = msm_pcie_res_info;
 
+	/* device memory resource */
+	res = &msm_pcie_dev.dev_mem_res;
+	res->name = "pcie_dev_mem";
+	res->start = MSM_PCIE_DEV_BAR_ADDR;
+	res->end = res->start + pdata->axi_size - 1;
+	res->flags = IORESOURCE_MEM;
+
+	/* axi address space = axi bar space + axi config space */
+	msm_pcie_dev.axi_bar_start = pdata->axi_addr;
+	msm_pcie_dev.axi_bar_end = pdata->axi_addr + pdata->axi_size -
+					PCIE_AXI_CONF_SIZE - 1;
+
+	/* axi config space resource */
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res) {
+		pr_err("can't allocate memory\n");
+		return -ENOMEM;
+	}
+
+	msm_pcie_dev.res[MSM_PCIE_RES_AXI_CONF].resource = res;
+	res->name = msm_pcie_dev.res[MSM_PCIE_RES_AXI_CONF].name;
+	res->start = msm_pcie_dev.axi_bar_end + 1;
+	res->end = res->start + PCIE_AXI_CONF_SIZE - 1;
+	res->flags = IORESOURCE_MEM;
+
 	rc = msm_pcie_get_resources(msm_pcie_dev.pdev);
 	if (rc)
 		return rc;
@@ -632,7 +681,6 @@
 static int __init msm_pcie_init(void)
 {
 	PCIE_DBG("\n");
-	pcibios_min_io = 0x10000000;
 	pcibios_min_mem = 0x10000000;
 	return platform_driver_probe(&msm_pcie_driver, msm_pcie_probe);
 }
@@ -649,22 +697,30 @@
 			msm_pcie_fixup_early);
 
 /*
- * actual physical (BAR) address of the device resources starts from 0x10xxxxxx;
- * the system axi address for the device resources starts from 0x08xxxxxx;
- * correct the device resource structure here; address translation unit handles
- * the required translations
+ * actual physical (BAR) address of the device resources starts from
+ * MSM_PCIE_DEV_BAR_ADDR; the system axi address for the device resources starts
+ * from msm_pcie_dev.axi_bar_start; correct the device resource structure here;
+ * address translation unit handles the required translations
  */
 static void __devinit msm_pcie_fixup_final(struct pci_dev *dev)
 {
 	int i;
+	struct resource *res;
 
 	PCIE_DBG("vendor 0x%x 0x%x\n", dev->vendor, dev->device);
 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (dev->resource[i].start & 0xFF000000) {
-			dev->resource[i].start &= 0x00FFFFFF;
-			dev->resource[i].start |= 0x08000000;
-			dev->resource[i].end &= 0x00FFFFFF;
-			dev->resource[i].end |= 0x08000000;
+		res = &dev->resource[i];
+		if (res->start & MSM_PCIE_DEV_BAR_ADDR) {
+			res->start -= MSM_PCIE_DEV_BAR_ADDR;
+			res->start += msm_pcie_dev.axi_bar_start;
+			res->end -= MSM_PCIE_DEV_BAR_ADDR;
+			res->end += msm_pcie_dev.axi_bar_start;
+
+			/* If Root Port, request for the changed resource */
+			if ((dev->vendor == PCIE_VENDOR_ID_RCP) &&
+			    (dev->device == PCIE_DEVICE_ID_RCP)) {
+				insert_resource(&iomem_resource, res);
+			}
 		}
 	}
 }
diff --git a/arch/arm/mach-msm/pcie.h b/arch/arm/mach-msm/pcie.h
index 4866ec5..fba6b11 100644
--- a/arch/arm/mach-msm/pcie.h
+++ b/arch/arm/mach-msm/pcie.h
@@ -45,10 +45,9 @@
 
 /* resource info structure */
 struct msm_pcie_res_info_t {
-	char          *name;
-	uint32_t       start;
-	uint32_t       end;
-	void __iomem  *base;
+	char            *name;
+	struct resource *resource;
+	void __iomem    *base;
 };
 
 /* msm pcie device structure */
@@ -64,6 +63,11 @@
 	void __iomem                 *elbi;
 	void __iomem                 *pcie20;
 	void __iomem                 *axi_conf;
+
+	uint32_t                      axi_bar_start;
+	uint32_t                      axi_bar_end;
+
+	struct resource               dev_mem_res;
 };
 
 extern uint32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev);
diff --git a/arch/arm/mach-msm/pil-mba.c b/arch/arm/mach-msm/pil-mba.c
index 7405ab9..d0ba7d0 100644
--- a/arch/arm/mach-msm/pil-mba.c
+++ b/arch/arm/mach-msm/pil-mba.c
@@ -208,6 +208,8 @@
 
 static int __devexit pil_mba_driver_exit(struct platform_device *pdev)
 {
+	struct mba_data *drv = platform_get_drvdata(pdev);
+	msm_pil_unregister(drv->pil);
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/pil-vidc.c b/arch/arm/mach-msm/pil-vidc.c
index ceb9bcd..e4c6a2d 100644
--- a/arch/arm/mach-msm/pil-vidc.c
+++ b/arch/arm/mach-msm/pil-vidc.c
@@ -67,7 +67,6 @@
 {
 	struct pil_desc *desc;
 	struct vidc_data *drv;
-	int ret;
 
 	if (pas_supported(PAS_VIDC) < 0)
 		return -ENOSYS;
@@ -80,44 +79,29 @@
 	if (!drv)
 		return -ENOMEM;
 	platform_set_drvdata(pdev, drv);
-	drv->smmu_iface = clk_get(&pdev->dev, "smmu_iface_clk");
-	if (IS_ERR(drv->smmu_iface)) {
-		dev_err(&pdev->dev, "failed to get smmu interface clock\n");
-		ret = PTR_ERR(drv->smmu_iface);
-		goto err_smmu;
-	}
-	drv->core = clk_get(&pdev->dev, "core_clk");
-	if (IS_ERR(drv->core)) {
-		dev_err(&pdev->dev, "failed to get core clock\n");
-		ret = PTR_ERR(drv->core);
-		goto err_core;
-	}
+
+	drv->smmu_iface = devm_clk_get(&pdev->dev, "smmu_iface_clk");
+	if (IS_ERR(drv->smmu_iface))
+		return PTR_ERR(drv->smmu_iface);
+
+	drv->core = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(drv->core))
+		return PTR_ERR(drv->core);
 
 	desc->name = "vidc";
 	desc->dev = &pdev->dev;
 	desc->ops = &pil_vidc_ops;
 	desc->owner = THIS_MODULE;
 	drv->pil = msm_pil_register(desc);
-	if (IS_ERR(drv->pil)) {
-		ret = PTR_ERR(drv->pil);
-		goto err_register;
-	}
+	if (IS_ERR(drv->pil))
+		return PTR_ERR(drv->pil);
 	return 0;
-
-err_register:
-	clk_put(drv->core);
-err_core:
-	clk_put(drv->smmu_iface);
-err_smmu:
-	return ret;
 }
 
 static int __devexit pil_vidc_driver_exit(struct platform_device *pdev)
 {
 	struct vidc_data *drv = platform_get_drvdata(pdev);
 	msm_pil_unregister(drv->pil);
-	clk_put(drv->smmu_iface);
-	clk_put(drv->core);
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 428b998..48a236f 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -142,8 +142,8 @@
 	if (machine_is_msm8974_sim())
 		return krait_release_secondary_sim(0xf9088000, cpu);
 
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064() ||
-	    cpu_is_msm8627())
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_apq8064() || cpu_is_msm8627())
 		return krait_release_secondary(0x02088000, cpu);
 
 	WARN(1, "unknown CPU case in release_secondary\n");
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 595484e..15a0b6a 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -247,6 +247,7 @@
 			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
 			     !msm_pm_sleep_modes[idx].suspend_supported)
 				continue;
+			sysfs_attr_init(&mode->kas[j].ka.attr);
 			mode->kas[j].cpu = cpu;
 			mode->kas[j].ka.attr.mode = 0644;
 			mode->kas[j].ka.show = msm_pm_mode_attr_show;
diff --git a/arch/arm/mach-msm/qdsp5v2/adsp.c b/arch/arm/mach-msm/qdsp5v2/adsp.c
index acd9c4c..8c1413c 100644
--- a/arch/arm/mach-msm/qdsp5v2/adsp.c
+++ b/arch/arm/mach-msm/qdsp5v2/adsp.c
@@ -883,7 +883,7 @@
 			rc = -ETIMEDOUT;
 		}
 		if (module->open_count++ == 0 && module->clk)
-			clk_enable(module->clk);
+			clk_prepare_enable(module->clk);
 
 		mutex_lock(&adsp_open_lock);
 		if (adsp_open_count++ == 0)
@@ -938,7 +938,7 @@
 		mutex_lock(&module->lock);
 		module->state = ADSP_STATE_DISABLED;
 		if (--module->open_count == 0 && module->clk)
-			clk_disable(module->clk);
+			clk_disable_unprepare(module->clk);
 		mutex_unlock(&module->lock);
 		mutex_lock(&adsp_open_lock);
 		if (--adsp_open_count == 0) {
diff --git a/arch/arm/mach-msm/qdsp5v2/lpa.c b/arch/arm/mach-msm/qdsp5v2/lpa.c
index c4e0fee..98297e3 100644
--- a/arch/arm/mach-msm/qdsp5v2/lpa.c
+++ b/arch/arm/mach-msm/qdsp5v2/lpa.c
@@ -73,7 +73,7 @@
 		MM_ERR("failed to get adsp clk\n");
 		goto error;
 	}
-	clk_enable(adsp_clk);
+	clk_prepare_enable(adsp_clk);
 	lpa_enable_codec(lpa, 0);
 	LPA_REG_WRITEL(lpa, (LPA_OBUF_RESETS_MISR_RESET |
 		LPA_OBUF_RESETS_OVERALL_RESET), LPA_OBUF_RESETS);
@@ -83,7 +83,7 @@
 
 	LPA_REG_WRITEL(lpa, LPA_OBUF_ACK_RESET_DONE_BMSK, LPA_OBUF_ACK);
 	mb();
-	clk_disable(adsp_clk);
+	clk_disable_unprepare(adsp_clk);
 	clk_put(adsp_clk);
 error:
 	return;
diff --git a/arch/arm/mach-msm/qdsp5v2/snddev_ecodec.c b/arch/arm/mach-msm/qdsp5v2/snddev_ecodec.c
index a5da912..943489a 100644
--- a/arch/arm/mach-msm/qdsp5v2/snddev_ecodec.c
+++ b/arch/arm/mach-msm/qdsp5v2/snddev_ecodec.c
@@ -60,7 +60,7 @@
 			goto done;
 		}
 		/* config clocks */
-		clk_enable(drv->lpa_core_clk);
+		clk_prepare_enable(drv->lpa_core_clk);
 
 		/*if long sync is selected in aux PCM interface
 		ecodec clock is updated to work with 128KHz,
@@ -96,7 +96,7 @@
 		}
 
 		/* enable ecodec clk */
-		clk_enable(drv->ecodec_clk);
+		clk_prepare_enable(drv->ecodec_clk);
 
 		/* let ADSP configure AUX PCM regs */
 		aux_codec_adsp_codec_ctl_en(ADSP_CTL);
@@ -109,7 +109,7 @@
 		audio_interct_tpcm_source(AUDIO_ADSP_A);
 		audio_interct_rpcm_source(AUDIO_ADSP_A);
 
-		clk_disable(drv->lpa_core_clk);
+		clk_disable_unprepare(drv->lpa_core_clk);
 
 		/* send AUX_CODEC_CONFIG to AFE */
 		rc = afe_config_aux_codec(ecodec->data->conf_pcm_ctl_val,
@@ -126,7 +126,7 @@
 	if (IS_ERR_VALUE(rc)) {
 		if (!drv->tx_active) {
 			aux_pcm_gpios_free();
-			clk_disable(drv->ecodec_clk);
+			clk_disable_unprepare(drv->ecodec_clk);
 		}
 		goto done;
 	}
@@ -136,7 +136,7 @@
 
 error:
 	aux_pcm_gpios_free();
-	clk_disable(drv->ecodec_clk);
+	clk_disable_unprepare(drv->ecodec_clk);
 done:
 	return rc;
 }
@@ -148,7 +148,7 @@
 	/* free GPIO */
 	if (!drv->tx_active) {
 		aux_pcm_gpios_free();
-		clk_disable(drv->ecodec_clk);
+		clk_disable_unprepare(drv->ecodec_clk);
 	}
 
 	/* disable AFE */
@@ -176,7 +176,7 @@
 			goto done;
 		}
 		/* config clocks */
-		clk_enable(drv->lpa_core_clk);
+		clk_prepare_enable(drv->lpa_core_clk);
 
 		/*if long sync is selected in aux PCM interface
 		ecodec clock is updated to work with 128KHz,
@@ -212,7 +212,7 @@
 		}
 
 		/* enable ecodec clk */
-		clk_enable(drv->ecodec_clk);
+		clk_prepare_enable(drv->ecodec_clk);
 
 		/* let ADSP configure AUX PCM regs */
 		aux_codec_adsp_codec_ctl_en(ADSP_CTL);
@@ -225,7 +225,7 @@
 		audio_interct_tpcm_source(AUDIO_ADSP_A);
 		audio_interct_rpcm_source(AUDIO_ADSP_A);
 
-		clk_disable(drv->lpa_core_clk);
+		clk_disable_unprepare(drv->lpa_core_clk);
 
 		/* send AUX_CODEC_CONFIG to AFE */
 		rc = afe_config_aux_codec(ecodec->data->conf_pcm_ctl_val,
@@ -242,7 +242,7 @@
 	if (IS_ERR_VALUE(rc)) {
 		if (!drv->rx_active) {
 			aux_pcm_gpios_free();
-			clk_disable(drv->ecodec_clk);
+			clk_disable_unprepare(drv->ecodec_clk);
 		}
 		goto done;
 	}
@@ -251,7 +251,7 @@
 	return 0;
 
 error:
-	clk_disable(drv->ecodec_clk);
+	clk_disable_unprepare(drv->ecodec_clk);
 	aux_pcm_gpios_free();
 done:
 	return rc;
@@ -264,7 +264,7 @@
 	/* free GPIO */
 	if (!drv->rx_active) {
 		aux_pcm_gpios_free();
-		clk_disable(drv->ecodec_clk);
+		clk_disable_unprepare(drv->ecodec_clk);
 	}
 
 	/* disable AFE */
diff --git a/arch/arm/mach-msm/qdsp5v2/snddev_icodec.c b/arch/arm/mach-msm/qdsp5v2/snddev_icodec.c
index 80c9a01..c416c07 100644
--- a/arch/arm/mach-msm/qdsp5v2/snddev_icodec.c
+++ b/arch/arm/mach-msm/qdsp5v2/snddev_icodec.c
@@ -199,12 +199,12 @@
 		SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
 	if (IS_ERR_VALUE(trc))
 		goto error_invalid_freq;
-	clk_enable(drv->rx_mclk);
-	clk_enable(drv->rx_sclk);
+	clk_prepare_enable(drv->rx_mclk);
+	clk_prepare_enable(drv->rx_sclk);
 	/* clk_set_rate(drv->lpa_codec_clk, 1); */ /* Remove if use pcom */
-	clk_enable(drv->lpa_p_clk);
-	clk_enable(drv->lpa_codec_clk);
-	clk_enable(drv->lpa_core_clk);
+	clk_prepare_enable(drv->lpa_p_clk);
+	clk_prepare_enable(drv->lpa_codec_clk);
+	clk_prepare_enable(drv->lpa_core_clk);
 
 	/* Enable LPA sub system
 	 */
@@ -263,11 +263,11 @@
 error_adie:
 	lpa_put(drv->lpa);
 error_lpa:
-	clk_disable(drv->lpa_p_clk);
-	clk_disable(drv->lpa_codec_clk);
-	clk_disable(drv->lpa_core_clk);
-	clk_disable(drv->rx_sclk);
-	clk_disable(drv->rx_mclk);
+	clk_disable_unprepare(drv->lpa_p_clk);
+	clk_disable_unprepare(drv->lpa_codec_clk);
+	clk_disable_unprepare(drv->lpa_core_clk);
+	clk_disable_unprepare(drv->rx_sclk);
+	clk_disable_unprepare(drv->rx_mclk);
 error_invalid_freq:
 
 	MM_ERR("encounter error\n");
@@ -307,8 +307,8 @@
 		SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
 	if (IS_ERR_VALUE(trc))
 		goto error_invalid_freq;
-	clk_enable(drv->tx_mclk);
-	clk_enable(drv->tx_sclk);
+	clk_prepare_enable(drv->tx_mclk);
+	clk_prepare_enable(drv->tx_sclk);
 
 	/* Set MI2S */
 	mi2s_set_codec_input_path((icodec->data->channel_mode ==
@@ -344,8 +344,8 @@
 	adie_codec_close(icodec->adie_path);
 	icodec->adie_path = NULL;
 error_adie:
-	clk_disable(drv->tx_sclk);
-	clk_disable(drv->tx_mclk);
+	clk_disable_unprepare(drv->tx_sclk);
+	clk_disable_unprepare(drv->tx_mclk);
 error_invalid_freq:
 
 	/* Disable mic bias */
@@ -414,14 +414,14 @@
 	lpa_put(drv->lpa);
 
 	/* Disable LPA clocks */
-	clk_disable(drv->lpa_p_clk);
-	clk_disable(drv->lpa_codec_clk);
-	clk_disable(drv->lpa_core_clk);
+	clk_disable_unprepare(drv->lpa_p_clk);
+	clk_disable_unprepare(drv->lpa_codec_clk);
+	clk_disable_unprepare(drv->lpa_core_clk);
 
 	/* Disable MI2S RX master block */
 	/* Disable MI2S RX bit clock */
-	clk_disable(drv->rx_sclk);
-	clk_disable(drv->rx_mclk);
+	clk_disable_unprepare(drv->rx_sclk);
+	clk_disable_unprepare(drv->rx_mclk);
 
 	icodec->enabled = 0;
 
@@ -452,8 +452,8 @@
 
 	/* Disable MI2S TX master block */
 	/* Disable MI2S TX bit clock */
-	clk_disable(drv->tx_sclk);
-	clk_disable(drv->tx_mclk);
+	clk_disable_unprepare(drv->tx_sclk);
+	clk_disable_unprepare(drv->tx_mclk);
 
 	/* Disable mic bias */
 	for (i = 0; i < icodec->data->pmctl_id_sz; i++) {
@@ -889,8 +889,8 @@
 		/* enable MI2S RX bit clock */
 		clk_set_rate(drv->rx_mclk,
 			SNDDEV_ICODEC_CLK_RATE(8000));
-		clk_enable(drv->rx_mclk);
-		clk_enable(drv->rx_sclk);
+		clk_prepare_enable(drv->rx_mclk);
+		clk_prepare_enable(drv->rx_sclk);
 
 		MM_INFO("configure ADIE RX path\n");
 		/* Configure ADIE */
@@ -905,8 +905,8 @@
 		/* enable MI2S TX bit clock */
 		clk_set_rate(drv->tx_mclk,
 			SNDDEV_ICODEC_CLK_RATE(8000));
-		clk_enable(drv->tx_mclk);
-		clk_enable(drv->tx_sclk);
+		clk_prepare_enable(drv->tx_mclk);
+		clk_prepare_enable(drv->tx_sclk);
 
 		MM_INFO("configure ADIE TX path\n");
 		/* Configure ADIE */
@@ -927,13 +927,13 @@
 
 		/* Disable MI2S RX master block */
 		/* Disable MI2S RX bit clock */
-		clk_disable(drv->rx_sclk);
-		clk_disable(drv->rx_mclk);
+		clk_disable_unprepare(drv->rx_sclk);
+		clk_disable_unprepare(drv->rx_mclk);
 
 		/* Disable MI2S TX master block */
 		/* Disable MI2S TX bit clock */
-		clk_disable(drv->tx_sclk);
-		clk_disable(drv->tx_mclk);
+		clk_disable_unprepare(drv->tx_sclk);
+		clk_disable_unprepare(drv->tx_mclk);
 	}
 }
 
@@ -955,11 +955,11 @@
 		SNDDEV_ICODEC_CLK_RATE(8000));
 		if (IS_ERR_VALUE(trc))
 			MM_ERR("failed to set clk rate\n");
-		clk_enable(drv->rx_mclk);
-		clk_enable(drv->rx_sclk);
-		clk_enable(drv->lpa_p_clk);
-		clk_enable(drv->lpa_codec_clk);
-		clk_enable(drv->lpa_core_clk);
+		clk_prepare_enable(drv->rx_mclk);
+		clk_prepare_enable(drv->rx_sclk);
+		clk_prepare_enable(drv->lpa_p_clk);
+		clk_prepare_enable(drv->lpa_codec_clk);
+		clk_prepare_enable(drv->lpa_core_clk);
 		/* Enable LPA sub system
 		 */
 		drv->lpa = lpa_get();
@@ -1003,8 +1003,8 @@
 		/* enable MI2S TX bit clock */
 		clk_set_rate(drv->tx_mclk,
 			SNDDEV_ICODEC_CLK_RATE(8000));
-		clk_enable(drv->tx_mclk);
-		clk_enable(drv->tx_sclk);
+		clk_prepare_enable(drv->tx_mclk);
+		clk_prepare_enable(drv->tx_sclk);
 		/* Set MI2S */
 		mi2s_set_codec_input_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT);
 		MM_INFO("configure ADIE TX path\n");
@@ -1048,14 +1048,14 @@
 		lpa_put(drv->lpa);
 
 		/* Disable LPA clocks */
-		clk_disable(drv->lpa_p_clk);
-		clk_disable(drv->lpa_codec_clk);
-		clk_disable(drv->lpa_core_clk);
+		clk_disable_unprepare(drv->lpa_p_clk);
+		clk_disable_unprepare(drv->lpa_codec_clk);
+		clk_disable_unprepare(drv->lpa_core_clk);
 
 		/* Disable MI2S RX master block */
 		/* Disable MI2S RX bit clock */
-		clk_disable(drv->rx_sclk);
-		clk_disable(drv->rx_mclk);
+		clk_disable_unprepare(drv->rx_sclk);
+		clk_disable_unprepare(drv->rx_mclk);
 
 		pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID,
 			PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE);
@@ -1069,8 +1069,8 @@
 		adie_codec_close(debugfs_tx_adie);
 		/* Disable MI2S TX master block */
 		/* Disable MI2S TX bit clock */
-		clk_disable(drv->tx_sclk);
-		clk_disable(drv->tx_mclk);
+		clk_disable_unprepare(drv->tx_sclk);
+		clk_disable_unprepare(drv->tx_mclk);
 		pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF);
 		MM_INFO("AFE loopback disabled\n");
 	}
diff --git a/arch/arm/mach-msm/qdsp5v2/snddev_mi2s.c b/arch/arm/mach-msm/qdsp5v2/snddev_mi2s.c
index 939cc8b..b5f3b66 100644
--- a/arch/arm/mach-msm/qdsp5v2/snddev_mi2s.c
+++ b/arch/arm/mach-msm/qdsp5v2/snddev_mi2s.c
@@ -154,12 +154,12 @@
 			mutex_unlock(&drv->lock);
 			return -EIO;
 		}
-		clk_enable(drv->mclk);
-		clk_enable(drv->sclk);
+		clk_prepare_enable(drv->mclk);
+		clk_prepare_enable(drv->sclk);
 		drv->clocks_enabled = 1;
-		MM_DBG("%s: clks enabled \n", __func__);
+		MM_DBG("%s: clks enabled\n", __func__);
 	} else
-		MM_DBG("%s: clks already enabled \n", __func__);
+		MM_DBG("%s: clks already enabled\n", __func__);
 
 	if (snddev_mi2s_data->capability & SNDDEV_CAP_RX) {
 
@@ -225,8 +225,8 @@
 
 mi2s_data_gpio_failure:
 	if (!drv->sd_lines_used) {
-		clk_disable(drv->sclk);
-		clk_disable(drv->mclk);
+		clk_disable_unprepare(drv->sclk);
+		clk_disable_unprepare(drv->mclk);
 		drv->clocks_enabled = 0;
 		mi2s_unconfig_clk_gpio();
 	}
@@ -268,8 +268,8 @@
 	mi2s_unconfig_data_gpio(dir, snddev_mi2s_data->sd_lines);
 
 	if (!drv->sd_lines_used) {
-		clk_disable(drv->sclk);
-		clk_disable(drv->mclk);
+		clk_disable_unprepare(drv->sclk);
+		clk_disable_unprepare(drv->mclk);
 		drv->clocks_enabled = 0;
 		mi2s_unconfig_clk_gpio();
 	}
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 0edea3f..b8bb27b 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -52,7 +52,7 @@
 #define ERR "err\0"
 #define MAX_ERR_BUFFER_SIZE 60
 
-static struct atomic_notifier_head msm_rpm_sleep_notifier;
+static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
 static bool standalone;
 
 int msm_rpm_register_notifier(struct notifier_block *nb)
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index f9edfc9..667ede0 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -1105,8 +1105,8 @@
 
 static int __init msm_rpmrs_l2_init(void)
 {
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064() ||
-	    cpu_is_msm8627()) {
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_apq8064() || cpu_is_msm8627()) {
 
 		msm_pm_set_l2_flush_flag(0);
 
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index f5f76f7..fdbc387 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -767,14 +767,14 @@
 	}
 	D_STATUS("Begin %s on smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
 
-	wake_lock_init(&smd_pkt_devp->pa_wake_lock, WAKE_LOCK_SUSPEND,
-			smd_pkt_dev_name[smd_pkt_devp->i]);
-	INIT_WORK(&smd_pkt_devp->packet_arrival_work, packet_arrival_worker);
-
 	file->private_data = smd_pkt_devp;
 
 	mutex_lock(&smd_pkt_devp->ch_lock);
 	if (smd_pkt_devp->ch == 0) {
+		wake_lock_init(&smd_pkt_devp->pa_wake_lock, WAKE_LOCK_SUSPEND,
+				smd_pkt_dev_name[smd_pkt_devp->i]);
+		INIT_WORK(&smd_pkt_devp->packet_arrival_work,
+				packet_arrival_worker);
 		init_completion(&smd_pkt_devp->ch_allocated);
 		smd_pkt_devp->driver.probe = smd_pkt_dummy_probe;
 		scnprintf(smd_pkt_devp->pdriver_name, PDRIVER_NAME_MAX_SIZE,
@@ -881,10 +881,11 @@
 		smd_pkt_devp->driver.probe = NULL;
 	}
 out:
+	if (!smd_pkt_devp->ch)
+		wake_lock_destroy(&smd_pkt_devp->pa_wake_lock);
+
 	mutex_unlock(&smd_pkt_devp->ch_lock);
 
-	if (r < 0)
-		wake_lock_destroy(&smd_pkt_devp->pa_wake_lock);
 
 	return r;
 }
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index c37943c..08af4d4 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -259,6 +259,9 @@
 	/* 9625 IDs */
 	[134] = MSM_CPU_9625,
 
+	/* 8930AA ID */
+	[142] = MSM_CPU_8930AA,
+
 	/* Uninitialized IDs are not known to run Linux.
 	   MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	   considered as unknown CPU. */
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index fb560ba..9d2aedc 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -136,8 +136,8 @@
 
 	reg = saw_bases[cpu];
 
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064() ||
-	    cpu_is_msm8627()) {
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_apq8064() || cpu_is_msm8627()) {
 		val = 0xA4;
 		reg += 0x14;
 		timeout = 512;
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index 03db126..a9a25b3 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -568,8 +568,8 @@
 		n_restart_orders = ARRAY_SIZE(orders_8x60_all);
 	}
 
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615() ||
-			cpu_is_apq8064() || cpu_is_msm8627()) {
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_msm9615() || cpu_is_apq8064() || cpu_is_msm8627()) {
 		if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
 			restart_orders = restart_orders_8960_sglte;
 			n_restart_orders =
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index fcbd432..8d31683 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -964,8 +964,8 @@
 	if (!smp_processor_id())
 		return 0;
 
-	if (cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064()
-			|| cpu_is_msm8930() || cpu_is_msm8627())
+	if (cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
+	    cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627())
 		__raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
 	if (__get_cpu_var(first_boot)) {
@@ -1062,7 +1062,7 @@
 		gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 		dgt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 	} else if (cpu_is_msm8960() || cpu_is_apq8064() || cpu_is_msm8930() ||
-		   cpu_is_msm8627()) {
+		   cpu_is_msm8930aa() || cpu_is_msm8627()) {
 		global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
 		dgt->freq = 6750000;
 		__raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
@@ -1071,7 +1071,8 @@
 		gpt->freq = 32765;
 		gpt_hz = 32765;
 		sclk_hz = 32765;
-		if (!cpu_is_msm8930() && !cpu_is_msm8627()) {
+		if (!cpu_is_msm8930() && !cpu_is_msm8930aa() &&
+		    !cpu_is_msm8627()) {
 			gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 			dgt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 		}
@@ -1122,8 +1123,8 @@
 
 		ce->irq = clock->irq;
 		if (cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
-				cpu_is_msm8930() || cpu_is_msm9615() ||
-				cpu_is_msm8625() || cpu_is_msm8627()) {
+		    cpu_is_msm8930() || cpu_is_msm8930aa() ||
+		    cpu_is_msm9615() || cpu_is_msm8625() || cpu_is_msm8627()) {
 			clock->percpu_evt = alloc_percpu(struct clock_event_device *);
 			if (!clock->percpu_evt) {
 				pr_err("msm_timer_init: memory allocation "
diff --git a/drivers/bluetooth/hci_smd.c b/drivers/bluetooth/hci_smd.c
index 66bd739..6030520 100644
--- a/drivers/bluetooth/hci_smd.c
+++ b/drivers/bluetooth/hci_smd.c
@@ -42,6 +42,7 @@
  */
 
 #define RX_Q_MONITOR		(500)	/* 500 milli second */
+#define HCI_REGISTER_SET	0
 
 
 static int hcismd_set;
@@ -57,7 +58,7 @@
 
 struct hci_smd_data {
 	struct hci_dev *hdev;
-
+	unsigned long flags;
 	struct smd_channel *event_channel;
 	struct smd_channel *data_channel;
 	struct wake_lock wake_lock_tx;
@@ -403,11 +404,16 @@
 	struct hci_dev *hdev;
 
 	hdev = hsmd->hdev;
-
+	if (test_and_set_bit(HCI_REGISTER_SET, &hsmd->flags)) {
+		BT_ERR("HCI device registered already");
+		return 0;
+	} else
+		BT_INFO("HCI device registration is starting");
 	if (hci_register_dev(hdev) < 0) {
 		BT_ERR("Can't register HCI device");
 		hci_free_dev(hdev);
 		hsmd->hdev = NULL;
+		clear_bit(HCI_REGISTER_SET, &hsmd->flags);
 		return -ENODEV;
 	}
 	return 0;
@@ -473,6 +479,11 @@
 {
 	tasklet_kill(&hs.rx_task);
 
+	if (!test_and_clear_bit(HCI_REGISTER_SET, &hsmd->flags)) {
+		BT_ERR("HCI device un-registered already");
+		return;
+	} else
+		BT_INFO("HCI device un-registration going on");
 	if (hsmd->hdev) {
 		if (hci_unregister_dev(hsmd->hdev) < 0)
 			BT_ERR("Can't unregister HCI device %s",
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 6a7b931..2f356f0 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -47,7 +47,7 @@
 #define APPS_PROC		1
 #define QDSP_PROC		2
 #define WCNSS_PROC		3
-#define MSG_MASK_SIZE 9500
+#define MSG_MASK_SIZE 10000
 #define LOG_MASK_SIZE 8000
 #define EVENT_MASK_SIZE 1000
 #define USER_SPACE_DATA 8000
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 1c0f14b..f9c5006 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -133,6 +133,7 @@
 		case MSM_CPU_8064:
 			return APQ8064_TOOLS_ID;
 		case MSM_CPU_8930:
+		case MSM_CPU_8930AA:
 			return MSM8930_TOOLS_ID;
 		case MSM_CPU_8974:
 			return MSM8974_TOOLS_ID;
@@ -157,6 +158,7 @@
 	case MSM_CPU_8960:
 	case MSM_CPU_8064:
 	case MSM_CPU_8930:
+	case MSM_CPU_8930AA:
 	case MSM_CPU_8627:
 	case MSM_CPU_9615:
 	case MSM_CPU_8974:
@@ -175,8 +177,8 @@
 {
 	if (driver->use_device_tree)
 		return 1;
-	else if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615() ||
-		cpu_is_apq8064() || cpu_is_msm8627())
+	else if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+		cpu_is_msm9615() || cpu_is_apq8064() || cpu_is_msm8627())
 		return 1;
 	else
 		return 0;
@@ -511,6 +513,7 @@
 	CREATE_MSG_MASK_TBL_ROW(20);
 	CREATE_MSG_MASK_TBL_ROW(21);
 	CREATE_MSG_MASK_TBL_ROW(22);
+	CREATE_MSG_MASK_TBL_ROW(23);
 }
 
 static void diag_set_msg_mask(int rt_mask)
@@ -1229,7 +1232,8 @@
 		driver->apps_rsp_buf[1] = 0x1;
 		driver->apps_rsp_buf[2] = 0x1;
 		driver->apps_rsp_buf[3] = 0x0;
-		*(int *)(driver->apps_rsp_buf + 4) = MSG_MASK_TBL_CNT;
+		/* Subtract 1 so the OEM-reserved SSID range is not advertised */
+		*(int *)(driver->apps_rsp_buf + 4) = MSG_MASK_TBL_CNT - 1;
 		*(uint16_t *)(driver->apps_rsp_buf + 8) = MSG_SSID_0;
 		*(uint16_t *)(driver->apps_rsp_buf + 10) = MSG_SSID_0_LAST;
 		*(uint16_t *)(driver->apps_rsp_buf + 12) = MSG_SSID_1;
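
Note on the diag change above: the reply advertises one fewer SSID range than the driver keeps, so the OEM-reserved last row stays internal. A rough illustration of that packed header, with illustrative values only (the real command code, table count and SSID bounds come from the diag headers, not from this sketch):

#include <stdint.h>
#include <string.h>

#define TBL_CNT		24	/* illustrative: total rows, incl. one OEM range */

static void pack_ssid_range_header(uint8_t *rsp)
{
	int advertised = TBL_CNT - 1;			/* hide the OEM range */
	uint16_t ssid_first = 0, ssid_last = 121;	/* illustrative bounds */

	rsp[0] = 0x7d;	/* illustrative command code */
	rsp[1] = 0x1;
	rsp[2] = 0x1;
	rsp[3] = 0x0;
	memcpy(rsp + 4, &advertised, sizeof(advertised));
	memcpy(rsp + 8, &ssid_first, sizeof(ssid_first));
	memcpy(rsp + 10, &ssid_last, sizeof(ssid_last));
}
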
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index e783a1a..4d34362 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -171,11 +171,10 @@
 	CLK_SUSPEND,
 };
 
-int msm_rotator_iommu_map_buf(int mem_id, unsigned char src,
+int msm_rotator_iommu_map_buf(int mem_id, int domain,
 	unsigned long *start, unsigned long *len,
-	struct ion_handle **pihdl)
+	struct ion_handle **pihdl, unsigned int secure)
 {
-	int domain;
 	if (!msm_rotator_dev->client)
 		return -EINVAL;
 
@@ -187,16 +186,30 @@
 	pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
 		ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
 
-	if (rot_iommu_split_domain)
-		domain = src ? ROTATOR_SRC_DOMAIN : ROTATOR_DST_DOMAIN;
-	else
-		domain = ROTATOR_SRC_DOMAIN;
-
-	if (ion_map_iommu(msm_rotator_dev->client,
-		*pihdl,	domain, GEN_POOL,
-		SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
-		pr_err("ion_map_iommu() failed\n");
-		return -EINVAL;
+	if (rot_iommu_split_domain) {
+		if (secure) {
+			if (ion_phys(msm_rotator_dev->client,
+				*pihdl, start, (unsigned *)len)) {
+				pr_err("%s:%d: ion_phys map failed\n",
+					 __func__, __LINE__);
+				return -ENOMEM;
+			}
+		} else {
+			if (ion_map_iommu(msm_rotator_dev->client,
+				*pihdl,	domain, GEN_POOL,
+				SZ_4K, 0, start, len, 0,
+				ION_IOMMU_UNMAP_DELAYED)) {
+				pr_err("ion_map_iommu() failed\n");
+				return -EINVAL;
+			}
+		}
+	} else {
+		if (ion_map_iommu(msm_rotator_dev->client,
+			*pihdl,	ROTATOR_SRC_DOMAIN, GEN_POOL,
+			SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
+			pr_err("ion_map_iommu() failed\n");
+			return -EINVAL;
+		}
 	}
 
 	pr_debug("%s(): mem_id %d, start 0x%lx, len 0x%lx\n",
@@ -813,9 +826,9 @@
 	return 0;
 }
 
-static int get_img(struct msmfb_data *fbd, unsigned char src,
+static int get_img(struct msmfb_data *fbd, int domain,
 	unsigned long *start, unsigned long *len, struct file **p_file,
-	int *p_need, struct ion_handle **p_ihdl)
+	int *p_need, struct ion_handle **p_ihdl, unsigned int secure)
 {
 	int ret = 0;
 #ifdef CONFIG_FB
@@ -857,8 +870,8 @@
 #endif
 
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-	return msm_rotator_iommu_map_buf(fbd->memory_id, src, start,
-		len, p_ihdl);
+	return msm_rotator_iommu_map_buf(fbd->memory_id, domain, start,
+		len, p_ihdl, secure);
 #endif
 #ifdef CONFIG_ANDROID_PMEM
 	if (!get_pmem_file(fbd->memory_id, start, &vstart, len, p_file))
@@ -870,7 +883,7 @@
 }
 
 static void put_img(struct file *p_file, struct ion_handle *p_ihdl,
-	unsigned char src)
+	int domain, unsigned int secure)
 {
 #ifdef CONFIG_ANDROID_PMEM
 	if (p_file != NULL)
@@ -879,14 +892,15 @@
 
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	if (!IS_ERR_OR_NULL(p_ihdl)) {
-		int domain;
-		if (rot_iommu_split_domain)
-			domain = src ? ROTATOR_SRC_DOMAIN : ROTATOR_DST_DOMAIN;
-		else
-			domain = ROTATOR_SRC_DOMAIN;
 		pr_debug("%s(): p_ihdl %p\n", __func__, p_ihdl);
-		ion_unmap_iommu(msm_rotator_dev->client,
-			p_ihdl, domain, GEN_POOL);
+		if (rot_iommu_split_domain) {
+			if (!secure)
+				ion_unmap_iommu(msm_rotator_dev->client,
+					p_ihdl, domain, GEN_POOL);
+		} else {
+			ion_unmap_iommu(msm_rotator_dev->client,
+				p_ihdl, ROTATOR_SRC_DOMAIN, GEN_POOL);
+		}
 
 		ion_free(msm_rotator_dev->client, p_ihdl);
 	}
@@ -953,18 +967,18 @@
 		goto do_rotate_unlock_mutex;
 	}
 
-	rc = get_img(&info.src, 1, (unsigned long *)&in_paddr,
+	rc = get_img(&info.src, ROTATOR_SRC_DOMAIN, (unsigned long *)&in_paddr,
 			(unsigned long *)&src_len, &srcp0_file, &ps0_need,
-			&srcp0_ihdl);
+			&srcp0_ihdl, 0);
 	if (rc) {
 		pr_err("%s: in get_img() failed id=0x%08x\n",
 			DRIVER_NAME, info.src.memory_id);
 		goto do_rotate_unlock_mutex;
 	}
 
-	rc = get_img(&info.dst, 0, (unsigned long *)&out_paddr,
+	rc = get_img(&info.dst, ROTATOR_DST_DOMAIN, (unsigned long *)&out_paddr,
 			(unsigned long *)&dst_len, &dstp0_file, &p_need,
-			&dstp0_ihdl);
+			&dstp0_ihdl, img_info->secure);
 	if (rc) {
 		pr_err("%s: out get_img() failed id=0x%08x\n",
 		       DRIVER_NAME, info.dst.memory_id);
@@ -992,20 +1006,20 @@
 			goto do_rotate_unlock_mutex;
 		}
 
-		rc = get_img(&info.src_chroma, 1,
+		rc = get_img(&info.src_chroma, ROTATOR_SRC_DOMAIN,
 				(unsigned long *)&in_chroma_paddr,
 				(unsigned long *)&src_len, &srcp1_file, &p_need,
-				&srcp1_ihdl);
+				&srcp1_ihdl, 0);
 		if (rc) {
 			pr_err("%s: in chroma get_img() failed id=0x%08x\n",
 				DRIVER_NAME, info.src_chroma.memory_id);
 			goto do_rotate_unlock_mutex;
 		}
 
-		rc = get_img(&info.dst_chroma, 0,
+		rc = get_img(&info.dst_chroma, ROTATOR_DST_DOMAIN,
 				(unsigned long *)&out_chroma_paddr,
 				(unsigned long *)&dst_len, &dstp1_file, &p_need,
-				&dstp1_ihdl);
+				&dstp1_ihdl, img_info->secure);
 		if (rc) {
 			pr_err("%s: out chroma get_img() failed id=0x%08x\n",
 				DRIVER_NAME, info.dst_chroma.memory_id);
@@ -1176,15 +1190,17 @@
 #endif
 	schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ);
 do_rotate_unlock_mutex:
-	put_img(dstp1_file, dstp1_ihdl, 0);
-	put_img(srcp1_file, srcp1_ihdl, 1);
-	put_img(dstp0_file, dstp0_ihdl, 0);
+	put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN,
+		msm_rotator_dev->img_info[s]->secure);
+	put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0);
+	put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN,
+		msm_rotator_dev->img_info[s]->secure);
 
 	/* only source may use frame buffer */
 	if (info.src.flags & MDP_MEMORY_ID_TYPE_FB)
 		fput_light(srcp0_file, ps0_need);
 	else
-		put_img(srcp0_file, srcp0_ihdl, 1);
+		put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0);
 	mutex_unlock(&msm_rotator_dev->rotator_lock);
 	dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
 		__func__, rc);
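
Note on the msm_rotator change above: the get_img()/put_img() rework boils down to one policy. With split IOMMU domains, secure destination buffers are resolved with ion_phys() and never IOMMU-mapped; everything else goes through ion_map_iommu() in the caller-supplied domain, or the single source domain when the split is disabled. A condensed sketch of that decision, reusing the same ION calls as the driver but abstracting the rotator state into parameters:

static int map_rotator_buf(struct ion_client *client, struct ion_handle *hdl,
			   int domain, unsigned int secure, int split_domain,
			   unsigned long *start, unsigned long *len)
{
	if (split_domain && secure)
		/* Secure buffers bypass the IOMMU; use the physical address. */
		return ion_phys(client, hdl, start, (unsigned *)len);

	if (!split_domain)
		domain = ROTATOR_SRC_DOMAIN;	/* one domain serves src and dst */

	return ion_map_iommu(client, hdl, domain, GEN_POOL, SZ_4K, 0,
			     start, len, 0, ION_IOMMU_UNMAP_DELAYED);
}

On release the same test applies: only buffers that were actually mapped through the IOMMU are passed to ion_unmap_iommu().
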
diff --git a/drivers/gpio/gpio-msm-common.c b/drivers/gpio/gpio-msm-common.c
index f268f33..fa8239a 100644
--- a/drivers/gpio/gpio-msm-common.c
+++ b/drivers/gpio/gpio-msm-common.c
@@ -170,7 +170,7 @@
 {
 	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
 	struct irq_domain *domain = g_dev->domain;
-	return irq_linear_revmap(domain, offset - chip->base);
+	return irq_linear_revmap(domain, offset);
 }
 
 static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
@@ -410,7 +410,10 @@
 /* TODO: This should be a real platform_driver */
 static int __devinit msm_gpio_probe(void)
 {
-	int i, irq, ret;
+	int ret;
+#ifndef CONFIG_OF
+	int irq, i;
+#endif
 
 	spin_lock_init(&tlmm_lock);
 	bitmap_zero(msm_gpio.enabled_irqs, NR_MSM_GPIOS);
@@ -420,6 +423,7 @@
 	if (ret < 0)
 		return ret;
 
+#ifndef CONFIG_OF
 	for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
 		irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
 		irq_set_lockdep_class(irq, &msm_gpio_lock_class);
@@ -427,7 +431,7 @@
 					 handle_level_irq);
 		set_irq_flags(irq, IRQF_VALID);
 	}
-
+#endif
 	ret = request_irq(TLMM_MSM_SUMMARY_IRQ, msm_summary_irq_handler,
 			IRQF_TRIGGER_HIGH, "msmgpio", NULL);
 	if (ret) {
@@ -610,14 +614,14 @@
 	return 0;
 }
 
-/*
- * TODO: this really should be doing all the things that msm_gpio_probe() does,
- * but since the msm_gpio_probe is called unconditionally for DT and non-DT
- * configs, we can't duplicate it here. This should be fixed.
- */
-int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
-			  irq_hw_number_t hwirq)
+static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				   irq_hw_number_t hwirq)
 {
+	irq_set_lockdep_class(irq, &msm_gpio_lock_class);
+	irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
+			handle_level_irq);
+	set_irq_flags(irq, IRQF_VALID);
+
 	return 0;
 }
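
Note on the gpio-msm-common change above: with device tree, the per-GPIO IRQ bookkeeping moves out of probe and into the irq_domain .map callback, so the chip, handler and lockdep class are set as each hwirq mapping is created. A minimal sketch of the same shape with generic names (the ops table and chip below are assumptions; only the .map body mirrors the patch):

static struct lock_class_key my_gpio_lock_class;
static struct irq_chip my_gpio_irq_chip = {
	.name = "my-gpio",	/* placeholder chip */
};

static int my_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_lockdep_class(irq, &my_gpio_lock_class);
	irq_set_chip_and_handler(irq, &my_gpio_irq_chip, handle_level_irq);
	set_irq_flags(irq, IRQF_VALID);
	return 0;
}

static const struct irq_domain_ops my_gpio_irq_domain_ops = {
	.map	= my_gpio_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,	/* typical two-cell GPIO binding */
};
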
 
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index c31f0f0..741d4fa 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1294,7 +1294,7 @@
 	valid_handle = ion_handle_validate(client, handle);
 	mutex_unlock(&client->lock);
 	if (!valid_handle) {
-		WARN("%s: invalid handle passed to share.\n", __func__);
+		WARN(1, "%s: invalid handle passed to share.\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index c33089d..ecff3ea 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -289,10 +289,6 @@
 				KGSL_IOMMU_CONTEXT_USER))
 		goto done;
 
-	cmds += __adreno_add_idle_indirect_cmds(cmds,
-		device->mmu.setstate_memory.gpuaddr +
-		KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
 	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
 					device->mmu.setstate_memory.gpuaddr +
@@ -361,9 +357,10 @@
 		*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
 		*cmds++ = 0x7fff;
 
-		cmds += __adreno_add_idle_indirect_cmds(cmds,
-			device->mmu.setstate_memory.gpuaddr +
-			KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+		if (flags & KGSL_MMUFLAGS_TLBFLUSH)
+			cmds += __adreno_add_idle_indirect_cmds(cmds,
+				device->mmu.setstate_memory.gpuaddr +
+				KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 	}
 	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
 		/*
@@ -376,6 +373,11 @@
 				KGSL_IOMMU_CTX_SHIFT) +
 				KGSL_IOMMU_CTX_TLBIASID);
 			*cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+
+			cmds += __adreno_add_idle_indirect_cmds(cmds,
+			device->mmu.setstate_memory.gpuaddr +
+			KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
 			cmds += adreno_add_read_cmds(device, cmds,
 				reg_map_desc[i]->gpuaddr +
 				(KGSL_IOMMU_CONTEXT_USER <<
@@ -570,7 +572,7 @@
 			patchid = 1;
 		else
 			patchid = 0;
-	} else if (cpu_is_msm8930() || cpu_is_msm8627()) {
+	} else if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
 
 		/* A305 */
 		majorid = 0;
@@ -636,7 +638,8 @@
 static unsigned int
 adreno_getchipid(struct kgsl_device *device)
 {
-	if (cpu_is_apq8064() || cpu_is_msm8930() || cpu_is_msm8627())
+	if (cpu_is_apq8064() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
+	    cpu_is_msm8627())
 		return a3xx_getchipid(device);
 	else
 		return a2xx_getchipid(device);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index fcbf1d9..e1d1eb9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -261,6 +261,7 @@
 {
 	unsigned int *start = cmds;
 
+	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
 	*cmds++ = new_phys_limit;
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
@@ -273,6 +274,7 @@
 {
 	unsigned int *start = cmds;
 
+	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
 	*cmds++ = (cur_ctx_bank ? 0 : 0x20);
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
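
Note on the adreno.h change above: both helpers now emit the idle-indirect sequence before the type-0 register write as well as after it, so the CP is quiesced on either side of the update. The pattern, reduced to its core (a sketch reusing the helpers named above; command-buffer sizing stays the caller's responsibility):

static unsigned int *emit_idle_bracketed_write(unsigned int *cmds,
					       unsigned int reg,
					       unsigned int val,
					       unsigned int nop_gpuaddr)
{
	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
	*cmds++ = cp_type0_packet(reg, 1);	/* one-dword register write */
	*cmds++ = val;
	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
	return cmds;
}
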
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index c8a7a71..d6c5e66 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -181,7 +181,7 @@
 
 	/* Trustzone is only valid for some SOCs */
 	if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
-		cpu_is_msm8930() || cpu_is_msm8627()))
+		cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()))
 		return -EINVAL;
 
 	priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a2dd649..b341485 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -17,6 +17,7 @@
 #include <asm/cacheflush.h>
 #include <linux/slab.h>
 #include <linux/kmemleak.h>
+#include <linux/highmem.h>
 
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
@@ -489,9 +490,11 @@
 			struct kgsl_pagetable *pagetable,
 			size_t size, unsigned int protflags)
 {
-	int order, ret = 0;
+	int i, order, ret = 0;
 	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
-	int i;
+	struct page **pages = NULL;
+	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+	void *ptr;
 
 	/*
 	 * Add guard page to the end of the allocation when the
@@ -515,26 +518,53 @@
 		goto done;
 	}
 
+	/*
+	 * Allocate space to store the list of pages to send to vmap.
+	 * This is an array of pointers so we can track 1024 pages per page of
+	 * allocation which means we can handle up to a 8MB buffer request with
+	 * two pages; well within the acceptable limits for using kmalloc.
+	 */
+
+	pages = kmalloc(sglen * sizeof(struct page *), GFP_KERNEL);
+
+	if (pages == NULL) {
+		KGSL_CORE_ERR("kmalloc (%d) failed\n",
+			sglen * sizeof(struct page *));
+		ret = -ENOMEM;
+		goto done;
+	}
+
 	kmemleak_not_leak(memdesc->sg);
 
 	memdesc->sglen = sglen;
 	sg_init_table(memdesc->sg, sglen);
 
 	for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
-		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO |
-						__GFP_HIGHMEM);
-		if (!page) {
+
+		/*
+		 * Don't use __GFP_ZERO here because it is faster to memset the
+		 * range ourselves (see below).
+		 */
+
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+		if (pages[i] == NULL) {
 			ret = -ENOMEM;
 			memdesc->sglen = i;
 			goto done;
 		}
-		flush_dcache_page(page);
-		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
+
+		sg_set_page(&memdesc->sg[i], pages[i], PAGE_SIZE, 0);
 	}
 
 	/* Add the guard page to the end of the sglist */
 
 	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
+		/*
+		 * It doesn't matter if we use __GFP_ZERO here; this page never
+		 * gets mapped, and we only allocate it once in the life
+		 * of the system.
+		 */
+
 		if (kgsl_guard_page == NULL)
 			kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
 				__GFP_HIGHMEM);
@@ -547,6 +577,44 @@
 			memdesc->sglen--;
 	}
 
+	/*
+	 * All memory that goes to the user has to be zeroed out before it gets
+	 * exposed to userspace. This means that the memory has to be mapped in
+	 * the kernel, zeroed (memset) and then unmapped.  This also means that
+	 * the dcache has to be flushed to ensure coherency between the kernel
+	 * and user pages. We used to pass __GFP_ZERO to alloc_page, which mapped,
+	 * zeroed, and unmapped each individual page, and then we had to turn
+	 * around and call flush_dcache_page() on that page to clear the caches.
+	 * This was killing us for performance. Instead, we found it is much
+	 * faster to allocate the pages without GFP_ZERO, map the entire range,
+	 * memset it, flush the range and then unmap - this results in a factor
+	 * of 4 improvement for speed for large buffers.  There is a small
+	 * increase in speed for small buffers, but only on the order of a few
+	 * microseconds at best.  The only downside is that there needs to be
+	 * enough temporary space in vmalloc to accommodate the map. This
+	 * shouldn't be a problem, but if it happens, fall back to a much slower
+	 * path
+	 */
+
+	ptr = vmap(pages, i, VM_IOREMAP, page_prot);
+
+	if (ptr != NULL) {
+		memset(ptr, 0, memdesc->size);
+		dmac_flush_range(ptr, ptr + memdesc->size);
+		vunmap(ptr);
+	} else {
+		int j;
+
+		/* Very, very, very slow path */
+
+		for (j = 0; j < i; j++) {
+			ptr = kmap_atomic(pages[j]);
+			memset(ptr, 0, PAGE_SIZE);
+			dmac_flush_range(ptr, ptr + PAGE_SIZE);
+			kunmap_atomic(ptr);
+		}
+	}
+
 	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
 				KGSL_CACHE_OP_FLUSH);
 
@@ -564,6 +632,8 @@
 		kgsl_driver.stats.histogram[order]++;
 
 done:
+	kfree(pages);
+
 	if (ret)
 		kgsl_sharedmem_free(memdesc);
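
Note on the kgsl_sharedmem change above: the comment block describes the new zeroing strategy; reduced to code, the fast path vmaps the whole page array once, memsets and flushes it in one pass, and only falls back to per-page kmap_atomic when vmalloc space cannot hold the temporary mapping. A condensed sketch (assumes ARM's dmac_flush_range() and a pages[] array covering npages PAGE_SIZE pages):

static void zero_and_flush_pages(struct page **pages, int npages)
{
	size_t size = (size_t)npages * PAGE_SIZE;
	void *ptr = vmap(pages, npages, VM_IOREMAP,
			 pgprot_writecombine(PAGE_KERNEL));
	int j;

	if (ptr) {
		/* Fast path: one mapping, one memset, one cache flush. */
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		vunmap(ptr);
		return;
	}

	/* Slow path: vmalloc space exhausted, touch each page individually. */
	for (j = 0; j < npages; j++) {
		ptr = kmap_atomic(pages[j]);
		memset(ptr, 0, PAGE_SIZE);
		dmac_flush_range(ptr, ptr + PAGE_SIZE);
		kunmap_atomic(ptr);
	}
}
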
 
diff --git a/drivers/hwmon/pm8xxx-adc-scale.c b/drivers/hwmon/pm8xxx-adc-scale.c
index fb2f1d5..4a1f58c 100644
--- a/drivers/hwmon/pm8xxx-adc-scale.c
+++ b/drivers/hwmon/pm8xxx-adc-scale.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -112,127 +112,162 @@
 };
 
 static const struct pm8xxx_adc_map_pt adcmap_pa_therm[] = {
-	{1677,	-30},
-	{1671,	-29},
-	{1663,	-28},
-	{1656,	-27},
-	{1648,	-26},
-	{1640,	-25},
-	{1632,	-24},
-	{1623,	-23},
-	{1615,	-22},
-	{1605,	-21},
-	{1596,	-20},
-	{1586,	-19},
-	{1576,	-18},
-	{1565,	-17},
-	{1554,	-16},
-	{1543,	-15},
-	{1531,	-14},
-	{1519,	-13},
-	{1507,	-12},
-	{1494,	-11},
-	{1482,	-10},
-	{1468,	-9},
-	{1455,	-8},
-	{1441,	-7},
-	{1427,	-6},
-	{1412,	-5},
-	{1398,	-4},
-	{1383,	-3},
-	{1367,	-2},
-	{1352,	-1},
-	{1336,	0},
-	{1320,	1},
-	{1304,	2},
-	{1287,	3},
-	{1271,	4},
-	{1254,	5},
-	{1237,	6},
-	{1219,	7},
-	{1202,	8},
-	{1185,	9},
-	{1167,	10},
-	{1149,	11},
-	{1131,	12},
-	{1114,	13},
-	{1096,	14},
-	{1078,	15},
-	{1060,	16},
-	{1042,	17},
-	{1024,	18},
-	{1006,	19},
-	{988,	20},
-	{970,	21},
-	{952,	22},
-	{934,	23},
-	{917,	24},
+	{1731,	-30},
+	{1726,	-29},
+	{1721,	-28},
+	{1715,	-27},
+	{1710,	-26},
+	{1703,	-25},
+	{1697,	-24},
+	{1690,	-23},
+	{1683,	-22},
+	{1675,	-21},
+	{1667,	-20},
+	{1659,	-19},
+	{1650,	-18},
+	{1641,	-17},
+	{1632,	-16},
+	{1622,	-15},
+	{1611,	-14},
+	{1600,	-13},
+	{1589,	-12},
+	{1577,	-11},
+	{1565,	-10},
+	{1552,	-9},
+	{1539,	-8},
+	{1525,	-7},
+	{1511,	-6},
+	{1496,	-5},
+	{1481,	-4},
+	{1465,	-3},
+	{1449,	-2},
+	{1432,	-1},
+	{1415,	0},
+	{1398,	1},
+	{1380,	2},
+	{1362,	3},
+	{1343,	4},
+	{1324,	5},
+	{1305,	6},
+	{1285,	7},
+	{1265,	8},
+	{1245,	9},
+	{1224,	10},
+	{1203,	11},
+	{1182,	12},
+	{1161,	13},
+	{1139,	14},
+	{1118,	15},
+	{1096,	16},
+	{1074,	17},
+	{1052,	18},
+	{1030,	19},
+	{1008,	20},
+	{986,	21},
+	{964,	22},
+	{943,	23},
+	{921,	24},
 	{899,	25},
-	{882,	26},
-	{865,	27},
-	{848,	28},
-	{831,	29},
-	{814,	30},
-	{797,	31},
-	{781,	32},
-	{764,	33},
-	{748,	34},
-	{732,	35},
-	{717,	36},
-	{701,	37},
-	{686,	38},
-	{671,	39},
-	{656,	40},
-	{642,	41},
-	{627,	42},
-	{613,	43},
-	{599,	44},
-	{586,	45},
-	{572,	46},
-	{559,	47},
-	{546,	48},
-	{534,	49},
-	{522,	50},
-	{509,	51},
-	{498,	52},
-	{486,	53},
-	{475,	54},
-	{463,	55},
-	{452,	56},
-	{442,	57},
-	{431,	58},
-	{421,	59},
-	{411,	60},
-	{401,	61},
-	{392,	62},
-	{383,	63},
-	{374,	64},
-	{365,	65},
-	{356,	66},
-	{348,	67},
-	{339,	68},
-	{331,	69},
-	{323,	70},
-	{316,	71},
-	{308,	72},
-	{301,	73},
-	{294,	74},
-	{287,	75},
-	{280,	76},
-	{273,	77},
-	{267,	78},
-	{261,	79},
-	{255,	80},
-	{249,	81},
-	{243,	82},
-	{237,	83},
-	{232,	84},
-	{226,	85},
-	{221,	86},
-	{216,	87},
-	{211,	88},
-	{206,	89},
-	{201,	90}
+	{878,	26},
+	{857,	27},
+	{836,	28},
+	{815,	29},
+	{794,	30},
+	{774,	31},
+	{754,	32},
+	{734,	33},
+	{714,	34},
+	{695,	35},
+	{676,	36},
+	{657,	37},
+	{639,	38},
+	{621,	39},
+	{604,	40},
+	{586,	41},
+	{570,	42},
+	{553,	43},
+	{537,	44},
+	{521,	45},
+	{506,	46},
+	{491,	47},
+	{476,	48},
+	{462,	49},
+	{448,	50},
+	{435,	51},
+	{421,	52},
+	{409,	53},
+	{396,	54},
+	{384,	55},
+	{372,	56},
+	{361,	57},
+	{350,	58},
+	{339,	59},
+	{329,	60},
+	{318,	61},
+	{309,	62},
+	{299,	63},
+	{290,	64},
+	{281,	65},
+	{272,	66},
+	{264,	67},
+	{256,	68},
+	{248,	69},
+	{240,	70},
+	{233,	71},
+	{226,	72},
+	{219,	73},
+	{212,	74},
+	{206,	75},
+	{199,	76},
+	{193,	77},
+	{187,	78},
+	{182,	79},
+	{176,	80},
+	{171,	81},
+	{166,	82},
+	{161,	83},
+	{156,	84},
+	{151,	85},
+	{147,	86},
+	{142,	87},
+	{138,	88},
+	{134,	89},
+	{130,	90},
+	{126,	91},
+	{122,	92},
+	{119,	93},
+	{115,	94},
+	{112,	95},
+	{109,	96},
+	{106,	97},
+	{103,	98},
+	{100,	99},
+	{97,	100},
+	{94,	101},
+	{91,	102},
+	{89,	103},
+	{86,	104},
+	{84,	105},
+	{82,	106},
+	{79,	107},
+	{77,	108},
+	{75,	109},
+	{73,	110},
+	{71,	111},
+	{69,	112},
+	{67,	113},
+	{65,	114},
+	{64,	115},
+	{62,	116},
+	{60,	117},
+	{59,	118},
+	{57,	119},
+	{56,	120},
+	{54,	121},
+	{53,	122},
+	{51,	123},
+	{50,	124},
+	{49,	125}
 };
 
 static const struct pm8xxx_adc_map_pt adcmap_ntcg_104ef_104fb[] = {
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 23317d6..1c70527 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -100,6 +100,7 @@
 #define MXT_PROCI_PALM_T41		41
 #define MXT_PROCI_TOUCHSUPPRESSION_T42	42
 #define MXT_PROCI_STYLUS_T47		47
+#define MXT_PROCI_ADAPTIVETHRESHOLD_T55 55
 #define MXT_PROCI_SHIELDLESS_T56	56
 #define MXT_PROCG_NOISESUPPRESSION_T48	48
 #define MXT_SPT_COMMSCONFIG_T18		18
@@ -228,7 +229,7 @@
 #define MXT_BACKUP_VALUE	0x55
 #define MXT_BACKUP_TIME		25	/* msec */
 #define MXT224_RESET_TIME	65	/* msec */
-#define MXT224E_RESET_TIME	22	/* msec */
+#define MXT224E_RESET_TIME	150	/* msec */
 #define MXT1386_RESET_TIME	250	/* msec */
 #define MXT_RESET_TIME		250	/* msec */
 #define MXT_RESET_NOCHGREAD	400	/* msec */
@@ -386,6 +387,7 @@
 	case MXT_SPT_USERDATA_T38:
 	case MXT_SPT_DIGITIZER_T43:
 	case MXT_SPT_CTECONFIG_T46:
+	case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
 		return true;
 	default:
 		return false;
@@ -419,6 +421,7 @@
 	case MXT_SPT_USERDATA_T38:
 	case MXT_SPT_DIGITIZER_T43:
 	case MXT_SPT_CTECONFIG_T46:
+	case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
 		return true;
 	default:
 		return false;
@@ -732,6 +735,36 @@
 	return __mxt_read_reg(data->client, reg + offset, 1, val);
 }
 
+static int mxt_get_object_address(struct device *dev, u8 type)
+{
+	struct mxt_data *data = dev_get_drvdata(dev);
+	u8 obj_num, obj_buf[MXT_OBJECT_SIZE];
+	u16 reg;
+	int i, error;
+
+	error = mxt_read_reg(data->client, MXT_OBJECT_NUM, &obj_num);
+
+	if (error) {
+		dev_err(dev, "reading number of objects failed\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < obj_num; i++) {
+		reg = MXT_OBJECT_START + MXT_OBJECT_SIZE * i;
+		error = mxt_read_object_table(data->client,
+						reg, obj_buf);
+		if (error)
+			return error;
+
+		if (obj_buf[0] == type)
+			return obj_buf[2] << 8 | obj_buf[1];
+	}
+	/* If control reaches here, i == obj_num and the object was not found */
+	dev_err(dev, "Requested object %d not found.\n", type);
+	return -EINVAL;
+
+}
+
 static int mxt_write_object(struct mxt_data *data,
 				 u8 type, u8 offset, u8 val)
 {
@@ -1543,6 +1576,7 @@
 
 	switch (data->info.family_id) {
 	case MXT224_ID:
+	case MXT224E_ID:
 		max_frame_size = MXT_SINGLE_FW_MAX_FRAME_SIZE;
 		break;
 	case MXT1386_ID:
@@ -1681,11 +1715,12 @@
 					const char *buf, size_t count)
 {
 	struct mxt_data *data = dev_get_drvdata(dev);
-	int error;
+	int error, address;
 	const char *fw_name;
 	u8 bootldr_id;
 	u8 cfg_version[MXT_CFG_VERSION_LEN] = {0};
 
+
 	/* If fw_name is set, then the existing firmware has an upgrade */
 	if (!data->fw_name) {
 		/*
@@ -1735,6 +1770,16 @@
 		data->cfg_version_idx = 0;
 		data->update_cfg = false;
 
+		/* T38 object address might have changed, read it from
+		   touch controller */
+		address = mxt_get_object_address(dev, MXT_SPT_USERDATA_T38);
+		if (address < 0) {
+			dev_err(dev, "T38 required for touch operation\n");
+			return -EINVAL;
+		}
+
+		data->t38_start_addr = address;
+
 		error = __mxt_write_reg(data->client, data->t38_start_addr,
 				sizeof(cfg_version), cfg_version);
 		if (error)
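
Note on the atmel_mxt_ts change above: after a successful firmware flash the object table may be laid out differently, which is why the code re-reads the T38 address through mxt_get_object_address() before writing the zeroed config version. A hedged usage sketch of the same step (the wrapper and its error handling are illustrative):

static int mxt_refresh_t38_addr(struct device *dev, struct mxt_data *data)
{
	int address = mxt_get_object_address(dev, MXT_SPT_USERDATA_T38);

	if (address < 0) {
		dev_err(dev, "T38 object lookup failed (%d)\n", address);
		return address;
	}

	data->t38_start_addr = address;
	return 0;
}
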
diff --git a/drivers/input/touchscreen/cy8c_ts.c b/drivers/input/touchscreen/cy8c_ts.c
index f708582..88f7d1b 100644
--- a/drivers/input/touchscreen/cy8c_ts.c
+++ b/drivers/input/touchscreen/cy8c_ts.c
@@ -3,7 +3,7 @@
  * drivers/input/touchscreen/cy8c_ts.c
  *
  * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
- * Copyright (c) 2010, 2011 Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -387,6 +387,7 @@
 	input_set_drvdata(input_device, ts);
 
 	__set_bit(EV_ABS, input_device->evbit);
+	__set_bit(INPUT_PROP_DIRECT, input_device->propbit);
 
 	if (ts->device_id == CY8CTMA340) {
 		/* set up virtual key */
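
Note on the cy8c_ts change above: INPUT_PROP_DIRECT marks the device as a direct-touch surface so userspace treats it as a touchscreen rather than a touchpad. The capability setup then typically looks like this (a sketch; the axis ranges are illustrative, not taken from this driver):

static void setup_touch_caps(struct input_dev *input_device)
{
	__set_bit(EV_ABS, input_device->evbit);
	__set_bit(INPUT_PROP_DIRECT, input_device->propbit);
	/* Illustrative panel resolution; the driver derives these from pdata. */
	input_set_abs_params(input_device, ABS_MT_POSITION_X, 0, 719, 0, 0);
	input_set_abs_params(input_device, ABS_MT_POSITION_Y, 0, 1279, 0, 0);
}
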
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index 63120da..5703d88 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -11,7 +11,7 @@
   EXTRA_CFLAGS += -Idrivers/media/video/msm/sensors
   EXTRA_CFLAGS += -Idrivers/media/video/msm/actuators
   EXTRA_CFLAGS += -Idrivers/media/video/msm/server
-  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
+  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o msm_vfe_stats_buf.o
   obj-$(CONFIG_MSM_CAMERA) += server/ eeprom/ sensors/ actuators/ csi/
   obj-$(CONFIG_MSM_CPP) += cpp/
   obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index c3e8321..08278ff 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -131,6 +131,8 @@
 	uint32_t    id;
 	uint32_t    buffer;
 	uint32_t    frameCounter;
+	int32_t     buf_idx;
+	int32_t     fd;
 };
 
 struct msm_free_buf {
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 5c61cdb..5fcb62b 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -406,12 +406,14 @@
 		struct msm_stats_buf stats;
 		struct isp_msg_stats *isp_stats = (struct isp_msg_stats *)arg;
 
+		memset(&stats, 0, sizeof(stats));
 		isp_event->isp_data.isp_msg.msg_id = isp_stats->id;
 		isp_event->isp_data.isp_msg.frame_id =
 			isp_stats->frameCounter;
-		stats.buffer = msm_pmem_stats_ptov_lookup(pmctl,
-						isp_stats->buffer,
-						&(stats.fd));
+		stats.buffer = isp_stats->buffer;
+		stats.fd = isp_stats->fd;
+		/* buf_idx used for O(1) lookup */
+		stats.buf_idx = isp_stats->buf_idx;
 		switch (isp_stats->id) {
 		case MSG_ID_STATS_AEC:
 			stats.aec.buff = stats.buffer;
@@ -453,8 +455,8 @@
 				kmalloc(sizeof(struct msm_stats_buf),
 							GFP_ATOMIC);
 			if (!stats_buf) {
-				pr_err("%s: out of memory.\n",
-							__func__);
+				pr_err("%s: out of memory. stats_id = %d\n",
+					__func__, isp_stats->id);
 				rc = -ENOMEM;
 			} else {
 				*stats_buf = stats;
@@ -550,7 +552,6 @@
 	struct msm_cam_media_controller *mctl, void __user *arg)
 {
 	struct msm_vfe_cfg_cmd cfgcmd;
-	struct msm_pmem_region region[8];
 	struct axidata axi_data;
 
 	if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
@@ -562,103 +563,13 @@
 	CDBG("%s: cmd_type %d\n", __func__, cfgcmd.cmd_type);
 	switch (cfgcmd.cmd_type) {
 	case CMD_STATS_AF_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_AF, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
 	case CMD_STATS_AEC_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_AEC, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
 	case CMD_STATS_AWB_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_AWB, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
 	case CMD_STATS_AEC_AWB_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_AEC_AWB, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
 	case CMD_STATS_IHIST_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_IHIST, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
 	case CMD_STATS_RS_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_RS, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
 	case CMD_STATS_CS_ENABLE:
-		axi_data.bufnum1 =
-			msm_pmem_region_lookup(
-				&mctl->stats_info.pmem_stats_list,
-				MSM_PMEM_CS, &region[0],
-				NUM_STAT_OUTPUT_BUFFERS);
-		if (!axi_data.bufnum1) {
-			pr_err("%s %d: pmem region lookup error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		axi_data.region = &region[0];
-		return msm_isp_subdev_ioctl(sd, &cfgcmd,
-							&axi_data);
+		return msm_isp_subdev_ioctl(sd, &cfgcmd, NULL);
 	case CMD_GENERAL:
 	case CMD_STATS_DISABLE:
 		return msm_isp_subdev_ioctl(sd, &cfgcmd,
@@ -764,6 +675,62 @@
 	return rc;
 }
 
+static int msm_vfe_stats_buf_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd,
+	struct msm_cam_media_controller *mctl,
+	void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+	int rc = 0;
+	switch (cmd) {
+	case MSM_CAM_IOCTL_STATS_REQBUF: {
+		struct msm_stats_reqbuf reqbuf;
+		if (copy_from_user(&reqbuf, arg,
+			sizeof(struct msm_stats_reqbuf))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+		cfgcmd.cmd_type = VFE_CMD_STATS_REQBUF;
+		cfgcmd.value = (void *)&reqbuf;
+		cfgcmd.length = sizeof(struct msm_stats_reqbuf);
+		rc = msm_isp_subdev_ioctl(sd, &cfgcmd, (void *)mctl->client);
+		break;
+	}
+	case MSM_CAM_IOCTL_STATS_ENQUEUEBUF: {
+		struct msm_stats_buf_info buf_info;
+
+		if (copy_from_user(&buf_info, arg,
+			sizeof(struct msm_stats_buf_info))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+		cfgcmd.cmd_type = VFE_CMD_STATS_ENQUEUEBUF;
+		cfgcmd.value = (void *)&buf_info;
+		cfgcmd.length = sizeof(struct msm_stats_buf_info);
+		rc = msm_isp_subdev_ioctl(sd, &cfgcmd, NULL);
+		break;
+	}
+	case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ: {
+		struct msm_stats_flush_bufq bufq_info;
+
+		if (copy_from_user(&bufq_info, arg,
+			sizeof(struct msm_stats_flush_bufq))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+		cfgcmd.cmd_type = VFE_CMD_STATS_FLUSH_BUFQ;
+		cfgcmd.value = (void *)&bufq_info;
+		cfgcmd.length = sizeof(struct msm_stats_flush_bufq);
+		rc = msm_isp_subdev_ioctl(sd, &cfgcmd, NULL);
+		break;
+	}
+	default:
+		rc = -1;
+		break;
+	}
+	CDBG("%s\n", __func__);
+	return rc;
+}
 /* config function similar to the original msm_ioctl_config */
 static int msm_isp_config(struct msm_cam_media_controller *pmctl,
 			 unsigned int cmd, unsigned long arg)
@@ -789,6 +756,12 @@
 		rc = msm_put_stats_buffer(sd, pmctl, argp);
 		break;
 
+	case MSM_CAM_IOCTL_STATS_REQBUF:
+	case MSM_CAM_IOCTL_STATS_ENQUEUEBUF:
+	case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ:
+		rc = msm_vfe_stats_buf_ioctl(sd, cmd, pmctl, argp);
+		break;
+
 	default:
 		break;
 	}
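
Note on the msm_isp change above: each of the three stats ioctls follows the same envelope pattern, copy the userspace argument, wrap it in a msm_vfe_cfg_cmd, and forward it to the VFE subdevice. Schematically, for the REQBUF case (condensed from the code above):

	struct msm_stats_reqbuf reqbuf;
	struct msm_vfe_cfg_cmd cfgcmd;

	if (copy_from_user(&reqbuf, arg, sizeof(reqbuf)))
		return -EFAULT;

	cfgcmd.cmd_type = VFE_CMD_STATS_REQBUF;
	cfgcmd.value = (void *)&reqbuf;
	cfgcmd.length = sizeof(reqbuf);
	return msm_isp_subdev_ioctl(sd, &cfgcmd, (void *)mctl->client);
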
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/msm_vfe31_v4l2.c
index fa985ce..bc99b2d 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.c
@@ -365,6 +365,229 @@
 	"DEMOSAICV3_UPDATE",
 };
 
+
+static unsigned long vfe31_stats_dqbuf(enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_meta_buf *buf = NULL;
+	int rc = 0;
+	rc = vfe31_ctrl->stats_ops.dqbuf(vfe31_ctrl->stats_ops.stats_ctrl,
+			stats_type, &buf);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	return buf->paddr;
+}
+
+static unsigned long vfe31_stats_flush_enqueue(
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	int rc = 0;
+	int i;
+	/*
+	 * Pass NULL for the ion client: the buffers are already mapped at
+	 * this stage, so the client is not needed. Flush all the buffers;
+	 * they move back to the PREPARE state.
+	 */
+
+	rc = vfe31_ctrl->stats_ops.bufq_flush(
+			vfe31_ctrl->stats_ops.stats_ctrl, stats_type, NULL);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+
+	/* Queue all the buffers back to QUEUED state */
+	bufq = vfe31_ctrl->stats_ctrl.bufq[stats_type];
+	for (i = 0; i < bufq->num_bufs; i++) {
+		stats_buf = &bufq->bufs[i];
+		rc = vfe31_ctrl->stats_ops.enqueue_buf(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				&(stats_buf->info), NULL);
+		if (rc < 0) {
+			pr_err("%s: dq stats buf (type = %d) err = %d",
+				__func__, stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
+static int vfe_stats_awb_buf_init(
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_aec_buf_init(
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase +
+		VFE_BUS_STATS_AEC_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase +
+		VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_af_buf_init(
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	rc = vfe31_stats_flush_enqueue(MSM_STATS_TYPE_AF);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf err = %d",
+			__func__, rc);
+		spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+		return -EINVAL;
+	}
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_ihist_buf_init(
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_rs_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_cs_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PONG_ADDR);
+	return 0;
+}
+
 static void vfe31_stop(void)
 {
 	uint8_t  axiBusyFlag = true;
@@ -600,101 +823,6 @@
 	return 0;
 }
 
-static uint32_t vfe_stats_awb_buf_init(struct vfe_cmd_stats_buf *in)
-{
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
-
-	addr = ptr[0];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
-	addr = ptr[1];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
-	vfe31_ctrl->awbStatsControl.nextFrameAddrBuf = in->statsBuf[2];
-	return 0;
-}
-
-static uint32_t vfe_stats_aec_buf_init(struct vfe_cmd_stats_buf *in)
-{
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
-
-	addr = ptr[0];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PING_ADDR);
-	addr = ptr[1];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PONG_ADDR);
-
-	vfe31_ctrl->aecStatsControl.nextFrameAddrBuf = in->statsBuf[2];
-	return 0;
-}
-
-static uint32_t vfe_stats_af_buf_init(struct vfe_cmd_stats_buf *in)
-{
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
-
-	addr = ptr[0];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
-	addr = ptr[1];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
-
-	vfe31_ctrl->afStatsControl.nextFrameAddrBuf = in->statsBuf[2];
-	return 0;
-}
-
-static uint32_t vfe_stats_ihist_buf_init(struct vfe_cmd_stats_buf *in)
-{
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
-
-	addr = ptr[0];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR);
-	addr = ptr[1];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR);
-
-	vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf = in->statsBuf[2];
-	return 0;
-}
-
-static uint32_t vfe_stats_rs_buf_init(struct vfe_cmd_stats_buf *in)
-{
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
-
-	addr = ptr[0];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PING_ADDR);
-	addr = ptr[1];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PONG_ADDR);
-
-	vfe31_ctrl->rsStatsControl.nextFrameAddrBuf = in->statsBuf[2];
-	return 0;
-}
-
-static uint32_t vfe_stats_cs_buf_init(struct vfe_cmd_stats_buf *in)
-{
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
-
-	addr = ptr[0];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PING_ADDR);
-	addr = ptr[1];
-	msm_camera_io_w(addr,
-		vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PONG_ADDR);
-
-	vfe31_ctrl->csStatsControl.nextFrameAddrBuf = in->statsBuf[2];
-	return 0;
-}
-
 static void msm_camera_io_dump2(void __iomem *addr, int size)
 {
 	char line_str[BUFF_SIZE_128], *p_str;
@@ -1388,6 +1516,12 @@
 		break;
 
 	case VFE_CMD_STATS_AE_START:
+		rc = vfe_stats_aec_buf_init(NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AEC",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1408,6 +1542,12 @@
 		cmdp, (vfe31_cmd[cmd->id].length));
 		break;
 	case VFE_CMD_STATS_AF_START:
+		rc = vfe_stats_af_buf_init(NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AF",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1428,6 +1568,12 @@
 		cmdp, (vfe31_cmd[cmd->id].length));
 		break;
 	case VFE_CMD_STATS_AWB_START:
+		rc = vfe_stats_awb_buf_init(NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AWB",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1449,6 +1595,12 @@
 		break;
 
 	case VFE_CMD_STATS_IHIST_START:
+		rc = vfe_stats_ihist_buf_init(NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of IHIST",
+			__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1470,6 +1622,12 @@
 		break;
 
 	case VFE_CMD_STATS_RS_START:
+		rc = vfe_stats_rs_buf_init(NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of RS",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1487,6 +1645,12 @@
 		break;
 
 	case VFE_CMD_STATS_CS_START:
+		rc = vfe_stats_cs_buf_init(NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of CS",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1987,6 +2151,12 @@
 		old_val &= ~AF_BF_ENABLE_MASK;
 		msm_camera_io_w(old_val,
 			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		rc = vfe31_stats_flush_enqueue(MSM_STATS_TYPE_AF);
+		if (rc < 0) {
+			pr_err("%s: dq stats buf err = %d",
+				   __func__, rc);
+			return -EINVAL;
+		}
 		break;
 
 	case VFE_CMD_STATS_IHIST_STOP:
@@ -2172,76 +2342,6 @@
 	return rc;
 }
 
-static void vfe31_stats_af_ack(struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe31_ctrl->stats_comp ?
-		&vfe31_ctrl->comp_stats_ack_lock :
-		&vfe31_ctrl->af_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe31_ctrl->afStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe31_ctrl->afStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
-static void vfe31_stats_awb_ack(struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe31_ctrl->stats_comp ?
-		&vfe31_ctrl->comp_stats_ack_lock :
-		&vfe31_ctrl->awb_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe31_ctrl->awbStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe31_ctrl->awbStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
-static void vfe31_stats_aec_ack(struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe31_ctrl->stats_comp ?
-		&vfe31_ctrl->comp_stats_ack_lock :
-		&vfe31_ctrl->aec_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe31_ctrl->aecStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe31_ctrl->aecStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
-static void vfe31_stats_ihist_ack(struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe31_ctrl->stats_comp ?
-		&vfe31_ctrl->comp_stats_ack_lock :
-		&vfe31_ctrl->ihist_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe31_ctrl->ihistStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-static void vfe31_stats_rs_ack(struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe31_ctrl->stats_comp ?
-		&vfe31_ctrl->comp_stats_ack_lock :
-		&vfe31_ctrl->rs_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe31_ctrl->rsStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe31_ctrl->rsStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-static void vfe31_stats_cs_ack(struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe31_ctrl->stats_comp ?
-		&vfe31_ctrl->comp_stats_ack_lock :
-		&vfe31_ctrl->cs_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe31_ctrl->csStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe31_ctrl->csStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
 static inline void vfe31_read_irq_status(struct vfe31_irq_status *out)
 {
 	uint32_t *temp;
@@ -2810,7 +2910,8 @@
 static void
 vfe_send_stats_msg(uint32_t bufAddress, uint32_t statsNum)
 {
-	unsigned long flags;
+	int rc = 0;
+	void *vaddr = NULL;
 	/* fill message with right content. */
 	/* @todo This is causing issues, need further investigate */
 	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
@@ -2821,55 +2922,72 @@
 	switch (statsNum) {
 	case STATS_AE_NUM:{
 		msgStats.id = MSG_ID_STATS_AEC;
-		spin_lock_irqsave(&vfe31_ctrl->aec_ack_lock, flags);
-		vfe31_ctrl->aecStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe31_ctrl->aec_ack_lock, flags);
+		rc = vfe31_ctrl->stats_ops.dispatch(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AEC,	bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe31_ctrl->stats_ops.client);
 		}
 		break;
 	case STATS_AF_NUM:{
 		msgStats.id = MSG_ID_STATS_AF;
-		spin_lock_irqsave(&vfe31_ctrl->af_ack_lock, flags);
-		vfe31_ctrl->afStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe31_ctrl->af_ack_lock, flags);
+		rc = vfe31_ctrl->stats_ops.dispatch(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AF, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe31_ctrl->stats_ops.client);
 		}
 		break;
 	case STATS_AWB_NUM: {
 		msgStats.id = MSG_ID_STATS_AWB;
-		spin_lock_irqsave(&vfe31_ctrl->awb_ack_lock, flags);
-		vfe31_ctrl->awbStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe31_ctrl->awb_ack_lock, flags);
+		rc = vfe31_ctrl->stats_ops.dispatch(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AWB, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe31_ctrl->stats_ops.client);
 		}
 		break;
 
 	case STATS_IHIST_NUM: {
 		msgStats.id = MSG_ID_STATS_IHIST;
-		spin_lock_irqsave(&vfe31_ctrl->ihist_ack_lock, flags);
-		vfe31_ctrl->ihistStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe31_ctrl->ihist_ack_lock, flags);
+		rc = vfe31_ctrl->stats_ops.dispatch(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_IHIST, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe31_ctrl->stats_ops.client);
 		}
 		break;
 	case STATS_RS_NUM: {
 		msgStats.id = MSG_ID_STATS_RS;
-		spin_lock_irqsave(&vfe31_ctrl->rs_ack_lock, flags);
-		vfe31_ctrl->rsStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe31_ctrl->rs_ack_lock, flags);
+		rc = vfe31_ctrl->stats_ops.dispatch(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_RS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe31_ctrl->stats_ops.client);
 		}
 		break;
 	case STATS_CS_NUM: {
 		msgStats.id = MSG_ID_STATS_CS;
-		spin_lock_irqsave(&vfe31_ctrl->cs_ack_lock, flags);
-		vfe31_ctrl->csStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe31_ctrl->cs_ack_lock, flags);
+		rc = vfe31_ctrl->stats_ops.dispatch(
+				vfe31_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_CS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe31_ctrl->stats_ops.client);
 		}
 		break;
 
 	default:
 		goto stats_done;
 	}
-
-	v4l2_subdev_notify(&vfe31_ctrl->subdev,
-				NOTIFY_VFE_MSG_STATS,
-				&msgStats);
+	if (rc == 0) {
+		msgStats.buffer = (uint32_t)vaddr;
+		v4l2_subdev_notify(&vfe31_ctrl->subdev,
+			NOTIFY_VFE_MSG_STATS, &msgStats);
+	} else {
+		pr_err("%s: paddr to idx mapping error, stats_id = %d,\n"
+			"paddr = 0x%d\n", __func__,
+			 msgStats.id, msgStats.buffer);
+	}
 stats_done:
 	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
 	return;
@@ -2901,17 +3019,18 @@
 static void vfe31_process_stats_ae_irq(void)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&vfe31_ctrl->aec_ack_lock, flags);
-	if (!(vfe31_ctrl->aecStatsControl.ackPending)) {
-		spin_unlock_irqrestore(&vfe31_ctrl->aec_ack_lock, flags);
+	uint32_t addr;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe31_ctrl->aecStatsControl.bufToRender =
 			vfe31_process_stats_irq_common(STATS_AE_NUM,
-			vfe31_ctrl->aecStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe31_ctrl->aecStatsControl.bufToRender,
 			STATS_AE_NUM);
 	} else{
-		spin_unlock_irqrestore(&vfe31_ctrl->aec_ack_lock, flags);
 		vfe31_ctrl->aecStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
 			vfe31_ctrl->aecStatsControl.droppedStatsFrameCount);
@@ -2921,17 +3040,18 @@
 static void vfe31_process_stats_awb_irq(void)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&vfe31_ctrl->awb_ack_lock, flags);
-	if (!(vfe31_ctrl->awbStatsControl.ackPending)) {
-		spin_unlock_irqrestore(&vfe31_ctrl->awb_ack_lock, flags);
+	uint32_t addr;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe31_ctrl->awbStatsControl.bufToRender =
 			vfe31_process_stats_irq_common(STATS_AWB_NUM,
-			vfe31_ctrl->awbStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe31_ctrl->awbStatsControl.bufToRender,
 			STATS_AWB_NUM);
 	} else{
-		spin_unlock_irqrestore(&vfe31_ctrl->awb_ack_lock, flags);
 		vfe31_ctrl->awbStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
 			vfe31_ctrl->awbStatsControl.droppedStatsFrameCount);
@@ -2941,17 +3061,18 @@
 static void vfe31_process_stats_af_irq(void)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&vfe31_ctrl->af_ack_lock, flags);
-	if (!(vfe31_ctrl->afStatsControl.ackPending)) {
-		spin_unlock_irqrestore(&vfe31_ctrl->af_ack_lock, flags);
+	uint32_t addr;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe31_ctrl->afStatsControl.bufToRender =
 			vfe31_process_stats_irq_common(STATS_AF_NUM,
-			vfe31_ctrl->afStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe31_ctrl->afStatsControl.bufToRender,
 			STATS_AF_NUM);
 	} else{
-		spin_unlock_irqrestore(&vfe31_ctrl->af_ack_lock, flags);
 		vfe31_ctrl->afStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
 			vfe31_ctrl->afStatsControl.droppedStatsFrameCount);
@@ -2960,27 +3081,35 @@
 
 static void vfe31_process_stats_ihist_irq(void)
 {
-	if (!(vfe31_ctrl->ihistStatsControl.ackPending)) {
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe31_ctrl->ihistStatsControl.bufToRender =
-			vfe31_process_stats_irq_common(STATS_IHIST_NUM,
-			vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf);
-
+			  vfe31_process_stats_irq_common(STATS_IHIST_NUM,
+				addr);
 		vfe_send_stats_msg(vfe31_ctrl->ihistStatsControl.bufToRender,
-			STATS_IHIST_NUM);
+			  STATS_IHIST_NUM);
 	} else {
 		vfe31_ctrl->ihistStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
-			vfe31_ctrl->ihistStatsControl.droppedStatsFrameCount);
+			 vfe31_ctrl->ihistStatsControl.droppedStatsFrameCount);
 	}
 }
 
 static void vfe31_process_stats_rs_irq(void)
 {
-	if (!(vfe31_ctrl->rsStatsControl.ackPending)) {
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe31_ctrl->rsStatsControl.bufToRender =
 			vfe31_process_stats_irq_common(STATS_RS_NUM,
-			vfe31_ctrl->rsStatsControl.nextFrameAddrBuf);
-
+			addr);
 		vfe_send_stats_msg(vfe31_ctrl->rsStatsControl.bufToRender,
 			STATS_RS_NUM);
 	} else {
@@ -2992,12 +3121,29 @@
 
 static void vfe31_process_stats_cs_irq(void)
 {
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe31_ctrl->csStatsControl.bufToRender =
+			vfe31_process_stats_irq_common(STATS_CS_NUM,
+				addr);
+		vfe_send_stats_msg(
+			vfe31_ctrl->csStatsControl.bufToRender,
+			STATS_CS_NUM);
+	} else {
+		vfe31_ctrl->csStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe31_ctrl->csStatsControl.droppedStatsFrameCount);
+	}
 	if (!(vfe31_ctrl->csStatsControl.ackPending)) {
 		vfe31_ctrl->csStatsControl.bufToRender =
 			vfe31_process_stats_irq_common(STATS_CS_NUM,
-			vfe31_ctrl->csStatsControl.nextFrameAddrBuf);
-
-		vfe_send_stats_msg(vfe31_ctrl->csStatsControl.bufToRender,
+				vfe31_ctrl->csStatsControl.nextFrameAddrBuf);
+		vfe_send_stats_msg(
+			vfe31_ctrl->csStatsControl.bufToRender,
 			STATS_CS_NUM);
 	} else {
 		vfe31_ctrl->csStatsControl.droppedStatsFrameCount++;
@@ -3010,15 +3156,16 @@
 {
 	unsigned long flags;
 	int32_t process_stats = false;
+	uint32_t addr;
 	CDBG("%s, stats = 0x%x\n", __func__, status_bits);
 
-	spin_lock_irqsave(&vfe31_ctrl->comp_stats_ack_lock, flags);
+	spin_lock_irqsave(&vfe31_ctrl->stats_bufq_lock, flags);
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AEC) {
-		if (!vfe31_ctrl->aecStatsControl.ackPending) {
-			vfe31_ctrl->aecStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AEC);
+		if (addr) {
 			vfe31_ctrl->aecStatsControl.bufToRender =
 				vfe31_process_stats_irq_common(STATS_AE_NUM,
-				vfe31_ctrl->aecStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else{
 			vfe31_ctrl->aecStatsControl.bufToRender = 0;
@@ -3029,11 +3176,11 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
-		if (!vfe31_ctrl->awbStatsControl.ackPending) {
-			vfe31_ctrl->awbStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AWB);
+		if (addr) {
 			vfe31_ctrl->awbStatsControl.bufToRender =
 				vfe31_process_stats_irq_common(STATS_AWB_NUM,
-				vfe31_ctrl->awbStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else{
 			vfe31_ctrl->awbStatsControl.droppedStatsFrameCount++;
@@ -3045,11 +3192,11 @@
 
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AF) {
-		if (!vfe31_ctrl->afStatsControl.ackPending) {
-			vfe31_ctrl->afStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_AF);
+		if (addr) {
 			vfe31_ctrl->afStatsControl.bufToRender =
 				vfe31_process_stats_irq_common(STATS_AF_NUM,
-				vfe31_ctrl->afStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe31_ctrl->afStatsControl.bufToRender = 0;
@@ -3060,11 +3207,11 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
-		if (!vfe31_ctrl->ihistStatsControl.ackPending) {
-			vfe31_ctrl->ihistStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_IHIST);
+		if (addr) {
 			vfe31_ctrl->ihistStatsControl.bufToRender =
 				vfe31_process_stats_irq_common(STATS_IHIST_NUM,
-				vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe31_ctrl->ihistStatsControl.droppedStatsFrameCount++;
@@ -3075,11 +3222,11 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_RS) {
-		if (!vfe31_ctrl->rsStatsControl.ackPending) {
-			vfe31_ctrl->rsStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_RS);
+		if (addr) {
 			vfe31_ctrl->rsStatsControl.bufToRender =
 				vfe31_process_stats_irq_common(STATS_RS_NUM,
-				vfe31_ctrl->rsStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe31_ctrl->rsStatsControl.droppedStatsFrameCount++;
@@ -3091,11 +3238,11 @@
 
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_CS) {
-		if (!vfe31_ctrl->csStatsControl.ackPending) {
-			vfe31_ctrl->csStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe31_stats_dqbuf(MSM_STATS_TYPE_CS);
+		if (addr) {
 			vfe31_ctrl->csStatsControl.bufToRender =
 				vfe31_process_stats_irq_common(STATS_CS_NUM,
-				vfe31_ctrl->csStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe31_ctrl->csStatsControl.droppedStatsFrameCount++;
@@ -3105,13 +3252,91 @@
 		vfe31_ctrl->csStatsControl.bufToRender = 0;
 	}
 
-	spin_unlock_irqrestore(&vfe31_ctrl->comp_stats_ack_lock, flags);
+	spin_unlock_irqrestore(&vfe31_ctrl->stats_bufq_lock, flags);
 	if (process_stats)
 		vfe_send_comp_stats_msg(status_bits);
 
 	return;
 }
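The vfe31 IRQ-path changes above all follow one pattern: grab stats_bufq_lock, dequeue a physical address for the interrupting stats type, drop the lock, then either render the buffer or bump droppedStatsFrameCount when the queue is empty. The stand-alone C sketch below models only that dequeue-or-drop decision; the queue layout, names, and depth are invented for illustration and are not the driver's API.

#include <stdint.h>
#include <stdio.h>

#define QUEUE_DEPTH 4

/* Hypothetical stand-in for one per-type stats buffer queue. */
struct stats_bufq {
	uint32_t paddr[QUEUE_DEPTH];	/* queued buffer addresses */
	int head, count;
	unsigned int dropped;		/* mirrors droppedStatsFrameCount */
};

/* Model of the dqbuf helper: 0 means "no buffer queued". */
static uint32_t stats_dqbuf(struct stats_bufq *q)
{
	uint32_t addr;

	if (q->count == 0)
		return 0;
	addr = q->paddr[q->head];
	q->head = (q->head + 1) % QUEUE_DEPTH;
	q->count--;
	return addr;
}

/* Model of one stats IRQ handler: dequeue-or-drop. */
static void process_stats_irq(struct stats_bufq *q)
{
	/* in the driver this dequeue happens under stats_bufq_lock */
	uint32_t addr = stats_dqbuf(q);

	if (addr)
		printf("render stats buffer at 0x%08lx\n", (unsigned long)addr);
	else
		printf("dropped stats frame #%u\n", ++q->dropped);
}

int main(void)
{
	struct stats_bufq q = { .paddr = { 0x1000, 0x2000 }, .count = 2 };

	process_stats_irq(&q);	/* renders 0x1000 */
	process_stats_irq(&q);	/* renders 0x2000 */
	process_stats_irq(&q);	/* queue empty -> counted as dropped */
	return 0;
}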
 
+static long vfe_stats_bufq_sub_ioctl(struct msm_vfe_cfg_cmd *cmd,
+	void *ion_client)
+{
+	long rc = 0;
+	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+		if (!vfe31_ctrl->stats_ops.stats_ctrl) {
+			/* stats_ctrl has not been init yet */
+			rc = msm_stats_buf_ops_init(&vfe31_ctrl->stats_ctrl,
+					(struct ion_client *)ion_client,
+					&vfe31_ctrl->stats_ops);
+			if (rc < 0) {
+				pr_err("%s: cannot init stats ops", __func__);
+				goto end;
+			}
+			rc = vfe31_ctrl->stats_ops.stats_ctrl_init(
+					&vfe31_ctrl->stats_ctrl);
+			if (rc < 0) {
+				pr_err("%s: cannot init stats_ctrl ops", __func__);
+				memset(&vfe31_ctrl->stats_ops, 0,
+					sizeof(vfe31_ctrl->stats_ops));
+				goto end;
+			}
+			if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+				/* error: the length does not match */
+				pr_err("%s: stats reqbuf input size = %d,\n"
+					"struct size = %d, mismatch\n",
+					__func__, cmd->length,
+					sizeof(struct msm_stats_reqbuf));
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	rc = vfe31_ctrl->stats_ops.reqbuf(&vfe31_ctrl->stats_ctrl,
+			(struct msm_stats_reqbuf *)cmd->value,
+			vfe31_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_ENQUEUEBUF:
+		if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats enqueuebuf input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_buf_info));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe31_ctrl->stats_ops.enqueue_buf(&vfe31_ctrl->stats_ctrl,
+				(struct msm_stats_buf_info *)cmd->value,
+				vfe31_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_FLUSH_BUFQ: {
+		struct msm_stats_flush_bufq *flush_req = NULL;
+		flush_req = (struct msm_stats_flush_bufq *)cmd->value;
+		if (sizeof(struct msm_stats_flush_bufq) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats flush queue input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_flush_bufq));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe31_ctrl->stats_ops.bufq_flush(&vfe31_ctrl->stats_ctrl,
+				(enum msm_stats_enum_type)flush_req->stats_type,
+				vfe31_ctrl->stats_ops.client);
+	}
+	break;
+	default:
+		rc = -1;
+		pr_err("%s: cmd_type %d not supported", __func__,
+			cmd->cmd_type);
+	break;
+	}
+end:
+	return rc;
+}
+
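vfe_stats_bufq_sub_ioctl() validates cmd->length against the expected payload struct before it casts cmd->value and calls into the stats ops. This is a hedged, minimal model of that validate-then-dispatch envelope; the command IDs and payload struct are hypothetical, not the msm camera ABI.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical command envelope, loosely modelled on msm_vfe_cfg_cmd. */
struct cfg_cmd {
	int cmd_type;
	size_t length;
	void *value;
};

struct reqbuf_payload { int num_buf; int stats_type; };

enum { CMD_REQBUF = 1, CMD_FLUSH = 2 };

static long bufq_sub_ioctl(const struct cfg_cmd *cmd)
{
	switch (cmd->cmd_type) {
	case CMD_REQBUF: {
		const struct reqbuf_payload *req;

		/* reject payloads whose size does not match the struct */
		if (cmd->length != sizeof(*req)) {
			fprintf(stderr, "reqbuf size %zu != %zu\n",
				cmd->length, sizeof(*req));
			return -1;
		}
		req = cmd->value;
		printf("request %d buffers for stats type %d\n",
			req->num_buf, req->stats_type);
		return 0;
	}
	case CMD_FLUSH:
		printf("flush queue\n");
		return 0;
	default:
		fprintf(stderr, "cmd_type %d not supported\n", cmd->cmd_type);
		return -1;
	}
}

int main(void)
{
	struct reqbuf_payload req = { .num_buf = 3, .stats_type = 0 };
	struct cfg_cmd cmd = { CMD_REQBUF, sizeof(req), &req };

	return (int)bufq_sub_ioctl(&cmd);
}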
 static void vfe31_process_stats_irq(uint32_t *irqstatus)
 {
 	uint32_t status_bits = VFE_COM_STATUS & *irqstatus;
@@ -3334,44 +3559,55 @@
 	void *data = vfe_params->data;
 
 	long rc = 0;
-	uint32_t i = 0;
 	struct vfe_cmd_stats_buf *scfg = NULL;
-	struct msm_pmem_region   *regptr = NULL;
 	struct vfe_cmd_stats_ack *sack = NULL;
-	if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
-		cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
-		cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
-		cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
-		if (NULL != cmd->value) {
-			if (copy_from_user(&vfecmd,
-				(void __user *)(cmd->value),
-				sizeof(vfecmd))) {
-				pr_err("%s %d: copy_from_user failed\n",
-					__func__, __LINE__);
-				return -EFAULT;
-			}
-		}
-	} else {
-	/* here eith stats release or frame release. */
+
+	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+		/* for easy porting put in one envelope */
+		rc = vfe_stats_bufq_sub_ioctl(cmd, vfe_params->data);
+		return rc;
+	default:
+		if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+			cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+			cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+				if (copy_from_user(&vfecmd,
+					(void __user *)(cmd->value),
+					sizeof(vfecmd))) {
+						pr_err("%s %d: copy_from_user failed\n",
+						__func__, __LINE__);
+					return -EFAULT;
+				}
+		} else {
+		/* here either stats release or frame release. */
 		if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
 			cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
 			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
 			/* then must be stats release. */
-			if (!data)
+			if (!data) {
+				pr_err("%s: data = NULL, cmd->cmd_type = %d",
+					__func__, cmd->cmd_type);
 				return -EFAULT;
+			}
 			sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
 							GFP_ATOMIC);
-			if (!sack)
+			if (!sack) {
+				pr_err("%s: no mem for cmd->cmd_type = %d",
+					 __func__, cmd->cmd_type);
 				return -ENOMEM;
+			}
 
 			sack->nextStatsBuf = *(uint32_t *)data;
 		}
-	}
+	  }
 
 	CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
 
@@ -3381,53 +3617,7 @@
 		(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
 		(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
 		(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
-		struct axidata *axid;
-		axid = data;
-		if (!axid) {
-			rc = -EFAULT;
-			goto vfe31_config_done;
-		}
-
-		scfg =
-			kmalloc(sizeof(struct vfe_cmd_stats_buf),
-				GFP_ATOMIC);
-		if (!scfg) {
-			rc = -ENOMEM;
-			goto vfe31_config_done;
-		}
-		regptr = axid->region;
-		if (axid->bufnum1 > 0) {
-			for (i = 0; i < axid->bufnum1; i++) {
-				scfg->statsBuf[i] =
-					(uint32_t)(regptr->paddr);
-				regptr++;
-			}
-		}
-		/* individual */
-		switch (cmd->cmd_type) {
-		case CMD_STATS_AEC_ENABLE:
-			rc = vfe_stats_aec_buf_init(scfg);
-			break;
-		case CMD_STATS_AF_ENABLE:
-			rc = vfe_stats_af_buf_init(scfg);
-			break;
-		case CMD_STATS_AWB_ENABLE:
-			rc = vfe_stats_awb_buf_init(scfg);
-			break;
-		case CMD_STATS_IHIST_ENABLE:
-			rc = vfe_stats_ihist_buf_init(scfg);
-			break;
-		case CMD_STATS_RS_ENABLE:
-			rc = vfe_stats_rs_buf_init(scfg);
-			break;
-		case CMD_STATS_CS_ENABLE:
-			rc = vfe_stats_cs_buf_init(scfg);
-			break;
-		default:
-			pr_err("%s Unsupported cmd type %d",
-				__func__, cmd->cmd_type);
-			break;
-		}
+		scfg = NULL;
 		goto vfe31_config_done;
 	}
 	switch (cmd->cmd_type) {
@@ -3459,36 +3649,6 @@
 	case CMD_SNAP_BUF_RELEASE:
 		break;
 
-	case CMD_STATS_AEC_BUF_RELEASE: {
-		vfe31_stats_aec_ack(sack);
-		}
-		break;
-
-	case CMD_STATS_AF_BUF_RELEASE: {
-		vfe31_stats_af_ack(sack);
-		}
-		break;
-
-	case CMD_STATS_AWB_BUF_RELEASE: {
-		vfe31_stats_awb_ack(sack);
-		}
-		break;
-
-	case CMD_STATS_IHIST_BUF_RELEASE: {
-		vfe31_stats_ihist_ack(sack);
-		}
-		break;
-
-	case CMD_STATS_RS_BUF_RELEASE: {
-		vfe31_stats_rs_ack(sack);
-		}
-		break;
-
-	case CMD_STATS_CS_BUF_RELEASE: {
-		vfe31_stats_cs_ack(sack);
-		}
-		break;
-
 	case CMD_AXI_CFG_PRIM: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -3607,6 +3767,7 @@
 		pr_err("%s Unsupported AXI configuration %x ", __func__,
 			cmd->cmd_type);
 		break;
+		}
 	}
 vfe31_config_done:
 	kfree(scfg);
@@ -3714,20 +3875,16 @@
 
 	spin_lock_init(&vfe31_ctrl->stop_flag_lock);
 	spin_lock_init(&vfe31_ctrl->state_lock);
+	spin_lock_init(&vfe31_ctrl->stats_bufq_lock);
 	spin_lock_init(&vfe31_ctrl->io_lock);
 	spin_lock_init(&vfe31_ctrl->update_ack_lock);
 	spin_lock_init(&vfe31_ctrl->tasklet_lock);
-
-	spin_lock_init(&vfe31_ctrl->aec_ack_lock);
-	spin_lock_init(&vfe31_ctrl->awb_ack_lock);
-	spin_lock_init(&vfe31_ctrl->af_ack_lock);
-	spin_lock_init(&vfe31_ctrl->ihist_ack_lock);
-	spin_lock_init(&vfe31_ctrl->rs_ack_lock);
-	spin_lock_init(&vfe31_ctrl->cs_ack_lock);
-	spin_lock_init(&vfe31_ctrl->comp_stats_ack_lock);
 	spin_lock_init(&vfe31_ctrl->sd_notify_lock);
 	INIT_LIST_HEAD(&vfe31_ctrl->tasklet_q);
 
+	memset(&vfe31_ctrl->stats_ctrl, 0, sizeof(struct msm_stats_bufq_ctrl));
+	memset(&vfe31_ctrl->stats_ops, 0, sizeof(struct msm_stats_ops));
+
 	vfe31_ctrl->update_linear = false;
 	vfe31_ctrl->update_rolloff = false;
 	vfe31_ctrl->update_la = false;
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.h b/drivers/media/video/msm/msm_vfe31_v4l2.h
index 739d157..2cba995 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.h
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.h
@@ -15,6 +15,7 @@
 #define __MSM_VFE31_V4L2_H__
 
 #include <linux/bitops.h>
+#include "msm_vfe_stats_buf.h"
 
 #ifndef TRUE
 #define TRUE 1
@@ -862,14 +863,7 @@
 	spinlock_t  update_ack_lock;
 	spinlock_t  state_lock;
 	spinlock_t  io_lock;
-
-	spinlock_t  aec_ack_lock;
-	spinlock_t  awb_ack_lock;
-	spinlock_t  af_ack_lock;
-	spinlock_t  ihist_ack_lock;
-	spinlock_t  rs_ack_lock;
-	spinlock_t  cs_ack_lock;
-	spinlock_t  comp_stats_ack_lock;
+	spinlock_t  stats_bufq_lock;
 
 	uint32_t extlen;
 	void *extdata;
@@ -930,6 +924,8 @@
 	uint32_t frame_skip_cnt;
 	uint32_t frame_skip_pattern;
 	uint32_t snapshot_frame_cnt;
+	struct msm_stats_bufq_ctrl stats_ctrl;
+	struct msm_stats_ops stats_ops;
 };
 
 enum VFE31_STATS_NUM {
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index 3ac4c6a..c6dd143 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -624,116 +624,248 @@
 	return 0;
 }
 
-static uint32_t vfe_stats_awb_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static unsigned long vfe32_stats_dqbuf(struct vfe32_ctrl_type *vfe32_ctrl,
+	enum msm_stats_enum_type stats_type)
 {
-	uint32_t *ptr = in->statsBuf;
-	uint32_t addr;
+	struct msm_stats_meta_buf *buf = NULL;
+	int rc = 0;
+	rc = vfe32_ctrl->stats_ops.dqbuf(
+			vfe32_ctrl->stats_ops.stats_ctrl, stats_type, &buf);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	return buf->paddr;
+}
 
-	addr = ptr[0];
+static unsigned long vfe32_stats_flush_enqueue(
+	struct vfe32_ctrl_type *vfe32_ctrl,
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	int rc = 0;
+	int i;
+
+	/*
+	 * Passing NULL for ion client as the buffers are already
+	 * mapped at this stage, client is not required, flush all
+	 * the buffers, and buffers move to PREPARE state
+	 */
+
+	rc = vfe32_ctrl->stats_ops.bufq_flush(
+			vfe32_ctrl->stats_ops.stats_ctrl, stats_type, NULL);
+	if (rc < 0) {
+		pr_err("%s: flush stats bufq (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	/* Queue all the buffers back to QUEUED state */
+	bufq = vfe32_ctrl->stats_ctrl.bufq[stats_type];
+	for (i = 0; i < bufq->num_bufs; i++) {
+		stats_buf = &bufq->bufs[i];
+		rc = vfe32_ctrl->stats_ops.enqueue_buf(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				&(stats_buf->info), NULL);
+		if (rc < 0) {
+			pr_err("%s: enqueue stats buf (type = %d) err = %d",
+				 __func__, stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
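vfe32_stats_flush_enqueue() resets a stats queue in two passes: flush every buffer back to the prepared state, then enqueue each one again so the whole set is available to the hardware. A simplified stand-alone model of that reset cycle follows; the buffer states and helpers are invented for illustration.

#include <stdio.h>

enum buf_state { BUF_PREPARED, BUF_QUEUED, BUF_DEQUEUED };

struct stats_buf { unsigned long paddr; enum buf_state state; };

#define NUM_BUFS 3

/* Model of bufq_flush(): every buffer returns to PREPARED. */
static void bufq_flush(struct stats_buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		bufs[i].state = BUF_PREPARED;
}

/* Model of flush-and-requeue: afterwards all buffers are QUEUED again. */
static int flush_enqueue(struct stats_buf *bufs, int n)
{
	int i;

	bufq_flush(bufs, n);
	for (i = 0; i < n; i++)
		bufs[i].state = BUF_QUEUED;
	return 0;
}

int main(void)
{
	struct stats_buf bufs[NUM_BUFS] = {
		{ 0x1000, BUF_DEQUEUED },	/* in flight on ping */
		{ 0x2000, BUF_DEQUEUED },	/* in flight on pong */
		{ 0x3000, BUF_QUEUED },
	};
	int i;

	flush_enqueue(bufs, NUM_BUFS);
	for (i = 0; i < NUM_BUFS; i++)
		printf("buf %d state %d\n", i, bufs[i].state);
	return 0;
}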
+static int vfe_stats_awb_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl,
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_AWB_WR_PING_ADDR);
-	addr = ptr[1];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_AWB_WR_PONG_ADDR);
-	vfe32_ctrl->awbStatsControl.nextFrameAddrBuf = in->statsBuf[2];
 	return 0;
 }
 
-static uint32_t vfe_stats_aec_buf_init(
+static int vfe_stats_aec_buf_init(
 	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
 {
-	uint32_t *ptr = in->statsBuf;
 	uint32_t addr;
+	unsigned long flags;
 
-	addr = ptr[0];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_AEC_WR_PING_ADDR);
-	addr = ptr[1];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_AEC_WR_PONG_ADDR);
-
-	vfe32_ctrl->aecStatsControl.nextFrameAddrBuf = in->statsBuf[2];
 	return 0;
 }
 
-static uint32_t vfe_stats_af_buf_init(
+static int vfe_stats_af_buf_init(
 	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
 {
-	uint32_t *ptr = in->statsBuf;
 	uint32_t addr;
+	unsigned long flags;
+	int rc = 0;
 
-	addr = ptr[0];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	if (rc < 0) {
+		pr_err("%s: flush stats bufq err = %d",
+			   __func__, rc);
+		spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+		return -EINVAL;
+	}
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_AF_WR_PING_ADDR);
-	addr = ptr[1];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_AF_WR_PONG_ADDR);
 
-	vfe32_ctrl->afStatsControl.nextFrameAddrBuf = in->statsBuf[2];
 	return 0;
 }
 
-static uint32_t vfe_stats_ihist_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int vfe_stats_ihist_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl,
+	struct vfe_cmd_stats_buf *in)
 {
-	uint32_t *ptr = in->statsBuf;
 	uint32_t addr;
+	unsigned long flags;
 
-	addr = ptr[0];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_HIST_WR_PING_ADDR);
-	addr = ptr[1];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_HIST_WR_PONG_ADDR);
 
-	vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf = in->statsBuf[2];
 	return 0;
 }
 
-static uint32_t vfe_stats_rs_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int vfe_stats_rs_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl,
+	struct vfe_cmd_stats_buf *in)
 {
-	uint32_t *ptr = in->statsBuf;
 	uint32_t addr;
+	unsigned long flags;
 
-	addr = ptr[0];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_RS_WR_PING_ADDR);
-	addr = ptr[1];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_RS_WR_PONG_ADDR);
-
-	vfe32_ctrl->rsStatsControl.nextFrameAddrBuf = in->statsBuf[2];
 	return 0;
 }
 
-static uint32_t vfe_stats_cs_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int vfe_stats_cs_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl,
+	struct vfe_cmd_stats_buf *in)
 {
-	uint32_t *ptr = in->statsBuf;
 	uint32_t addr;
-
-	addr = ptr[0];
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_CS_WR_PING_ADDR);
-	addr = ptr[1];
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
 		VFE_BUS_STATS_CS_WR_PONG_ADDR);
-
-	vfe32_ctrl->csStatsControl.nextFrameAddrBuf = in->statsBuf[2];
 	return 0;
 }
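Each vfe_stats_*_buf_init() helper above now has the same shape: dequeue one buffer for the ping write address, a second for the pong address, and return -ENOMEM if either dequeue comes back empty. The sketch below illustrates only that shape; the register offsets and the write helper are placeholders, not real VFE registers.

#include <stdint.h>
#include <stdio.h>

#define STATS_WR_PING_ADDR 0x0	/* placeholder offsets */
#define STATS_WR_PONG_ADDR 0x4

static uint32_t regs[2];	/* stand-in for the memory-mapped block */

static void reg_write(uint32_t val, uint32_t off)
{
	regs[off / 4] = val;
}

/* Pretend free-buffer queue: returns 0 when exhausted. */
static uint32_t fake_dqbuf(void)
{
	static uint32_t pool[] = { 0x80001000, 0x80002000 };
	static unsigned int next;

	return next < 2 ? pool[next++] : 0;
}

static int stats_buf_init(void)
{
	uint32_t ping = fake_dqbuf();
	uint32_t pong;

	if (!ping)
		return -1;	/* would be -ENOMEM in the driver */
	reg_write(ping, STATS_WR_PING_ADDR);

	pong = fake_dqbuf();
	if (!pong)
		return -1;
	reg_write(pong, STATS_WR_PONG_ADDR);
	return 0;
}

int main(void)
{
	if (stats_buf_init() == 0)
		printf("ping=0x%08lx pong=0x%08lx\n",
			(unsigned long)regs[0], (unsigned long)regs[1]);
	return 0;
}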
 
@@ -1614,6 +1746,12 @@
 		break;
 
 	case VFE_CMD_STATS_AE_START: {
+		rc = vfe_stats_aec_buf_init(vfe32_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AEC",
+				 __func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1637,6 +1775,12 @@
 		}
 		break;
 	case VFE_CMD_STATS_AF_START: {
+		rc = vfe_stats_af_buf_init(vfe32_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AF",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1660,6 +1804,12 @@
 		}
 		break;
 	case VFE_CMD_STATS_AWB_START: {
+		rc = vfe_stats_awb_buf_init(vfe32_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AWB",
+				 __func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1684,6 +1834,12 @@
 		break;
 
 	case VFE_CMD_STATS_IHIST_START: {
+		rc = vfe_stats_ihist_buf_init(vfe32_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of IHIST",
+				 __func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1709,6 +1865,12 @@
 
 
 	case VFE_CMD_STATS_RS_START: {
+		rc = vfe_stats_rs_buf_init(vfe32_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of RS",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -1728,6 +1890,12 @@
 		break;
 
 	case VFE_CMD_STATS_CS_START: {
+		rc = vfe_stats_cs_buf_init(vfe32_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of CS",
+				__func__);
+			goto proc_general_done;
+		}
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
 		if (!cmdp) {
 			rc = -ENOMEM;
@@ -2430,6 +2598,12 @@
 		old_val &= ~AF_BF_ENABLE_MASK;
 		msm_camera_io_w(old_val,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
+		if (rc < 0) {
+			pr_err("%s: flush stats bufq err = %d",
+				   __func__, rc);
+			return -EINVAL;
+		}
 		}
 		break;
 
@@ -2808,82 +2982,6 @@
 	return rc;
 }
 
-static void vfe32_stats_af_ack(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe32_ctrl->share_ctrl->stats_comp ?
-		&vfe32_ctrl->comp_stats_ack_lock :
-		&vfe32_ctrl->af_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe32_ctrl->afStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe32_ctrl->afStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
-static void vfe32_stats_awb_ack(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe32_ctrl->share_ctrl->stats_comp ?
-		&vfe32_ctrl->comp_stats_ack_lock :
-		&vfe32_ctrl->awb_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe32_ctrl->awbStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe32_ctrl->awbStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
-static void vfe32_stats_aec_ack(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe32_ctrl->share_ctrl->stats_comp ?
-		&vfe32_ctrl->comp_stats_ack_lock :
-		&vfe32_ctrl->aec_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe32_ctrl->aecStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe32_ctrl->aecStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
-static void vfe32_stats_ihist_ack(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe32_ctrl->share_ctrl->stats_comp ?
-		&vfe32_ctrl->comp_stats_ack_lock :
-		&vfe32_ctrl->ihist_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe32_ctrl->ihistStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-static void vfe32_stats_rs_ack(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe32_ctrl->share_ctrl->stats_comp ?
-		&vfe32_ctrl->comp_stats_ack_lock :
-		&vfe32_ctrl->rs_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe32_ctrl->rsStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe32_ctrl->rsStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-static void vfe32_stats_cs_ack(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_ack *pAck)
-{
-	unsigned long flags;
-	spinlock_t *lock = (vfe32_ctrl->share_ctrl->stats_comp ?
-		&vfe32_ctrl->comp_stats_ack_lock :
-		&vfe32_ctrl->cs_ack_lock);
-	spin_lock_irqsave(lock, flags);
-	vfe32_ctrl->csStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
-	vfe32_ctrl->csStatsControl.ackPending = FALSE;
-	spin_unlock_irqrestore(lock, flags);
-}
-
 static inline void vfe32_read_irq_status(
 	struct axi_ctrl_t *axi_ctrl, struct vfe32_irq_status *out)
 {
@@ -3653,70 +3751,87 @@
 	return returnAddr;
 }
 
-static void
-vfe_send_stats_msg(struct vfe32_ctrl_type *vfe32_ctrl,
+static void vfe_send_stats_msg(
+	struct vfe32_ctrl_type *vfe32_ctrl,
 	uint32_t bufAddress, uint32_t statsNum)
 {
-	unsigned long flags;
+	int rc = 0;
+	void *vaddr = NULL;
 	/* fill message with right content. */
 	/* @todo This is causing issues, need further investigate */
 	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
 	struct isp_msg_stats msgStats;
 	msgStats.frameCounter = vfe32_ctrl->share_ctrl->vfeFrameId;
 	msgStats.buffer = bufAddress;
-
 	switch (statsNum) {
 	case statsAeNum:{
 		msgStats.id = MSG_ID_STATS_AEC;
-		spin_lock_irqsave(&vfe32_ctrl->aec_ack_lock, flags);
-		vfe32_ctrl->aecStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AEC, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 	case statsAfNum:{
 		msgStats.id = MSG_ID_STATS_AF;
-		spin_lock_irqsave(&vfe32_ctrl->af_ack_lock, flags);
-		vfe32_ctrl->afStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AF, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 	case statsAwbNum: {
 		msgStats.id = MSG_ID_STATS_AWB;
-		spin_lock_irqsave(&vfe32_ctrl->awb_ack_lock, flags);
-		vfe32_ctrl->awbStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AWB, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 
 	case statsIhistNum: {
 		msgStats.id = MSG_ID_STATS_IHIST;
-		spin_lock_irqsave(&vfe32_ctrl->ihist_ack_lock, flags);
-		vfe32_ctrl->ihistStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe32_ctrl->ihist_ack_lock, flags);
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_IHIST, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 	case statsRsNum: {
 		msgStats.id = MSG_ID_STATS_RS;
-		spin_lock_irqsave(&vfe32_ctrl->rs_ack_lock, flags);
-		vfe32_ctrl->rsStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe32_ctrl->rs_ack_lock, flags);
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_RS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 	case statsCsNum: {
 		msgStats.id = MSG_ID_STATS_CS;
-		spin_lock_irqsave(&vfe32_ctrl->cs_ack_lock, flags);
-		vfe32_ctrl->csStatsControl.ackPending = TRUE;
-		spin_unlock_irqrestore(&vfe32_ctrl->cs_ack_lock, flags);
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_CS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 
 	default:
 		goto stats_done;
 	}
-
-	v4l2_subdev_notify(&vfe32_ctrl->subdev,
-				NOTIFY_VFE_MSG_STATS,
-				&msgStats);
+	if (rc == 0) {
+		msgStats.buffer = (uint32_t)vaddr;
+		v4l2_subdev_notify(&vfe32_ctrl->subdev,
+			NOTIFY_VFE_MSG_STATS,
+			&msgStats);
+	} else {
+		pr_err("%s: paddr to idx mapping error, stats_id = %d, paddr = 0x%x",
+			 __func__, msgStats.id, msgStats.buffer);
+	}
 stats_done:
 	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
 	return;
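vfe_send_stats_msg() no longer marks an ACK pending; it asks the buffer manager to translate the completed physical address into a buffer index, kernel virtual address, and fd before notifying the subdev. A rough user-space model of that paddr-to-index lookup is shown below; the table layout is assumed for the example and is not the msm_vfe_stats_buf implementation.

#include <stdint.h>
#include <stdio.h>

struct stats_buf {
	uint32_t paddr;
	void *vaddr;
	int fd;
};

/* Model of stats_ops.dispatch(): resolve paddr to (idx, vaddr, fd). */
static int dispatch(const struct stats_buf *tbl, int n, uint32_t paddr,
		    uint32_t *idx, void **vaddr, int *fd)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].paddr == paddr) {
			*idx = (uint32_t)i;
			*vaddr = tbl[i].vaddr;
			*fd = tbl[i].fd;
			return 0;
		}
	}
	return -1;	/* paddr-to-idx mapping error */
}

int main(void)
{
	static char backing[2][16];
	struct stats_buf tbl[2] = {
		{ 0x1000, backing[0], 10 },
		{ 0x2000, backing[1], 11 },
	};
	uint32_t idx;
	void *vaddr;
	int fd;

	if (dispatch(tbl, 2, 0x2000, &idx, &vaddr, &fd) == 0)
		printf("idx=%u fd=%d\n", (unsigned int)idx, fd);
	return 0;
}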
@@ -3751,17 +3866,18 @@
 static void vfe32_process_stats_ae_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&vfe32_ctrl->aec_ack_lock, flags);
-	if (!(vfe32_ctrl->aecStatsControl.ackPending)) {
-		spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe32_ctrl->aecStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsAeNum,
-			vfe32_ctrl->aecStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
 			vfe32_ctrl->aecStatsControl.bufToRender, statsAeNum);
 	} else{
-		spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
 		vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
 			vfe32_ctrl->aecStatsControl.droppedStatsFrameCount);
@@ -3771,17 +3887,18 @@
 static void vfe32_process_stats_awb_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&vfe32_ctrl->awb_ack_lock, flags);
-	if (!(vfe32_ctrl->awbStatsControl.ackPending)) {
-		spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe32_ctrl->awbStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsAwbNum,
-			vfe32_ctrl->awbStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
 			vfe32_ctrl->awbStatsControl.bufToRender, statsAwbNum);
 	} else{
-		spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
 		vfe32_ctrl->awbStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
 			vfe32_ctrl->awbStatsControl.droppedStatsFrameCount);
@@ -3791,17 +3908,18 @@
 static void vfe32_process_stats_af_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&vfe32_ctrl->af_ack_lock, flags);
-	if (!(vfe32_ctrl->afStatsControl.ackPending)) {
-		spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe32_ctrl->afStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsAfNum,
-			vfe32_ctrl->afStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
 			vfe32_ctrl->afStatsControl.bufToRender, statsAfNum);
 	} else{
-		spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
 		vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
 			vfe32_ctrl->afStatsControl.droppedStatsFrameCount);
@@ -3810,11 +3928,15 @@
 
 static void vfe32_process_stats_ihist_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	if (!(vfe32_ctrl->ihistStatsControl.ackPending)) {
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe32_ctrl->ihistStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(
-			vfe32_ctrl, statsIhistNum,
-			vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf);
+			vfe32_ctrl, statsIhistNum, addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
 			vfe32_ctrl->ihistStatsControl.bufToRender,
@@ -3828,10 +3950,15 @@
 
 static void vfe32_process_stats_rs_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	if (!(vfe32_ctrl->rsStatsControl.ackPending)) {
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe32_ctrl->rsStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsRsNum,
-			vfe32_ctrl->rsStatsControl.nextFrameAddrBuf);
+			addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
 			vfe32_ctrl->rsStatsControl.bufToRender, statsRsNum);
@@ -3844,13 +3971,19 @@
 
 static void vfe32_process_stats_cs_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	if (!(vfe32_ctrl->csStatsControl.ackPending)) {
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
 		vfe32_ctrl->csStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsCsNum,
-			vfe32_ctrl->csStatsControl.nextFrameAddrBuf);
+			addr);
 
-		vfe_send_stats_msg(vfe32_ctrl,
-			vfe32_ctrl->csStatsControl.bufToRender, statsCsNum);
+		vfe_send_stats_msg(vfe32_ctrl,
+			vfe32_ctrl->csStatsControl.bufToRender,
+			statsCsNum);
 	} else {
 		vfe32_ctrl->csStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
@@ -3863,16 +3996,17 @@
 {
 	unsigned long flags;
 	int32_t process_stats = false;
-	CDBG("%s, stats = 0x%x\n", __func__, status_bits);
+	uint32_t addr;
 
-	spin_lock_irqsave(&vfe32_ctrl->comp_stats_ack_lock, flags);
+	CDBG("%s, stats = 0x%x\n", __func__, status_bits);
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AEC) {
-		if (!vfe32_ctrl->aecStatsControl.ackPending) {
-			vfe32_ctrl->aecStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
+				MSM_STATS_TYPE_AEC);
+		if (addr) {
 			vfe32_ctrl->aecStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
-				vfe32_ctrl, statsAeNum,
-				vfe32_ctrl->aecStatsControl.nextFrameAddrBuf);
+				vfe32_ctrl, statsAeNum, addr);
 			process_stats = true;
 		} else{
 			vfe32_ctrl->aecStatsControl.bufToRender = 0;
@@ -3883,12 +4017,13 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
-		if (!vfe32_ctrl->awbStatsControl.ackPending) {
-			vfe32_ctrl->awbStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
+			MSM_STATS_TYPE_AWB);
+		if (addr) {
 			vfe32_ctrl->awbStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsAwbNum,
-				vfe32_ctrl->awbStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else{
 			vfe32_ctrl->awbStatsControl.droppedStatsFrameCount++;
@@ -3898,14 +4033,14 @@
 		vfe32_ctrl->awbStatsControl.bufToRender = 0;
 	}
 
-
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AF) {
-		if (!vfe32_ctrl->afStatsControl.ackPending) {
-			vfe32_ctrl->afStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
+					MSM_STATS_TYPE_AF);
+		if (addr) {
 			vfe32_ctrl->afStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsAfNum,
-				vfe32_ctrl->afStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe32_ctrl->afStatsControl.bufToRender = 0;
@@ -3916,12 +4051,13 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
-		if (!vfe32_ctrl->ihistStatsControl.ackPending) {
-			vfe32_ctrl->ihistStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
+					MSM_STATS_TYPE_IHIST);
+		if (addr) {
 			vfe32_ctrl->ihistStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsIhistNum,
-				vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe32_ctrl->ihistStatsControl.droppedStatsFrameCount++;
@@ -3932,12 +4068,13 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_RS) {
-		if (!vfe32_ctrl->rsStatsControl.ackPending) {
-			vfe32_ctrl->rsStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
+					MSM_STATS_TYPE_RS);
+		if (addr) {
 			vfe32_ctrl->rsStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsRsNum,
-				vfe32_ctrl->rsStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe32_ctrl->rsStatsControl.droppedStatsFrameCount++;
@@ -3948,12 +4085,13 @@
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_CS) {
-		if (!vfe32_ctrl->csStatsControl.ackPending) {
-			vfe32_ctrl->csStatsControl.ackPending = TRUE;
+		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
+					MSM_STATS_TYPE_CS);
+		if (addr) {
 			vfe32_ctrl->csStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsCsNum,
-				vfe32_ctrl->csStatsControl.nextFrameAddrBuf);
+				addr);
 			process_stats = true;
 		} else {
 			vfe32_ctrl->csStatsControl.droppedStatsFrameCount++;
@@ -3962,8 +4100,7 @@
 	} else {
 		vfe32_ctrl->csStatsControl.bufToRender = 0;
 	}
-
-	spin_unlock_irqrestore(&vfe32_ctrl->comp_stats_ack_lock, flags);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (process_stats)
 		vfe_send_comp_stats_msg(vfe32_ctrl, status_bits);
 
@@ -4259,6 +4396,88 @@
 	return 0;
 }
 
+static long vfe_stats_bufq_sub_ioctl(
+	struct vfe32_ctrl_type *vfe_ctrl,
+	struct msm_vfe_cfg_cmd *cmd, void *ion_client)
+{
+	long rc = 0;
+	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+	if (!vfe_ctrl->stats_ops.stats_ctrl) {
+		/* stats_ctrl has not been init yet */
+		rc = msm_stats_buf_ops_init(&vfe_ctrl->stats_ctrl,
+				(struct ion_client *)ion_client,
+				&vfe_ctrl->stats_ops);
+		if (rc < 0) {
+			pr_err("%s: cannot init stats ops", __func__);
+			goto end;
+		}
+		rc = vfe_ctrl->stats_ops.stats_ctrl_init(&vfe_ctrl->stats_ctrl);
+		if (rc < 0) {
+			pr_err("%s: cannot init stats_ctrl ops", __func__);
+			memset(&vfe_ctrl->stats_ops, 0,
+				sizeof(vfe_ctrl->stats_ops));
+			goto end;
+		}
+		if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats reqbuf input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+	rc = vfe_ctrl->stats_ops.reqbuf(
+			&vfe_ctrl->stats_ctrl,
+			(struct msm_stats_reqbuf *)cmd->value,
+			vfe_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+		/* error: the length does not match */
+		pr_err("%s: stats enqueuebuf input size = %d,\n"
+			"struct size = %d, mismatch\n",
+			__func__, cmd->length,
+			sizeof(struct msm_stats_buf_info));
+		rc = -EINVAL;
+		goto end;
+	}
+	rc = vfe_ctrl->stats_ops.enqueue_buf(
+			&vfe_ctrl->stats_ctrl,
+			(struct msm_stats_buf_info *)cmd->value,
+			vfe_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+	{
+		struct msm_stats_flush_bufq *flush_req = NULL;
+		flush_req = (struct msm_stats_flush_bufq *)cmd->value;
+		if (sizeof(struct msm_stats_flush_bufq) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats flush queue input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_flush_bufq));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe_ctrl->stats_ops.bufq_flush(
+			&vfe_ctrl->stats_ctrl,
+			(enum msm_stats_enum_type)flush_req->stats_type,
+			vfe_ctrl->stats_ops.client);
+	}
+	break;
+	default:
+		rc = -1;
+		pr_err("%s: cmd_type %d not supported", __func__,
+			cmd->cmd_type);
+	break;
+	}
+end:
+	return rc;
+}
+
 static long msm_vfe_subdev_ioctl(struct v4l2_subdev *sd,
 			unsigned int subdev_cmd, void *arg)
 {
@@ -4273,9 +4492,7 @@
 	void *data = vfe_params->data;
 
 	long rc = 0;
-	uint32_t i = 0;
 	struct vfe_cmd_stats_buf *scfg = NULL;
-	struct msm_pmem_region   *regptr = NULL;
 	struct vfe_cmd_stats_ack *sack = NULL;
 
 	if (!vfe32_ctrl->share_ctrl->vfebase) {
@@ -4283,151 +4500,100 @@
 		return -EFAULT;
 	}
 
-	if (cmd->cmd_type == CMD_VFE_PROCESS_IRQ) {
+	switch (cmd->cmd_type) {
+	case CMD_VFE_PROCESS_IRQ:
 		vfe32_process_irq(vfe32_ctrl, (uint32_t) data);
 		return rc;
-	} else if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
-		cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
-		cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
-		cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
-		if (copy_from_user(&vfecmd,
-				(void __user *)(cmd->value),
-				sizeof(vfecmd))) {
-			pr_err("%s %d: copy_from_user failed\n", __func__,
-				__LINE__);
-			return -EFAULT;
-		}
-	} else {
-	/* here eith stats release or frame release. */
+	case VFE_CMD_STATS_REQBUF:
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+		/* for easy porting put in one envelope */
+		rc = vfe_stats_bufq_sub_ioctl(vfe32_ctrl,
+				cmd, vfe_params->data);
+		return rc;
+	default:
 		if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
 			cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
-			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
-			/* then must be stats release. */
-			if (!data)
-				return -EFAULT;
-			sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+			cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+				if (copy_from_user(&vfecmd,
+					(void __user *)(cmd->value),
+					sizeof(vfecmd))) {
+						pr_err("%s %d: copy_from_user failed\n",
+							__func__, __LINE__);
+					return -EFAULT;
+				}
+		} else {
+			/* here either stats release or frame release. */
+			if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+				cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+				cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
+				/* then must be stats release. */
+				if (!data) {
+					pr_err("%s: data = NULL, cmd->cmd_type = %d",
+						__func__, cmd->cmd_type);
+					return -EFAULT;
+				}
+				sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
 							GFP_ATOMIC);
-			if (!sack)
-				return -ENOMEM;
-
-			sack->nextStatsBuf = *(uint32_t *)data;
-		}
-	}
-
-	CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
-
-	if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
-		(cmd->cmd_type == CMD_STATS_AWB_ENABLE)   ||
-		(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
-		(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
-		(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
-		(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
-		struct axidata *axid;
-		axid = data;
-		if (!axid) {
-			rc = -EFAULT;
-			goto vfe32_config_done;
-		}
-
-		scfg =
-			kmalloc(sizeof(struct vfe_cmd_stats_buf),
-				GFP_ATOMIC);
-		if (!scfg) {
-			rc = -ENOMEM;
-			goto vfe32_config_done;
-		}
-		regptr = axid->region;
-		if (axid->bufnum1 > 0) {
-			for (i = 0; i < axid->bufnum1; i++) {
-				scfg->statsBuf[i] =
-					(uint32_t)(regptr->paddr);
-				regptr++;
+				if (!sack) {
+					pr_err("%s: no mem for cmd->cmd_type = %d",
+					 __func__, cmd->cmd_type);
+					return -ENOMEM;
+				}
+				sack->nextStatsBuf = *(uint32_t *)data;
 			}
 		}
-		/* individual */
-		switch (cmd->cmd_type) {
-		case CMD_STATS_AEC_ENABLE:
-			rc = vfe_stats_aec_buf_init(vfe32_ctrl, scfg);
-			break;
-		case CMD_STATS_AF_ENABLE:
-			rc = vfe_stats_af_buf_init(vfe32_ctrl, scfg);
-			break;
-		case CMD_STATS_AWB_ENABLE:
-			rc = vfe_stats_awb_buf_init(vfe32_ctrl, scfg);
-			break;
-		case CMD_STATS_IHIST_ENABLE:
-			rc = vfe_stats_ihist_buf_init(vfe32_ctrl, scfg);
-			break;
-		case CMD_STATS_RS_ENABLE:
-			rc = vfe_stats_rs_buf_init(vfe32_ctrl, scfg);
-			break;
-		case CMD_STATS_CS_ENABLE:
-			rc = vfe_stats_cs_buf_init(vfe32_ctrl, scfg);
-			break;
-		default:
-			pr_err("%s Unsupported cmd type %d",
-				__func__, cmd->cmd_type);
-			break;
+		CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+		if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_AWB_ENABLE)   ||
+			(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+			(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+				scfg = NULL;
+				/* individual */
+				goto vfe32_config_done;
 		}
-		goto vfe32_config_done;
-	}
-	switch (cmd->cmd_type) {
-	case CMD_GENERAL:
-		rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
+		switch (cmd->cmd_type) {
+		case CMD_GENERAL:
+			rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
 		break;
-	case CMD_CONFIG_PING_ADDR: {
-		int path = *((int *)cmd->value);
-		struct vfe32_output_ch *outch =
-			vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
-		outch->ping = *((struct msm_free_buf *)data);
-	}
+		case CMD_CONFIG_PING_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe32_output_ch *outch =
+				vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+			outch->ping = *((struct msm_free_buf *)data);
+		}
+		break;
+		case CMD_CONFIG_PONG_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe32_output_ch *outch =
+				vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+			outch->pong = *((struct msm_free_buf *)data);
+		}
 		break;
 
-	case CMD_CONFIG_PONG_ADDR: {
-		int path = *((int *)cmd->value);
-		struct vfe32_output_ch *outch =
-			vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
-		outch->pong = *((struct msm_free_buf *)data);
-	}
+		case CMD_CONFIG_FREE_BUF_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe32_output_ch *outch =
+				vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+			outch->free_buf = *((struct msm_free_buf *)data);
+		}
 		break;
-
-	case CMD_CONFIG_FREE_BUF_ADDR: {
-		int path = *((int *)cmd->value);
-		struct vfe32_output_ch *outch =
-			vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
-		outch->free_buf = *((struct msm_free_buf *)data);
-	}
+		case CMD_SNAP_BUF_RELEASE:
 		break;
-	case CMD_SNAP_BUF_RELEASE:
+		default:
+			pr_err("%s Unsupported AXI configuration %x ", __func__,
+				cmd->cmd_type);
 		break;
-	case CMD_STATS_AEC_BUF_RELEASE:
-		vfe32_stats_aec_ack(vfe32_ctrl, sack);
-		break;
-	case CMD_STATS_AF_BUF_RELEASE:
-		vfe32_stats_af_ack(vfe32_ctrl, sack);
-		break;
-	case CMD_STATS_AWB_BUF_RELEASE:
-		vfe32_stats_awb_ack(vfe32_ctrl, sack);
-		break;
-
-	case CMD_STATS_IHIST_BUF_RELEASE:
-		vfe32_stats_ihist_ack(vfe32_ctrl, sack);
-		break;
-	case CMD_STATS_RS_BUF_RELEASE:
-		vfe32_stats_rs_ack(vfe32_ctrl, sack);
-		break;
-	case CMD_STATS_CS_BUF_RELEASE:
-		vfe32_stats_cs_ack(vfe32_ctrl, sack);
-		break;
-	default:
-		pr_err("%s Unsupported AXI configuration %x ", __func__,
-			cmd->cmd_type);
-		break;
+		}
 	}
 vfe32_config_done:
 	kfree(scfg);
@@ -4542,14 +4708,7 @@
 	spin_lock_init(&vfe32_ctrl->io_lock);
 	spin_lock_init(&vfe32_ctrl->update_ack_lock);
 	spin_lock_init(&vfe32_ctrl->start_ack_lock);
-
-	spin_lock_init(&vfe32_ctrl->aec_ack_lock);
-	spin_lock_init(&vfe32_ctrl->awb_ack_lock);
-	spin_lock_init(&vfe32_ctrl->af_ack_lock);
-	spin_lock_init(&vfe32_ctrl->ihist_ack_lock);
-	spin_lock_init(&vfe32_ctrl->rs_ack_lock);
-	spin_lock_init(&vfe32_ctrl->cs_ack_lock);
-	spin_lock_init(&vfe32_ctrl->comp_stats_ack_lock);
+	spin_lock_init(&vfe32_ctrl->stats_bufq_lock);
 
 	vfe32_ctrl->update_linear = false;
 	vfe32_ctrl->update_rolloff = false;
@@ -4557,6 +4716,9 @@
 	vfe32_ctrl->update_gamma = false;
 	vfe32_ctrl->hfr_mode = HFR_MODE_OFF;
 
+	memset(&vfe32_ctrl->stats_ctrl, 0, sizeof(struct msm_stats_bufq_ctrl));
+	memset(&vfe32_ctrl->stats_ops, 0, sizeof(struct msm_stats_ops));
+
 	return rc;
 }
 
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index c41df09..542bbf8 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -14,6 +14,7 @@
 #define __MSM_VFE32_H__
 
 #include <linux/bitops.h>
+#include "msm_vfe_stats_buf.h"
 
 #define TRUE  1
 #define FALSE 0
@@ -912,8 +913,6 @@
 #define VFE32_OUTPUT_MODE_TERTIARY2		BIT(11)
 
 struct vfe_stats_control {
-	uint8_t  ackPending;
-	uint32_t nextFrameAddrBuf;
 	uint32_t droppedStatsFrameCount;
 	uint32_t bufToRender;
 };
@@ -966,15 +965,7 @@
 	spinlock_t  start_ack_lock;
 	spinlock_t  state_lock;
 	spinlock_t  io_lock;
-
-	spinlock_t  aec_ack_lock;
-	spinlock_t  awb_ack_lock;
-	spinlock_t  af_ack_lock;
-	spinlock_t  ihist_ack_lock;
-	spinlock_t  rs_ack_lock;
-	spinlock_t  cs_ack_lock;
-	spinlock_t  comp_stats_ack_lock;
-
+	spinlock_t  stats_bufq_lock;
 	uint32_t extlen;
 	void *extdata;
 
@@ -1013,6 +1004,8 @@
 	uint32_t frame_skip_cnt;
 	uint32_t frame_skip_pattern;
 	uint32_t snapshot_frame_cnt;
+	struct msm_stats_bufq_ctrl stats_ctrl;
+	struct msm_stats_ops stats_ops;
 };
 
 #define statsAeNum      0
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
index 398621f..96047d5 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
@@ -351,6 +351,221 @@
 static uint32_t raw_mode;
 static struct vfe2x_ctrl_type *vfe2x_ctrl;
 
+static unsigned long vfe2x_stats_dqbuf(enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_meta_buf *buf = NULL;
+	int rc = 0;
+
+	rc = vfe2x_ctrl->stats_ops.dqbuf(vfe2x_ctrl->stats_ops.stats_ctrl,
+							  stats_type, &buf);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			   __func__, stats_type, rc);
+		return 0;
+	}
+	return buf->paddr;
+}
+
+static unsigned long vfe2x_stats_flush_enqueue(
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	int rc = 0;
+	int i;
+
+	/*
+	 * Passing NULL for ion client as the buffers are already
+	 * mapped at this stage, client is not required, flush all
+	 * the buffers, and buffers move to PREPARE state
+	 */
+	rc = vfe2x_ctrl->stats_ops.bufq_flush(
+			vfe2x_ctrl->stats_ops.stats_ctrl,
+			stats_type, NULL);
+	if (rc < 0) {
+		pr_err("%s: flush stats bufq (type = %d) err = %d",
+			 __func__, stats_type, rc);
+		return 0L;
+	}
+
+	/* Queue all the buffers back to QUEUED state */
+	bufq = vfe2x_ctrl->stats_ctrl.bufq[stats_type];
+	for (i = 0; i < bufq->num_bufs; i++) {
+		stats_buf = &bufq->bufs[i];
+		rc = vfe2x_ctrl->stats_ops.enqueue_buf(
+				vfe2x_ctrl->stats_ops.stats_ctrl,
+				&(stats_buf->info), NULL);
+		if (rc < 0) {
+			pr_err("%s: enqueue stats buf (type = %d) err = %d",
+				__func__, stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
+static int vfe2x_stats_buf_init(enum msm_stats_enum_type type)
+{
+	unsigned long flags;
+	int i = 0, rc = 0;
+	if (type == MSM_STATS_TYPE_AF) {
+		spin_lock_irqsave(&vfe2x_ctrl->stats_bufq_lock, flags);
+		rc = vfe2x_stats_flush_enqueue(MSM_STATS_TYPE_AF);
+		if (rc < 0) {
+			pr_err("%s: flush stats bufq err = %d",
+				 __func__, rc);
+			spin_unlock_irqrestore(&vfe2x_ctrl->stats_bufq_lock,
+				flags);
+			return -EINVAL;
+		}
+		spin_unlock_irqrestore(&vfe2x_ctrl->stats_bufq_lock, flags);
+	}
+	for (i = 0; i < 3; i++) {
+		unsigned long paddr;
+		spin_lock_irqsave(&vfe2x_ctrl->stats_bufq_lock, flags);
+		paddr = vfe2x_stats_dqbuf(type);
+		if (type == MSM_STATS_TYPE_AE_AW)
+			vfe2x_ctrl->stats_we_buf_ptr[i] = paddr;
+		else
+			vfe2x_ctrl->stats_af_buf_ptr[i] = paddr;
+		spin_unlock_irqrestore(&vfe2x_ctrl->stats_bufq_lock, flags);
+		if (!paddr) {
+			pr_err("%s: dq error type %d ", __func__, type);
+			return -ENOMEM;
+		}
+	}
+	return rc;
+}
+
+static unsigned long vfe2x_stats_enqueuebuf(
+	struct msm_stats_buf_info *info, struct vfe_stats_ack *sack)
+{
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	struct msm_stats_meta_buf *buf = NULL;
+	int rc = 0;
+
+	bufq = vfe2x_ctrl->stats_ctrl.bufq[info->type];
+	stats_buf = &bufq->bufs[info->buf_idx];
+
+	CDBG("vfe2x_stats_enqueuebuf: %d\n", stats_buf->state);
+	if (stats_buf->state == MSM_STATS_BUFFER_STATE_INITIALIZED ||
+		stats_buf->state == MSM_STATS_BUFFER_STATE_PREPARED) {
+		rc = vfe2x_ctrl->stats_ops.enqueue_buf(
+				&vfe2x_ctrl->stats_ctrl,
+				info, vfe2x_ctrl->stats_ops.client);
+		if (rc < 0) {
+			pr_err("%s: enqueue_buf (type = %d), index : %d, err = %d",
+				 __func__, info->type, info->buf_idx, rc);
+			return rc;
+		}
+
+	} else {
+		rc = vfe2x_ctrl->stats_ops.querybuf(
+				vfe2x_ctrl->stats_ops.stats_ctrl, info, &buf);
+		if (rc < 0) {
+			pr_err("%s: querybuf (type = %d), index : %d, err = %d",
+				__func__, info->type, info->buf_idx, rc);
+			return rc;
+		}
+		stats_buf->state = MSM_STATS_BUFFER_STATE_DEQUEUED;
+		if (info->type == MSM_STATS_TYPE_AE_AW) {
+			sack->header = VFE_STATS_WB_EXP_ACK;
+			sack->bufaddr = (void *)(uint32_t *)buf->paddr;
+		} else if (info->type == MSM_STATS_TYPE_AF) {
+			sack->header = VFE_STATS_AUTOFOCUS_ACK;
+			sack->bufaddr = (void *)(uint32_t *)buf->paddr;
+		} else
+			pr_err("%s: Invalid stats: should never come here\n", __func__);
+	}
+	return 0L;
+}
+
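vfe2x_stats_enqueuebuf() branches on buffer state: a buffer still in the initialized or prepared state is simply queued again, while a buffer the firmware already owns is converted into an ACK that carries its physical address back to the hardware. The following sketch models that branch only; the state names and the ack struct are illustrative, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

enum buf_state { BUF_INITIALIZED, BUF_PREPARED, BUF_QUEUED, BUF_DEQUEUED };

struct stats_buf { uint32_t paddr; enum buf_state state; };

struct stats_ack { int header; uint32_t bufaddr; };

/* Enqueue fresh buffers; convert in-flight ones into a hardware ACK. */
static int enqueue_or_ack(struct stats_buf *buf, struct stats_ack *ack)
{
	if (buf->state == BUF_INITIALIZED || buf->state == BUF_PREPARED) {
		buf->state = BUF_QUEUED;
		return 0;		/* queued for a later IRQ */
	}

	buf->state = BUF_DEQUEUED;
	ack->header = 1;		/* e.g. a "WB/EXP ack" in the driver */
	ack->bufaddr = buf->paddr;
	return 1;			/* caller must send the ACK */
}

int main(void)
{
	struct stats_buf fresh = { 0x1000, BUF_PREPARED };
	struct stats_buf done = { 0x2000, BUF_QUEUED };
	struct stats_ack ack = { 0, 0 };

	printf("fresh -> %d\n", enqueue_or_ack(&fresh, &ack));
	printf("done  -> %d (ack addr 0x%lx)\n",
		enqueue_or_ack(&done, &ack), (unsigned long)ack.bufaddr);
	return 0;
}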
+static long vfe2x_stats_bufq_sub_ioctl(struct msm_vfe_cfg_cmd *cmd,
+	void *ion_client)
+{
+	long rc = 0;
+
+	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+		if (!vfe2x_ctrl->stats_ops.stats_ctrl) {
+			/* stats_ctrl has not been init yet */
+			rc = msm_stats_buf_ops_init(
+					&vfe2x_ctrl->stats_ctrl,
+					(struct ion_client *)ion_client,
+					&vfe2x_ctrl->stats_ops);
+			if (rc < 0) {
+				pr_err("%s: cannot init stats ops", __func__);
+				goto end;
+			}
+			rc = vfe2x_ctrl->stats_ops.stats_ctrl_init(
+					&vfe2x_ctrl->stats_ctrl);
+			if (rc < 0) {
+				pr_err("%s: cannot init stats_ctrl ops",
+					 __func__);
+				memset(&vfe2x_ctrl->stats_ops, 0,
+				sizeof(vfe2x_ctrl->stats_ops));
+				goto end;
+			}
+			if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+				/* error: the length does not match */
+				pr_err("%s: stats reqbuf input size = %d,\n"
+					"struct size = %d, mismatch\n",
+					__func__, cmd->length,
+					sizeof(struct msm_stats_reqbuf));
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+		rc = vfe2x_ctrl->stats_ops.reqbuf(
+				&vfe2x_ctrl->stats_ctrl,
+				(struct msm_stats_reqbuf *)cmd->value,
+				vfe2x_ctrl->stats_ops.client);
+		break;
+	case VFE_CMD_STATS_ENQUEUEBUF: {
+		if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats enqueuebuf input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_buf_info));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe2x_ctrl->stats_ops.enqueue_buf(
+				&vfe2x_ctrl->stats_ctrl,
+				(struct msm_stats_buf_info *)cmd->value,
+				vfe2x_ctrl->stats_ops.client);
+	}
+	break;
+	case VFE_CMD_STATS_FLUSH_BUFQ: {
+		struct msm_stats_flush_bufq *flush_req = NULL;
+		flush_req = (struct msm_stats_flush_bufq *)cmd->value;
+		if (sizeof(struct msm_stats_flush_bufq) != cmd->length) {
+			/* error: command length does not match */
+			pr_err("%s: stats flush queue input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				(int)sizeof(struct msm_stats_flush_bufq));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe2x_ctrl->stats_ops.bufq_flush(
+				&vfe2x_ctrl->stats_ctrl,
+				(enum msm_stats_enum_type)flush_req->stats_type,
+				vfe2x_ctrl->stats_ops.client);
+	}
+	break;
+	default:
+		rc = -1;
+		pr_err("%s: cmd_type %d not supported",
+			 __func__, cmd->cmd_type);
+	break;
+	}
+end:
+	return rc;
+}
+
 static void vfe2x_send_isp_msg(
 	struct vfe2x_ctrl_type *vctrl,
 	uint32_t isp_msg_id)
@@ -384,11 +599,26 @@
 static void vfe_send_stats_msg(uint32_t buf_addr, uint32_t msg_id)
 {
 	struct isp_msg_stats msg_stats;
+	void *vaddr = NULL;
+	int rc = 0;
 
 	msg_stats.frameCounter = vfe2x_ctrl->vfeFrameId;
 	msg_stats.buffer       = buf_addr;
 	msg_stats.id           = msg_id;
 
+	if (MSG_ID_STATS_AWB_AEC == msg_id)
+		rc = vfe2x_ctrl->stats_ops.dispatch(
+			vfe2x_ctrl->stats_ops.stats_ctrl,
+			MSM_STATS_TYPE_AE_AW, buf_addr,
+			&msg_stats.buf_idx, &vaddr, &msg_stats.fd,
+			vfe2x_ctrl->stats_ops.client);
+	else if (MSG_ID_STATS_AF == msg_id)
+		rc = vfe2x_ctrl->stats_ops.dispatch(
+			vfe2x_ctrl->stats_ops.stats_ctrl,
+			MSM_STATS_TYPE_AF, buf_addr,
+			&msg_stats.buf_idx, &vaddr, &msg_stats.fd,
+			vfe2x_ctrl->stats_ops.client);
+	if (rc < 0)
+		pr_err("%s: stats dispatch failed for msg_id %d\n",
+			__func__, msg_id);
+
 	v4l2_subdev_notify(&vfe2x_ctrl->subdev,
 				NOTIFY_VFE_MSG_STATS,
 				&msg_stats);
@@ -979,21 +1209,54 @@
 
 	CDBG("msm_vfe_subdev_ioctl is called\n");
 	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
-	    cmd->cmd_type != CMD_STATS_BUF_RELEASE &&
-	    cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
 		cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
 		cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
 		cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
-		cmd->cmd_type != CMD_VFE_BUFFER_RELEASE) {
+		cmd->cmd_type != CMD_VFE_BUFFER_RELEASE &&
+		cmd->cmd_type != VFE_CMD_STATS_REQBUF &&
+		cmd->cmd_type != VFE_CMD_STATS_FLUSH_BUFQ &&
+		cmd->cmd_type != VFE_CMD_STATS_ENQUEUEBUF) {
 		if (copy_from_user(&vfecmd,
-				(void __user *)(cmd->value),
-				sizeof(vfecmd))) {
+			   (void __user *)(cmd->value),
+			   sizeof(vfecmd))) {
 			pr_err("copy_from_user in msm_vfe_subdev_ioctl fail\n");
 			return -EFAULT;
 		}
 	}
-
 	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+		/* for easy porting put in one envelope */
+		rc = vfe2x_stats_bufq_sub_ioctl(cmd, vfe_params->data);
+		return rc;
+	case VFE_CMD_STATS_ENQUEUEBUF:
+		if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+			/* error: command length does not match */
+			pr_err("%s: stats enqueuebuf input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				(int)sizeof(struct msm_stats_buf_info));
+			rc = -EINVAL;
+			return rc;
+		}
+		sack.header = 0;
+		sack.bufaddr = NULL;
+		rc = vfe2x_stats_enqueuebuf(cmd->value, &sack);
+		if (rc < 0) {
+			pr_err("%s: stats enqueuebuf failed\n", __func__);
+			rc = -EINVAL;
+			return rc;
+		}
+		if (sack.header != 0 && sack.bufaddr != NULL) {
+			queue  = QDSP_CMDQUEUE;
+			vfecmd.length = sizeof(struct vfe_stats_ack) - 4;
+			cmd_data = &sack;
+		} else {
+			return 0;
+		}
+	break;
 	case CMD_VFE_BUFFER_RELEASE: {
 		if (!(vfe2x_ctrl->vfe_started) || op_mode == 1)
 			return 0;
@@ -1041,7 +1304,6 @@
 	}
 		return 0;
 
-	case CMD_STATS_AEC_AWB_ENABLE:
 	case CMD_STATS_AXI_CFG: {
 		axid = data;
 		if (!axid) {
@@ -1096,15 +1358,49 @@
 		}
 	}
 		break;
-	case CMD_STATS_AF_ENABLE:
-	case CMD_STATS_AF_AXI_CFG: {
-		CDBG("CMD_STATS_AF_ENABLE CMD_STATS_AF_AXI_CFG\n");
-		axid = data;
-		if (!axid) {
-			rc = -EFAULT;
+	case CMD_STATS_AEC_AWB_ENABLE: {
+		CDBG("CMD_STATS_AEC_AWB_ENABLE\n");
+		scfg =
+			kmalloc(sizeof(struct vfe_stats_we_cfg),
+				GFP_ATOMIC);
+		if (!scfg) {
+			rc = -ENOMEM;
 			goto config_failure;
 		}
 
+		if (copy_from_user((char *)scfg + 4,
+					(void __user *)(vfecmd.value),
+					vfecmd.length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		header = cmds_map[vfecmd.id].vfe_id;
+		queue = cmds_map[vfecmd.id].queue;
+		if (header == -1 && queue == -1) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+		*(uint32_t *)scfg = header;
+		rc = vfe2x_stats_buf_init(MSM_STATS_TYPE_AE_AW);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AWB",
+				 __func__);
+			goto config_failure;
+		}
+		scfg->wb_expstatoutputbuffer[0] =
+			(void *)vfe2x_ctrl->stats_we_buf_ptr[0];
+		scfg->wb_expstatoutputbuffer[1] =
+			(void *)vfe2x_ctrl->stats_we_buf_ptr[1];
+		scfg->wb_expstatoutputbuffer[2] =
+			(void *)vfe2x_ctrl->stats_we_buf_ptr[2];
+		cmd_data = scfg;
+	}
+	break;
+	case CMD_STATS_AF_ENABLE:
+	case CMD_STATS_AF_AXI_CFG: {
+		CDBG("CMD_STATS_AF_ENABLE CMD_STATS_AF_AXI_CFG\n");
 		sfcfg =
 			kmalloc(sizeof(struct vfe_stats_af_cfg),
 				GFP_ATOMIC);
@@ -1122,9 +1418,6 @@
 			goto config_done;
 		}
 
-		CDBG("AF_ENABLE: bufnum = %d, enabling = %d\n",
-			axid->bufnum1, sfcfg->af_enable);
-
 		header = cmds_map[vfecmd.id].vfe_id;
 		queue = cmds_map[vfecmd.id].queue;
 		if (header == -1 && queue == -1) {
@@ -1132,27 +1425,16 @@
 			goto config_failure;
 		}
 		*(uint32_t *)sfcfg = header;
-		CDBG("Number of buffers = %d\n", axid->bufnum1);
-		if (axid->bufnum1 > 0) {
-			regptr = &axid->region[0];
-
-			for (i = 0; i < axid->bufnum1; i++) {
-
-				CDBG("STATS_ENABLE, phy = 0x%lx\n",
-					regptr->paddr);
-
-				sfcfg->af_outbuf[i] =
-					(void *)regptr->paddr;
-
-				regptr++;
-			}
-
-			cmd_data = sfcfg;
-
-		} else {
-			rc = -EINVAL;
-			goto config_done;
+		rc = vfe2x_stats_buf_init(MSM_STATS_TYPE_AF);
+		sfcfg->af_outbuf[0] = (void *)vfe2x_ctrl->stats_af_buf_ptr[0];
+		sfcfg->af_outbuf[1] = (void *)vfe2x_ctrl->stats_af_buf_ptr[1];
+		sfcfg->af_outbuf[2] = (void *)vfe2x_ctrl->stats_af_buf_ptr[2];
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AF",
+				__func__);
+			goto config_failure;
 		}
+		cmd_data = sfcfg;
 	}
 		break;
 	case CMD_SNAP_BUF_RELEASE:
@@ -1688,6 +1970,8 @@
 	stopevent.state = 0;
 	vfe2x_ctrl->vfe_started = 0;
 
+	memset(&vfe2x_ctrl->stats_ctrl, 0, sizeof(struct msm_stats_bufq_ctrl));
+	memset(&vfe2x_ctrl->stats_ops, 0, sizeof(struct msm_stats_ops));
 
 	CDBG("msm_cam_clk_enable: enable vfe_clk\n");
 	rc = msm_cam_clk_enable(&vfe2x_ctrl->pdev->dev, vfe2x_clk_info,
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h b/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
index 2f2d3c6..b7d6806 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
@@ -16,6 +16,7 @@
 #include <mach/camera.h>
 #include <linux/list.h>
 #include "msm.h"
+#include "msm_vfe_stats_buf.h"
 
 struct cmd_id_map {
 	uint32_t isp_id;
@@ -111,6 +112,11 @@
 	spinlock_t  sd_notify_lock;
 	uint32_t    reconfig_vfe;
 	uint32_t    zsl_mode;
+	spinlock_t  stats_bufq_lock;
+	struct msm_stats_bufq_ctrl stats_ctrl;
+	struct msm_stats_ops stats_ops;
+	unsigned long stats_we_buf_ptr[3];
+	unsigned long stats_af_buf_ptr[3];
 } __packed;
 
 struct vfe_frame_extra {
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.c b/drivers/media/video/msm/msm_vfe_stats_buf.c
new file mode 100644
index 0000000..9e8f285
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe_stats_buf.c
@@ -0,0 +1,509 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+
+#include <linux/android_pmem.h>
+#include <media/msm_camera.h>
+#include <media/msm_isp.h>
+#include "msm.h"
+#include "msm_vfe_stats_buf.h"
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+	#define D(fmt, args...) pr_debug("msm_stats: " fmt, ##args)
+#else
+	#define D(fmt, args...) do {} while (0)
+#endif
+
+static int msm_stats_init(struct msm_stats_bufq_ctrl *stats_ctrl)
+{
+	int rc = 0;
+	/* cannot get spinlock here */
+	if (stats_ctrl->init_done > 0) {
+		pr_err("%s: already initialized stats ctrl. no op", __func__);
+		return 0;
+	}
+	memset(stats_ctrl,  0,  sizeof(struct msm_stats_bufq_ctrl));
+	spin_lock_init(&stats_ctrl->lock);
+	stats_ctrl->init_done = 1;
+	return rc;
+}
+
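+/*
+ * msm_stats_reqbuf() below mirrors the V4L2 REQBUFS convention:
+ *   num_buf > 0  allocates a queue of num_buf meta buffers for the given
+ *                stats type (-EEXIST if that type already has a queue),
+ *   num_buf == 0 releases a previously requested queue,
+ *   num_buf < 0  is rejected with -EPERM.
+ */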
+static int msm_stats_reqbuf(struct msm_stats_bufq_ctrl *stats_ctrl,
+	struct msm_stats_reqbuf *reqbuf,
+	struct ion_client *client)
+{
+	int rc = 0;
+	struct msm_stats_bufq *bufq;
+	struct msm_stats_meta_buf *bufs;
+	int idx = reqbuf->stats_type;
+	int i;
+
+	D("%s: type : %d, buf num : %d\n", __func__,
+		reqbuf->stats_type, reqbuf->num_buf);
+	if (reqbuf->num_buf > 0) {
+		if (stats_ctrl->bufq[idx]) {
+			/* already in use. Error */
+			pr_err("%s: stats type %d already requested",
+				 __func__, reqbuf->stats_type);
+			rc = -EEXIST;
+			goto end;
+		} else {
+			/* good case */
+			bufq = (struct msm_stats_bufq *)
+				kzalloc(
+					sizeof(struct msm_stats_bufq),
+					GFP_KERNEL);
+			if (!bufq) {
+				/* no memory */
+				rc = -ENOMEM;
+				pr_err("%s: no mem for stats type %d",
+					__func__, reqbuf->stats_type);
+				goto end;
+			}
+			bufs = (struct msm_stats_meta_buf *)
+				kzalloc((reqbuf->num_buf *
+					sizeof(struct msm_stats_meta_buf)),
+					GFP_KERNEL);
+			if (!bufs) {
+				/* no memory */
+				rc = -ENOMEM;
+				pr_err("%s: no mem for stats buf, stats type = %d",
+					__func__, reqbuf->stats_type);
+				kfree(bufq);
+				goto end;
+			}
+			/* init bufq list head */
+			INIT_LIST_HEAD(&bufq->head);
+			/* set the meta buf state to initialized */
+			for (i = 0; i < reqbuf->num_buf; i++)
+				bufs[i].state =
+					MSM_STATS_BUFFER_STATE_INITIALIZED;
+			bufq->bufs = bufs;
+			bufq->num_bufs = reqbuf->num_buf;
+			bufq->type = reqbuf->stats_type;
+			stats_ctrl->bufq[idx] = bufq;
+			/* done reqbuf (larger than zero case) */
+			goto end;
+		}
+	} else if (reqbuf->num_buf == 0) {
+		if (stats_ctrl->bufq[idx] == NULL) {
+			/* double free case? */
+			pr_err("%s: stats type %d already freed",
+				 __func__, reqbuf->stats_type);
+			rc = -ENXIO;
+			goto end;
+		} else {
+			/* good case. need to de-reqbuf */
+			kfree(stats_ctrl->bufq[idx]->bufs);
+			kfree(stats_ctrl->bufq[idx]);
+			stats_ctrl->bufq[idx] = NULL;
+			goto end;
+		}
+	} else {
+		/* error case */
+		pr_err("%s: stats type = %d, req_num_buf = %d, error",
+			   __func__, reqbuf->stats_type, reqbuf->num_buf);
+		rc = -EPERM;
+		goto end;
+	}
+end:
+	return rc;
+}
+static int msm_stats_deinit(struct msm_stats_bufq_ctrl *stats_ctrl)
+{
+	int rc = 0;
+	int i;
+
+	if (stats_ctrl->init_done == 0) {
+		pr_err("%s: not inited yet. no op", __func__);
+		return 0;
+	}
+	/* safe guard in case deallocate memory not done yet. */
+	for (i = 0; i < MSM_STATS_TYPE_MAX; i++) {
+		if (stats_ctrl->bufq[i]) {
+			if (stats_ctrl->bufq[i]->bufs) {
+				rc = -1;
+				pr_err("%s: stats type = %d, buf not freed yet",
+					 __func__, i);
+				BUG_ON(stats_ctrl->bufq[i]->bufs);
+			} else {
+				rc = -1;
+				pr_err("%s: stats type = %d, bufq not freed yet",
+					__func__, i);
+				BUG_ON(stats_ctrl->bufq[i]);
+			}
+		}
+	}
+	memset(stats_ctrl,  0,  sizeof(struct msm_stats_bufq_ctrl));
+	return rc;
+}
+
+#ifdef CONFIG_ANDROID_PMEM
+static int msm_stats_check_pmem_info(struct msm_stats_buf_info *info, int len)
+{
+	if (info->offset < len &&
+		info->offset + info->len <= len &&
+		info->planar0_off < len && info->planar1_off < len)
+		return 0;
+
+	pr_err("%s: check failed: off %d len %d y %d cbcr %d (total len %d)\n",
+		   __func__,
+		   info->offset,
+		   info->len,
+		   info->planar0_off,
+		   info->planar1_off,
+		   len);
+	return -EINVAL;
+}
+#endif
+
+static int msm_stats_buf_prepare(struct msm_stats_bufq_ctrl *stats_ctrl,
+	struct msm_stats_buf_info *info, struct ion_client *client)
+{
+	unsigned long paddr;
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+	unsigned long kvstart;
+	struct file *file;
+#endif
+	int rc = 0;
+	unsigned long len;
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+
+	D("%s: type : %d, buf num : %d\n", __func__,
+		info->type, info->buf_idx);
+
+	bufq = stats_ctrl->bufq[info->type];
+	stats_buf = &bufq->bufs[info->buf_idx];
+	if (stats_buf->state == MSM_STATS_BUFFER_STATE_UNUSED) {
+		pr_err("%s: need reqbuf first, stats type = %d",
+			__func__, info->type);
+		rc = -1;
+		goto out1;
+	}
+	if (stats_buf->state != MSM_STATS_BUFFER_STATE_INITIALIZED) {
+		D("%s: stats already mapped, no op, stats type = %d",
+			__func__, info->type);
+		goto out1;
+	}
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	stats_buf->handle = ion_import_dma_buf(client, info->fd);
+	if (IS_ERR_OR_NULL(stats_buf->handle)) {
+		rc = -EINVAL;
+		pr_err("%s: stats_buf has null/error ION handle %p",
+			   __func__, stats_buf->handle);
+		goto out1;
+	}
+	if (ion_map_iommu(client, stats_buf->handle,
+			CAMERA_DOMAIN, GEN_POOL, SZ_4K,
+			0, &paddr, &len, UNCACHED, 0) < 0) {
+		rc = -EINVAL;
+		pr_err("%s: cannot map address", __func__);
+		goto out2;
+	}
+#elif defined(CONFIG_ANDROID_PMEM)
+	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
+	if (rc < 0) {
+		pr_err("%s: get_pmem_file fd %d error %d\n",
+			   __func__, info->fd, rc);
+		goto out1;
+	}
+	stats_buf->file = file;
+#else
+	paddr = 0;
+	file = NULL;
+	kvstart = 0;
+#endif
+	if (!info->len)
+		info->len = len;
+	rc = msm_stats_check_pmem_info(info, len);
+	if (rc < 0) {
+		pr_err("%s: msm_stats_check_pmem_info err = %d", __func__, rc);
+		goto out3;
+	}
+	paddr += info->offset;
+	len = info->len;
+	stats_buf->paddr = paddr;
+	stats_buf->len = len;
+	memcpy(&stats_buf->info, info, sizeof(stats_buf->info));
+	D("%s Adding buf to list with type %d\n", __func__,
+	  stats_buf->info.type);
+	D("%s pmem_stats address is 0x%lx\n", __func__, paddr);
+	stats_buf->state = MSM_STATS_BUFFER_STATE_PREPARED;
+	return 0;
+out3:
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	ion_unmap_iommu(client, stats_buf->handle, CAMERA_DOMAIN, GEN_POOL);
+out2:
+	ion_free(client, stats_buf->handle);
+#elif defined(CONFIG_ANDROID_PMEM)
+	put_pmem_file(stats_buf->file);
+#endif
+out1:
+	return rc;
+}
+static int msm_stats_buf_unprepare(struct msm_stats_bufq_ctrl *stats_ctrl,
+	enum msm_stats_enum_type stats_type, int buf_idx,
+	struct ion_client *client)
+{
+	int rc = 0;
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+
+	D("%s: type : %d, idx : %d\n", __func__, stats_type, buf_idx);
+	bufq = stats_ctrl->bufq[stats_type];
+	stats_buf = &bufq->bufs[buf_idx];
+	if (stats_buf->state == MSM_STATS_BUFFER_STATE_UNUSED) {
+		pr_err("%s: need reqbuf first, stats type = %d",
+			__func__, stats_type);
+		rc = -1;
+		goto end;
+	}
+	if (stats_buf->state == MSM_STATS_BUFFER_STATE_INITIALIZED) {
+		D("%s: stats already mapped, no op, stats type = %d",
+			__func__, stats_type);
+		goto end;
+	}
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	ion_unmap_iommu(client, stats_buf->handle,
+					CAMERA_DOMAIN, GEN_POOL);
+	ion_free(client, stats_buf->handle);
+#else
+	put_pmem_file(stats_buf->file);
+#endif
+	if (stats_buf->state == MSM_STATS_BUFFER_STATE_QUEUED) {
+		/* buf queued need delete from list */
+		D("%s: delete stats buf, type = %d, idx = %d",
+		  __func__,  stats_type,  buf_idx);
+		list_del_init(&stats_buf->list);
+	}
+end:
+	return rc;
+}
+
+static int msm_stats_bufq_flush(struct msm_stats_bufq_ctrl *stats_ctrl,
+	enum msm_stats_enum_type stats_type, struct ion_client *client)
+{
+	int rc = 0;
+	int i;
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+
+	D("%s: type : %d\n", __func__, stats_type);
+	bufq = stats_ctrl->bufq[stats_type];
+
+	for (i = 0; i < bufq->num_bufs; i++) {
+		stats_buf = &bufq->bufs[i];
+		switch (stats_buf->state) {
+		case MSM_STATS_BUFFER_STATE_QUEUED:
+			/* buf queued in stats free queue */
+			stats_buf->state = MSM_STATS_BUFFER_STATE_PREPARED;
+			list_del_init(&stats_buf->list);
+			break;
+		case MSM_STATS_BUFFER_STATE_DEQUEUED:
+			/* if stats buf in VFE reset the state */
+			stats_buf->state = MSM_STATS_BUFFER_STATE_PREPARED;
+			break;
+		case MSM_STATS_BUFFER_STATE_DISPATCHED:
+			/* if stats buf in userspace reset the state */
+			stats_buf->state = MSM_STATS_BUFFER_STATE_PREPARED;
+			break;
+		default:
+			break;
+		}
+	}
+	return rc;
+}
+
+static int msm_stats_dqbuf(struct msm_stats_bufq_ctrl *stats_ctrl,
+	enum msm_stats_enum_type stats_type,
+	struct msm_stats_meta_buf **pp_stats_buf)
+{
+	int rc = 0;
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+
+	D("%s: type : %d\n", __func__, stats_type);
+	*pp_stats_buf = NULL;
+	bufq = stats_ctrl->bufq[stats_type];
+
+	list_for_each_entry(stats_buf, &bufq->head, list) {
+		if (stats_buf->state == MSM_STATS_BUFFER_STATE_QUEUED) {
+			/* found one buf */
+			list_del_init(&stats_buf->list);
+			*pp_stats_buf = stats_buf;
+			break;
+		}
+	}
+	if (!(*pp_stats_buf)) {
+		pr_err("%s: no free stats buf, type = %d",
+			__func__, stats_type);
+		rc = -1;
+		return rc;
+	}
+	stats_buf->state = MSM_STATS_BUFFER_STATE_DEQUEUED;
+	return rc;
+}
+
+
+static int msm_stats_querybuf(struct msm_stats_bufq_ctrl *stats_ctrl,
+	struct msm_stats_buf_info *info,
+	struct msm_stats_meta_buf **pp_stats_buf)
+{
+	int rc = 0;
+	struct msm_stats_bufq *bufq = NULL;
+
+	*pp_stats_buf = NULL;
+	D("%s: stats type : %d, buf_idx : %d", __func__, info->type,
+		   info->buf_idx);
+	bufq = stats_ctrl->bufq[info->type];
+	*pp_stats_buf = &bufq->bufs[info->buf_idx];
+
+	return rc;
+}
+
+static int msm_stats_qbuf(struct msm_stats_bufq_ctrl *stats_ctrl,
+	enum msm_stats_enum_type stats_type,
+	int buf_idx)
+{
+	int rc = 0;
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	D("%s: stats type : %d, buf_idx : %d", __func__, stats_type,
+		   buf_idx);
+
+	bufq = stats_ctrl->bufq[stats_type];
+	if (!bufq) {
+		pr_err("%s: null bufq, stats type = %d", __func__, stats_type);
+		rc = -1;
+		goto end;
+	}
+	if (buf_idx >= bufq->num_bufs) {
+		pr_err("%s: stats type = %d, buf idx %d is larger than buf count %d",
+			   __func__, stats_type, buf_idx, bufq->num_bufs);
+		rc = -1;
+		goto end;
+	}
+	stats_buf = &bufq->bufs[buf_idx];
+	switch (stats_buf->state) {
+	case MSM_STATS_BUFFER_STATE_PREPARED:
+	case MSM_STATS_BUFFER_STATE_DEQUEUED:
+	case MSM_STATS_BUFFER_STATE_DISPATCHED:
+		stats_buf->state = MSM_STATS_BUFFER_STATE_QUEUED;
+		list_add_tail(&stats_buf->list, &bufq->head);
+		break;
+	default:
+		pr_err("%s: incorrect state = %d, stats type = %d, cannot qbuf",
+			   __func__, stats_buf->state, stats_type);
+		rc = -1;
+		break;
+	}
+end:
+	return rc;
+}
+
+static int msm_stats_buf_dispatch(struct msm_stats_bufq_ctrl *stats_ctrl,
+	enum msm_stats_enum_type stats_type,
+	unsigned long phy_addr, int *buf_idx,
+	void **vaddr, int *fd,
+	struct ion_client *client)
+{
+	int rc = 0;
+	int i;
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	D("%s: stats type : %d\n", __func__, stats_type);
+
+	*buf_idx = -1;
+	*vaddr = NULL;
+	*fd = 0;
+	bufq = stats_ctrl->bufq[stats_type];
+	for (i = 0; i < bufq->num_bufs; i++) {
+		if (bufq->bufs[i].paddr == phy_addr) {
+			stats_buf = &bufq->bufs[i];
+			*buf_idx = i;
+			*vaddr = stats_buf->info.vaddr;
+			*fd = stats_buf->info.fd;
+			break;
+		}
+	}
+	if (!stats_buf) {
+		pr_err("%s: no match, phy_addr = 0x%lx, stats_type = %d",
+			   __func__, phy_addr, stats_type);
+		return -EFAULT;
+	}
+	switch (stats_buf->state) {
+	case MSM_STATS_BUFFER_STATE_DEQUEUED:
+		stats_buf->state = MSM_STATS_BUFFER_STATE_DISPATCHED;
+		break;
+	default:
+		pr_err("%s: type = %d, idx = %d, cur_state = %d,\n"
+			   "cannot set state to DISPATCHED\n",
+			   __func__, stats_type, *buf_idx, stats_buf->state);
+		rc = -EFAULT;
+		break;
+	}
+	return rc;
+}
+static int msm_stats_enqueue_buf(struct msm_stats_bufq_ctrl *stats_ctrl,
+	struct msm_stats_buf_info *info, struct ion_client *client)
+{
+	int rc = 0;
+	rc = msm_stats_buf_prepare(stats_ctrl, info, client);
+	if (rc < 0) {
+		pr_err("%s: buf_prepare failed, rc = %d", __func__, rc);
+		return -EINVAL;
+	}
+	rc = msm_stats_qbuf(stats_ctrl, info->type, info->buf_idx);
+	if (rc < 0) {
+		pr_err("%s: msm_stats_qbuf failed, rc = %d", __func__, rc);
+		return -EINVAL;
+	}
+	return rc;
+}
+
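+/*
+ * msm_stats_buf_ops_init() only fills in the ops table; the caller drives
+ * the buffer lifecycle itself.  A rough sketch of the sequence used by the
+ * VFE code in this patch (vfe2x_stats_bufq_sub_ioctl/vfe_send_stats_msg),
+ * with error handling and locking omitted and the arguments supplied by the
+ * caller:
+ *
+ *	msm_stats_buf_ops_init(&stats_ctrl, ion_client, &ops);
+ *	ops.stats_ctrl_init(&stats_ctrl);
+ *	ops.reqbuf(&stats_ctrl, reqbuf, ops.client);
+ *	ops.enqueue_buf(&stats_ctrl, info, ops.client);
+ *	ops.dispatch(&stats_ctrl, type, paddr, &idx, &vaddr, &fd, ops.client);
+ *	ops.bufq_flush(&stats_ctrl, type, ops.client);
+ */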
+int msm_stats_buf_ops_init(struct msm_stats_bufq_ctrl *stats_ctrl,
+	struct ion_client *client, struct msm_stats_ops *ops)
+{
+	ops->stats_ctrl = stats_ctrl;
+	ops->client = client;
+	ops->enqueue_buf = msm_stats_enqueue_buf;
+	ops->qbuf = msm_stats_qbuf;
+	ops->dqbuf = msm_stats_dqbuf;
+	ops->bufq_flush = msm_stats_bufq_flush;
+	ops->buf_unprepare = msm_stats_buf_unprepare;
+	ops->buf_prepare = msm_stats_buf_prepare;
+	ops->reqbuf = msm_stats_reqbuf;
+	ops->querybuf = msm_stats_querybuf;
+	ops->dispatch = msm_stats_buf_dispatch;
+	ops->stats_ctrl_init = msm_stats_init;
+	ops->stats_ctrl_deinit = msm_stats_deinit;
+	return 0;
+}
+
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.h b/drivers/media/video/msm/msm_vfe_stats_buf.h
new file mode 100644
index 0000000..18fd425
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe_stats_buf.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_STATS_BUF_H_
+#define _MSM_STATS_BUF_H_
+
+enum msm_stats_buffer_state {
+	MSM_STATS_BUFFER_STATE_UNUSED,	  /* not used */
+	MSM_STATS_BUFFER_STATE_INITIALIZED,	   /* REQBUF done */
+	MSM_STATS_BUFFER_STATE_PREPARED,	/* BUF mapped */
+	MSM_STATS_BUFFER_STATE_QUEUED,	  /* buf queued */
+	MSM_STATS_BUFFER_STATE_DEQUEUED,	/* in use in VFE */
+	MSM_STATS_BUFFER_STATE_DISPATCHED,	  /* sent to userspace */
+};
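+
+/*
+ * Typical transitions, as driven by msm_vfe_stats_buf.c:
+ *   reqbuf       UNUSED       -> INITIALIZED
+ *   buf_prepare  INITIALIZED  -> PREPARED    (buffer mapped)
+ *   qbuf         PREPARED/DEQUEUED/DISPATCHED -> QUEUED
+ *   dqbuf        QUEUED       -> DEQUEUED    (owned by the VFE)
+ *   dispatch     DEQUEUED     -> DISPATCHED  (owned by userspace)
+ *   bufq_flush   QUEUED/DEQUEUED/DISPATCHED  -> PREPARED
+ */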
+
+struct msm_stats_meta_buf {
+	struct list_head list;
+	enum msm_stats_buffer_state state;
+	int type;
+	int fd;
+	uint32_t offset;
+	unsigned long paddr;
+	unsigned long len;
+	struct file *file;
+	struct msm_stats_buf_info info;
+	struct ion_handle *handle;
+};
+
+struct msm_stats_bufq {
+	struct list_head head;
+	int num_bufs;
+	int type;
+	struct msm_stats_meta_buf *bufs;
+};
+
+
+struct msm_stats_bufq_ctrl {
+	/* spinlock unused for now; assume the VFE driver holds its own lock */
+	spinlock_t lock;
+	int init_done;
+	struct msm_stats_bufq *bufq[MSM_STATS_TYPE_MAX];
+};
+
+struct msm_stats_ops {
+	struct msm_stats_bufq_ctrl *stats_ctrl;
+	struct ion_client *client;
+	int (*enqueue_buf) (struct msm_stats_bufq_ctrl *stats_ctrl,
+						struct msm_stats_buf_info *info,
+						struct ion_client *client);
+	int (*qbuf) (struct msm_stats_bufq_ctrl *stats_ctrl,
+				 enum msm_stats_enum_type stats_type,
+				 int buf_idx);
+	int (*dqbuf) (struct msm_stats_bufq_ctrl *stats_ctrl,
+				  enum msm_stats_enum_type stats_type,
+				  struct msm_stats_meta_buf **pp_stats_buf);
+	int (*bufq_flush) (struct msm_stats_bufq_ctrl *stats_ctrl,
+					   enum msm_stats_enum_type stats_type,
+					   struct ion_client *client);
+	int (*buf_unprepare) (struct msm_stats_bufq_ctrl *stats_ctrl,
+		enum msm_stats_enum_type stats_type,
+		int buf_idx,
+		struct ion_client *client);
+	int (*buf_prepare) (struct msm_stats_bufq_ctrl *stats_ctrl,
+						struct msm_stats_buf_info *info,
+						struct ion_client *client);
+	int (*reqbuf) (struct msm_stats_bufq_ctrl *stats_ctrl,
+				   struct msm_stats_reqbuf *reqbuf,
+				   struct ion_client *client);
+	int (*dispatch) (struct msm_stats_bufq_ctrl *stats_ctrl,
+		enum msm_stats_enum_type stats_type,
+		unsigned long phy_addr, int *buf_idx, void **vaddr, int *fd,
+		struct ion_client *client);
+	int (*querybuf) (struct msm_stats_bufq_ctrl *stats_ctrl,
+		struct msm_stats_buf_info *info,
+		struct msm_stats_meta_buf **pp_stats_buf);
+	int (*stats_ctrl_init) (struct msm_stats_bufq_ctrl *stats_ctrl);
+	int (*stats_ctrl_deinit) (struct msm_stats_bufq_ctrl *stats_ctrl);
+};
+
+int msm_stats_buf_ops_init(struct msm_stats_bufq_ctrl *stats_ctrl,
+	struct ion_client *client, struct msm_stats_ops *ops);
+
+#endif /* _MSM_STATS_BUF_H_ */
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index b604d0a..ded9f11 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -98,6 +98,18 @@
 	cmd_done.status = VIDC_ERR_NONE;
 	cmd_done.size = sizeof(struct msm_vidc_cb_event);
 	num_properties_changed = pkt->event_data2;
+	switch (pkt->event_data1) {
+	case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES:
+		event_notify.hal_event_type =
+			HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES;
+		break;
+	case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES:
+		event_notify.hal_event_type =
+			HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES;
+		break;
+	default:
+		break;
+	}
 	if (num_properties_changed) {
 		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
 		do {
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index ebb4afe..15254fb 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -76,3 +76,14 @@
 
 	  This driver is only of interest to those developing or
 	  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+	tristate "MMC block test"
+	depends on MMC_BLOCK && IOSCHED_TEST
+	default m
+	help
+	  MMC block test can be used with test iosched to test the MMC block
+	  device.
+	  Currently used to test eMMC 4.5 features (packed commands, sanitize,
+	  BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@
 
 obj-$(CONFIG_SDIO_UART)		+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)		+= mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1331aa4..a496df0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -125,17 +125,6 @@
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-	MMC_BLK_SUCCESS = 0,
-	MMC_BLK_PARTIAL,
-	MMC_BLK_CMD_ERR,
-	MMC_BLK_RETRY,
-	MMC_BLK_ABORT,
-	MMC_BLK_DATA_ERR,
-	MMC_BLK_ECC_ERR,
-	MMC_BLK_NOMEDIUM,
-};
-
 enum {
         MMC_PACKED_N_IDX = -1,
         MMC_PACKED_N_ZERO,
@@ -1431,6 +1420,64 @@
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+	int i;
+	int max_num_of_packed_reqs = 0;
+
+	if ((!card) || (!card->wr_pack_stats.packing_events))
+		return;
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	spin_lock(&card->wr_pack_stats.lock);
+
+	pr_info("%s: write packing statistics:\n",
+		mmc_hostname(card->host));
+
+	for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+		if (card->wr_pack_stats.packing_events[i] != 0)
+			pr_info("%s: Packed %d reqs - %d times\n",
+				mmc_hostname(card->host), i,
+				card->wr_pack_stats.packing_events[i]);
+	}
+
+	pr_info("%s: stopped packing due to the following reasons:\n",
+		mmc_hostname(card->host));
+
+	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+		pr_info("%s: %d times: exceeding the max num of segments\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+		pr_info("%s: %d times: exceeding the max num of sectors\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+	if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+		pr_info("%s: %d times: wrong data direction\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+	if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+		pr_info("%s: %d times: flush or discard\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+	if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+		pr_info("%s: %d times: empty queue\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+	if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
+		pr_info("%s: %d times: rel write\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
+	if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
+		pr_info("%s: %d times: Threshold\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+	spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
 	struct request_queue *q = mq->queue;
@@ -1628,7 +1675,18 @@
 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
 	mqrq->mmc_active.mrq = &brq->mrq;
-	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	/*
+	 * This is intended for packed commands tests usage - in case these
+	 * functions are not in use the respective pointers are NULL
+	 */
+	if (mq->err_check_fn)
+		mqrq->mmc_active.err_check = mq->err_check_fn;
+	else
+		mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	if (mq->packed_test_fn)
+		mq->packed_test_fn(mq->queue, mqrq);
 
 	mmc_queue_bounce_pre(mqrq);
 }
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
new file mode 100644
index 0000000..0ace608
--- /dev/null
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -0,0 +1,1533 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* MMC block test */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/delay.h>
+#include <linux/test-iosched.h>
+#include "queue.h"
+
+#define MODULE_NAME "mmc_block_test"
+#define TEST_MAX_SECTOR_RANGE		(600*1024*1024) /* 600 MB */
+#define TEST_MAX_BIOS_PER_REQ		120
+#define CMD23_PACKED_BIT	(1 << 30)
+#define LARGE_PRIME_1	1103515367
+#define LARGE_PRIME_2	35757
+#define PACKED_HDR_VER_MASK 0x000000FF
+#define PACKED_HDR_RW_MASK 0x0000FF00
+#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
+#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+enum is_random {
+	NON_RANDOM_TEST,
+	RANDOM_TEST,
+};
+
+enum mmc_block_test_testcases {
+	/* Start of send write packing test group */
+	SEND_WRITE_PACKING_MIN_TESTCASE,
+	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
+	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
+	TEST_STOP_DUE_TO_FLUSH,
+	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
+	TEST_STOP_DUE_TO_EMPTY_QUEUE,
+	TEST_STOP_DUE_TO_MAX_REQ_NUM,
+	TEST_STOP_DUE_TO_THRESHOLD,
+	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,
+
+	/* Start of err check test group */
+	ERR_CHECK_MIN_TESTCASE,
+	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
+	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
+	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
+	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
+	TEST_RET_PARTIAL_MAX_FAIL_IDX,
+	TEST_RET_RETRY,
+	TEST_RET_CMD_ERR,
+	TEST_RET_DATA_ERR,
+	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,
+
+	/* Start of send invalid test group */
+	INVALID_CMD_MIN_TESTCASE,
+	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
+	TEST_HDR_WRONG_WRITE_CODE,
+	TEST_HDR_INVALID_RW_CODE,
+	TEST_HDR_DIFFERENT_ADDRESSES,
+	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
+	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
+	TEST_HDR_CMD23_PACKED_BIT_SET,
+	TEST_CMD23_MAX_PACKED_WRITES,
+	TEST_CMD23_ZERO_PACKED_WRITES,
+	TEST_CMD23_PACKED_BIT_UNSET,
+	TEST_CMD23_REL_WR_BIT_SET,
+	TEST_CMD23_BITS_16TO29_SET,
+	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+};
+
+enum mmc_block_test_group {
+	TEST_NO_GROUP,
+	TEST_GENERAL_GROUP,
+	TEST_SEND_WRITE_PACKING_GROUP,
+	TEST_ERR_CHECK_GROUP,
+	TEST_SEND_INVALID_GROUP,
+};
+
+struct mmc_block_test_debug {
+	struct dentry *send_write_packing_test;
+	struct dentry *err_check_test;
+	struct dentry *send_invalid_packed_test;
+	struct dentry *random_test_seed;
+};
+
+struct mmc_block_test_data {
+	/* The number of write requests that the test will issue */
+	int num_requests;
+	/* The expected write packing statistics for the current test */
+	struct mmc_wr_pack_stats exp_packed_stats;
+	/*
+	 * A user-defined seed for random choices of number of bios written in
+	 * a request, and of number of requests issued in a test
+	 * This field is randomly updated after each use
+	 */
+	unsigned int random_test_seed;
+	/* A retry counter used in err_check tests */
+	int err_check_counter;
+	/* Can be one of the values of enum test_group */
+	enum mmc_block_test_group test_group;
+	/*
+	 * Indicates if the current testcase is running with random values of
+	 * num_requests and num_bios (in each request)
+	 */
+	int is_random;
+	/* Data structure for debugfs dentrys */
+	struct mmc_block_test_debug debug;
+	/*
+	 * Data structure containing individual test information, including
+	 * self-defined specific data
+	 */
+	struct test_info test_info;
+	/* mmc block device test */
+	struct blk_dev_test_type bdt;
+};
+
+static struct mmc_block_test_data *mbtd;
+
+/*
+ * A callback assigned to the packed_test_fn field.
+ * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
+ * Here we alter the packed header or CMD23 in order to send an invalid
+ * packed command to the card.
+ */
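+/*
+ * For reference, the packed write header words touched below (packed_cmd_hdr
+ * is an array of 32-bit words), as implied by the masks above and the code in
+ * this function:
+ *   word 0: bits [7:0] version, [15:8] R/W code, [23:16] number of entries
+ *   word 2: CMD23 argument of the first packed write request
+ *   word 3: CMD25 argument (start sector) of the first packed write request
+ *   word 4: CMD23 argument of the second packed write request
+ */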
+static void test_invalid_packed_cmd(struct request_queue *q,
+				    struct mmc_queue_req *mqrq)
+{
+	struct mmc_queue *mq = q->queuedata;
+	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+	struct request *req = mqrq->req;
+	struct request *second_rq;
+	struct test_request *test_rq;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	int num_requests;
+	int max_packed_reqs;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return;
+	}
+
+	test_rq = (struct test_request *)req->elv.priv[0];
+	if (!test_rq) {
+		test_pr_err("%s: NULL test_rq", __func__);
+		return;
+	}
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	switch (mbtd->test_info.testcase) {
+	case TEST_HDR_INVALID_VERSION:
+		test_pr_info("%s: set invalid header version", __func__);
+		/* Put 0 in header version field (1 byte, offset 0 in header) */
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
+		break;
+	case TEST_HDR_WRONG_WRITE_CODE:
+		test_pr_info("%s: wrong write code", __func__);
+		/* Set R/W field with R value (1 byte, offset 1 in header) */
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
+		break;
+	case TEST_HDR_INVALID_RW_CODE:
+		test_pr_info("%s: invalid r/w code", __func__);
+		/* Set R/W field with invalid value */
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
+		break;
+	case TEST_HDR_DIFFERENT_ADDRESSES:
+		test_pr_info("%s: different addresses", __func__);
+		second_rq = list_entry(req->queuelist.next, struct request,
+				queuelist);
+		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
+			      __func__, (long)req->__sector,
+			     (long)second_rq->__sector);
+		/*
+		 * Put start sector of second write request in the first write
+		 * request's cmd25 argument in the packed header
+		 */
+		packed_cmd_hdr[3] = second_rq->__sector;
+		break;
+	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+		test_pr_info("%s: request num smaller than actual", __func__);
+		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+									>> 16;
+		/* num of entries is decremented by 1 */
+		num_requests = (num_requests - 1) << 16;
+		/*
+		 * Set number of requests field in packed write header to be
+		 * smaller than the actual number (1 byte, offset 2 in header)
+		 */
+		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+		break;
+	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+		test_pr_info("%s: request num larger than actual", __func__);
+		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+									>> 16;
+		/* num of entries is incremented by 1 */
+		num_requests = (num_requests + 1) << 16;
+		/*
+		 * Set number of requests field in packed write header to be
+		 * larger than the actual number (1 byte, offset 2 in header).
+		 */
+		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+		break;
+	case TEST_HDR_CMD23_PACKED_BIT_SET:
+		test_pr_info("%s: header CMD23 packed bit set" , __func__);
+		/*
+		 * Set packed bit (bit 30) in cmd23 argument of first and second
+		 * write requests in packed write header.
+		 * These are located at words 2 and 4 of the packed write header
+		 */
+		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
+		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
+		break;
+	case TEST_CMD23_MAX_PACKED_WRITES:
+		test_pr_info("%s: CMD23 request num > max_packed_reqs",
+			      __func__);
+		/*
+		 * Set the individual packed cmd23 request num to
+		 * max_packed_reqs + 1
+		 */
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
+		break;
+	case TEST_CMD23_ZERO_PACKED_WRITES:
+		test_pr_info("%s: CMD23 request num = 0", __func__);
+		/* Set the individual packed cmd23 request num to zero */
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
+		break;
+	case TEST_CMD23_PACKED_BIT_UNSET:
+		test_pr_info("%s: CMD23 packed bit unset", __func__);
+		/*
+		 * Set the individual packed cmd23 packed bit to 0,
+		 *  although there is a packed write request
+		 */
+		brq->sbc.arg &= ~CMD23_PACKED_BIT;
+		break;
+	case TEST_CMD23_REL_WR_BIT_SET:
+		test_pr_info("%s: CMD23 REL WR bit set", __func__);
+		/* Set the individual packed cmd23 reliable write bit */
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
+		break;
+	case TEST_CMD23_BITS_16TO29_SET:
+		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+			PACKED_HDR_BITS_16_TO_29_SET;
+		break;
+	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+		test_pr_info("%s: CMD23 hdr not in block count", __func__);
+		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
+		break;
+	default:
+		test_pr_err("%s: unexpected testcase %d",
+			__func__, mbtd->test_info.testcase);
+		break;
+	}
+}
+
+/*
+ * A callback assigned to the err_check_fn field of the mmc_request by the
+ * MMC/card/block layer.
+ * Called upon request completion by the MMC/core layer.
+ * Here we emulate an error return value from the card.
+ */
+static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+			mmc_active);
+	struct request_queue *req_q = test_iosched_get_req_queue();
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+	int ret = 0;
+
+	if (!req_q) {
+		test_pr_err("%s: NULL request_queue", __func__);
+		return 0;
+	}
+	mq = req_q->queuedata;
+
+	if (!mq) {
+		test_pr_err("%s: %s: NULL mq", __func__,
+			mmc_hostname(card->host));
+		return 0;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	if (!mq_rq) {
+		test_pr_err("%s: %s: NULL mq_rq", __func__,
+			mmc_hostname(card->host));
+		return 0;
+	}
+
+	switch (mbtd->test_info.testcase) {
+	case TEST_RET_ABORT:
+		test_pr_info("%s: return abort", __func__);
+		ret = MMC_BLK_ABORT;
+		break;
+	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+		test_pr_info("%s: return partial followed by success",
+			      __func__);
+		/*
+		 * Since in this testcase num_requests is always >= 2,
+		 * we can be sure that packed_fail_idx is always >= 1
+		 */
+		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
+		test_pr_info("%s: packed_fail_idx = %d"
+			, __func__, mq_rq->packed_fail_idx);
+		mq->err_check_fn = NULL;
+		ret = MMC_BLK_PARTIAL;
+		break;
+	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+		if (!mbtd->err_check_counter) {
+			test_pr_info("%s: return partial followed by abort",
+				      __func__);
+			mbtd->err_check_counter++;
+			/*
+			 * Since in this testcase num_requests is always >= 3,
+			 * we have that packed_fail_idx is always >= 1
+			 */
+			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
+			test_pr_info("%s: packed_fail_idx = %d"
+				, __func__, mq_rq->packed_fail_idx);
+			ret = MMC_BLK_PARTIAL;
+			break;
+		}
+		mbtd->err_check_counter = 0;
+		mq->err_check_fn = NULL;
+		ret = MMC_BLK_ABORT;
+		break;
+	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+		test_pr_info("%s: return partial multiple until success",
+			     __func__);
+		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
+			mq->err_check_fn = NULL;
+			mbtd->err_check_counter = 0;
+			ret = MMC_BLK_PARTIAL;
+			break;
+		}
+		mq_rq->packed_fail_idx = 1;
+		ret = MMC_BLK_PARTIAL;
+		break;
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		test_pr_info("%s: return partial max fail_idx", __func__);
+		mq_rq->packed_fail_idx = max_packed_reqs - 1;
+		mq->err_check_fn = NULL;
+		ret = MMC_BLK_PARTIAL;
+		break;
+	case TEST_RET_RETRY:
+		test_pr_info("%s: return retry", __func__);
+		ret = MMC_BLK_RETRY;
+		break;
+	case TEST_RET_CMD_ERR:
+		test_pr_info("%s: return cmd err", __func__);
+		ret = MMC_BLK_CMD_ERR;
+		break;
+	case TEST_RET_DATA_ERR:
+		test_pr_info("%s: return data err", __func__);
+		ret = MMC_BLK_DATA_ERR;
+		break;
+	default:
+		test_pr_err("%s: unexpected testcase %d",
+			__func__, mbtd->test_info.testcase);
+	}
+
+	return ret;
+}
+
+/*
+ * This is a specific implementation for the get_test_case_str_fn function
+ * pointer in the test_info data structure. Given a valid test_data instance,
+ * the function returns a string resembling the test name, based on the testcase
+ */
+static char *get_test_case_str(struct test_data *td)
+{
+	if (!td) {
+		test_pr_err("%s: NULL td", __func__);
+		return NULL;
+	}
+
+	switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+		return "Test stop due to flush";
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+		return "Test stop due to flush after max-1 reqs";
+	case TEST_STOP_DUE_TO_READ:
+		return "Test stop due to read";
+	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+		return "Test stop due to read after max-1 reqs";
+	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+		return "Test stop due to empty queue";
+	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+		return "Test stop due to max req num";
+	case TEST_STOP_DUE_TO_THRESHOLD:
+		return "Test stop due to exceeding threshold";
+	case TEST_RET_ABORT:
+		return "Test err_check return abort";
+	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+		return "Test err_check return partial followed by success";
+	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+		return "Test err_check return partial followed by abort";
+	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+		return "Test err_check return partial multiple until success";
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		return "Test err_check return partial max fail index";
+	case TEST_RET_RETRY:
+		return "Test err_check return retry";
+	case TEST_RET_CMD_ERR:
+		return "Test err_check return cmd error";
+	case TEST_RET_DATA_ERR:
+		return "Test err_check return data error";
+	case TEST_HDR_INVALID_VERSION:
+		return "Test invalid - wrong header version";
+	case TEST_HDR_WRONG_WRITE_CODE:
+		return "Test invalid - wrong write code";
+	case TEST_HDR_INVALID_RW_CODE:
+		return "Test invalid - wrong R/W code";
+	case TEST_HDR_DIFFERENT_ADDRESSES:
+		return "Test invalid - header different addresses";
+	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+		return "Test invalid - header req num smaller than actual";
+	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+		return "Test invalid - header req num larger than actual";
+	case TEST_HDR_CMD23_PACKED_BIT_SET:
+		return "Test invalid - header cmd23 packed bit set";
+	case TEST_CMD23_MAX_PACKED_WRITES:
+		return "Test invalid - cmd23 max packed writes";
+	case TEST_CMD23_ZERO_PACKED_WRITES:
+		return "Test invalid - cmd23 zero packed writes";
+	case TEST_CMD23_PACKED_BIT_UNSET:
+		return "Test invalid - cmd23 packed bit unset";
+	case TEST_CMD23_REL_WR_BIT_SET:
+		return "Test invalid - cmd23 rel wr bit set";
+	case TEST_CMD23_BITS_16TO29_SET:
+		return "Test invalid - cmd23 bits [16-29] set";
+	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+		return "Test invalid - cmd23 header block not in count";
+	default:
+		 return "Unknown testcase";
+	}
+
+	return NULL;
+}
+
+/*
+ * Compare individual testcase's statistics to the expected statistics:
+ * Compare stop reason and number of packing events
+ */
+static int check_wr_packing_statistics(struct test_data *td)
+{
+	struct mmc_wr_pack_stats *mmc_packed_stats;
+	struct mmc_queue *mq = td->req_q->queuedata;
+	int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	int i;
+	struct mmc_card *card = mq->card;
+	struct mmc_wr_pack_stats expected_stats;
+	int *stop_reason;
+	int ret = 0;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	expected_stats = mbtd->exp_packed_stats;
+
+	mmc_packed_stats = mmc_blk_get_packed_statistics(card);
+	if (!mmc_packed_stats) {
+		test_pr_err("%s: NULL mmc_packed_stats", __func__);
+		return -EINVAL;
+	}
+
+	if (!mmc_packed_stats->packing_events) {
+		test_pr_err("%s: NULL packing_events", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock(&mmc_packed_stats->lock);
+
+	if (!mmc_packed_stats->enabled) {
+		test_pr_err("%s write packing statistics are not enabled",
+			     __func__);
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	stop_reason = mmc_packed_stats->pack_stop_reason;
+
+	for (i = 1 ; i <= max_packed_reqs ; ++i) {
+		if (mmc_packed_stats->packing_events[i] !=
+		    expected_stats.packing_events[i]) {
+			test_pr_err(
+			"%s: Wrong pack stats in index %d, got %d, expected %d",
+			__func__, i, mmc_packed_stats->packing_events[i],
+			       expected_stats.packing_events[i]);
+			if (td->fs_wr_reqs_during_test)
+				goto cancel_round;
+			ret = -EINVAL;
+			goto exit_err;
+		}
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
+	    expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
+			__func__, stop_reason[EXCEEDS_SEGMENTS],
+		       expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
+	    expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
+			__func__, stop_reason[EXCEEDS_SECTORS],
+		       expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
+	    expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
+		       __func__, stop_reason[WRONG_DATA_DIR],
+		       expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
+	    expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
+		       __func__, stop_reason[FLUSH_OR_DISCARD],
+		       expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
+	    expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
+		test_pr_err(
+		"%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
+		       __func__, stop_reason[EMPTY_QUEUE],
+		       expected_stats.pack_stop_reason[EMPTY_QUEUE]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
+	    expected_stats.pack_stop_reason[REL_WRITE]) {
+		test_pr_err(
+			"%s: Wrong pack stop reason REL_WRITE %d, expected %d",
+		       __func__, stop_reason[REL_WRITE],
+		       expected_stats.pack_stop_reason[REL_WRITE]);
+		if (td->fs_wr_reqs_during_test)
+			goto cancel_round;
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+exit_err:
+	spin_unlock(&mmc_packed_stats->lock);
+	if (ret && mmc_packed_stats->enabled)
+		print_mmc_packing_stats(card);
+	return ret;
+cancel_round:
+	spin_unlock(&mmc_packed_stats->lock);
+	test_iosched_set_ignore_round(true);
+	return 0;
+}
+
+/*
+ * Pseudo-randomly choose a seed based on the last seed, and update it in
+ * seed_number. then return seed_number (mod max_val), or min_val.
+ */
+static unsigned int pseudo_random_seed(unsigned int *seed_number,
+				       unsigned int min_val,
+				       unsigned int max_val)
+{
+	int ret = 0;
+
+	if (!seed_number)
+		return 0;
+
+	*seed_number = ((unsigned int)(((unsigned long)*seed_number *
+				(unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
+	ret = (unsigned int)((*seed_number) % max_val);
+
+	return (ret > min_val ? ret : min_val);
+}
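+
+/*
+ * In other words, pseudo_random_seed() is a simple linear congruential step,
+ *	next = prev * LARGE_PRIME_1 + LARGE_PRIME_2	(mod 2^32)
+ * and the returned value is (next mod max_val), raised to min_val when it
+ * falls below it, so repeated calls walk a deterministic sequence from the
+ * initial seed.
+ */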
+
+/*
+ * Given a pseudo-random seed, find a pseudo-random num_of_bios.
+ * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
+ */
+static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
+				   unsigned int *num_of_bios)
+{
+	do {
+		*num_of_bios = pseudo_random_seed(num_bios_seed, 1,
+						  TEST_MAX_BIOS_PER_REQ);
+		if (!(*num_of_bios))
+			*num_of_bios = 1;
+	} while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
+}
+
+/* Add a single read request to the given td's request queue */
+static int prepare_request_add_read(struct test_data *td)
+{
+	int ret;
+	int start_sec;
+
+	if (!td) {
+		test_pr_err("%s: NULL td", __func__);
+		return 0;
+	}
+	start_sec = td->start_sector;
+
+	test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
+		     td->wr_rd_next_req_id);
+
+	ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
+					      TEST_PATTERN_5A, NULL);
+	if (ret) {
+		test_pr_err("%s: failed to add a read request", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Add a single flush request to the given td's request queue */
+static int prepare_request_add_flush(struct test_data *td)
+{
+	int ret;
+
+	if (!td) {
+		test_pr_err("%s: NULL td", __func__);
+		return 0;
+	}
+
+	test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
+		     td->unique_next_req_id);
+	ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
+				  0, 0, NULL);
+	if (ret) {
+		test_pr_err("%s: failed to add a flush request", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Add num_requets amount of write requests to the given td's request queue.
+ * If random test mode is chosen we pseudo-randomly choose the number of bios
+ * for each write request, otherwise add between 1 to 5 bio per request.
+ */
+static int prepare_request_add_write_reqs(struct test_data *td,
+					  int num_requests, int is_err_expected,
+					  int is_random)
+{
+	int i;
+	unsigned int start_sec;
+	unsigned int num_bios;
+	int ret = 0;
+	unsigned int *bio_seed = &mbtd->random_test_seed;
+
+	if (!td) {
+		test_pr_err("%s: NULL td", __func__);
+		return ret;
+	}
+	start_sec = td->start_sector;
+
+	test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+		     num_requests, td->wr_rd_next_req_id);
+
+	for (i = 1 ; i <= num_requests ; i++) {
+		start_sec = td->start_sector + 4096 * td->num_of_write_bios;
+		if (is_random)
+			pseudo_rnd_num_of_bios(bio_seed, &num_bios);
+		else
+			/*
+			 * For the non-random case, give num_bios a value
+			 * between 1 and 5, to keep a small number of BIOs
+			 */
+			num_bios = (i%5)+1;
+
+		ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+				start_sec, num_bios, TEST_PATTERN_5A, NULL);
+
+		if (ret) {
+			test_pr_err("%s: failed to add a write request",
+				    __func__);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for a generic packed commands
+ * testcase
+ */
+static int prepare_packed_requests(struct test_data *td, int is_err_expected,
+				   int num_requests, int is_random)
+{
+	int ret = 0;
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+	struct request_queue *req_q;
+
+	if (!td) {
+		pr_err("%s: NULL td", __func__);
+		return -EINVAL;
+	}
+
+	req_q = td->req_q;
+
+	if (!req_q) {
+		pr_err("%s: NULL request queue", __func__);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	if (mbtd->random_test_seed == 0) {
+		mbtd->random_test_seed =
+			(unsigned int)(get_jiffies_64() & 0xFFFF);
+		test_pr_info("%s: got seed from jiffies %d",
+			     __func__, mbtd->random_test_seed);
+	}
+
+	mmc_blk_init_packed_statistics(mq->card);
+
+	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+					     is_random);
+	if (ret)
+		return ret;
+
+	/* Avoid memory corruption in upcoming stats set */
+	if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
+		num_requests--;
+
+	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+	memset(mbtd->exp_packed_stats.packing_events, 0,
+		(max_packed_reqs + 1) * sizeof(u32));
+	if (num_requests <= max_packed_reqs)
+		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+
+	switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+		ret = prepare_request_add_flush(td);
+		if (ret)
+			return ret;
+
+		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+		break;
+	case TEST_STOP_DUE_TO_READ:
+	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+		ret = prepare_request_add_read(td);
+		if (ret)
+			return ret;
+
+		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+		break;
+	case TEST_STOP_DUE_TO_THRESHOLD:
+		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+		mbtd->exp_packed_stats.packing_events[1] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+		break;
+	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+		break;
+	default:
+		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+	}
+	mbtd->num_requests = num_requests;
+
+	return 0;
+}
+
+/*
+ * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
+ * In this testcase we have mixed error expectations from different
+ * write requests, hence the special prepare function.
+ */
+static int prepare_partial_followed_by_abort(struct test_data *td,
+					      int num_requests)
+{
+	int i, start_address;
+	int is_err_expected = 0;
+	int ret = 0;
+	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+	int max_packed_reqs;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	mmc_blk_init_packed_statistics(mq->card);
+
+	for (i = 1 ; i <= num_requests ; i++) {
+		if (i > (num_requests / 2))
+			is_err_expected = 1;
+
+		start_address = td->start_sector + 4096*td->num_of_write_bios;
+		ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+				start_address, (i%5)+1, TEST_PATTERN_5A, NULL);
+		if (ret) {
+			test_pr_err("%s: failed to add a write request",
+				    __func__);
+			return ret;
+		}
+	}
+
+	memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
+		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+	memset(mbtd->exp_packed_stats.packing_events, 0,
+		(max_packed_reqs + 1) * sizeof(u32));
+	mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+	mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+
+	mbtd->num_requests = num_requests;
+
+	return ret;
+}
+
+/*
+ * Get number of write requests for current testcase. If random test mode was
+ * chosen, pseudo-randomly choose the number of requests, otherwise set to
+ * two less than the packing threshold.
+ */
+static int get_num_requests(struct test_data *td)
+{
+	int *seed = &mbtd->random_test_seed;
+	struct request_queue *req_q;
+	struct mmc_queue *mq;
+	int max_num_requests;
+	int num_requests;
+	int min_num_requests = 2;
+	int is_random = mbtd->is_random;
+
+	req_q = test_iosched_get_req_queue();
+	if (req_q)
+		mq = req_q->queuedata;
+	else {
+		test_pr_err("%s: NULL request queue", __func__);
+		return 0;
+	}
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	num_requests = max_num_requests - 2;
+
+	if (is_random) {
+		if (td->test_info.testcase ==
+		    TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
+			min_num_requests = 3;
+
+		num_requests = pseudo_random_seed(seed, min_num_requests,
+						  max_num_requests - 1);
+	}
+
+	return num_requests;
+}
+
+/*
+ * An implementation for the prepare_test_fn pointer in the test_info
+ * data structure. Based on the testcase, the appropriate number of requests
+ * is added and it is decided whether an error is expected or not.
+ */
+static int prepare_test(struct test_data *td)
+{
+	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+	int max_num_requests;
+	int num_requests = 0;
+	int ret = 0;
+	int is_random = mbtd->is_random;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+
+	if (is_random && mbtd->random_test_seed == 0) {
+		mbtd->random_test_seed =
+			(unsigned int)(get_jiffies_64() & 0xFFFF);
+		test_pr_info("%s: got seed from jiffies %d",
+			__func__, mbtd->random_test_seed);
+	}
+
+	num_requests = get_num_requests(td);
+
+	if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
+		mq->packed_test_fn =
+				test_invalid_packed_cmd;
+
+	if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
+		mq->err_check_fn = test_err_check;
+
+	switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+	case TEST_STOP_DUE_TO_READ:
+	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+	case TEST_CMD23_PACKED_BIT_UNSET:
+		ret = prepare_packed_requests(td, 0, num_requests, is_random);
+		break;
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+		ret = prepare_packed_requests(td, 0, max_num_requests - 1,
+					      is_random);
+		break;
+	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+		ret = prepare_partial_followed_by_abort(td, num_requests);
+		break;
+	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+		ret = prepare_packed_requests(td, 0, max_num_requests,
+					      is_random);
+		break;
+	case TEST_STOP_DUE_TO_THRESHOLD:
+		ret = prepare_packed_requests(td, 0, max_num_requests + 1,
+					      is_random);
+		break;
+	case TEST_RET_ABORT:
+	case TEST_RET_RETRY:
+	case TEST_RET_CMD_ERR:
+	case TEST_RET_DATA_ERR:
+	case TEST_HDR_INVALID_VERSION:
+	case TEST_HDR_WRONG_WRITE_CODE:
+	case TEST_HDR_INVALID_RW_CODE:
+	case TEST_HDR_DIFFERENT_ADDRESSES:
+	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+	case TEST_CMD23_MAX_PACKED_WRITES:
+	case TEST_CMD23_ZERO_PACKED_WRITES:
+	case TEST_CMD23_REL_WR_BIT_SET:
+	case TEST_CMD23_BITS_16TO29_SET:
+	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+	case TEST_HDR_CMD23_PACKED_BIT_SET:
+		ret = prepare_packed_requests(td, 1, num_requests, is_random);
+		break;
+	default:
+		test_pr_info("%s: Invalid test case...", __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * An implementation for the post_test_fn in the test_info data structure.
+ * In our case we just reset the function pointers in the mmc_queue so that
+ * the FS can dispatch its requests correctly after the test is finished.
+ */
+static int post_test(struct test_data *td)
+{
+	struct mmc_queue *mq;
+
+	if (!td)
+		return -EINVAL;
+
+	mq = td->req_q->queuedata;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	mq->packed_test_fn = NULL;
+	mq->err_check_fn = NULL;
+
+	return 0;
+}
+
+/*
+ * This function checks, based on the current test's test_group, that the
+ * packed commands capability and control are set right. In addition, we check
+ * if the card supports the packed command feature.
+ */
+static int validate_packed_commands_settings(void)
+{
+	struct request_queue *req_q;
+	struct mmc_queue *mq;
+	int max_num_requests;
+	struct mmc_host *host;
+
+	req_q = test_iosched_get_req_queue();
+	if (!req_q) {
+		test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
+		test_iosched_set_test_result(TEST_FAILED);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	host = mq->card->host;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR)) {
+		test_pr_err("%s: Packed Write capability disabled, exit test",
+			    __func__);
+		test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+		return -EINVAL;
+	}
+
+	if (max_num_requests == 0) {
+		test_pr_err(
+		"%s: no write packing support, ext_csd.max_packed_writes=%d",
+		__func__, mq->card->ext_csd.max_packed_writes);
+		test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+		return -EINVAL;
+	}
+
+	test_pr_info("%s: max number of packed requests supported is %d ",
+		     __func__, max_num_requests);
+
+	switch (mbtd->test_group) {
+	case TEST_SEND_WRITE_PACKING_GROUP:
+	case TEST_ERR_CHECK_GROUP:
+	case TEST_SEND_INVALID_GROUP:
+		/* disable the packing control */
+		host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static bool message_repeat;
+static int test_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	message_repeat = 1;
+	return 0;
+}
+
+/* send_packing TEST */
+static ssize_t send_write_packing_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+
+	test_pr_info("%s: -- send_write_packing TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test seed: %d", __func__,
+			      mbtd->random_test_seed);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+	mbtd->test_info.post_test_fn = post_test;
+
+	for (i = 0 ; i < number ; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = SEND_WRITE_PACKING_MIN_TESTCASE ;
+		      j <= SEND_WRITE_PACKING_MAX_TESTCASE ; j++) {
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	return count;
+}
+
+static ssize_t send_write_packing_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nsend_write_packing_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Pack due to FLUSH message\n"
+		 "- Pack due to FLUSH after threshold writes\n"
+		 "- Pack due to READ message\n"
+		 "- Pack due to READ after threshold writes\n"
+		 "- Pack due to empty queue\n"
+		 "- Pack due to threshold writes\n"
+		 "- Pack due to one over threshold writes\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations send_write_packing_test_ops = {
+	.open = test_open,
+	.write = send_write_packing_test_write,
+	.read = send_write_packing_test_read,
+};
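+
+/*
+ * Usage note (illustrative): writing a positive integer N to the
+ * send_write_packing_test debugfs entry created under the test-iosched tests
+ * directory runs N cycles; each cycle executes every testcase in the
+ * SEND_WRITE_PACKING group twice, once in RANDOM_TEST mode and once in
+ * NON_RANDOM_TEST mode. Reading the entry returns the description above.
+ */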
+
+/* err_check TEST */
+static ssize_t err_check_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+
+	test_pr_info("%s: -- err_check TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_ERR_CHECK_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test seed: %d", __func__,
+			      mbtd->random_test_seed);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+	mbtd->test_info.post_test_fn = post_test;
+
+	for (i = 0 ; i < number ; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = ERR_CHECK_MIN_TESTCASE;
+					j <= ERR_CHECK_MAX_TESTCASE ; j++) {
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				break;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	return count;
+}
+
+static ssize_t err_check_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nerr_check_TEST\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Return ABORT\n"
+		 "- Return PARTIAL followed by success\n"
+		 "- Return PARTIAL followed by abort\n"
+		 "- Return PARTIAL multiple times until success\n"
+		 "- Return PARTIAL with fail index = threshold\n"
+		 "- Return RETRY\n"
+		 "- Return CMD_ERR\n"
+		 "- Return DATA_ERR\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations err_check_test_ops = {
+	.open = test_open,
+	.write = err_check_test_write,
+	.read = err_check_test_read,
+};
+
+/* send_invalid_packed TEST */
+static ssize_t send_invalid_packed_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+	int num_of_failures = 0;
+
+	test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_SEND_INVALID_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test seed: %d", __func__,
+			      mbtd->random_test_seed);
+
+	memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+	mbtd->test_info.data = mbtd;
+	mbtd->test_info.prepare_test_fn = prepare_test;
+	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+	mbtd->test_info.post_test_fn = post_test;
+
+	for (i = 0 ; i < number ; ++i) {
+		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		test_pr_info("%s: ====================", __func__);
+
+		for (j = INVALID_CMD_MIN_TESTCASE;
+				j <= INVALID_CMD_MAX_TESTCASE ; j++) {
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				num_of_failures++;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+
+			mbtd->test_info.testcase = j;
+			mbtd->is_random = NON_RANDOM_TEST;
+			ret = test_iosched_start_test(&mbtd->test_info);
+			if (ret)
+				num_of_failures++;
+			/* Allow FS requests to be dispatched */
+			msleep(1000);
+		}
+	}
+
+	test_pr_info("%s: Completed all the test cases.", __func__);
+
+	if (num_of_failures > 0) {
+		test_iosched_set_test_result(TEST_FAILED);
+		test_pr_err(
+			"There were %d failures during the test, TEST FAILED",
+			num_of_failures);
+	}
+	return count;
+}
+
+static ssize_t send_invalid_packed_test_read(struct file *file,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *offset)
+{
+	memset((void *)buffer, 0, count);
+
+	snprintf(buffer, count,
+		 "\nsend_invalid_packed_TEST\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test checks the following scenarios\n"
+		 "- Send an invalid header version\n"
+		 "- Send the wrong write code\n"
+		 "- Send an invalid R/W code\n"
+		 "- Send wrong start address in header\n"
+		 "- Send header with block_count smaller than actual\n"
+		 "- Send header with block_count larger than actual\n"
+		 "- Send header CMD23 packed bit set\n"
+		 "- Send CMD23 with block count over threshold\n"
+		 "- Send CMD23 with block_count equals zero\n"
+		 "- Send CMD23 packed bit unset\n"
+		 "- Send CMD23 reliable write bit set\n"
+		 "- Send CMD23 bits [16-29] set\n"
+		 "- Send CMD23 header block not in block_count\n");
+
+	if (message_repeat == 1) {
+		message_repeat = 0;
+		return strnlen(buffer, count);
+	} else {
+		return 0;
+	}
+}
+
+const struct file_operations send_invalid_packed_test_ops = {
+	.open = test_open,
+	.write = send_invalid_packed_test_write,
+	.read = send_invalid_packed_test_read,
+};
+
+static void mmc_block_test_debugfs_cleanup(void)
+{
+	debugfs_remove(mbtd->debug.random_test_seed);
+	debugfs_remove(mbtd->debug.send_write_packing_test);
+	debugfs_remove(mbtd->debug.err_check_test);
+	debugfs_remove(mbtd->debug.send_invalid_packed_test);
+}
+
+static int mmc_block_test_debugfs_init(void)
+{
+	struct dentry *utils_root, *tests_root;
+
+	utils_root = test_iosched_get_debugfs_utils_root();
+	tests_root = test_iosched_get_debugfs_tests_root();
+
+	if (!utils_root || !tests_root)
+		return -EINVAL;
+
+	mbtd->debug.random_test_seed = debugfs_create_u32(
+					"random_test_seed",
+					S_IRUGO | S_IWUGO,
+					utils_root,
+					&mbtd->random_test_seed);
+
+	if (!mbtd->debug.random_test_seed)
+		goto err_nomem;
+
+	mbtd->debug.send_write_packing_test =
+		debugfs_create_file("send_write_packing_test",
+				    S_IRUGO | S_IWUGO,
+				    tests_root,
+				    NULL,
+				    &send_write_packing_test_ops);
+
+	if (!mbtd->debug.send_write_packing_test)
+		goto err_nomem;
+
+	mbtd->debug.err_check_test =
+		debugfs_create_file("err_check_test",
+				    S_IRUGO | S_IWUGO,
+				    tests_root,
+				    NULL,
+				    &err_check_test_ops);
+
+	if (!mbtd->debug.err_check_test)
+		goto err_nomem;
+
+	mbtd->debug.send_invalid_packed_test =
+		debugfs_create_file("send_invalid_packed_test",
+				    S_IRUGO | S_IWUGO,
+				    tests_root,
+				    NULL,
+				    &send_invalid_packed_test_ops);
+
+	if (!mbtd->debug.send_invalid_packed_test)
+		goto err_nomem;
+
+	return 0;
+
+err_nomem:
+	mmc_block_test_debugfs_cleanup();
+	return -ENOMEM;
+}
+
+static void mmc_block_test_probe(void)
+{
+	struct request_queue *q = test_iosched_get_req_queue();
+	struct mmc_queue *mq;
+	int max_packed_reqs;
+
+	if (!q) {
+		test_pr_err("%s: NULL request queue", __func__);
+		return;
+	}
+
+	mq = q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return;
+	}
+
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	mbtd->exp_packed_stats.packing_events =
+			kzalloc((max_packed_reqs + 1) *
+				sizeof(*mbtd->exp_packed_stats.packing_events),
+				GFP_KERNEL);
+
+	mmc_block_test_debugfs_init();
+}
+
+static void mmc_block_test_remove(void)
+{
+	mmc_block_test_debugfs_cleanup();
+}
+
+static int __init mmc_block_test_init(void)
+{
+	mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
+	if (!mbtd) {
+		test_pr_err("%s: failed to allocate mmc_block_test_data",
+			    __func__);
+		return -ENODEV;
+	}
+
+	mbtd->bdt.init_fn = mmc_block_test_probe;
+	mbtd->bdt.exit_fn = mmc_block_test_remove;
+	INIT_LIST_HEAD(&mbtd->bdt.list);
+	test_iosched_register(&mbtd->bdt);
+
+	return 0;
+}
+
+static void __exit mmc_block_test_exit(void)
+{
+	test_iosched_unregister(&mbtd->bdt);
+	kfree(mbtd);
+}
+
+module_init(mmc_block_test_init);
+module_exit(mmc_block_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMC block test");
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6c29e0e..ec3d6d2 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,17 @@
 	struct mmc_data		data;
 };
 
+enum mmc_blk_status {
+	MMC_BLK_SUCCESS = 0,
+	MMC_BLK_PARTIAL,
+	MMC_BLK_CMD_ERR,
+	MMC_BLK_RETRY,
+	MMC_BLK_ABORT,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_ECC_ERR,
+	MMC_BLK_NOMEDIUM,
+};
+
 enum mmc_packed_cmd {
 	MMC_PACKED_NONE = 0,
 	MMC_PACKED_WRITE,
@@ -47,6 +58,8 @@
 	bool			wr_packing_enabled;
 	int			num_of_potential_packed_wr_reqs;
 	int			num_wr_reqs_to_start_packing;
+	int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
+	void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -60,4 +73,6 @@
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
+extern void print_mmc_packing_stats(struct mmc_card *card);
+
 #endif
diff --git a/drivers/net/usb/rmnet_usb_ctrl.c b/drivers/net/usb/rmnet_usb_ctrl.c
index 2972af0..186d07d 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.c
+++ b/drivers/net/usb/rmnet_usb_ctrl.c
@@ -16,6 +16,7 @@
 #include <linux/device.h>
 #include <linux/uaccess.h>
 #include <linux/termios.h>
+#include <linux/poll.h>
 #include <linux/ratelimit.h>
 #include <linux/debugfs.h>
 #include "rmnet_usb_ctrl.h"
@@ -529,6 +530,28 @@
 	return 0;
 }
 
+static unsigned int rmnet_ctl_poll(struct file *file, poll_table *wait)
+{
+	unsigned int		mask = 0;
+	struct rmnet_ctrl_dev	*dev;
+
+	dev = file->private_data;
+	if (!dev)
+		return POLLERR;
+
+	poll_wait(file, &dev->read_wait_queue, wait);
+	if (!is_dev_connected(dev)) {
+		dev_dbg(dev->devicep, "%s: Device not connected\n",
+			__func__);
+		return POLLERR;
+	}
+
+	if (!list_empty(&dev->rx_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
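+
+/*
+ * Illustrative user-space counterpart (not part of this driver): a client
+ * holding an open fd on the rmnet control device node (the node name depends
+ * on the platform configuration) could now sleep until a control message is
+ * queued on rx_list:
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		n = read(fd, buf, sizeof(buf));
+ */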
+
 static ssize_t rmnet_ctl_read(struct file *file, char __user *buf, size_t count,
 		loff_t *ppos)
 {
@@ -722,6 +745,7 @@
 	.unlocked_ioctl = rmnet_ctrl_ioctl,
 	.open  = rmnet_ctl_open,
 	.release = rmnet_ctl_release,
+	.poll = rmnet_ctl_poll,
 };
 
 int rmnet_usb_ctrl_probe(struct usb_interface *intf,
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index b08fc7d..0c3d4ad 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -50,4 +50,14 @@
 	help
 	  No-Data-Path BAM is used to improve BAM performance.
 
+config QPNP_PWM
+	depends on SPMI
+	depends on OF_SPMI
+	tristate "Qualcomm QPNP LPG/PWM support"
+	help
+	  This driver supports PWM/LPG devices in Qualcomm PMIC chips which
+	  comply with QPNP. QPNP is a SPMI-based PMIC implementation. These
+	  devices support pulse width modulation output with user-generated
+	  patterns. They share a 64-entry lookup table.
+
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 92eb492..6deb6ee 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_MSM_SSBI) += ssbi.o
 obj-$(CONFIG_USB_BAM) += usb_bam.o
 obj-$(CONFIG_SPS) += sps/
+obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
new file mode 100644
index 0000000..c9cd0e0
--- /dev/null
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -0,0 +1,1661 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm QPNP Pulse Width Modulation (PWM) driver
+ *
+ * The HW module is also called LPG (Light Pattern Generator).
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/radix-tree.h>
+#include <linux/qpnp/pwm.h>
+
+#define QPNP_LPG_DRIVER_NAME	"qcom,qpnp-pwm"
+
+/* LPG Control for LPG_PATTERN_CONFIG */
+#define QPNP_RAMP_DIRECTION_SHIFT	4
+#define QPNP_RAMP_DIRECTION_MASK	0x10
+#define QPNP_PATTERN_REPEAT_SHIFT	3
+#define QPNP_PATTERN_REPEAT_MASK	0x08
+#define QPNP_RAMP_TOGGLE_SHIFT		2
+#define QPNP_RAMP_TOGGLE_MASK		0x04
+#define QPNP_EN_PAUSE_HI_SHIFT		1
+#define QPNP_EN_PAUSE_HI_MASK		0x02
+#define QPNP_EN_PAUSE_LO_MASK		0x01
+
+/* LPG Control for LPG_PWM_SIZE_CLK */
+#define QPNP_PWM_SIZE_SHIFT			4
+#define QPNP_PWM_SIZE_MASK			0x30
+#define QPNP_PWM_FREQ_CLK_SELECT_SHIFT		0
+#define QPNP_PWM_FREQ_CLK_SELECT_MASK		0x03
+#define QPNP_PWM_SIZE_9_BIT			0x03
+
+#define QPNP_SET_PWM_CLK(val, clk, pwm_size) \
+do { \
+	val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK; \
+	val |= ((pwm_size > 6 ? QPNP_PWM_SIZE_9_BIT : 0) << \
+			QPNP_PWM_SIZE_SHIFT) & QPNP_PWM_SIZE_MASK; \
+} while (0)
+
+#define QPNP_GET_PWM_SIZE(reg) ((reg & QPNP_PWM_SIZE_MASK) \
+					>> QPNP_PWM_SIZE_SHIFT)
+
+/* LPG Control for LPG_PWM_FREQ_PREDIV_CLK */
+#define QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT		5
+#define QPNP_PWM_FREQ_PRE_DIVIDE_MASK		0x60
+#define QPNP_PWM_FREQ_EXP_MASK			0x07
+
+#define QPNP_SET_PWM_FREQ_PREDIV(val, pre_div, pre_div_exp) \
+do { \
+	val = (pre_div << QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT) & \
+				QPNP_PWM_FREQ_PRE_DIVIDE_MASK;	\
+	val |= pre_div_exp & QPNP_PWM_FREQ_EXP_MASK;	\
+} while (0)
+
+/* LPG Control for LPG_PWM_TYPE_CONFIG */
+#define QPNP_EN_GLITCH_REMOVAL_SHIFT		5
+#define QPNP_EN_GLITCH_REMOVAL_MASK		0x20
+#define QPNP_EN_FULL_SCALE_SHIFT		3
+#define QPNP_EN_FULL_SCALE_MASK			0x08
+#define QPNP_EN_PHASE_STAGGER_SHIFT		2
+#define QPNP_EN_PHASE_STAGGER_MASK		0x04
+#define QPNP_PHASE_STAGGER_MASK			0x03
+
+/* LPG Control for PWM_VALUE_LSB */
+#define QPNP_PWM_VALUE_LSB_MASK			0xFF
+
+/* LPG Control for PWM_VALUE_MSB */
+#define QPNP_PWM_VALUE_MSB_SHIFT		8
+#define QPNP_PWM_VALUE_MSB_MASK			0x01
+
+/* LPG Control for ENABLE_CONTROL */
+#define QPNP_EN_PWM_HIGH_SHIFT			7
+#define QPNP_EN_PWM_HIGH_MASK			0x80
+#define QPNP_EN_PWM_LO_SHIFT			6
+#define QPNP_EN_PWM_LO_MASK			0x40
+#define QPNP_EN_PWM_OUTPUT_SHIFT		5
+#define QPNP_EN_PWM_OUTPUT_MASK			0x20
+#define QPNP_PWM_SRC_SELECT_SHIFT		2
+#define QPNP_PWM_SRC_SELECT_MASK		0x04
+#define QPNP_PWM_EN_RAMP_GEN_SHIFT		1
+#define QPNP_PWM_EN_RAMP_GEN_MASK		0x02
+
+#define QPNP_ENABLE_PWM(value) \
+	(value |= (1 << QPNP_EN_PWM_OUTPUT_SHIFT) & QPNP_EN_PWM_OUTPUT_MASK)
+
+#define QPNP_DISABLE_PWM(value)  (value &= ~QPNP_EN_PWM_OUTPUT_MASK)
+
+/* LPG Control for RAMP_CONTROL */
+#define QPNP_RAMP_START_MASK			0x01
+
+#define QPNP_ENABLE_LUT(value) (value |= QPNP_RAMP_START_MASK)
+#define QPNP_DISABLE_LUT(value) (value &= ~QPNP_RAMP_START_MASK)
+
+/* LPG Control for RAMP_STEP_DURATION_LSB */
+#define QPNP_RAMP_STEP_DURATION_LSB_MASK	0xFF
+
+/* LPG Control for RAMP_STEP_DURATION_MSB */
+#define QPNP_RAMP_STEP_DURATION_MSB_SHIFT	8
+#define QPNP_RAMP_STEP_DURATION_MSB_MASK	0x01
+
+#define QPNP_PWM_1KHZ				1024
+#define QPNP_GET_RAMP_STEP_DURATION(ramp_time_ms) \
+		((ramp_time_ms * QPNP_PWM_1KHZ) / 1000)
+
+/* LPG Control for PAUSE_HI_MULTIPLIER_LSB */
+#define QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK	0xFF
+
+/* LPG Control for PAUSE_HI_MULTIPLIER_MSB */
+#define QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT	8
+#define QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK	0x1F
+
+/* LPG Control for PAUSE_LO_MULTIPLIER_LSB */
+#define QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK	0xFF
+
+/* LPG Control for PAUSE_LO_MULTIPLIER_MSB */
+#define QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT	8
+#define QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK	0x1F
+
+/* LPG Control for HI_INDEX */
+#define QPNP_HI_INDEX_MASK			0x3F
+
+/* LPG Control for LO_INDEX */
+#define QPNP_LO_INDEX_MASK			0x3F
+
+#define NUM_CLOCKS				3
+#define QPNP_PWM_M_MAX				7
+#define NSEC_1024HZ	(NSEC_PER_SEC / 1024)
+#define NSEC_32768HZ	(NSEC_PER_SEC / 32768)
+#define NSEC_19P2MHZ	(NSEC_PER_SEC / 19200000)
+
+#define NUM_LPG_PRE_DIVIDE	4
+
+#define PRE_DIVIDE_1		1
+#define PRE_DIVIDE_3		3
+#define PRE_DIVIDE_5		5
+#define PRE_DIVIDE_6		6
+
+#define SPMI_LPG_REG_ADDR_BASE	0x40
+#define SPMI_LPG_REG_ADDR(b, n)	(b + SPMI_LPG_REG_ADDR_BASE + (n))
+#define SPMI_MAX_BUF_LEN	8
+
+/* SPMI LPG registers */
+enum qpnp_lpg_registers_list {
+	QPNP_LPG_PATTERN_CONFIG,
+	QPNP_LPG_PWM_SIZE_CLK,
+	QPNP_LPG_PWM_FREQ_PREDIV_CLK,
+	QPNP_LPG_PWM_TYPE_CONFIG,
+	QPNP_PWM_VALUE_LSB,
+	QPNP_PWM_VALUE_MSB,
+	QPNP_ENABLE_CONTROL,
+	QPNP_RAMP_CONTROL,
+	QPNP_RAMP_STEP_DURATION_LSB = QPNP_RAMP_CONTROL + 9,
+	QPNP_RAMP_STEP_DURATION_MSB,
+	QPNP_PAUSE_HI_MULTIPLIER_LSB,
+	QPNP_PAUSE_HI_MULTIPLIER_MSB,
+	QPNP_PAUSE_LO_MULTIPLIER_LSB,
+	QPNP_PAUSE_LO_MULTIPLIER_MSB,
+	QPNP_HI_INDEX,
+	QPNP_LO_INDEX,
+	QPNP_TOTAL_LPG_SPMI_REGISTERS
+};
+
+/*
+ * Formula from HSID,
+ * pause_time (hi/lo) = (pause_cnt - 1) * (ramp_ms)
+ * OR,
+ * pause_cnt = (pause_time / ramp_ms) + 1
+ */
+#define QPNP_SET_PAUSE_CNT(to_pause_cnt, from_pause, ramp_ms) \
+	(to_pause_cnt = (from_pause / (ramp_ms ? ramp_ms : 1)) + 1)
+
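+/*
+ * Example (illustration only): a requested pause of 500 ms with a ramp step
+ * of 50 ms yields pause_cnt = (500 / 50) + 1 = 11.
+ */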
+
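+/*
+ * pt_t[pre_divide][clock] is the effective tick period in nanoseconds, i.e.
+ * the pre-divide value multiplied by the source clock period. For example,
+ * pt_t[2][1] = PRE_DIVIDE_5 * NSEC_32768HZ = 5 * 30517 ns, roughly 152.6 us.
+ */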
+static unsigned int pt_t[NUM_LPG_PRE_DIVIDE][NUM_CLOCKS] = {
+	{	PRE_DIVIDE_1 * NSEC_1024HZ,
+		PRE_DIVIDE_1 * NSEC_32768HZ,
+		PRE_DIVIDE_1 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_3 * NSEC_1024HZ,
+		PRE_DIVIDE_3 * NSEC_32768HZ,
+		PRE_DIVIDE_3 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_5 * NSEC_1024HZ,
+		PRE_DIVIDE_5 * NSEC_32768HZ,
+		PRE_DIVIDE_5 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_6 * NSEC_1024HZ,
+		PRE_DIVIDE_6 * NSEC_32768HZ,
+		PRE_DIVIDE_6 * NSEC_19P2MHZ,
+	},
+};
+
+static RADIX_TREE(lpg_dev_tree, GFP_KERNEL);
+
+struct qpnp_lut_default_config {
+	u32		*duty_pct_list;
+	int		size;
+	int		start_idx;
+};
+
+struct qpnp_lut_config {
+	struct qpnp_lut_default_config def_config;
+	u8		*duty_pct_list;
+	int		list_size;
+	int		lo_index;
+	int		hi_index;
+	int		lut_pause_hi_cnt;
+	int		lut_pause_lo_cnt;
+	int		ramp_step_ms;
+	bool		ramp_direction;
+	bool		pattern_repeat;
+	bool		ramp_toggle;
+	bool		enable_pause_hi;
+	bool		enable_pause_lo;
+};
+
+struct qpnp_lpg_config {
+	struct qpnp_lut_config	lut_config;
+	u16			base_addr;
+	u16			lut_base_addr;
+	u16			lut_size;
+	bool			bypass_lut;
+	bool			lpg_configured;
+};
+
+struct qpnp_pwm_config {
+	int				channel_id;
+	bool				in_use;
+	const char			*lable;
+	int				pwm_value;
+	int				pwm_period;
+	int				pwm_duty;
+	struct pwm_period_config	period;
+};
+
+/* Public facing structure */
+struct pwm_device {
+	struct qpnp_lpg_chip	*chip;
+	struct qpnp_pwm_config	pwm_config;
+};
+
+struct qpnp_lpg_chip {
+	struct	spmi_device	*spmi_dev;
+	struct	pwm_device	pwm_dev;
+	struct	mutex		lpg_mutex;
+	struct	qpnp_lpg_config	lpg_config;
+	u8	qpnp_lpg_registers[QPNP_TOTAL_LPG_SPMI_REGISTERS];
+};
+
+/* Internal functions */
+static inline void qpnp_set_pattern_config(u8 *val,
+			struct qpnp_lut_config *lut_config)
+{
+	*val = lut_config->enable_pause_lo & QPNP_EN_PAUSE_LO_MASK;
+	*val |= (lut_config->enable_pause_hi << QPNP_EN_PAUSE_HI_SHIFT) &
+						QPNP_EN_PAUSE_HI_MASK;
+	*val |= (lut_config->ramp_toggle << QPNP_RAMP_TOGGLE_SHIFT) &
+						QPNP_RAMP_TOGGLE_MASK;
+	*val |= (lut_config->pattern_repeat << QPNP_PATTERN_REPEAT_SHIFT) &
+						QPNP_PATTERN_REPEAT_MASK;
+	*val |= (lut_config->ramp_direction << QPNP_RAMP_DIRECTION_SHIFT) &
+						QPNP_RAMP_DIRECTION_MASK;
+}
+
+static inline void qpnp_set_pwm_type_config(u8 *val, bool glitch,
+			bool full_scale, bool en_phase, bool phase)
+{
+	*val = phase;
+	*val |= (en_phase << QPNP_EN_PHASE_STAGGER_SHIFT) &
+				QPNP_EN_PHASE_STAGGER_MASK;
+	*val |= (full_scale << QPNP_EN_FULL_SCALE_SHIFT) &
+				QPNP_EN_FULL_SCALE_MASK;
+	*val |= (glitch << QPNP_EN_GLITCH_REMOVAL_SHIFT) &
+				QPNP_EN_GLITCH_REMOVAL_MASK;
+}
+
+static inline void qpnp_set_control(u8 *val, bool pwm_hi, bool pwm_lo,
+			bool pwm_out, bool pwm_src, bool ramp_gen)
+{
+	*val = (ramp_gen << QPNP_PWM_EN_RAMP_GEN_SHIFT) &
+				QPNP_PWM_EN_RAMP_GEN_MASK;
+	*val |= (pwm_src << QPNP_PWM_SRC_SELECT_SHIFT) &
+				QPNP_PWM_SRC_SELECT_MASK;
+	*val |= (pwm_out << QPNP_EN_PWM_OUTPUT_SHIFT) &
+				QPNP_EN_PWM_OUTPUT_MASK;
+	*val |= (pwm_lo << QPNP_EN_PWM_LO_SHIFT) & QPNP_EN_PWM_LO_MASK;
+	*val |= (pwm_hi << QPNP_EN_PWM_HIGH_SHIFT) & QPNP_EN_PWM_HIGH_MASK;
+}
+
+#define QPNP_ENABLE_LUT_CONTROL(p_val)	qpnp_set_control(p_val, 1, 1, 1, 0, 1)
+#define QPNP_ENABLE_PWM_CONTROL(p_val)	qpnp_set_control(p_val, 1, 1, 0, 1, 0)
+
+static inline void qpnp_convert_to_lut_flags(int *flags,
+				struct qpnp_lut_config *l_config)
+{
+	*flags = ((l_config->ramp_direction ? PM_PWM_LUT_RAMP_UP : 0) |
+		(l_config->pattern_repeat ? PM_PWM_LUT_LOOP : 0)|
+		(l_config->ramp_toggle ? PM_PWM_LUT_REVERSE : 0) |
+		(l_config->enable_pause_hi ? PM_PWM_LUT_PAUSE_HI_EN : 0) |
+		(l_config->enable_pause_lo ? PM_PWM_LUT_PAUSE_LO_EN : 0));
+}
+
+static inline void qpnp_set_lut_params(struct lut_params *l_params,
+				struct qpnp_lut_config *l_config)
+{
+	l_params->start_idx = l_config->def_config.start_idx;
+	l_params->idx_len = l_config->def_config.size;
+	l_params->lut_pause_hi = l_config->lut_pause_hi_cnt;
+	l_params->lut_pause_lo = l_config->lut_pause_lo_cnt;
+	l_params->ramp_step_ms = l_config->ramp_step_ms;
+	qpnp_convert_to_lut_flags(&l_params->flags, l_config);
+}
+
+static void qpnp_lpg_save(u8 *u8p, u8 mask, u8 val)
+{
+	*u8p &= ~mask;
+	*u8p |= val & mask;
+}
+
+static int qpnp_lpg_save_and_write(u8 value, u8 mask, u8 *reg, u16 base_addr,
+			u16 offset, u16 size, struct qpnp_lpg_chip *chip)
+{
+	qpnp_lpg_save(reg, mask, value);
+
+	return spmi_ext_register_writel(chip->spmi_dev->ctrl,
+	chip->spmi_dev->sid, SPMI_LPG_REG_ADDR(base_addr, offset), reg, size);
+}
+
+/*
+ * PWM Frequency = Clock Frequency / (N * T)
+ *	or
+ * PWM Period = Clock Period * (N * T)
+ *	where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, where m = 0..7 (exponent)
+ *
+ * This is the formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) = (Pre-divide * Clock Period) * 2^m
+ */
+static void qpnp_lpg_calc_period(unsigned int period_us,
+				   struct pwm_period_config *period)
+{
+	int		n, m, clk, div;
+	int		best_m, best_div, best_clk;
+	unsigned int	last_err, cur_err, min_err;
+	unsigned int	tmp_p, period_n;
+
+	/* PWM Period / N */
+	if (period_us < ((unsigned)(-1) / NSEC_PER_USEC)) {
+		period_n = (period_us * NSEC_PER_USEC) >> 6;
+		n = 6;
+	} else {
+		period_n = (period_us >> 9) * NSEC_PER_USEC;
+		n = 9;
+	}
+
+	min_err = last_err = (unsigned)(-1);
+	best_m = 0;
+	best_clk = 0;
+	best_div = 0;
+	for (clk = 0; clk < NUM_CLOCKS; clk++) {
+		for (div = 0; div < NUM_LPG_PRE_DIVIDE; div++) {
+			/* period_n = (PWM Period / N) */
+			/* tmp_p = (Pre-divide * Clock Period) * 2^m */
+			tmp_p = pt_t[div][clk];
+			for (m = 0; m <= QPNP_PWM_M_MAX; m++) {
+				if (period_n > tmp_p)
+					cur_err = period_n - tmp_p;
+				else
+					cur_err = tmp_p - period_n;
+
+				if (cur_err < min_err) {
+					min_err = cur_err;
+					best_m = m;
+					best_clk = clk;
+					best_div = div;
+				}
+
+				if (m && cur_err > last_err)
+					/* Break for bigger cur_err */
+					break;
+
+				last_err = cur_err;
+				tmp_p <<= 1;
+			}
+		}
+	}
+
+	/* Use higher resolution */
+	if (best_m >= 3 && n == 6) {
+		n += 3;
+		best_m -= 3;
+	}
+
+	period->pwm_size = n;
+	period->clk = best_clk;
+	period->pre_div = best_div;
+	period->pre_div_exp = best_m;
+}
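+
+/*
+ * Worked example (illustration only): with the 19.2 MHz clock the table
+ * entry is NSEC_19P2MHZ = 52 ns (integer division). Selecting pre-divide 5,
+ * exponent m = 3 and a 9-bit PWM gives a period of
+ * 52 ns * 5 * 2^3 * 2^9 = 1064960 ns, roughly 1.06 ms.
+ */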
+
+static void qpnp_lpg_calc_pwm_value(struct pwm_device *pwm,
+				      unsigned int period_us,
+				      unsigned int duty_us)
+{
+	unsigned int		max_pwm_value, tmp;
+	struct qpnp_pwm_config	*pwm_config = &pwm->pwm_config;
+
+	/* Figure out pwm_value with overflow handling */
+	tmp = 1 << (sizeof(tmp) * 8 - pwm_config->period.pwm_size);
+	if (duty_us < tmp) {
+		tmp = duty_us << pwm_config->period.pwm_size;
+		pwm_config->pwm_value = tmp / period_us;
+	} else {
+		tmp = period_us >> pwm_config->period.pwm_size;
+		pwm_config->pwm_value = duty_us / tmp;
+	}
+	max_pwm_value = (1 << pwm_config->period.pwm_size) - 1;
+	if (pwm_config->pwm_value > max_pwm_value)
+		pwm_config->pwm_value = max_pwm_value;
+}
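+
+/*
+ * Example (illustration only): for a 9-bit PWM with period_us = 1000 and
+ * duty_us = 250, the first branch applies (250 < 2^23) and
+ * pwm_value = (250 << 9) / 1000 = 128, i.e. 25% of the 512-step range.
+ */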
+
+static int qpnp_lpg_change_table(struct pwm_device *pwm,
+					int duty_pct[], int raw_value)
+{
+	unsigned int		pwm_value, max_pwm_value;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	struct qpnp_lut_config	*lut = &chip->lpg_config.lut_config;
+	int			i, pwm_size, rc = 0;
+	int			burst_size = SPMI_MAX_BUF_LEN;
+	int			list_len = lut->list_size << 1;
+	int			offset = lut->lo_index << 2;
+
+	pwm_size = QPNP_GET_PWM_SIZE(
+			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) &
+						QPNP_PWM_SIZE_9_BIT ? 9 : 6;
+
+	max_pwm_value = (1 << pwm_size) - 1;
+
+	if (unlikely(lut->list_size != (lut->hi_index - lut->lo_index + 1))) {
+		pr_err("LUT internal Data structure corruption detected\n");
+		pr_err("LUT list size: %d\n", lut->list_size);
+		pr_err("However, index size is: %d\n",
+				(lut->hi_index - lut->lo_index + 1));
+		return -EINVAL;
+	}
+
+	for (i = 0; i < lut->list_size; i++) {
+		if (raw_value)
+			pwm_value = duty_pct[i];
+		else
+			pwm_value = (duty_pct[i] << pwm_size) / 100;
+
+		if (pwm_value > max_pwm_value)
+			pwm_value = max_pwm_value;
+
+		lut->duty_pct_list[i*2] = pwm_value;
+		lut->duty_pct_list[(i*2)+1] = (pwm_value >>
+			 QPNP_PWM_VALUE_MSB_SHIFT) & QPNP_PWM_VALUE_MSB_MASK;
+	}
+
+	/* Write with max allowable burst mode, each entry is of two bytes */
+	for (i = 0; i < list_len;) {
+		if (i + burst_size >= list_len)
+			burst_size = list_len - i;
+		rc = spmi_ext_register_writel(chip->spmi_dev->ctrl,
+			chip->spmi_dev->sid,
+			chip->lpg_config.lut_base_addr + offset + i,
+			lut->duty_pct_list + i, burst_size);
+		i += burst_size;
+	}
+
+	return rc;
+}
+
+static void qpnp_lpg_save_period(struct pwm_device *pwm)
+{
+	u8 mask, val;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	struct qpnp_pwm_config	*pwm_config = &pwm->pwm_config;
+
+	QPNP_SET_PWM_CLK(val, pwm_config->period.clk,
+				pwm_config->period.pwm_size);
+
+	mask = QPNP_PWM_SIZE_MASK | QPNP_PWM_FREQ_CLK_SELECT_MASK;
+
+	qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK],
+							mask, val);
+
+	QPNP_SET_PWM_FREQ_PREDIV(val, pwm_config->period.pre_div,
+					pwm_config->period.pre_div_exp);
+
+	mask = QPNP_PWM_FREQ_PRE_DIVIDE_MASK | QPNP_PWM_FREQ_EXP_MASK;
+
+	qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK],
+								mask, val);
+}
+
+static int qpnp_lpg_save_pwm_value(struct pwm_device *pwm)
+{
+	unsigned int		max_pwm_value;
+	int			pwm_size;
+	u8			mask, value;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	struct qpnp_pwm_config	*pwm_config = &pwm->pwm_config;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	int rc;
+
+	pwm_size = QPNP_GET_PWM_SIZE(
+			chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) &
+						QPNP_PWM_SIZE_9_BIT ? 9 : 6;
+
+	max_pwm_value = (1 << pwm_size) - 1;
+
+	if (pwm_config->pwm_value > max_pwm_value)
+		pwm_config->pwm_value = max_pwm_value;
+
+	value = pwm_config->pwm_value;
+	mask = QPNP_PWM_VALUE_LSB_MASK;
+
+	rc = qpnp_lpg_save_and_write(value, mask,
+			&pwm->chip->qpnp_lpg_registers[QPNP_PWM_VALUE_LSB],
+			lpg_config->base_addr, QPNP_PWM_VALUE_LSB, 1, chip);
+	if (rc)
+		return rc;
+
+	value = (pwm_config->pwm_value >> QPNP_PWM_VALUE_MSB_SHIFT) &
+					QPNP_PWM_VALUE_MSB_MASK;
+
+	mask = QPNP_PWM_VALUE_MSB_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+			&pwm->chip->qpnp_lpg_registers[QPNP_PWM_VALUE_MSB],
+			lpg_config->base_addr, QPNP_PWM_VALUE_MSB, 1, chip);
+}
+
+static int qpnp_lpg_configure_pattern(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	qpnp_set_pattern_config(&value, lut_config);
+
+	mask = QPNP_RAMP_DIRECTION_MASK | QPNP_PATTERN_REPEAT_MASK |
+			QPNP_RAMP_TOGGLE_MASK | QPNP_EN_PAUSE_HI_MASK |
+			QPNP_EN_PAUSE_LO_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_LPG_PATTERN_CONFIG],
+		lpg_config->base_addr, QPNP_LPG_PATTERN_CONFIG, 1, chip);
+}
+
+static int qpnp_lpg_configure_pwm(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	int			rc;
+	u8			value, mask;
+
+	rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PWM_SIZE_CLK),
+		&chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], 1);
+
+	if (rc)
+		return rc;
+
+	rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+		SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+		QPNP_LPG_PWM_FREQ_PREDIV_CLK),
+		&chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], 1);
+	if (rc)
+		return rc;
+
+	qpnp_set_pwm_type_config(&value, 1, 0, 0, 0);
+
+	mask = QPNP_EN_GLITCH_REMOVAL_MASK | QPNP_EN_FULL_SCALE_MASK |
+			QPNP_EN_PHASE_STAGGER_MASK | QPNP_PHASE_STAGGER_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_LPG_PWM_TYPE_CONFIG],
+		lpg_config->base_addr, QPNP_LPG_PWM_TYPE_CONFIG, 1, chip);
+}
+
+static int qpnp_pwm_configure_control(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	QPNP_ENABLE_PWM_CONTROL(&value);
+
+	mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+		QPNP_EN_PWM_OUTPUT_MASK | QPNP_PWM_SRC_SELECT_MASK |
+					QPNP_PWM_EN_RAMP_GEN_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		lpg_config->base_addr, QPNP_ENABLE_CONTROL, 1, chip);
+
+}
+
+static int qpnp_lpg_configure_control(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	QPNP_ENABLE_LUT_CONTROL(&value);
+
+	mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+		QPNP_EN_PWM_OUTPUT_MASK | QPNP_PWM_SRC_SELECT_MASK |
+				QPNP_PWM_EN_RAMP_GEN_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		lpg_config->base_addr, QPNP_ENABLE_CONTROL, 1, chip);
+
+}
+
+static int qpnp_lpg_configure_ramp_step_duration(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lut_config	lut_config = lpg_config->lut_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	int			rc, value;
+	u8			val, mask;
+
+	value = QPNP_GET_RAMP_STEP_DURATION(lut_config.ramp_step_ms);
+	val = value & QPNP_RAMP_STEP_DURATION_LSB_MASK;
+	mask = QPNP_RAMP_STEP_DURATION_LSB_MASK;
+
+	rc = qpnp_lpg_save_and_write(val, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_LSB],
+		lpg_config->base_addr, QPNP_RAMP_STEP_DURATION_LSB, 1, chip);
+	if (rc)
+		return rc;
+
+	val = (value >> QPNP_RAMP_STEP_DURATION_MSB_SHIFT) &
+				QPNP_RAMP_STEP_DURATION_MSB_MASK;
+
+	mask = QPNP_RAMP_STEP_DURATION_MSB_MASK;
+
+	return qpnp_lpg_save_and_write(val, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_MSB],
+		lpg_config->base_addr, QPNP_RAMP_STEP_DURATION_MSB, 1, chip);
+}
+
+static int qpnp_lpg_configure_pause(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lut_config	lut_config = lpg_config->lut_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+	int			rc = 0;
+
+	if (lut_config.enable_pause_hi) {
+		value = lut_config.lut_pause_hi_cnt;
+		mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB],
+		lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_LSB, 1, chip);
+		if (rc)
+			return rc;
+
+		value = (lut_config.lut_pause_hi_cnt >>
+			QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT) &
+					QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+		mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB],
+		lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_MSB, 1, chip);
+	} else {
+		value = 0;
+		mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB],
+		lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_LSB, 1, chip);
+		if (rc)
+			return rc;
+
+		mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB],
+		lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_MSB, 1, chip);
+		if (rc)
+			return rc;
+
+	}
+
+	if (lut_config.enable_pause_lo) {
+		value = lut_config.lut_pause_lo_cnt;
+		mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB],
+		lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_LSB, 1, chip);
+		if (rc)
+			return rc;
+
+		value = (lut_config.lut_pause_lo_cnt >>
+				QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT) &
+					QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+		mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB],
+		lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_MSB, 1, chip);
+	} else {
+		value = 0;
+		mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB],
+		lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_LSB, 1, chip);
+		if (rc)
+			return rc;
+
+		mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+		rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB],
+		lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_MSB, 1, chip);
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_configure_index(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lut_config	lut_config = lpg_config->lut_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+	int			rc = 0;
+
+	value = lut_config.hi_index;
+	mask = QPNP_HI_INDEX_MASK;
+
+	rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_HI_INDEX],
+		lpg_config->base_addr, QPNP_HI_INDEX, 1, chip);
+	if (rc)
+		return rc;
+
+	value = lut_config.lo_index;
+	mask = QPNP_LO_INDEX_MASK;
+
+	rc = qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_LO_INDEX],
+		lpg_config->base_addr, QPNP_LO_INDEX, 1, chip);
+
+	return rc;
+}
+
+static int qpnp_lpg_change_lut(struct pwm_device *pwm)
+{
+	int	rc;
+
+	rc = qpnp_lpg_configure_pattern(pwm);
+	if (rc) {
+		pr_err("Failed to configure LUT pattern\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_pwm(pwm);
+	if (rc) {
+		pr_err("Failed to configure LUT PWM size/clock\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_control(pwm);
+	if (rc) {
+		pr_err("Failed to configure LUT control register\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_ramp_step_duration(pwm);
+	if (rc) {
+		pr_err("Failed to configure ramp step duration\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_pause(pwm);
+	if (rc) {
+		pr_err("Failed to configure pause registers\n");
+		return rc;
+	}
+	rc = qpnp_lpg_configure_index(pwm);
+	if (rc) {
+		pr_err("Failed to configure index registers\n");
+		return rc;
+	}
+	return rc;
+}
+
+static int qpnp_lpg_enable_lut(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	value = pwm->chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL];
+
+	QPNP_ENABLE_LUT(value);
+
+	mask = QPNP_RAMP_START_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL],
+		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
+}
+
+static int qpnp_lpg_disable_lut(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	value = pwm->chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL];
+
+	QPNP_DISABLE_LUT(value);
+
+	mask = QPNP_RAMP_START_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL],
+		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
+}
+
+static int qpnp_lpg_enable_pwm(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	value = pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL];
+
+	QPNP_ENABLE_PWM(value);
+
+	mask = QPNP_EN_PWM_OUTPUT_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		lpg_config->base_addr, QPNP_ENABLE_CONTROL, 1, chip);
+}
+
+static int qpnp_lpg_disable_pwm(struct pwm_device *pwm)
+{
+	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
+	struct qpnp_lpg_chip	*chip = pwm->chip;
+	u8			value, mask;
+
+	value = pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL];
+
+	QPNP_DISABLE_PWM(value);
+
+	mask = QPNP_EN_PWM_OUTPUT_MASK;
+
+	return qpnp_lpg_save_and_write(value, mask,
+		&pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+		lpg_config->base_addr, QPNP_ENABLE_CONTROL, 1, chip);
+}
+
+static int _pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	struct qpnp_pwm_config		*pwm_config;
+	struct qpnp_lpg_chip		*chip;
+	struct pwm_period_config	*period;
+	int				rc;
+
+	chip = pwm->chip;
+	pwm_config = &pwm->pwm_config;
+	period = &pwm_config->period;
+
+	if (pwm_config->pwm_period != period_us) {
+		qpnp_lpg_calc_period(period_us, period);
+		qpnp_lpg_save_period(pwm);
+		pwm_config->pwm_period = period_us;
+	}
+
+	pwm_config->pwm_duty = duty_us;
+	qpnp_lpg_calc_pwm_value(pwm, period_us, duty_us);
+	rc = qpnp_lpg_save_pwm_value(pwm);
+
+	if (rc) {
+		pr_err("Could not update PWM value for channel %d rc=%d\n",
+						pwm_config->channel_id, rc);
+		return rc;
+	}
+
+	rc = qpnp_lpg_configure_pwm(pwm);
+	if (rc) {
+		pr_err("Could not configure PWM clock for\n");
+		pr_err("channel %d rc=%d\n", pwm_config->channel_id, rc);
+		return rc;
+	}
+
+	rc = qpnp_pwm_configure_control(pwm);
+	if (rc) {
+		pr_err("Could not update PWM control for");
+		pr_err("channel %d rc=%d\n", pwm_config->channel_id, rc);
+		return rc;
+	}
+
+	pwm->chip->lpg_config.lpg_configured = 1;
+
+	pr_debug("duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
+		 (unsigned)duty_us, (unsigned)period_us,
+		 pwm_config->pwm_value, 1 << period->pwm_size);
+
+	return 0;
+}
+
+static int _pwm_lut_config(struct pwm_device *pwm, int period_us,
+			int duty_pct[], struct lut_params lut_params)
+{
+	struct qpnp_lpg_config		*lpg_config;
+	struct qpnp_lut_config		*lut_config;
+	struct qpnp_lut_default_config  *def_lut_config;
+	struct pwm_period_config	*period;
+	struct qpnp_pwm_config		*pwm_config;
+	int				start_idx = lut_params.start_idx;
+	int				len = lut_params.idx_len;
+	int				flags = lut_params.flags;
+	int				raw_lut, ramp_step_ms;
+	int				rc = 0;
+
+	pwm_config = &pwm->pwm_config;
+	lpg_config = &pwm->chip->lpg_config;
+	lut_config = &lpg_config->lut_config;
+	def_lut_config = &lut_config->def_config;
+
+	if ((start_idx + len) > lpg_config->lut_size) {
+		pr_err("Exceed LUT limit\n");
+		return -EINVAL;
+	}
+	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN) {
+		pr_err("Period out of range\n");
+		return -EINVAL;
+	}
+
+	if (!pwm_config->in_use) {
+		pr_err("channel_id: %d: stale handle?\n",
+				pwm_config->channel_id);
+		return -EINVAL;
+	}
+
+	period = &pwm_config->period;
+
+	if (pwm_config->pwm_period != period_us) {
+		qpnp_lpg_calc_period(period_us, period);
+		qpnp_lpg_save_period(pwm);
+		pwm_config->pwm_period = period_us;
+	}
+
+	if (flags & PM_PWM_LUT_NO_TABLE)
+		goto after_table_write;
+
+	raw_lut = 0;
+	if (flags & PM_PWM_LUT_USE_RAW_VALUE)
+		raw_lut = 1;
+
+	lut_config->list_size = len;
+	lut_config->lo_index = start_idx;
+	lut_config->hi_index = start_idx + len - 1;
+
+	/*
+	 * The LUT may not have been specified in the device tree, in which
+	 * case this is the first time the user is configuring it.
+	 */
+	if (lpg_config->bypass_lut) {
+		def_lut_config->duty_pct_list = kzalloc(sizeof(u32) *
+							len, GFP_KERNEL);
+		if (!def_lut_config->duty_pct_list) {
+			pr_err("kzalloc failed on def_duty_pct_list\n");
+			return -ENOMEM;
+		}
+
+		lut_config->duty_pct_list = kzalloc(lpg_config->lut_size *
+						sizeof(u16), GFP_KERNEL);
+		if (!lut_config->duty_pct_list) {
+			pr_err("kzalloc failed on duty_pct_list\n");
+			kfree(def_lut_config->duty_pct_list);
+			return -ENOMEM;
+		}
+
+		def_lut_config->size = len;
+		def_lut_config->start_idx = start_idx;
+		memcpy(def_lut_config->duty_pct_list, duty_pct,
+						len * sizeof(u32));
+
+		lpg_config->bypass_lut = 0;
+	}
+
+	rc = qpnp_lpg_change_table(pwm, duty_pct, raw_lut);
+	if (rc) {
+		pr_err("qpnp_lpg_change_table: rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+after_table_write:
+	ramp_step_ms = lut_params.ramp_step_ms;
+
+	if (ramp_step_ms > PM_PWM_LUT_RAMP_STEP_TIME_MAX)
+		ramp_step_ms = PM_PWM_LUT_RAMP_STEP_TIME_MAX;
+
+	QPNP_SET_PAUSE_CNT(lut_config->lut_pause_lo_cnt,
+			lut_params.lut_pause_lo, ramp_step_ms);
+	if (lut_config->lut_pause_lo_cnt > PM_PWM_LUT_PAUSE_MAX)
+		lut_config->lut_pause_lo_cnt = PM_PWM_LUT_PAUSE_MAX;
+
+	QPNP_SET_PAUSE_CNT(lut_config->lut_pause_hi_cnt,
+			lut_params.lut_pause_hi, ramp_step_ms);
+	if (lut_config->lut_pause_hi_cnt > PM_PWM_LUT_PAUSE_MAX)
+			lut_config->lut_pause_hi_cnt = PM_PWM_LUT_PAUSE_MAX;
+
+	lut_config->ramp_step_ms = ramp_step_ms;
+
+	lut_config->ramp_direction  = !!(flags & PM_PWM_LUT_RAMP_UP);
+	lut_config->pattern_repeat  = !!(flags & PM_PWM_LUT_LOOP);
+	lut_config->ramp_toggle	    = !!(flags & PM_PWM_LUT_REVERSE);
+	lut_config->enable_pause_hi = !!(flags & PM_PWM_LUT_PAUSE_HI_EN);
+	lut_config->enable_pause_lo = !!(flags & PM_PWM_LUT_PAUSE_LO_EN);
+	lpg_config->bypass_lut = 0;
+
+	rc = qpnp_lpg_change_lut(pwm);
+
+	if (!rc)
+		lpg_config->lpg_configured = 1;
+
+	return rc;
+}
+
+/* APIs */
+/**
+ * pwm_request - request a PWM device
+ * @pwm_id: PWM id or channel
+ * @lable: the label to identify the user
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *lable)
+{
+	struct qpnp_lpg_chip	*chip;
+	struct pwm_device	*pwm;
+
+	chip = radix_tree_lookup(&lpg_dev_tree, pwm_id);
+
+	if (!chip) {
+		pr_err("Could not find PWM Device for the\n");
+		pr_err("input pwm channel %d\n", pwm_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&chip->lpg_mutex);
+
+	pwm = &chip->pwm_dev;
+
+	if (pwm->pwm_config.in_use) {
+		pr_err("PWM device associated with the");
+		pr_err("input pwm id: %d is in use by %s",
+			pwm_id, pwm->pwm_config.lable);
+		pwm = ERR_PTR(-EBUSY);
+	} else {
+		pwm->pwm_config.in_use = 1;
+		pwm->pwm_config.lable  = lable;
+	}
+
+	mutex_unlock(&chip->lpg_mutex);
+
+	return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request);
+
+/**
+ * pwm_free - free a PWM device
+ * @pwm: the PWM device
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+	struct qpnp_pwm_config	*pwm_config;
+
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return;
+	}
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	pwm_config = &pwm->pwm_config;
+
+	if (pwm_config->in_use) {
+		qpnp_lpg_disable_pwm(pwm);
+		qpnp_lpg_disable_lut(pwm);
+		pwm_config->in_use = 0;
+		pwm_config->lable = NULL;
+		pwm->chip->lpg_config.lpg_configured = 0;
+	}
+
+	mutex_unlock(&pwm->chip->lpg_mutex);
+}
+EXPORT_SYMBOL_GPL(pwm_free);
+
+/**
+ * pwm_config - change a PWM device configuration
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_us: duty cycle in microseconds
+ */
+int pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	int rc;
+
+	if (pwm == NULL || IS_ERR(pwm) ||
+		duty_us > period_us ||
+		(unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN) {
+		pr_err("Invalid pwm handle or parameters\n");
+		return -EINVAL;
+	}
+
+	if (!pwm->pwm_config.in_use)
+		return -EINVAL;
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+	rc = _pwm_config(pwm, duty_us, period_us);
+	mutex_unlock(&pwm->chip->lpg_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_config);
+
+/**
+ * pwm_enable - start a PWM output toggling
+ * @pwm: the PWM device
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	struct qpnp_pwm_config	*p_config;
+	struct qpnp_lpg_chip	*chip;
+	int			rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	chip = pwm->chip;
+	p_config = &pwm->pwm_config;
+
+	if (!p_config->in_use) {
+		pr_err("channel_id: %d: stale handle?\n", p_config->channel_id);
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (!pwm->chip->lpg_config.lpg_configured) {
+		pr_err("Request received to enable PWM for channel Id: %d\n",
+							p_config->channel_id);
+		pr_err("However, PWM isn't configured\n");
+		pr_err("falling back to defaultconfiguration\n");
+		rc = _pwm_config(pwm, p_config->pwm_duty,
+					p_config->pwm_period);
+		if (rc) {
+			pr_err("Could not apply default PWM config\n");
+			goto out_unlock;
+		}
+	}
+
+	rc = qpnp_lpg_enable_pwm(pwm);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->lpg_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_enable);
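+
+/*
+ * Typical client sequence (sketch only; the channel id and label below are
+ * hypothetical, and the requested period must lie between PM_PWM_PERIOD_MIN
+ * and PM_PWM_PERIOD_MAX):
+ *
+ *	struct pwm_device *pwm = pwm_request(0, "status-led");
+ *
+ *	if (!IS_ERR(pwm)) {
+ *		pwm_config(pwm, 500, 1000);	(50% duty, 1 ms period)
+ *		pwm_enable(pwm);
+ *	}
+ */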
+
+/**
+ * pwm_disable - stop a PWM output toggling
+ * @pwm: the PWM device
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	struct qpnp_pwm_config	*pwm_config;
+	struct qpnp_lpg_chip	*chip;
+
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return;
+	}
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	chip = pwm->chip;
+	pwm_config = &pwm->pwm_config;
+
+	if (pwm_config->in_use) {
+		if (!pwm->chip->lpg_config.lpg_configured) {
+			pr_err("Request received to disable PWM for\n");
+			pr_err("channel Id: %d\n", pwm_config->channel_id);
+			pr_err("However PWM is not configured by any means\n");
+			goto out_unlock;
+		}
+		qpnp_lpg_disable_pwm(pwm);
+	}
+
+out_unlock:
+	mutex_unlock(&pwm->chip->lpg_mutex);
+}
+EXPORT_SYMBOL_GPL(pwm_disable);
+
+/**
+ * pwm_config_period - change PWM period
+ *
+ * @pwm: the PWM device
+ * @pwm_p: period in struct qpnp_lpg_period
+ */
+int pwm_config_period(struct pwm_device *pwm,
+			     struct pwm_period_config *period)
+{
+	struct qpnp_pwm_config	*pwm_config;
+	struct qpnp_lpg_config	*lpg_config;
+	struct qpnp_lpg_chip	*chip;
+	int			rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm) || period == NULL)
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	chip = pwm->chip;
+	pwm_config = &pwm->pwm_config;
+	lpg_config = &chip->lpg_config;
+
+	if (!pwm_config->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pwm_config->period.pwm_size = period->pwm_size;
+	pwm_config->period.clk = period->clk;
+	pwm_config->period.pre_div = period->pre_div;
+	pwm_config->period.pre_div_exp = period->pre_div_exp;
+
+	qpnp_lpg_save_period(pwm);
+
+	rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+			SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+			QPNP_LPG_PWM_SIZE_CLK),
+			&chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], 1);
+
+	if (rc) {
+		pr_err("Write failed: QPNP_LPG_PWM_SIZE_CLK register, rc: %d\n",
+									rc);
+		goto out_unlock;
+	}
+
+	rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+			SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+			QPNP_LPG_PWM_FREQ_PREDIV_CLK),
+		&chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], 1);
+	if (rc) {
+		pr_err("Failed to write to QPNP_LPG_PWM_FREQ_PREDIV_CLK\n");
+		pr_err("register, rc = %d\n", rc);
+	}
+
+out_unlock:
+	mutex_unlock(&pwm->chip->lpg_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config_period);
+
+/**
+ * pwm_config_pwm_value - change a PWM device configuration
+ * @pwm: the PWM device
+ * @pwm_value: the duty cycle in raw PWM value (< 2^pwm_size)
+ */
+int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value)
+{
+	struct qpnp_lpg_config	*lpg_config;
+	struct qpnp_pwm_config	*pwm_config;
+	int			rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	lpg_config = &pwm->chip->lpg_config;
+	pwm_config = &pwm->pwm_config;
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	if (!pwm_config->in_use || !pwm_config->pwm_period) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (pwm_config->pwm_value == pwm_value)
+		goto out_unlock;
+
+	pwm_config->pwm_value = pwm_value;
+
+	rc = qpnp_lpg_save_pwm_value(pwm);
+
+	if (rc)
+		pr_err("Could not update PWM value for channel %d rc=%d\n",
+						pwm_config->channel_id, rc);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->lpg_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_config_pwm_value);
+
+/**
+ * pwm_lut_config - change LPG LUT device configuration
+ * @pwm: the PWM device
+ * @period_us: period in micro second
+ * @duty_pct: array of duty cycles in percent, like 20, 50.
+ * @lut_params: Lookup table parameters
+ */
+int pwm_lut_config(struct pwm_device *pwm, int period_us,
+		int duty_pct[], struct lut_params lut_params)
+{
+	int rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm) || !lut_params.idx_len) {
+		pr_err("Invalid pwm handle or idx_len=0\n");
+		return -EINVAL;
+	}
+
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	if (duty_pct == NULL && !(lut_params.flags & PM_PWM_LUT_NO_TABLE)) {
+		pr_err("Invalid duty_pct with flag\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	rc = _pwm_lut_config(pwm, period_us, duty_pct, lut_params);
+
+	mutex_unlock(&pwm->chip->lpg_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_lut_config);
+
+/**
+ * pwm_lut_enable - control a PWM device to start/stop LUT ramp
+ * @pwm: the PWM device
+ * @start: to start (1), or stop (0)
+ */
+int pwm_lut_enable(struct pwm_device *pwm, int start)
+{
+	struct qpnp_lpg_config	*lpg_config;
+	struct qpnp_pwm_config	*p_config;
+	struct lut_params	lut_params;
+	int			rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm)) {
+		pr_err("Invalid pwm handle\n");
+		return -EINVAL;
+	}
+
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	lpg_config = &pwm->chip->lpg_config;
+	p_config = &pwm->pwm_config;
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	if (start) {
+		if (!lpg_config->lpg_configured) {
+			pr_err("Request received to enable LUT for\n");
+			pr_err("LPG channel %d\n", pwm->pwm_config.channel_id);
+			pr_err("But LPG is not configured, falling back to\n");
+			pr_err("default LUT configuration if available\n");
+
+			if (lpg_config->bypass_lut) {
+				pr_err("No default LUT configuration found\n");
+				pr_err("Use pwm_lut_config() to configure\n");
+				rc = -EINVAL;
+				goto out;
+			}
+
+			qpnp_set_lut_params(&lut_params,
+					&lpg_config->lut_config);
+
+			rc = _pwm_lut_config(pwm, p_config->pwm_period,
+			(int *)lpg_config->lut_config.def_config.duty_pct_list,
+			lut_params);
+			if (rc) {
+				pr_err("Could not set the default LUT conf\n");
+				goto out;
+			}
+		}
+
+		rc = qpnp_lpg_enable_lut(pwm);
+	} else {
+		if (unlikely(!lpg_config->lpg_configured)) {
+			pr_err("LPG isn't configured\n");
+			rc = -EINVAL;
+			goto out;
+		}
+		rc = qpnp_lpg_disable_lut(pwm);
+	}
+
+out:
+	mutex_unlock(&pwm->chip->lpg_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_lut_enable);
+
+/* Fill in lpg device elements based on values found in device tree. */
+static int qpnp_lpg_get_dt_config(struct spmi_device *spmi,
+					struct qpnp_lpg_chip *chip)
+{
+	int			rc;
+	struct resource		*res;
+	struct device_node	*of_node = spmi->dev.of_node;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct pwm_device	*pwm_dev = &chip->pwm_dev;
+	struct qpnp_lut_config	*lut_config = &chip->lpg_config.lut_config;
+	struct qpnp_lut_default_config	*def_lut_config =
+						&lut_config->def_config;
+
+	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&spmi->dev, "%s: node is missing base address\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	lpg_config->base_addr = res->start;
+
+	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 1);
+	if (!res) {
+		dev_err(&spmi->dev, "%s: node is missing LUT base address\n",
+								__func__);
+		return -EINVAL;
+	}
+
+	lpg_config->lut_base_addr = res->start;
+	/* Each entry of LUT is of 2 bytes */
+	lpg_config->lut_size = resource_size(res) >> 1;
+
+
+	rc = of_property_read_u32(of_node, "qcom,channel-id",
+				&pwm_dev->pwm_config.channel_id);
+	if (rc) {
+		dev_err(&spmi->dev, "%s: node is missing LPG channel id\n",
+								__func__);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,period",
+				&pwm_dev->pwm_config.pwm_period);
+	if (rc) {
+		dev_err(&spmi->dev, "%s: node is missing PWM Period value\n",
+								__func__);
+		return rc;
+	}
+
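+	/*
+	 * Without a qcom,duty-percents table there is no default LUT
+	 * configuration; mark the LUT as bypassed and skip the LUT
+	 * allocation below.
+	 */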
+	if (!of_get_property(of_node, "qcom,duty-percents",
+						&def_lut_config->size)) {
+		lpg_config->bypass_lut = 1;
+	}
+
+	if (lpg_config->bypass_lut)
+		goto read_opt_props;
+
+	rc = of_property_read_u32(of_node, "qcom,start-index",
+					&def_lut_config->start_idx);
+
+	if (rc) {
+		dev_err(&spmi->dev, "Missing start index\n");
+		return rc;
+	}
+
+	def_lut_config->size /= sizeof(u32);
+
+	def_lut_config->duty_pct_list = kzalloc(sizeof(u32) *
+					def_lut_config->size, GFP_KERNEL);
+	if (!def_lut_config->duty_pct_list) {
+		dev_err(&spmi->dev, "%s: kzalloc failed on duty_pct_list\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,duty-percents",
+		def_lut_config->duty_pct_list, def_lut_config->size);
+	if (rc) {
+		dev_err(&spmi->dev, "invalid or missing property:\n");
+		dev_err(&spmi->dev, "qcom,duty-percents\n");
+		kfree(def_lut_config->duty_pct_list);
+		return rc;
+	}
+
+	lut_config->duty_pct_list = kzalloc(lpg_config->lut_size * sizeof(u16),
+								GFP_KERNEL);
+	if (!lut_config->duty_pct_list) {
+		dev_err(&spmi->dev, "can not allocate duty pct list\n");
+		kfree(def_lut_config->duty_pct_list);
+		return -ENOMEM;
+	}
+
+read_opt_props:
+	/* Initialize optional config parameters from DT if provided */
+	of_property_read_u32(of_node, "qcom,duty",
+					&pwm_dev->pwm_config.pwm_duty);
+	of_property_read_u32(of_node, "qcom,ramp-step-duration",
+					&lut_config->ramp_step_ms);
+	of_property_read_u32(of_node, "qcom,lpg-lut-pause-hi",
+					&lut_config->lut_pause_hi_cnt);
+	of_property_read_u32(of_node, "qcom,lpg-lut-pause-lo",
+					&lut_config->lut_pause_lo_cnt);
+	of_property_read_u32(of_node, "qcom,lpg-lut-ramp-direction",
+					(u32 *)&lut_config->ramp_direction);
+	of_property_read_u32(of_node, "qcom,lpg-lut-pattern-repeat",
+					(u32 *)&lut_config->pattern_repeat);
+	of_property_read_u32(of_node, "qcom,lpg-lut-ramp-toggle",
+					(u32 *)&lut_config->ramp_toggle);
+	of_property_read_u32(of_node, "qcom,lpg-lut-enable-pause-hi",
+					(u32 *)&lut_config->enable_pause_hi);
+	of_property_read_u32(of_node, "qcom,lpg-lut-enable-pause-lo",
+					(u32 *)&lut_config->enable_pause_lo);
+
+	return 0;
+}
+
+static int __devinit qpnp_pwm_probe(struct spmi_device *spmi)
+{
+	struct qpnp_lpg_chip	*chip;
+	int			rc, id;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("kzalloc() failed.\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&chip->lpg_mutex);
+
+	chip->spmi_dev = spmi;
+	chip->pwm_dev.chip = chip;
+	dev_set_drvdata(&spmi->dev, chip);
+
+	rc = qpnp_lpg_get_dt_config(spmi, chip);
+
+	if (rc)
+		goto failed_config;
+
+	id = chip->pwm_dev.pwm_config.channel_id;
+
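+	/* Index the new chip by its LPG channel id in the lpg_dev_tree
+	 * radix tree */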
+	rc = radix_tree_insert(&lpg_dev_tree, id, chip);
+
+	if (rc) {
+		dev_err(&spmi->dev, "%s: Failed to register LPG Channel %d\n",
+								__func__, id);
+		goto failed_insert;
+	}
+
+	return 0;
+
+failed_insert:
+	kfree(chip->lpg_config.lut_config.duty_pct_list);
+failed_config:
+	dev_set_drvdata(&spmi->dev, NULL);
+	mutex_destroy(&chip->lpg_mutex);
+	kfree(chip);
+	return rc;
+}
+
+static int __devexit qpnp_pwm_remove(struct spmi_device *spmi)
+{
+	struct qpnp_lpg_chip *chip;
+	struct qpnp_lpg_config *lpg_config;
+
+	chip = dev_get_drvdata(&spmi->dev);
+
+	dev_set_drvdata(&spmi->dev, NULL);
+
+	if (chip) {
+		lpg_config = &chip->lpg_config;
+		kfree(lpg_config->lut_config.duty_pct_list);
+		kfree(lpg_config->lut_config.def_config.duty_pct_list);
+		mutex_destroy(&chip->lpg_mutex);
+		kfree(chip);
+	}
+
+	return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+	{ .compatible = QPNP_LPG_DRIVER_NAME, },
+	{}
+};
+
+static const struct spmi_device_id qpnp_lpg_id[] = {
+	{ QPNP_LPG_DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_lpg_id);
+
+static struct spmi_driver qpnp_lpg_driver = {
+	.driver		= {
+		.name	= QPNP_LPG_DRIVER_NAME,
+		.of_match_table = spmi_match_table,
+		.owner = THIS_MODULE,
+	},
+	.probe		= qpnp_pwm_probe,
+	.remove		= __devexit_p(qpnp_pwm_remove),
+	.id_table	= qpnp_lpg_id,
+};
+
+/**
+ * qpnp_lpg_init() - register spmi driver for qpnp-lpg
+ */
+int __init qpnp_lpg_init(void)
+{
+	return spmi_driver_register(&qpnp_lpg_driver);
+}
+
+static void __exit qpnp_lpg_exit(void)
+{
+	spmi_driver_unregister(&qpnp_lpg_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC LPG driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_LPG_DRIVER_NAME);
+
+subsys_initcall(qpnp_lpg_init);
+module_exit(qpnp_lpg_exit);
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 85389d0..304dc6b 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -132,11 +132,14 @@
 	unsigned int		rconn_mohm;
 	struct mutex		last_ocv_uv_mutex;
 	int			last_ocv_uv;
-	int			last_cc_uah; /* used for Iavg calc for UUC */
+	int			last_cc_uah;
 	struct timeval		t;
 	int			last_uuc_uah;
 	int			enable_fcc_learning;
 	int			shutdown_soc;
+	int			timer_uuc_expired;
+	struct delayed_work	uuc_timer_work;
+	int			uuc_uah_iavg_prev;
 };
 
 static int shutdown_soc_invalid;
@@ -1153,49 +1156,128 @@
 	return uuc;
 }
 
-/* soc_rbatt when uuc_reported should be equal to uuc_now */
-#define SOC_RBATT_CHG		80
-#define SOC_RBATT_DISCHG	10
-static int calculate_unusable_charge_uah(struct pm8921_bms_chip *chip,
-				int rbatt, int fcc_uah, int cc_uah,
-				int soc_rbatt, int batt_temp, int chargecycles)
+#define SOC_RBATT_CHG		70
+#define SOC_RBATT_DISCHG	20
+
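+/* Tunables for stepping the unusable charge (UUC) estimate, exposed as
+ * module parameters so they can be adjusted at runtime.
+ */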
+static int uuc_iavg_div = 150;
+module_param(uuc_iavg_div, int, 0644);
+
+static int uuc_min_step_size = 120;
+module_param(uuc_min_step_size, int, 0644);
+
+static int uuc_multiplier = 1000;
+module_param(uuc_multiplier, int, 0644);
+
+#define UUC_TIMER_MS		120000
+
+static void uuc_timer_work(struct work_struct *work)
 {
-	struct timeval now;
-	int delta_time_s;
+	struct pm8921_bms_chip *chip = container_of(work,
+				struct pm8921_bms_chip, uuc_timer_work.work);
+
+	pr_debug("UUC Timer expired\n");
+	/* indicates the system is done with the high load during bootup */
+	chip->timer_uuc_expired = 1;
+}
+
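+/*
+ * Compute the average battery current (in uA) from the coulomb counter
+ * delta since the last call and the elapsed time, then remember the
+ * current cc_uah and timestamp for the next iteration.
+ */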
+static void calculate_iavg_ua(struct pm8921_bms_chip *chip, int cc_uah,
+				int *iavg_ua, int *delta_time_us)
+{
 	int delta_cc_uah;
-	int iavg_ua, iavg_ma;
-	int uuc_uah_itest, uuc_uah_iavg, uuc_now, uuc_reported;
-	s64 stepsize = 0;
-	int firsttime = 0;
+	struct timeval now;
 
 	delta_cc_uah = cc_uah - chip->last_cc_uah;
 	do_gettimeofday(&now);
 	if (chip->t.tv_sec != 0) {
-		delta_time_s = (now.tv_sec - chip->t.tv_sec);
+		*delta_time_us = (now.tv_sec - chip->t.tv_sec) * USEC_PER_SEC
+				+ now.tv_usec - chip->t.tv_usec;
 	} else {
-		/* uuc calculation for the first time */
-		delta_time_s = 0;
-		firsttime = 1;
+		/* calculation for the first time */
+		*delta_time_us = 0;
 	}
 
-	if (delta_time_s != 0)
-		iavg_ua = div_s64((s64)delta_cc_uah * 3600, delta_time_s);
+	if (*delta_time_us != 0)
+		*iavg_ua = div_s64((s64)delta_cc_uah * 3600 * 1000000,
+					*delta_time_us);
 	else
-		iavg_ua = 0;
+		*iavg_ua = 0;
 
-	iavg_ma = iavg_ua/1000;
+	pr_debug("t.tv_sec = %d, now.tv_sec = %d delta_us = %d iavg_ua = %d\n",
+				(int)chip->t.tv_sec, (int)now.tv_sec,
+				*delta_time_us, (int)*iavg_ua);
+	/* remember cc_uah */
+	chip->last_cc_uah = cc_uah;
 
-	pr_debug("t.tv_sec = %d, now.tv_sec = %d\n", (int)chip->t.tv_sec,
-				(int)now.tv_sec);
+	/* remember this time */
+	chip->t = now;
+}
 
-	pr_debug("delta_time_s = %d iavg_ma = %d\n", delta_time_s, iavg_ma);
+#define UUC_IAVG_THRESHOLD_UAH	50000
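+/*
+ * Move the reported UUC gradually towards the iavg/itest based estimates,
+ * with a step size derived from how far the SOC is from the charge or
+ * discharge threshold, so the reported value never jumps abruptly.
+ */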
+static int scale_unusable_charge_uah(struct pm8921_bms_chip *chip,
+			bool charging, int uuc_uah_iavg, int uuc_uah_itest,
+			int uuc_uah_iavg_prev)
+{
+	int stepsize = 0;
+	int delta_uuc = 0;
+	int uuc_reported = 0;
 
-	if (iavg_ma == 0) {
-		pr_debug("Iavg = 0 returning last uuc = %d\n",
-				chip->last_uuc_uah);
-		uuc_reported = chip->last_uuc_uah;
-		goto out;
+	if (charging) {
+		stepsize = max(uuc_min_step_size,
+				uuc_multiplier * (SOC_RBATT_CHG - last_soc));
+		/*
+		 * set the delta only if uuc is decreasing. If it has increased
+		 * simply report the last uuc since we don't want to report a
+		 * higher uuc as charging progresses
+		 */
+		if (chip->last_uuc_uah > uuc_uah_iavg)
+			delta_uuc = (chip->last_uuc_uah - uuc_uah_iavg)
+								/ stepsize;
+		uuc_reported = chip->last_uuc_uah - delta_uuc;
+	} else {
+		stepsize = max(uuc_min_step_size,
+			uuc_multiplier * (last_soc - SOC_RBATT_DISCHG));
+		if (uuc_uah_itest > uuc_uah_iavg) {
+			if ((uuc_uah_iavg > uuc_uah_iavg_prev
+						+ UUC_IAVG_THRESHOLD_UAH)
+				&& chip->timer_uuc_expired)
+				/*
+				 * there is a big jump in iavg current well
+				 * past bootup; step uuc up towards this high
+				 * iavg based uuc
+				 */
+				delta_uuc = (uuc_uah_iavg - uuc_uah_iavg_prev)
+							/ uuc_iavg_div;
+			else
+				/* increase uuc towards itest based uuc */
+				delta_uuc = (uuc_uah_itest - uuc_uah_iavg)
+						/ stepsize;
+		} else {
+			/*
+			 * the iavg based uuc was higher than itest based
+			 * uuc. This means that iavg > itest. Itest represents
+			 * the max current drawn from the device at any time.
+			 * If we find iavg > itest, ignore iavg and simply step
+			 * up the uuc based on itest
+			 */
+			delta_uuc = uuc_uah_itest / stepsize;
+		}
+		uuc_reported = min(uuc_uah_itest,
+					chip->last_uuc_uah + delta_uuc);
 	}
+	pr_debug("uuc_prev = %d stepsize = %d d_uuc =  %d uuc_reported = %d\n",
+			chip->last_uuc_uah, (int)stepsize, delta_uuc,
+			uuc_reported);
+	return uuc_reported;
+}
+
+static int calculate_unusable_charge_uah(struct pm8921_bms_chip *chip,
+				int rbatt, int fcc_uah, int cc_uah,
+				int soc_rbatt, int batt_temp, int chargecycles,
+				int iavg_ua)
+{
+	int uuc_uah_itest, uuc_uah_iavg, uuc_reported;
+	static int firsttime = 1;
+	int iavg_ma = iavg_ua / 1000;
 
 	/* calculate unusable charge with itest */
 	uuc_uah_itest = calculate_uuc_uah_at_given_current(chip,
@@ -1212,6 +1294,8 @@
 	pr_debug("iavg = %d uuc_iavg = %d\n", iavg_ma, uuc_uah_iavg);
 
 	if (firsttime) {
+		chip->uuc_uah_iavg_prev = uuc_uah_iavg;
+
 		if (cc_uah < chip->last_cc_uah)
 			chip->last_uuc_uah = uuc_uah_itest;
 		else
@@ -1219,45 +1303,21 @@
 		pr_debug("firsttime uuc_prev = %d\n", chip->last_uuc_uah);
 	}
 
-	uuc_now = min(uuc_uah_itest, uuc_uah_iavg);
+	uuc_reported = scale_unusable_charge_uah(chip,
+				cc_uah < chip->last_cc_uah,
+				uuc_uah_iavg, uuc_uah_itest,
+				chip->uuc_uah_iavg_prev);
 
-	uuc_reported = -EINVAL;
-	if (cc_uah < chip->last_cc_uah) {
-		/* charging */
-		if (uuc_now < chip->last_uuc_uah) {
-			stepsize = max(1, (SOC_RBATT_CHG - soc_rbatt));
-			/* uuc_reported = uuc_prev + deltauuc / stepsize */
-			uuc_reported = div_s64 (stepsize * chip->last_uuc_uah
-					+ (uuc_now - chip->last_uuc_uah),
-					stepsize);
-			uuc_reported = max(0, uuc_reported);
-		}
-	} else {
-		if (uuc_now > chip->last_uuc_uah) {
-			stepsize = max(1, (soc_rbatt - SOC_RBATT_DISCHG));
-			/* uuc_reported = uuc_prev + deltauuc / stepsize */
-			uuc_reported = div_s64 (stepsize * chip->last_uuc_uah
-					+ (uuc_now - chip->last_uuc_uah),
-					stepsize);
-			uuc_reported = max(0, uuc_reported);
-		}
-	}
-	if (uuc_reported == -EINVAL)
-		uuc_reported = chip->last_uuc_uah;
+	/* remember the last uuc_uah_iavg */
+	chip->uuc_uah_iavg_prev = uuc_uah_iavg;
 
-	pr_debug("uuc_now = %d uuc_prev = %d stepsize = %d uuc_reported = %d\n",
-			uuc_now, chip->last_uuc_uah, (int)stepsize,
-			uuc_reported);
-
-out:
 	/* remember the reported uuc */
 	chip->last_uuc_uah = uuc_reported;
 
-	/* remember cc_uah */
-	chip->last_cc_uah = cc_uah;
-
-	/* remember this time */
-	chip->t = now;
+	if (firsttime == 1) {
+		/* uuc calculation for the first time is done */
+		firsttime = 0;
+	}
 
 	return uuc_reported;
 }
@@ -1283,7 +1343,9 @@
 						int *unusable_charge_uah,
 						int *remaining_charge_uah,
 						int *cc_uah,
-						int *rbatt)
+						int *rbatt,
+						int *iavg_ua,
+						int *delta_time_us)
 {
 	int soc_rbatt;
 
@@ -1309,10 +1371,11 @@
 		soc_rbatt = 0;
 	*rbatt = get_rbatt(chip, soc_rbatt, batt_temp);
 
+	calculate_iavg_ua(chip, *cc_uah, iavg_ua, delta_time_us);
+
 	*unusable_charge_uah = calculate_unusable_charge_uah(chip, *rbatt,
 					*fcc_uah, *cc_uah, soc_rbatt,
-					batt_temp,
-					chargecycles);
+					batt_temp, chargecycles, *iavg_ua);
 	pr_debug("UUC = %uuAh\n", *unusable_charge_uah);
 }
 
@@ -1326,13 +1389,17 @@
 	int cc_uah;
 	int real_fcc_uah;
 	int rbatt;
+	int iavg_ua;
+	int delta_time_us;
 
 	calculate_soc_params(chip, raw, batt_temp, chargecycles,
 						&fcc_uah,
 						&unusable_charge_uah,
 						&remaining_charge_uah,
 						&cc_uah,
-						&rbatt);
+						&rbatt,
+						&iavg_ua,
+						&delta_time_us);
 
 	real_fcc_uah = remaining_charge_uah - cc_uah;
 	*ret_fcc_uah = fcc_uah;
@@ -1522,13 +1589,17 @@
 	int cc_uah;
 	int rbatt;
 	int shutdown_adjusted_soc;
+	int iavg_ua;
+	int delta_time_us;
 
 	calculate_soc_params(chip, raw, batt_temp, chargecycles,
 						&fcc_uah,
 						&unusable_charge_uah,
 						&remaining_charge_uah,
 						&cc_uah,
-						&rbatt);
+						&rbatt,
+						&iavg_ua,
+						&delta_time_us);
 
 	/* calculate remaining usable charge */
 	remaining_usable_charge_uah = remaining_charge_uah
@@ -1743,6 +1814,8 @@
 	int remaining_charge_uah;
 	int cc_uah;
 	int rbatt;
+	int iavg_ua;
+	int delta_time_us;
 
 	if (!the_chip) {
 		pr_err("called before initialization\n");
@@ -1768,7 +1841,9 @@
 						&unusable_charge_uah,
 						&remaining_charge_uah,
 						&cc_uah,
-						&rbatt);
+						&rbatt,
+						&iavg_ua,
+						&delta_time_us);
 	mutex_unlock(&the_chip->last_ocv_uv_mutex);
 
 	return rbatt;
@@ -2704,6 +2779,10 @@
 	pm8921_bms_enable_irq(chip, PM8921_BMS_GOOD_OCV);
 	pm8921_bms_enable_irq(chip, PM8921_BMS_OCV_FOR_R);
 
+	INIT_DELAYED_WORK(&chip->uuc_timer_work, uuc_timer_work);
+	schedule_delayed_work(&chip->uuc_timer_work,
+					msecs_to_jiffies(UUC_TIMER_MS));
+
 	get_battery_uvolts(chip, &vbatt);
 	pr_info("OK battery_capacity_at_boot=%d volt = %d ocv = %d\n",
 				pm8921_bms_get_percent_charge(),
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index dc40c8e..f84e3ac 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -78,6 +78,7 @@
 #define CHG_TTRIM		0x35C
 #define CHG_COMP_OVR		0x20A
 #define IUSB_FINE_RES		0x2B6
+#define OVP_USB_UVD		0x2B7
 
 /* check EOC every 10 seconds */
 #define EOC_CHECK_PERIOD_MS	10000
@@ -202,6 +203,7 @@
  * @update_time:		how frequently the userland needs to be updated
  * @max_voltage_mv:		the max volts the batt should be charged up to
  * @min_voltage_mv:		the min battery voltage before turning the FETon
+ * @uvd_voltage_mv:		(PM8917 only) the falling UVD threshold voltage
  * @cool_temp_dc:		the cool temp threshold in deciCelcius
  * @warm_temp_dc:		the warm temp threshold in deciCelcius
  * @resume_voltage_delta:	the voltage delta from vdd max at which the
@@ -221,6 +223,7 @@
 	unsigned int			update_time;
 	unsigned int			max_voltage_mv;
 	unsigned int			min_voltage_mv;
+	unsigned int			uvd_voltage_mv;
 	int				cool_temp_dc;
 	int				warm_temp_dc;
 	unsigned int			temp_check_period;
@@ -266,6 +269,7 @@
 	int				rconn_mohm;
 	enum pm8921_chg_led_src_config	led_src_config;
 	bool				host_mode;
+	u8				active_path;
 };
 
 /* user space parameter to limit usb current */
@@ -587,6 +591,24 @@
 	return voltage_mv;
 }
 
+#define PM8917_USB_UVD_MIN_MV	3850
+#define PM8917_USB_UVD_MAX_MV	4350
+#define PM8917_USB_UVD_STEP_MV	100
+#define PM8917_USB_UVD_MASK	0x7
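+/* Program the PM8917 USB under-voltage-detect falling threshold,
+ * in 100 mV steps between 3850 mV and 4350 mV.
+ */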
+static int pm_chg_uvd_threshold_set(struct pm8921_chg_chip *chip, int thresh_mv)
+{
+	u8 temp;
+
+	if (thresh_mv < PM8917_USB_UVD_MIN_MV
+			|| thresh_mv > PM8917_USB_UVD_MAX_MV) {
+		pr_err("bad mV=%d asked to set\n", thresh_mv);
+		return -EINVAL;
+	}
+	temp = (thresh_mv - PM8917_USB_UVD_MIN_MV) / PM8917_USB_UVD_STEP_MV;
+	return pm_chg_masked_write(chip, OVP_USB_UVD,
+				PM8917_USB_UVD_MASK, temp);
+}
+
 #define PM8921_CHG_IBATMAX_MIN	325
 #define PM8921_CHG_IBATMAX_MAX	2000
 #define PM8921_CHG_I_MIN_MA	225
@@ -1740,6 +1762,16 @@
 }
 EXPORT_SYMBOL(pm8921_disable_input_current_limit);
 
+int pm8917_set_under_voltage_detection_threshold(int mv)
+{
+	if (!the_chip) {
+		pr_err("called before init\n");
+		return -EINVAL;
+	}
+	return pm_chg_uvd_threshold_set(the_chip, mv);
+}
+EXPORT_SYMBOL(pm8917_set_under_voltage_detection_threshold);
+
 int pm8921_set_max_battery_charge_current(int ma)
 {
 	if (!the_chip) {
@@ -1998,6 +2030,11 @@
 		return;
 	}
 
+	schedule_delayed_work(&chip->unplug_check_work,
+	round_jiffies_relative(msecs_to_jiffies
+		(UNPLUG_CHECK_WAIT_PERIOD_MS)));
+	pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
+
 	power_supply_set_online(chip->ext_psy, dc_present);
 	power_supply_set_charge_type(chip->ext_psy,
 					POWER_SUPPLY_CHARGE_TYPE_FAST);
@@ -2012,51 +2049,51 @@
 	power_supply_changed(&chip->batt_psy);
 }
 
-static void turn_off_usb_ovp_fet(struct pm8921_chg_chip *chip)
+static void turn_off_ovp_fet(struct pm8921_chg_chip *chip, u16 ovptestreg)
 {
 	u8 temp;
 	int rc;
 
-	rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0x30);
+	rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, 0x30);
 	if (rc) {
-		pr_err("Failed to write 0x30 to USB_OVP_TEST rc = %d\n", rc);
+		pr_err("Failed to write 0x30 to OVP_TEST rc = %d\n", rc);
 		return;
 	}
-	rc = pm8xxx_readb(chip->dev->parent, USB_OVP_TEST, &temp);
+	rc = pm8xxx_readb(chip->dev->parent, ovptestreg, &temp);
 	if (rc) {
-		pr_err("Failed to read from USB_OVP_TEST rc = %d\n", rc);
+		pr_err("Failed to read from OVP_TEST rc = %d\n", rc);
 		return;
 	}
 	/* set ovp fet disable bit and the write bit */
 	temp |= 0x81;
-	rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, temp);
+	rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, temp);
 	if (rc) {
-		pr_err("Failed to write 0x%x USB_OVP_TEST rc=%d\n", temp, rc);
+		pr_err("Failed to write 0x%x OVP_TEST rc=%d\n", temp, rc);
 		return;
 	}
 }
 
-static void turn_on_usb_ovp_fet(struct pm8921_chg_chip *chip)
+static void turn_on_ovp_fet(struct pm8921_chg_chip *chip, u16 ovptestreg)
 {
 	u8 temp;
 	int rc;
 
-	rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0x30);
+	rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, 0x30);
 	if (rc) {
-		pr_err("Failed to write 0x30 to USB_OVP_TEST rc = %d\n", rc);
+		pr_err("Failed to write 0x30 to OVP_TEST rc = %d\n", rc);
 		return;
 	}
-	rc = pm8xxx_readb(chip->dev->parent, USB_OVP_TEST, &temp);
+	rc = pm8xxx_readb(chip->dev->parent, ovptestreg, &temp);
 	if (rc) {
-		pr_err("Failed to read from USB_OVP_TEST rc = %d\n", rc);
+		pr_err("Failed to read from OVP_TEST rc = %d\n", rc);
 		return;
 	}
 	/* unset ovp fet disable bit and set the write bit */
 	temp &= 0xFE;
 	temp |= 0x80;
-	rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, temp);
+	rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, temp);
 	if (rc) {
-		pr_err("Failed to write 0x%x to USB_OVP_TEST rc = %d\n",
+		pr_err("Failed to write 0x%x to OVP_TEST rc = %d\n",
 								temp, rc);
 		return;
 	}
@@ -2065,38 +2102,66 @@
 static int param_open_ovp_counter = 10;
 module_param(param_open_ovp_counter, int, 0644);
 
+#define USB_ACTIVE_BIT BIT(5)
+#define DC_ACTIVE_BIT BIT(6)
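+/*
+ * Check whether a charger is present on the given active input path
+ * (USB or DC) by reading the corresponding valid-IRQ real-time status.
+ */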
+static int is_active_chg_plugged_in(struct pm8921_chg_chip *chip,
+						u8 active_chg_mask)
+{
+	if (active_chg_mask & USB_ACTIVE_BIT)
+		return pm_chg_get_rt_status(chip, USBIN_VALID_IRQ);
+	else if (active_chg_mask & DC_ACTIVE_BIT)
+		return pm_chg_get_rt_status(chip, DCIN_VALID_IRQ);
+	else
+		return 0;
+}
+
 #define WRITE_BANK_4		0xC0
-#define USB_OVP_DEBOUNCE_TIME 0x06
+#define OVP_DEBOUNCE_TIME 0x06
 static void unplug_ovp_fet_open(struct pm8921_chg_chip *chip)
 {
-	int chg_gone = 0, usb_chg_plugged_in = 0;
+	int chg_gone = 0, active_chg_plugged_in = 0;
 	int count = 0;
+	u8 active_mask = 0;
+	u16 ovpreg, ovptestreg;
+
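+	/* Pick the OVP control/test registers for whichever input path
+	 * (USB or DC) is both active and currently plugged in.
+	 */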
+	if (is_usb_chg_plugged_in(chip) &&
+		(chip->active_path & USB_ACTIVE_BIT)) {
+		ovpreg = USB_OVP_CONTROL;
+		ovptestreg = USB_OVP_TEST;
+		active_mask = USB_ACTIVE_BIT;
+	} else if (is_dc_chg_plugged_in(chip) &&
+		(chip->active_path & DC_ACTIVE_BIT)) {
+		ovpreg = DC_OVP_CONTROL;
+		ovptestreg = DC_OVP_TEST;
+		active_mask = DC_ACTIVE_BIT;
+	} else {
+		return;
+	}
 
 	while (count++ < param_open_ovp_counter) {
-		pm_chg_masked_write(chip, USB_OVP_CONTROL,
-						USB_OVP_DEBOUNCE_TIME, 0x0);
+		pm_chg_masked_write(chip, ovpreg, OVP_DEBOUNCE_TIME, 0x0);
 		usleep(10);
-		usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
+		active_chg_plugged_in
+			= is_active_chg_plugged_in(chip, active_mask);
 		chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
-		pr_debug("OVP FET count = %d chg_gone=%d, usb_valid = %d\n",
-					count, chg_gone, usb_chg_plugged_in);
+		pr_debug("OVP FET count = %d chg_gone=%d, active_valid = %d\n",
+					count, chg_gone, active_chg_plugged_in);
 
 		/* note usb_chg_plugged_in=0 => chg_gone=1 */
-		if (chg_gone == 1 && usb_chg_plugged_in == 1) {
+		if (chg_gone == 1 && active_chg_plugged_in == 1) {
 			pr_debug("since chg_gone = 1 dis ovp_fet for 20msec\n");
-			turn_off_usb_ovp_fet(chip);
+			turn_off_ovp_fet(chip, ovptestreg);
 
 			msleep(20);
 
-			turn_on_usb_ovp_fet(chip);
+			turn_on_ovp_fet(chip, ovptestreg);
 		} else {
 			break;
 		}
 	}
-	pm_chg_masked_write(chip, USB_OVP_CONTROL,
-		USB_OVP_DEBOUNCE_TIME, 0x2);
-	pr_debug("Exit count=%d chg_gone=%d, usb_valid=%d\n",
-		count, chg_gone, usb_chg_plugged_in);
+	pm_chg_masked_write(chip, ovpreg, OVP_DEBOUNCE_TIME, 0x2);
+	pr_debug("Exit count=%d chg_gone=%d, active_valid=%d\n",
+		count, chg_gone, active_chg_plugged_in);
 	return;
 }
 
@@ -2120,6 +2185,9 @@
 		i = find_usb_ma_value(*value);
 		if (i > 0)
 			i--;
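+		/* when fine resolution is unsupported, skip table entries
+		 * that require the PM8917 fine-resolution bit */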
+		while (!the_chip->iusb_fine_res && i > 0
+			&& (usb_ma_table[i].value & PM8917_IUSB_FINE_RES))
+			i--;
 		*value = usb_ma_table[i].usb_ma;
 	}
 }
@@ -2318,7 +2386,8 @@
 static void attempt_reverse_boost_fix(struct pm8921_chg_chip *chip,
 							int count, int usb_ma)
 {
-	__pm8921_charger_vbus_draw(500);
+	if (usb_ma)
+		__pm8921_charger_vbus_draw(500);
 	pr_debug("count = %d iusb=500mA\n", count);
 	disable_input_voltage_regulation(chip);
 	pr_debug("count = %d disable_input_regulation\n", count);
@@ -2332,66 +2401,85 @@
 	pr_debug("count = %d restoring input regulation and usb_ma = %d\n",
 		 count, usb_ma);
 	enable_input_voltage_regulation(chip);
-	__pm8921_charger_vbus_draw(usb_ma);
+	if (usb_ma)
+		__pm8921_charger_vbus_draw(usb_ma);
 }
 
 #define VIN_ACTIVE_BIT BIT(0)
-#define UNPLUG_WRKARND_RESTORE_WAIT_PERIOD_US 200
-#define VIN_MIN_INCREASE_MV 100
+#define UNPLUG_WRKARND_RESTORE_WAIT_PERIOD_US	200
+#define VIN_MIN_INCREASE_MV	100
 static void unplug_check_worker(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct pm8921_chg_chip *chip = container_of(dwork,
 				struct pm8921_chg_chip, unplug_check_work);
-	u8 reg_loop;
-	int ibat, usb_chg_plugged_in, usb_ma;
+	u8 reg_loop, active_path;
+	int rc, ibat, active_chg_plugged_in, usb_ma;
 	int chg_gone = 0;
 
 	reg_loop = 0;
-	usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
-	if (!usb_chg_plugged_in) {
-		pr_debug("Stopping Unplug Check Worker since USB is removed"
-			"reg_loop = %d, fsm = %d ibat = %d\n",
-			pm_chg_get_regulation_loop(chip),
-			pm_chg_get_fsm_state(chip),
-			get_prop_batt_current(chip)
-			);
+
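+	/* PBL_ACCESS1 reports which input path (USB/DC) is currently active */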
+	rc = pm8xxx_readb(chip->dev->parent, PBL_ACCESS1, &active_path);
+	if (rc) {
+		pr_err("Failed to read PBL_ACCESS1 rc=%d\n", rc);
+		return;
+	}
+	chip->active_path = active_path;
+
+	active_chg_plugged_in = is_active_chg_plugged_in(chip, active_path);
+	pr_debug("active_path = 0x%x, active_chg_plugged_in = %d\n",
+			active_path, active_chg_plugged_in);
+	if (active_path & USB_ACTIVE_BIT) {
+		pr_debug("USB charger active\n");
+
+		pm_chg_iusbmax_get(chip, &usb_ma);
+		if (usb_ma == 500 && !usb_target_ma) {
+			pr_debug("Stopping Unplug Check Worker USB == 500mA\n");
+			disable_input_voltage_regulation(chip);
+			return;
+		}
+
+		if (usb_ma <= 100) {
+			pr_debug(
+				"Unenumerated or suspended usb_ma = %d skip\n",
+				usb_ma);
+			goto check_again_later;
+		}
+	} else if (active_path & DC_ACTIVE_BIT) {
+		pr_debug("DC charger active\n");
+	} else {
+		/* No charger active */
+		if (!(is_usb_chg_plugged_in(chip)
+				&& !(is_dc_chg_plugged_in(chip)))) {
+			pr_debug(
+			"Stop: chg removed reg_loop = %d, fsm = %d ibat = %d\n",
+				pm_chg_get_regulation_loop(chip),
+				pm_chg_get_fsm_state(chip),
+				get_prop_batt_current(chip)
+				);
+		}
 		return;
 	}
 
-	pm_chg_iusbmax_get(chip, &usb_ma);
-	if (usb_ma == 500 && !usb_target_ma) {
-		pr_debug("Stopping Unplug Check Worker since USB == 500mA\n");
-		disable_input_voltage_regulation(chip);
-		return;
-	}
-
-	if (usb_ma <= 100) {
-		pr_debug(
-			"Unenumerated yet or suspended usb_ma = %d skipping\n",
-			usb_ma);
-		goto check_again_later;
-	}
-	if (pm8921_chg_is_enabled(chip, CHG_GONE_IRQ))
-		pr_debug("chg gone irq is enabled\n");
-
-	reg_loop = pm_chg_get_regulation_loop(chip);
-	pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
-
-	if ((reg_loop & VIN_ACTIVE_BIT) && (usb_ma > USB_WALL_THRESHOLD_MA)) {
-		decrease_usb_ma_value(&usb_ma);
-		usb_target_ma = usb_ma;
-		/* end AICL here */
-		__pm8921_charger_vbus_draw(usb_ma);
-		pr_debug("usb_now=%d, usb_target = %d\n",
-			usb_ma, usb_target_ma);
+	if (active_path & USB_ACTIVE_BIT) {
+		reg_loop = pm_chg_get_regulation_loop(chip);
+		pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
+		if ((reg_loop & VIN_ACTIVE_BIT) &&
+			(usb_ma > USB_WALL_THRESHOLD_MA)) {
+			decrease_usb_ma_value(&usb_ma);
+			usb_target_ma = usb_ma;
+			/* end AICL here */
+			__pm8921_charger_vbus_draw(usb_ma);
+			pr_debug("usb_now=%d, usb_target = %d\n",
+				usb_ma, usb_target_ma);
+		}
 	}
 
 	reg_loop = pm_chg_get_regulation_loop(chip);
 	pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
 
+	ibat = get_prop_batt_current(chip);
 	if (reg_loop & VIN_ACTIVE_BIT) {
-		ibat = get_prop_batt_current(chip);
 
 		pr_debug("ibat = %d fsm = %d reg_loop = 0x%x\n",
 				ibat, pm_chg_get_fsm_state(chip), reg_loop);
@@ -2399,25 +2487,36 @@
 			int count = 0;
 
 			while (count++ < param_vin_disable_counter
-					&& usb_chg_plugged_in == 1) {
-				attempt_reverse_boost_fix(chip, count, usb_ma);
-				usb_chg_plugged_in
-					= is_usb_chg_plugged_in(chip);
+					&& active_chg_plugged_in == 1) {
+				if (active_path & USB_ACTIVE_BIT)
+					attempt_reverse_boost_fix(chip,
+								count, usb_ma);
+				else
+					attempt_reverse_boost_fix(chip,
+								count, 0);
+				/* after reverse boost fix check if the active
+				 * charger was detected as removed */
+				active_chg_plugged_in
+					= is_active_chg_plugged_in(chip,
+						active_path);
+				pr_debug("active_chg_plugged_in = %d\n",
+						active_chg_plugged_in);
 			}
 		}
 	}
 
-	usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
+	active_chg_plugged_in = is_active_chg_plugged_in(chip, active_path);
+	pr_debug("active_path = 0x%x, active_chg = %d\n",
+			active_path, active_chg_plugged_in);
 	chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
 
-	if (chg_gone == 1  && usb_chg_plugged_in == 1) {
-		/* run the worker directly */
-		pr_debug(" ver5 step: chg_gone=%d, usb_valid = %d\n",
-						chg_gone, usb_chg_plugged_in);
+	if (chg_gone == 1  && active_chg_plugged_in == 1) {
+		pr_debug("chg_gone=%d, active_chg_plugged_in = %d\n",
+					chg_gone, active_chg_plugged_in);
 		unplug_ovp_fet_open(chip);
 	}
 
-	if (!(reg_loop & VIN_ACTIVE_BIT)) {
+	if (!(reg_loop & VIN_ACTIVE_BIT) && (active_path & USB_ACTIVE_BIT)) {
 		/* only increase iusb_max if vin loop not active */
 		if (usb_ma < usb_target_ma) {
 			increase_usb_ma_value(&usb_ma);
@@ -3385,6 +3484,8 @@
 
 #define ENUM_TIMER_STOP_BIT	BIT(1)
 #define BOOT_DONE_BIT		BIT(6)
+#define BOOT_TIMER_EN_BIT	BIT(1)
+#define BOOT_DONE_MASK		(BOOT_DONE_BIT | BOOT_TIMER_EN_BIT)
 #define CHG_BATFET_ON_BIT	BIT(3)
 #define CHG_VCP_EN		BIT(0)
 #define CHG_BAT_TEMP_DIS_BIT	BIT(2)
@@ -3400,7 +3501,7 @@
 	detect_battery_removal(chip);
 
 	rc = pm_chg_masked_write(chip, SYS_CONFIG_2,
-					BOOT_DONE_BIT, BOOT_DONE_BIT);
+					BOOT_DONE_MASK, BOOT_DONE_MASK);
 	if (rc) {
 		pr_err("Failed to set BOOT_DONE_BIT rc=%d\n", rc);
 		return rc;
@@ -3584,8 +3685,17 @@
 		pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0xAC);
 
 	/* Enable isub_fine resolution AICL for PM8917 */
-	if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8917)
+	if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8917) {
 		chip->iusb_fine_res = true;
+		if (chip->uvd_voltage_mv) {
+			rc = pm_chg_uvd_threshold_set(chip,
+					chip->uvd_voltage_mv);
+			if (rc) {
+				pr_err("Failed to set UVD threshold %d rc=%d\n",
+						chip->uvd_voltage_mv, rc);
+				return rc;
+			}
+		}
+	}
 
 	pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0xD9);
 
@@ -3871,6 +3981,7 @@
 	chip->update_time = pdata->update_time;
 	chip->max_voltage_mv = pdata->max_voltage;
 	chip->min_voltage_mv = pdata->min_voltage;
+	chip->uvd_voltage_mv = pdata->uvd_thresh_voltage;
 	chip->resume_voltage_delta = pdata->resume_voltage_delta;
 	chip->term_current = pdata->term_current;
 	chip->vbat_channel = pdata->charger_cdata.vbat_channel;
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index c271ca4..cb09de3 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -358,6 +358,7 @@
 				void *data);
 static void smux_uart_power_on_atomic(void);
 static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
+static void smux_flush_workqueues(void);
 
 /**
  * Convert TTY Error Flags to string for logging purposes.
@@ -513,7 +514,6 @@
 		}
 
 		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
-		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->tx_flow_control = 0;
@@ -526,12 +526,6 @@
 
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 	}
-
-	/* Flush TX/RX workqueues */
-	SMUX_DBG("%s: flushing tx wq\n", __func__);
-	flush_workqueue(smux_tx_wq);
-	SMUX_DBG("%s: flushing rx wq\n", __func__);
-	flush_workqueue(smux_rx_wq);
 }
 
 int smux_assert_lch_id(uint32_t lcid)
@@ -2232,12 +2226,13 @@
 
 /**
  * Power down the UART.
+ *
+ * Must be called with mutex_lha0 locked.
  */
-static void smux_uart_power_off(void)
+static void smux_uart_power_off_atomic(void)
 {
 	struct uart_state *state;
 
-	mutex_lock(&smux.mutex_lha0);
 	if (!smux.tty || !smux.tty->driver_data) {
 		pr_err("%s: unable to find UART port for tty %p\n",
 				__func__, smux.tty);
@@ -2246,6 +2241,15 @@
 	}
 	state = smux.tty->driver_data;
 	msm_hs_request_clock_off(state->uart_port);
+}
+
+/**
+ * Power down the UART.
+ */
+static void smux_uart_power_off(void)
+{
+	mutex_lock(&smux.mutex_lha0);
+	smux_uart_power_off_atomic();
 	mutex_unlock(&smux.mutex_lha0);
 }
 
@@ -2327,6 +2331,9 @@
 	struct smux_pkt_t *pkt;
 	unsigned long flags;
 
+	if (smux.in_reset)
+		return;
+
 	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
 	spin_lock(&smux.tx_lock_lha2);
 
@@ -2446,6 +2453,12 @@
 	SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
 	used = 0;
 	do {
+		if (smux.in_reset) {
+			SMUX_DBG("%s: abort RX due to reset\n", __func__);
+			smux.rx_state = SMUX_RX_IDLE;
+			break;
+		}
+
 		SMUX_DBG("%s: state %d; %d of %d\n",
 				__func__, smux.rx_state, used, len);
 		initial_rx_state = smux.rx_state;
@@ -2494,7 +2507,7 @@
 
 	/* get next retry packet */
 	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
+	if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
 		/* port has been closed - remove all retries */
 		while (!list_empty(&ch->rx_retry_queue)) {
 			retry = list_first_entry(&ch->rx_retry_queue,
@@ -2797,6 +2810,26 @@
 	return updated;
 }
 
+/**
+ * Flush all SMUX workqueues.
+ *
+ * This sets the reset bit to abort any processing loops and then
+ * flushes the workqueues to ensure that no new pending work is
+ * running.  Do not call with any locks used by workers held as
+ * this will result in a deadlock.
+ */
+static void smux_flush_workqueues(void)
+{
+	smux.in_reset = 1;
+
+	SMUX_DBG("%s: flushing tx wq\n", __func__);
+	flush_workqueue(smux_tx_wq);
+	SMUX_DBG("%s: flushing rx wq\n", __func__);
+	flush_workqueue(smux_rx_wq);
+	SMUX_DBG("%s: flushing notify wq\n", __func__);
+	flush_workqueue(smux_notify_wq);
+}
+
 /**********************************************************************/
 /* Kernel API                                                         */
 /**********************************************************************/
@@ -2922,6 +2955,7 @@
 			ch->local_state,
 			SMUX_LCH_LOCAL_OPENING);
 
+	ch->rx_flow_control_auto = 0;
 	ch->local_state = SMUX_LCH_LOCAL_OPENING;
 
 	ch->priv = priv;
@@ -2948,6 +2982,7 @@
 
 out:
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	smux_rx_flow_control_updated(ch);
 	if (tx_ready)
 		list_channel(ch);
 	return ret;
@@ -3341,6 +3376,7 @@
 	SMUX_DBG("%s: ssr - after shutdown\n", __func__);
 
 	/* Cleanup channels */
+	smux_flush_workqueues();
 	mutex_lock(&smux.mutex_lha0);
 	smux_lch_purge();
 	if (smux.tty)
@@ -3357,8 +3393,11 @@
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
 	if (power_off_uart)
-		smux_uart_power_off();
+		smux_uart_power_off_atomic();
 
+	smux.tx_activity_flag = 0;
+	smux.rx_activity_flag = 0;
+	smux.rx_state = SMUX_RX_IDLE;
 	smux.in_reset = 0;
 	mutex_unlock(&smux.mutex_lha0);
 
@@ -3440,6 +3479,8 @@
 	int i;
 
 	SMUX_DBG("%s: ldisc unload\n", __func__);
+	smux_flush_workqueues();
+
 	mutex_lock(&smux.mutex_lha0);
 	if (smux.ld_open_count <= 0) {
 		pr_err("%s: invalid ld count %d\n", __func__,
@@ -3447,7 +3488,6 @@
 		mutex_unlock(&smux.mutex_lha0);
 		return;
 	}
-	smux.in_reset = 1;
 	--smux.ld_open_count;
 
 	/* Cleanup channels */
@@ -3466,11 +3506,15 @@
 		power_up_uart = 1;
 	smux.power_state = SMUX_PWR_OFF;
 	smux.powerdown_enabled = 0;
+	smux.tx_activity_flag = 0;
+	smux.rx_activity_flag = 0;
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
 	if (power_up_uart)
 		smux_uart_power_on_atomic();
 
+	smux.rx_state = SMUX_RX_IDLE;
+
 	/* Disconnect from TTY */
 	smux.tty = NULL;
 	mutex_unlock(&smux.mutex_lha0);
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index e488a63..4c255a4 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -21,6 +21,7 @@
 #include <linux/completion.h>
 #include <linux/termios.h>
 #include <linux/smux.h>
+#include <mach/subsystem_restart.h>
 #include "smux_private.h"
 
 #define DEBUG_BUFMAX 4096
@@ -207,6 +208,9 @@
 	struct list_head write_events;
 };
 
+static int get_rx_buffer_mock(void *priv, void **pkt_priv,
+		void **buffer, int size);
+
 /**
  * Initialize mock callback data. Only call once.
  *
@@ -673,6 +677,198 @@
 }
 
 /**
+ * Verify Basic Subsystem Restart Support
+ *
+ * Run a basic loopback test followed by a subsystem restart and then another
+ * loopback test.
+ */
+static int smux_ut_remote_ssr_basic(char *buf, int max)
+{
+	const struct test_vector test_data[] = {
+		{"hello\0world\n", sizeof("hello\0world\n")},
+		{0, 0},
+	};
+	int i = 0;
+	int failed = 0;
+	int ret;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	while (!failed) {
+		/* enable remote mode */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+		subsystem_restart("external_modem");
+		msleep(5000);
+		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+		break;
+	}
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+	}
+	return i;
+}
+
+/**
+ * Verify Subsystem Restart Support During Port Open
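+ *
+ * Open the test port, restart the external modem subsystem, verify that
+ * the disconnect/SSR events are delivered, then close the port.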
+ */
+static int smux_ut_remote_ssr_open(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int ret;
+	int i = 0;
+	int failed = 0;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+
+	if (!cb_initialized)
+		mock_cb_data_init(&cb_data);
+
+	mock_cb_data_reset(&cb_data);
+	while (!failed) {
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* open port */
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* restart modem */
+		subsystem_restart("external_modem");
+
+		/* verify SSR events */
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, 5*HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+
+	mock_cb_data_reset(&cb_data);
+
+	return i;
+}
+
+/**
+ * Verify that a pending get_rx_buffer callback retry does not make SSR
+ * wait until all RX buffer retries have timed out.
+ *
+ * @buf  Buffer for status message
+ * @max  Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_remote_ssr_rx_buff_retry(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	int ret;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	if (!cb_initialized)
+		mock_cb_data_init(&cb_data);
+
+	mock_cb_data_reset(&cb_data);
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_REMOTE_LOOPBACK,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+				smux_mock_cb, get_rx_buffer_mock);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* Queue up an RX buffer retry */
+		get_rx_buffer_mock_fail = 1;
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, 0);
+		while (!cb_data.get_rx_buff_retry_count) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+		mock_cb_data_reset(&cb_data);
+
+		/* trigger SSR */
+		subsystem_restart("external_modem");
+
+		/* verify SSR completed */
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, 5*HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+/**
  * Fill test pattern into provided buffer including an optional
  * redzone 16 bytes before and 16 bytes after the buffer.
  *
@@ -1793,6 +1989,12 @@
 			smux_ut_local_get_rx_buff_retry);
 	debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
 			smux_ut_local_get_rx_buff_retry_auto);
+	debug_create("ut_remote_ssr_basic", 0444, dent,
+			smux_ut_remote_ssr_basic);
+	debug_create("ut_remote_ssr_open", 0444, dent,
+			smux_ut_remote_ssr_open);
+	debug_create("ut_remote_ssr_rx_buff_retry", 0444, dent,
+			smux_ut_remote_ssr_rx_buff_retry);
 
 	return 0;
 }
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 18f0721..4d15d4d 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -1957,6 +1957,8 @@
 				mReq->req.length)
 				mEpTemp = &_udc->ep0in;
 			mReq->req.complete(&mEpTemp->ep, &mReq->req);
+			if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+				mReq->req.complete = NULL;
 			spin_lock(mEp->lock);
 		}
 	}
@@ -2803,7 +2805,12 @@
 
 	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
 
-	hw_ep_flush(mEp->num, mEp->dir);
+	if ((mEp->type == USB_ENDPOINT_XFER_CONTROL)) {
+		hw_ep_flush(_udc->ep0out.num, RX);
+		hw_ep_flush(_udc->ep0in.num, TX);
+	} else {
+		hw_ep_flush(mEp->num, mEp->dir);
+	}
 
 	/* pop request */
 	list_del_init(&mReq->queue);
@@ -2821,6 +2828,8 @@
 				mReq->req.length)
 			mEpTemp = &_udc->ep0in;
 		mReq->req.complete(&mEpTemp->ep, &mReq->req);
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mReq->req.complete = NULL;
 		spin_lock(mEp->lock);
 	}
 
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index bb6bb2c..c1e1e13 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -2766,28 +2766,46 @@
 		queue_work(system_nrt_wq, &motg->sm_work);
 }
 
-static irqreturn_t msm_pmic_id_irq(int irq, void *data)
+static void msm_pmic_id_status_w(struct work_struct *w)
 {
-	struct msm_otg *motg = data;
+	struct msm_otg *motg = container_of(w, struct msm_otg,
+						pmic_id_status_work.work);
+	int work = 0;
+	unsigned long flags;
 
-	if (aca_id_turned_on)
-		return IRQ_HANDLED;
-
+	local_irq_save(flags);
 	if (irq_read_line(motg->pdata->pmic_id_irq)) {
-		pr_debug("PMIC: ID set\n");
-		set_bit(ID, &motg->inputs);
+		if (!test_and_set_bit(ID, &motg->inputs)) {
+			pr_debug("PMIC: ID set\n");
+			work = 1;
+		}
 	} else {
-		pr_debug("PMIC: ID clear\n");
-		clear_bit(ID, &motg->inputs);
-		set_bit(A_BUS_REQ, &motg->inputs);
+		if (test_and_clear_bit(ID, &motg->inputs)) {
+			pr_debug("PMIC: ID clear\n");
+			set_bit(A_BUS_REQ, &motg->inputs);
+			work = 1;
+		}
 	}
 
-	if (motg->phy.state != OTG_STATE_UNDEFINED) {
+	if (work && (motg->phy.state != OTG_STATE_UNDEFINED)) {
 		if (atomic_read(&motg->pm_suspended))
 			motg->sm_work_pending = true;
 		else
 			queue_work(system_nrt_wq, &motg->sm_work);
 	}
+	local_irq_restore(flags);
+
+}
+
+#define MSM_PMIC_ID_STATUS_DELAY	5 /* 5msec */
+static irqreturn_t msm_pmic_id_irq(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+
+	if (!aca_id_turned_on)
+		/* schedule delayed work for 5 msec so the ID line state can settle */
+		queue_delayed_work(system_nrt_wq, &motg->pmic_id_status_work,
+				msecs_to_jiffies(MSM_PMIC_ID_STATUS_DELAY));
 
 	return IRQ_HANDLED;
 }
@@ -3402,6 +3420,7 @@
 	msm_otg_init_timer(motg);
 	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
 	INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
+	INIT_DELAYED_WORK(&motg->pmic_id_status_work, msm_pmic_id_status_w);
 	setup_timer(&motg->id_timer, msm_otg_id_timer_func,
 				(unsigned long) motg);
 	ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
@@ -3546,6 +3565,7 @@
 		pm8921_charger_unregister_vbus_sn(0);
 	msm_otg_debugfs_cleanup();
 	cancel_delayed_work_sync(&motg->chg_work);
+	cancel_delayed_work_sync(&motg->pmic_id_status_work);
 	cancel_work_sync(&motg->sm_work);
 
 	pm_runtime_resume(&pdev->dev);
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 2526d76..f599f916 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -763,9 +763,18 @@
 
 static void hdmi_msm_hpd_state_work(struct work_struct *work)
 {
-	boolean hpd_state;
+	boolean hpd_state = false;
 	char *envp[2];
 
+	if (hdmi_msm_state->is_mhl_enabled) {
+		/*
+		 * HPD will be controlled from MHL
+		 */
+		envp[0] = "";
+		DEV_DBG("%s %u\n", envp[0], hpd_state);
+		return;
+	}
+
 	if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
 		!MSM_HDMI_BASE) {
 		DEV_DBG("%s: ignored, probe failed\n", __func__);
@@ -4274,14 +4283,14 @@
 
 	if (trigger_handler) {
 		/* Set HPD state machine: ensure at least 2 readouts */
+		mutex_lock(&external_common_state_hpd_mutex);
 		mutex_lock(&hdmi_msm_state_mutex);
 		hdmi_msm_state->hpd_stable = 0;
 		hdmi_msm_state->hpd_prev_state = TRUE;
-		mutex_lock(&external_common_state_hpd_mutex);
 		external_common_state->hpd_state = FALSE;
-		mutex_unlock(&external_common_state_hpd_mutex);
 		hdmi_msm_state->hpd_cable_chg_detected = TRUE;
 		mutex_unlock(&hdmi_msm_state_mutex);
+		mutex_unlock(&external_common_state_hpd_mutex);
 		mod_timer(&hdmi_msm_state->hpd_state_timer,
 			jiffies + HZ/2);
 	}
@@ -4356,6 +4365,50 @@
 	return 0;
 }
 
+void mhl_connect_api(boolean on)
+{
+	char *envp[2];
+
+	/* Simulating a HPD event based on MHL event */
+	hdmi_msm_state->hpd_cable_chg_detected = FALSE;
+	/* QDSP OFF preceding the HPD event notification */
+	switch_set_state(&external_common_state->sdev, 0);
+	DEV_INFO("Hdmi state switch to %d: %s\n",
+		 external_common_state->sdev.state,  __func__);
+	if (on) {
+		hdmi_msm_read_edid();
+		if (hdmi_msm_has_hdcp())
+			hdmi_msm_state->reauth = FALSE;
+		/* Build EDID table */
+		hdmi_msm_turn_on();
+		DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+		kobject_uevent(external_common_state->uevent_kobj,
+			       KOBJ_ONLINE);
+		hdmi_msm_hdcp_enable();
+		envp[0] = 0;
+		if (!hdmi_msm_has_hdcp()) {
+			/* Send Audio for HDMI Compliance Cases*/
+			envp[0] = "HDCP_STATE=PASS";
+			envp[1] = NULL;
+			DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
+			kobject_uevent_env(external_common_state->uevent_kobj,
+					   KOBJ_CHANGE, envp);
+			switch_set_state(&external_common_state->sdev, 1);
+			DEV_INFO("Hdmi state switch to %d: %s\n",
+				 external_common_state->sdev.state, __func__);
+		}
+	} else {
+		DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n");
+		kobject_uevent(external_common_state->uevent_kobj,
+			       KOBJ_OFFLINE);
+		switch_set_state(&external_common_state->sdev, 0);
+		DEV_INFO("Hdmi state switch to %d: %s\n",
+			 external_common_state->sdev.state,  __func__);
+	}
+}
+EXPORT_SYMBOL(mhl_connect_api);
+
 /* Note that power-off will also be called when the cable-remove event is
  * processed on the user-space and as a result the framebuffer is powered
  * down.  However, we are still required to be able to detect a cable-insert
@@ -4461,6 +4514,8 @@
 		goto error;
 	}
 
+	hdmi_msm_state->is_mhl_enabled = hdmi_msm_state->pd->is_mhl_enabled;
+
 	rc = check_hdmi_features();
 	if (rc) {
 		DEV_ERR("Init FAILED: check_hdmi_features rc=%d\n", rc);
@@ -4678,7 +4733,7 @@
 	}
 
 	external_common_state = &hdmi_msm_state->common;
-	external_common_state->video_resolution = HDMI_VFRMT_1920x1080p60_16_9;
+	external_common_state->video_resolution = HDMI_VFRMT_1920x1080p30_16_9;
 #ifdef CONFIG_FB_MSM_HDMI_3D
 	external_common_state->switch_3d = hdmi_msm_switch_3d;
 #endif
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index 06ebb06..243a27b 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -110,6 +110,7 @@
 	void __iomem *hdmi_io;
 
 	struct external_common_state_type common;
+	boolean is_mhl_enabled;
 };
 
 extern struct hdmi_msm_state_type *hdmi_msm_state;
@@ -134,5 +135,5 @@
 void hdmi_msm_cec_one_touch_play(void);
 void hdmi_msm_cec_msg_send(struct hdmi_msm_cec_msg *msg);
 #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */
-
+void mhl_connect_api(boolean on);
 #endif /* __HDMI_MSM_H__ */
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index f8d54bd..3fd51ba 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -75,7 +75,8 @@
 
 struct mdp_buf_type {
 	struct ion_handle *ihdl;
-	u32 phys_addr;
+	u32 write_addr;
+	u32 read_addr;
 	u32 size;
 };
 
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 1557eed..59404d0 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -337,7 +337,8 @@
 	uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
 	uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
 	struct completion comp;
-	ulong blt_addr; /* blt mode addr */
+	ulong ov_blt_addr; /* blt mode overlay write addr */
+	ulong dma_blt_addr; /* blt mode dma read addr */
 	ulong blt_base;
 	ulong blt_offset;
 	uint32 blt_cnt;
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 4a4716e..d045e69 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -346,7 +346,7 @@
 
 	MDP_OUTP(MDP_BASE + 0xb0004,
 			(pipe->src_height << 16 | pipe->src_width));
-	if (pipe->blt_addr) {
+	if (pipe->dma_blt_addr) {
 		uint32 off, bpp;
 #ifdef BLT_RGB565
 		bpp = 2; /* overlay ouput is RGB565 */
@@ -356,7 +356,7 @@
 		off = 0;
 		if (pipe->ov_cnt & 0x01)
 			off = pipe->src_height * pipe->src_width * bpp;
-		MDP_OUTP(MDP_BASE + 0xb0008, pipe->blt_addr + off);
+		MDP_OUTP(MDP_BASE + 0xb0008, pipe->dma_blt_addr + off);
 		/* RGB888, output of overlay blending */
 		MDP_OUTP(MDP_BASE + 0xb000c, pipe->src_width * bpp);
 	} else {
@@ -424,10 +424,7 @@
 	if (mdp_is_in_isr == FALSE)
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
-	/* dma_p source */
-	MDP_OUTP(MDP_BASE + 0x90004,
-			(pipe->src_height << 16 | pipe->src_width));
-	if (pipe->blt_addr) {
+	if (pipe->dma_blt_addr) {
 #ifdef BLT_RGB565
 		bpp = 2; /* overlay ouput is RGB565 */
 #else
@@ -436,13 +433,16 @@
 		off = 0;
 		if (pipe->dmap_cnt & 0x01)
 			off = pipe->src_height * pipe->src_width * bpp;
-		MDP_OUTP(MDP_BASE + 0x90008, pipe->blt_addr + off);
+		MDP_OUTP(MDP_BASE + 0x90008, pipe->dma_blt_addr + off);
 		/* RGB888, output of overlay blending */
 		MDP_OUTP(MDP_BASE + 0x9000c, pipe->src_width * bpp);
 	} else {
 		MDP_OUTP(MDP_BASE + 0x90008, pipe->srcp0_addr);
 		MDP_OUTP(MDP_BASE + 0x9000c, pipe->srcp0_ystride);
 	}
+	/* dma_p source */
+	MDP_OUTP(MDP_BASE + 0x90004,
+			(pipe->src_height << 16 | pipe->src_width));
 
 	/* dma_p dest */
 	MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
@@ -1321,7 +1321,7 @@
 	/*
 	 * BLT support both primary and external external
 	 */
-	if (pipe->blt_addr) {
+	if (pipe->ov_blt_addr) {
 		int off, bpp;
 #ifdef BLT_RGB565
 		bpp = 2;  /* overlay ouput is RGB565 */
@@ -1338,10 +1338,10 @@
 			if (pipe->ov_cnt & 0x01)
 				off = pipe->src_height * pipe->src_width * bpp;
 
-			outpdw(overlay_base + 0x000c, pipe->blt_addr + off);
+			outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
 			/* overlay ouput is RGB888 */
 			outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
-			outpdw(overlay_base + 0x001c, pipe->blt_addr + off);
+			outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
 			/* MDDI - BLT + on demand */
 			outpdw(overlay_base + 0x0004, 0x08);
 
@@ -1361,19 +1361,19 @@
 							pipe->src_width * bpp;
 
 				outpdw(overlay_base + 0x000c,
-						pipe->blt_addr + off);
+						pipe->ov_blt_addr + off);
 				/* overlay ouput is RGB888 */
 				outpdw(overlay_base + 0x0010,
 					((pipe->src_width << 16) |
 					 pipe->src_width));
 				outpdw(overlay_base + 0x001c,
-						pipe->blt_addr + off);
+						pipe->ov_blt_addr + off);
 				off = pipe->src_height * pipe->src_width;
 				/* align chroma to 2k address */
 				off = (off + 2047) & ~2047;
 				/* UV plane adress */
 				outpdw(overlay_base + 0x0020,
-						pipe->blt_addr + off);
+						pipe->ov_blt_addr + off);
 				/* MDDI - BLT + on demand */
 				outpdw(overlay_base + 0x0004, 0x08);
 				/* pseudo planar + writeback */
@@ -3120,7 +3120,6 @@
 		/* primary interface */
 		ctrl->mixer0_played++;
 		if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
-			mdp4_overlay_reg_flush(pipe, 0);
 			mdp4_overlay_lcdc_start();
 			mdp4_overlay_lcdc_vsync_push(mfd, pipe);
 			if (!mfd->use_ov0_blt &&
@@ -3129,7 +3128,6 @@
 		}
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
-			mdp4_overlay_reg_flush(pipe, 0);
 			mdp4_overlay_dsi_video_start();
 			mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
 			if (!mfd->use_ov0_blt &&
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 8ebf8a0..59976b6 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -162,7 +162,8 @@
 		dsi_pipe = pipe; /* keep it */
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->blt_addr = 0;
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr = 0;
 
 	} else {
 		pipe = dsi_pipe;
@@ -321,24 +322,25 @@
 {
 	unsigned long flag;
 
-	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
-	__func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr, current->pid);
+	pr_debug("%s: blt_end=%d ov_blt_addr=%x pid=%d\n",
+	__func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr, current->pid);
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (mfd->ov0_wb_buf->phys_addr == 0) {
+	if (mfd->ov0_wb_buf->write_addr == 0) {
 		pr_info("%s: no blt_base assigned\n", __func__);
 		return -EBUSY;
 	}
 
-	if (dsi_pipe->blt_addr == 0) {
+	if (dsi_pipe->ov_blt_addr == 0) {
 		mdp4_dsi_cmd_dma_busy_wait(mfd);
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		dsi_pipe->blt_end = 0;
 		dsi_pipe->blt_cnt = 0;
 		dsi_pipe->ov_cnt = 0;
 		dsi_pipe->dmap_cnt = 0;
-		dsi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
+		dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
 		mdp4_stat.blt_dsi_cmd++;
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 		return 0;
@@ -352,10 +354,10 @@
 	unsigned long flag;
 
 
-	pr_debug("%s: blt_end=%d blt_addr=%x\n",
-		 __func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr);
+	pr_debug("%s: blt_end=%d ov_blt_addr=%x\n",
+		 __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr);
 
-	if ((dsi_pipe->blt_end == 0) && dsi_pipe->blt_addr) {
+	if ((dsi_pipe->blt_end == 0) && dsi_pipe->ov_blt_addr) {
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		dsi_pipe->blt_end = 1;	/* mark as end */
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -393,7 +395,7 @@
 	char *overlay_base;
 
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 
@@ -405,7 +407,7 @@
 	off = 0;
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->blt_addr + off;
+	addr = pipe->dma_blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -413,7 +415,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->blt_addr + off;
+	addr2 = pipe->ov_blt_addr + off;
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
 	outpdw(overlay_base + 0x000c, addr2);
@@ -441,7 +443,8 @@
 		spin_unlock(&mdp_spin_lock);
 		if (dsi_pipe->blt_end) {
 			dsi_pipe->blt_end = 0;
-			dsi_pipe->blt_addr = 0;
+			dsi_pipe->dma_blt_addr = 0;
+			dsi_pipe->ov_blt_addr = 0;
 			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
 				__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
 			mdp_intr_mask &= ~INTR_DMA_P_DONE;
@@ -479,7 +482,7 @@
 {
 	int diff;
 
-	if (dsi_pipe->blt_addr == 0) {
+	if (dsi_pipe->ov_blt_addr == 0) {
 		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
 		spin_lock(&mdp_spin_lock);
 		dma->busy = FALSE;
@@ -539,7 +542,7 @@
 		mipi_dsi_mdp_busy_wait(dsi_mfd);
 		mdp4_overlay_update_dsi_cmd(dsi_mfd);
 
-		if (dsi_pipe->blt_addr)
+		if (dsi_pipe->ov_blt_addr)
 			mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
 		mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
 	}
@@ -622,17 +625,17 @@
 	 * to be called before kickoff.
 	 * vice versa for blt disabled.
 	 */
-	if (dsi_pipe->blt_addr && dsi_pipe->blt_cnt == 0)
+	if (dsi_pipe->ov_blt_addr && dsi_pipe->blt_cnt == 0)
 		mdp4_overlay_update_dsi_cmd(mfd); /* first time */
-	else if (dsi_pipe->blt_addr == 0  && dsi_pipe->blt_cnt) {
+	else if (dsi_pipe->ov_blt_addr == 0  && dsi_pipe->blt_cnt) {
 		mdp4_overlay_update_dsi_cmd(mfd); /* last time */
 		dsi_pipe->blt_cnt = 0;
 	}
 
-	pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)dsi_pipe->blt_addr, dsi_pipe->blt_cnt);
+	pr_debug("%s: ov_blt_addr=%d blt_cnt=%d\n",
+		__func__, (int)dsi_pipe->ov_blt_addr, dsi_pipe->blt_cnt);
 
-	if (dsi_pipe->blt_addr)
+	if (dsi_pipe->ov_blt_addr)
 		mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
 
 	mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
@@ -658,7 +661,7 @@
 
 	mipi_dsi_mdp_busy_wait(mfd);
 
-	if (dsi_pipe->blt_addr == 0)
+	if (dsi_pipe->ov_blt_addr == 0)
 		mipi_dsi_cmd_mdp_start();
 
 	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
@@ -666,7 +669,7 @@
 	spin_lock_irqsave(&mdp_spin_lock, flag);
 	mdp_enable_irq(MDP_OVERLAY0_TERM);
 	mfd->dma->busy = TRUE;
-	if (dsi_pipe->blt_addr)
+	if (dsi_pipe->ov_blt_addr)
 		mfd->dma->dmap_busy = TRUE;
 	/* start OVERLAY pipe */
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -700,7 +703,7 @@
 	if (mfd && mfd->panel_power_on) {
 		mdp4_dsi_cmd_dma_busy_wait(mfd);
 
-		if (dsi_pipe && dsi_pipe->blt_addr)
+		if (dsi_pipe && dsi_pipe->ov_blt_addr)
 			mdp4_dsi_blt_dmap_busy_wait(mfd);
 
 		mdp4_overlay_update_dsi_cmd(mfd);
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 3cdd72e..c7c2476 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -152,7 +152,8 @@
 		init_completion(&dsi_video_comp);
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->blt_addr = 0;
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr = 0;
 
 	} else {
 		pipe = dsi_pipe;
@@ -416,7 +417,7 @@
 	char *overlay_base;
 
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 
@@ -428,7 +429,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->blt_addr + off;
+	addr = pipe->ov_blt_addr + off;
 
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
@@ -441,7 +442,7 @@
 	uint32 off, addr;
 	int bpp;
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 
@@ -453,7 +454,7 @@
 	off = 0;
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->blt_addr + off;
+	addr = pipe->dma_blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -529,7 +530,7 @@
 	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
 		return;
 
-	if (dsi_pipe->blt_addr) {
+	if (dsi_pipe->ov_blt_addr) {
 		mdp4_overlay_dsi_video_dma_busy_wait(mfd);
 
 		mdp4_dsi_video_blt_ov_update(dsi_pipe);
@@ -572,7 +573,7 @@
 		mdp4_overlayproc_cfg(dsi_pipe);
 		mdp4_overlay_dmap_xy(dsi_pipe);
 		mdp_is_in_isr = FALSE;
-		if (dsi_pipe->blt_addr) {
+		if (dsi_pipe->ov_blt_addr) {
 			mdp4_dsi_video_blt_ov_update(dsi_pipe);
 			dsi_pipe->ov_cnt++;
 			outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
@@ -595,7 +596,7 @@
 {
 	spin_lock(&mdp_spin_lock);
 	dma->busy = FALSE;
-	if (dsi_pipe->blt_addr == 0) {
+	if (dsi_pipe->ov_blt_addr == 0) {
 		spin_unlock(&mdp_spin_lock);
 		return;
 	}
@@ -618,21 +619,23 @@
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (mfd->ov0_wb_buf->phys_addr == 0) {
+	if (mfd->ov0_wb_buf->write_addr == 0) {
 		pr_info("%s: no blt_base assigned\n", __func__);
 		return;
 	}
 
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && dsi_pipe->blt_addr == 0) {
-		dsi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
+	if (enable && dsi_pipe->ov_blt_addr == 0) {
+		dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
 		dsi_pipe->blt_cnt = 0;
 		dsi_pipe->ov_cnt = 0;
 		dsi_pipe->dmap_cnt = 0;
 		mdp4_stat.blt_dsi_video++;
 		change++;
-	} else if (enable == 0 && dsi_pipe->blt_addr) {
-		dsi_pipe->blt_addr = 0;
+	} else if (enable == 0 && dsi_pipe->ov_blt_addr) {
+		dsi_pipe->ov_blt_addr = 0;
+		dsi_pipe->dma_blt_addr = 0;
 		change++;
 	}
 
@@ -641,8 +644,8 @@
 		return;
 	}
 
-	pr_debug("%s: enable=%d blt_addr=%x\n", __func__,
-			enable, (int)dsi_pipe->blt_addr);
+	pr_debug("%s: enable=%d ov_blt_addr=%x\n", __func__,
+			enable, (int)dsi_pipe->ov_blt_addr);
 	blt_cfg_changed = 1;
 
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -655,7 +658,7 @@
 	data &= 0x01;
 	if (data) {	/* timing generator enabled */
 		mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
-		mdp4_overlay_dsi_video_wait4event(mfd, INTR_PRIMARY_VSYNC);
+		msleep(20);
 	}
 
 
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index e26522b..12448e4 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -377,7 +377,8 @@
 		return -ENODEV;
 
 	mdp4_init_writeback_buf(mfd, MDP4_MIXER1);
-	dtv_pipe->blt_addr = 0;
+	dtv_pipe->ov_blt_addr = 0;
+	dtv_pipe->dma_blt_addr = 0;
 
 	return mdp4_dtv_start(mfd);
 }
@@ -408,7 +409,7 @@
 	int bpp;
 	char *overlay_base;
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
@@ -418,7 +419,7 @@
 	off = (pipe->ov_cnt & 0x01) ?
 		pipe->src_height * pipe->src_width * bpp : 0;
 
-	addr = pipe->blt_addr + off;
+	addr = pipe->ov_blt_addr + off;
 	pr_debug("%s overlay addr 0x%x\n", __func__, addr);
 	/* overlay 1 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
@@ -431,7 +432,7 @@
 	uint32 off, addr;
 	int bpp;
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 #ifdef BLT_RGB565
@@ -441,7 +442,7 @@
 #endif
 	off =  (pipe->dmae_cnt & 0x01) ?
 		pipe->src_height * pipe->src_width * bpp : 0;
-	addr = pipe->blt_addr + off;
+	addr = pipe->dma_blt_addr + off;
 	MDP_OUTP(MDP_BASE + 0xb0008, addr);
 }
 
@@ -464,7 +465,7 @@
 		return;
 	}
 
-	if (dtv_pipe->blt_addr) {
+	if (dtv_pipe->ov_blt_addr) {
 		mdp4_dtv_blt_ov_update(dtv_pipe);
 		dtv_pipe->ov_cnt++;
 		mdp4_overlay_dtv_ov_kick_start();
@@ -524,7 +525,7 @@
 			msecs_to_jiffies(VSYNC_PERIOD * 3));
 	mdp_disable_irq(MDP_OVERLAY1_TERM);
 
-	if (dtv_pipe->blt_addr)
+	if (dtv_pipe->ov_blt_addr)
 		mdp4_overlay_dtv_wait4dmae(mfd);
 }
 
@@ -581,7 +582,7 @@
 {
 	if (!dtv_pipe)
 		return;
-	if (dtv_pipe->blt_addr) {
+	if (dtv_pipe->ov_blt_addr) {
 		mdp4_dtv_blt_dmae_update(dtv_pipe);
 		dtv_pipe->dmae_cnt++;
 	}
@@ -642,7 +643,7 @@
 	unsigned long flag;
 	int change = 0;
 
-	if (!mfd->ov1_wb_buf->phys_addr) {
+	if (!mfd->ov1_wb_buf->write_addr) {
 		pr_debug("%s: no writeback buf assigned\n", __func__);
 		return;
 	}
@@ -654,16 +655,18 @@
 	}
 
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && dtv_pipe->blt_addr == 0) {
-		dtv_pipe->blt_addr = mfd->ov1_wb_buf->phys_addr;
+	if (enable && dtv_pipe->ov_blt_addr == 0) {
+		dtv_pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
+		dtv_pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
 		change++;
 		dtv_pipe->ov_cnt = 0;
 		dtv_pipe->dmae_cnt = 0;
-	} else if (enable == 0 && dtv_pipe->blt_addr) {
-		dtv_pipe->blt_addr = 0;
+	} else if (enable == 0 && dtv_pipe->ov_blt_addr) {
+		dtv_pipe->ov_blt_addr = 0;
+		dtv_pipe->dma_blt_addr = 0;
 		change++;
 	}
-	pr_debug("%s: blt_addr=%x\n", __func__, (int)dtv_pipe->blt_addr);
+	pr_debug("%s: ov_blt_addr=%x\n", __func__, (int)dtv_pipe->ov_blt_addr);
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
 	if (!change)
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 98c8191..3739332 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -133,8 +133,8 @@
 		init_completion(&lcdc_comp);
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->blt_addr = 0;
-
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr = 0;
 	} else {
 		pipe = lcdc_pipe;
 	}
@@ -325,7 +325,7 @@
 	char *overlay_base;
 
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 
@@ -337,7 +337,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->blt_addr + off;
+	addr = pipe->ov_blt_addr + off;
 
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
@@ -350,7 +350,7 @@
 	uint32 off, addr;
 	int bpp;
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 
@@ -362,7 +362,7 @@
 	off = 0;
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->blt_addr + off;
+	addr = pipe->dma_blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -438,7 +438,7 @@
 	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
 		return;
 
-	if (lcdc_pipe->blt_addr) {
+	if (lcdc_pipe->ov_blt_addr) {
 		mdp4_overlay_lcdc_dma_busy_wait(mfd);
 
 		mdp4_lcdc_blt_ov_update(lcdc_pipe);
@@ -485,7 +485,7 @@
 {
 	spin_lock(&mdp_spin_lock);
 	dma->busy = FALSE;
-	if (lcdc_pipe->blt_addr == 0) {
+	if (lcdc_pipe->ov_blt_addr == 0) {
 		spin_unlock(&mdp_spin_lock);
 		return;
 	}
@@ -500,7 +500,7 @@
 {
 	unsigned long flag;
 
-	if (lcdc_pipe->blt_addr) {
+	if (lcdc_pipe->ov_blt_addr) {
 		mdp4_overlay_lcdc_dma_busy_wait(mfd);
 
 		mdp4_lcdc_blt_ov_update(lcdc_pipe);
@@ -530,24 +530,26 @@
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (!mfd->ov0_wb_buf->phys_addr) {
+	if (!mfd->ov0_wb_buf->write_addr) {
 		pr_debug("%s: no blt_base assigned\n", __func__);
 		return;
 	}
 
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && lcdc_pipe->blt_addr == 0) {
-		lcdc_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
+	if (enable && lcdc_pipe->ov_blt_addr == 0) {
+		lcdc_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		lcdc_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
 		change++;
 		lcdc_pipe->blt_cnt = 0;
 		lcdc_pipe->ov_cnt = 0;
 		lcdc_pipe->dmap_cnt = 0;
 		mdp4_stat.blt_lcdc++;
-	} else if (enable == 0 && lcdc_pipe->blt_addr) {
-		lcdc_pipe->blt_addr = 0;
+	} else if (enable == 0 && lcdc_pipe->ov_blt_addr) {
+		lcdc_pipe->ov_blt_addr = 0;
+		lcdc_pipe->dma_blt_addr = 0;
 		change++;
 	}
-	pr_info("%s: blt_addr=%x\n", __func__, (int)lcdc_pipe->blt_addr);
+	pr_info("%s: ov_blt_addr=%x\n", __func__, (int)lcdc_pipe->ov_blt_addr);
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
 	if (!change)
@@ -561,7 +563,7 @@
 
 	mdp4_overlayproc_cfg(lcdc_pipe);
 	mdp4_overlay_dmap_xy(lcdc_pipe);
-	if (lcdc_pipe->blt_addr) {
+	if (lcdc_pipe->ov_blt_addr) {
 		mdp4_overlay_lcdc_prefill(mfd);
 		mdp4_overlay_lcdc_prefill(mfd);
 	}
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index 82864918..c4e6793 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -163,7 +163,8 @@
 
 		MDP_OUTP(MDP_BASE + 0x00098, 0x01);
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->blt_addr = 0;
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr = 0;
 	} else {
 		pipe = mddi_pipe;
 	}
@@ -254,23 +255,25 @@
 	unsigned long flag;
 
 	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
-	__func__, mddi_pipe->blt_end, (int)mddi_pipe->blt_addr, current->pid);
+		__func__, mddi_pipe->blt_end,
+		(int)mddi_pipe->ov_blt_addr, current->pid);
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (mfd->ov0_wb_buf->phys_addr == 0) {
+	if (mfd->ov0_wb_buf->write_addr == 0) {
 		pr_info("%s: no blt_base assigned\n", __func__);
 		return -EBUSY;
 	}
 
-	if (mddi_pipe->blt_addr == 0) {
+	if (mddi_pipe->ov_blt_addr == 0) {
 		mdp4_mddi_dma_busy_wait(mfd);
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		mddi_pipe->blt_end = 0;
 		mddi_pipe->blt_cnt = 0;
 		mddi_pipe->ov_cnt = 0;
 		mddi_pipe->dmap_cnt = 0;
-		mddi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
+		mddi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		mddi_pipe->dma_blt_addr = mfd->ov0_wb_buf->write_addr;
 		mdp4_stat.blt_mddi++;
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 	return 0;
@@ -284,9 +287,9 @@
 	unsigned long flag;
 
 	pr_debug("%s: blt_end=%d blt_addr=%x\n",
-		 __func__, mddi_pipe->blt_end, (int)mddi_pipe->blt_addr);
+		 __func__, mddi_pipe->blt_end, (int)mddi_pipe->ov_blt_addr);
 
-	if ((mddi_pipe->blt_end == 0) && mddi_pipe->blt_addr) {
+	if ((mddi_pipe->blt_end == 0) && mddi_pipe->ov_blt_addr) {
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		mddi_pipe->blt_end = 1;	/* mark as end */
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -323,7 +326,7 @@
 	int bpp;
 	char *overlay_base;
 
-	if (pipe->blt_addr == 0)
+	if (pipe->ov_blt_addr == 0)
 		return;
 
 
@@ -336,7 +339,7 @@
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
 
-	addr = pipe->blt_addr + off;
+	addr = pipe->ov_blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -344,7 +347,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->blt_addr + off;
+	addr2 = pipe->ov_blt_addr + off;
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
 	outpdw(overlay_base + 0x000c, addr2);
@@ -371,7 +374,8 @@
 
 		if (mddi_pipe->blt_end) {
 			mddi_pipe->blt_end = 0;
-			mddi_pipe->blt_addr = 0;
+			mddi_pipe->ov_blt_addr = 0;
+			mddi_pipe->dma_blt_addr = 0;
 			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n", __func__,
 				mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
 			mdp_intr_mask &= ~INTR_DMA_P_DONE;
@@ -406,7 +410,7 @@
 {
 	int diff;
 
-	if (mddi_pipe->blt_addr == 0) {
+	if (mddi_pipe->ov_blt_addr == 0) {
 		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
 		spin_lock(&mdp_spin_lock);
 		dma->busy = FALSE;
@@ -473,7 +477,7 @@
 		mdp4_mddi_dma_busy_wait(mddi_mfd);
 		mdp4_overlay_update_lcd(mddi_mfd);
 
-		if (mddi_pipe->blt_addr)
+		if (mddi_pipe->ov_blt_addr)
 			mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
 		mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
 		mddi_mfd->dma_update_flag = 1;
@@ -539,17 +543,17 @@
 	 * to be called before kickoff.
 	 * vice versa for blt disabled.
 	 */
-	if (mddi_pipe->blt_addr && mddi_pipe->blt_cnt == 0)
+	if (mddi_pipe->ov_blt_addr && mddi_pipe->blt_cnt == 0)
 		mdp4_overlay_update_lcd(mfd); /* first time */
-	else if (mddi_pipe->blt_addr == 0  && mddi_pipe->blt_cnt) {
+	else if (mddi_pipe->ov_blt_addr == 0  && mddi_pipe->blt_cnt) {
 		mdp4_overlay_update_lcd(mfd); /* last time */
 		mddi_pipe->blt_cnt = 0;
 	}
 
 	pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)mddi_pipe->blt_addr, mddi_pipe->blt_cnt);
+		__func__, (int)mddi_pipe->ov_blt_addr, mddi_pipe->blt_cnt);
 
-	if (mddi_pipe->blt_addr)
+	if (mddi_pipe->ov_blt_addr)
 		mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
 	mdp4_mddi_overlay_kickoff(mfd, pipe);
 }
@@ -572,7 +576,7 @@
 	mdp_enable_irq(MDP_OVERLAY0_TERM);
 	spin_lock_irqsave(&mdp_spin_lock, flag);
 	mfd->dma->busy = TRUE;
-	if (mddi_pipe->blt_addr)
+	if (mddi_pipe->ov_blt_addr)
 		mfd->dma->dmap_busy = TRUE;
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 	/* start OVERLAY pipe */
@@ -657,7 +661,7 @@
 
 	mdp_enable_irq(MDP_DMA_S_TERM);
 
-	if (mddi_pipe->blt_addr == 0)
+	if (mddi_pipe->ov_blt_addr == 0)
 		mfd->dma->busy = TRUE;
 
 	mfd->ibuf_flushed = TRUE;
@@ -688,7 +692,7 @@
 	if (mfd && mfd->panel_power_on) {
 		mdp4_mddi_dma_busy_wait(mfd);
 
-		if (mddi_pipe && mddi_pipe->blt_addr)
+		if (mddi_pipe && mddi_pipe->ov_blt_addr)
 			mdp4_mddi_blt_dmap_busy_wait(mfd);
 
 		mdp4_overlay_update_lcd(mfd);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index f426f8c..32fe141 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -272,11 +272,11 @@
 	}
 	mutex_unlock(&mfd->writeback_mutex);
 
-	writeback_pipe->blt_addr = (ulong) (node ? node->addr : NULL);
+	writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
 
-	if (!writeback_pipe->blt_addr) {
+	if (!writeback_pipe->ov_blt_addr) {
 		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
-				(unsigned int)writeback_pipe->blt_addr, node);
+			(unsigned int)writeback_pipe->ov_blt_addr, node);
 		mutex_unlock(&mfd->unregister_mutex);
 		return;
 	}
@@ -324,13 +324,13 @@
 	}
 	mutex_unlock(&mfd->writeback_mutex);
 
-	writeback_pipe->blt_addr = (ulong) (node ? node->addr : NULL);
+	writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
 
 	mutex_lock(&mfd->dma->ov_mutex);
 	pr_debug("%s in writeback\n", __func__);
-	if (writeback_pipe && !writeback_pipe->blt_addr) {
+	if (writeback_pipe && !writeback_pipe->ov_blt_addr) {
 		pr_err("%s: no writeback buffer 0x%x\n", __func__,
-				(unsigned int)writeback_pipe->blt_addr);
+				(unsigned int)writeback_pipe->ov_blt_addr);
 		ret = mdp4_overlay_writeback_update(mfd);
 		if (ret)
 			pr_err("%s: update failed writeback pipe NULL\n",
@@ -351,7 +351,7 @@
 		}
 
 		pr_debug("%s: in writeback pan display 0x%x\n", __func__,
-				(unsigned int)writeback_pipe->blt_addr);
+				(unsigned int)writeback_pipe->ov_blt_addr);
 		mdp4_writeback_kickoff_ui(mfd, writeback_pipe);
 		mdp4_iommu_unmap(writeback_pipe);
 
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 8bd125b..4c0e28f 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -2559,13 +2559,14 @@
 		buf = mfd->ov1_wb_buf;
 
 	buf->ihdl = NULL;
-	buf->phys_addr = 0;
+	buf->write_addr = 0;
+	buf->read_addr = 0;
 }
 
 u32 mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num)
 {
 	struct mdp_buf_type *buf;
-	ion_phys_addr_t	addr;
+	ion_phys_addr_t	addr, read_addr = 0;
 	size_t buffer_size;
 	unsigned long len;
 
@@ -2574,7 +2575,7 @@
 	else
 		buf = mfd->ov1_wb_buf;
 
-	if (buf->phys_addr || !IS_ERR_OR_NULL(buf->ihdl))
+	if (buf->write_addr || !IS_ERR_OR_NULL(buf->ihdl))
 		return 0;
 
 	if (!buf->size) {
@@ -2592,6 +2593,12 @@
 			mfd->mem_hid);
 		if (!IS_ERR_OR_NULL(buf->ihdl)) {
 			if (mdp_iommu_split_domain) {
+				if (ion_map_iommu(mfd->iclient, buf->ihdl,
+					DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K,
+					0, &read_addr, &len, 0, 0)) {
+					pr_err("ion_map_iommu() read failed\n");
+					return -ENOMEM;
+				}
 				if (mfd->mem_hid & ION_SECURE) {
 					if (ion_phys(mfd->iclient, buf->ihdl,
 						&addr, (size_t *)&len)) {
@@ -2612,7 +2619,7 @@
 				if (ion_map_iommu(mfd->iclient, buf->ihdl,
 					DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K,
 					0, &addr, &len, 0, 0)) {
-					pr_err("ion_map_iommu() failed\n");
+					pr_err("ion_map_iommu() write failed\n");
 					return -ENOMEM;
 				}
 			}
@@ -2628,7 +2635,13 @@
 	if (addr) {
 		pr_info("allocating %d bytes at %x for mdp writeback\n",
 			buffer_size, (u32) addr);
-		buf->phys_addr = addr;
+		buf->write_addr = addr;
+
+		if (read_addr)
+			buf->read_addr = read_addr;
+		else
+			buf->read_addr = buf->write_addr;
+
 		return 0;
 	} else {
 		pr_err("%s cannot allocate memory for mdp writeback!\n",
@@ -2652,6 +2665,8 @@
 				if (!(mfd->mem_hid & ION_SECURE))
 					ion_unmap_iommu(mfd->iclient, buf->ihdl,
 						DISPLAY_WRITE_DOMAIN, GEN_POOL);
+				ion_unmap_iommu(mfd->iclient, buf->ihdl,
+					DISPLAY_READ_DOMAIN, GEN_POOL);
 			} else {
 				ion_unmap_iommu(mfd->iclient, buf->ihdl,
 					DISPLAY_READ_DOMAIN, GEN_POOL);
@@ -2662,13 +2677,14 @@
 			buf->ihdl = NULL;
 		}
 	} else {
-		if (buf->phys_addr) {
-			free_contiguous_memory_by_paddr(buf->phys_addr);
+		if (buf->write_addr) {
+			free_contiguous_memory_by_paddr(buf->write_addr);
 			pr_debug("%s:%d free writeback pmem\n", __func__,
 				__LINE__);
 		}
 	}
-	buf->phys_addr = 0;
+	buf->write_addr = 0;
+	buf->read_addr = 0;
 }
 
 static int mdp4_update_pcc_regs(uint32_t offset,
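
Editor's note: the mdp4_util.c hunks above replace the single writeback phys_addr with a write_addr/read_addr pair. On targets where the MDP IOMMU is split into read and write domains, the buffer gets one mapping per domain: the overlay (blend) engine writes through write_addr while DMA_P/DMA_E read back through read_addr; otherwise read_addr simply aliases write_addr. A minimal standalone sketch of that selection logic follows; wb_buf, map_write_domain(), map_read_domain() and the addresses are made-up stand-ins, not the driver's real ion/IOMMU calls. The blt-enable paths in the dsi/lcdc/dtv files above copy exactly this pair into ov_blt_addr and dma_blt_addr.

/* Sketch only: hypothetical helpers, not the driver's real symbols. */
#include <stdbool.h>
#include <stdio.h>

struct wb_buf {
	unsigned long write_addr;	/* blending engine (OV) writes here */
	unsigned long read_addr;	/* DMA_P / DMA_E reads from here    */
};

/* Pretend IOMMU mappings of the same physical buffer in two domains. */
static unsigned long map_write_domain(void) { return 0x40000000UL; }
static unsigned long map_read_domain(void)  { return 0x50000000UL; }

static void wb_buf_setup(struct wb_buf *buf, bool split_domain)
{
	buf->write_addr = map_write_domain();
	/* Single-domain targets reuse the write mapping for reads. */
	buf->read_addr = split_domain ? map_read_domain() : buf->write_addr;
}

int main(void)
{
	struct wb_buf buf;

	wb_buf_setup(&buf, true);
	printf("ov writes to %#lx, dma reads from %#lx\n",
	       buf.write_addr, buf.read_addr);
	return 0;
}
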
diff --git a/drivers/video/msm/mhl/mhl_8334.c b/drivers/video/msm/mhl/mhl_8334.c
index 43280a5..d6e3f6f 100644
--- a/drivers/video/msm/mhl/mhl_8334.c
+++ b/drivers/video/msm/mhl/mhl_8334.c
@@ -27,16 +27,16 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/regulator/consumer.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 
 #include "msm_fb.h"
 #include "external_common.h"
+#include "hdmi_msm.h"
 #include "mhl_8334.h"
 #include "mhl_i2c_utils.h"
 
-#define DEBUG
-
 
 static struct i2c_device_id mhl_sii_i2c_id[] = {
 	{ MHL_DRIVER_NAME, 0 },
@@ -65,6 +65,163 @@
 	.id_table = mhl_sii_i2c_id,
 };
 
+static void mhl_sii_reset_pin(int on)
+{
+	gpio_set_value(mhl_msm_state->mhl_data->gpio_mhl_reset, on);
+	return;
+}
+
+static int mhl_sii_reg_enable(void)
+{
+	static struct regulator *reg_8038_l20;
+	static struct regulator *reg_8038_l11;
+	int rc = 0;
+
+	pr_debug("Inside %s\n", __func__);
+	if (!reg_8038_l20) {
+		reg_8038_l20 = regulator_get(&mhl_msm_state->i2c_client->dev,
+			"mhl_avcc12");
+		if (IS_ERR(reg_8038_l20)) {
+			pr_err("could not get reg_8038_l20, rc = %ld\n",
+				PTR_ERR(reg_8038_l20));
+			return -ENODEV;
+		}
+		rc = regulator_enable(reg_8038_l20);
+		if (rc) {
+			pr_err("'%s' regulator enable failed, rc=%d\n",
+				"mhl_l20", rc);
+			return rc;
+		} else
+		       pr_debug("REGULATOR L20 ENABLED\n");
+	}
+
+	if (!reg_8038_l11) {
+		reg_8038_l11 = regulator_get(&mhl_msm_state->i2c_client->dev,
+			"mhl_iovcc18");
+		if (IS_ERR(reg_8038_l11)) {
+			pr_err("could not get reg_8038_l11, rc = %ld\n",
+				PTR_ERR(reg_8038_l11));
+			return -ENODEV;
+		}
+		rc = regulator_enable(reg_8038_l11);
+		if (rc) {
+			pr_err("'%s' regulator enable failed, rc=%d\n",
+				"mhl_l11", rc);
+			return rc;
+		} else
+			pr_debug("REGULATOR L11 ENABLED\n");
+	}
+
+	return rc;
+}
+
+
+static void mhl_sii_power_on(void)
+{
+	int ret;
+	pr_debug("MHL SII POWER ON\n");
+	if (!mhl_msm_state->mhl_data->gpio_mhl_power) {
+		pr_warn("%s: no power reqd for this platform\n", __func__);
+		return;
+	}
+
+	ret = gpio_request(mhl_msm_state->mhl_data->gpio_mhl_power, "W_PWR");
+	if (ret < 0) {
+		pr_err("MHL_POWER_GPIO req failed: %d\n",
+			ret);
+		return;
+	}
+	ret = gpio_direction_output(mhl_msm_state->mhl_data->gpio_mhl_power,
+		1);
+	if (ret < 0) {
+		pr_err(
+		"SET GPIO MHL_POWER_GPIO direction failed: %d\n",
+			ret);
+		gpio_free(mhl_msm_state->mhl_data->gpio_mhl_power);
+		return;
+	}
+	gpio_set_value(mhl_msm_state->mhl_data->gpio_mhl_power, 1);
+
+	if (mhl_sii_reg_enable())
+		pr_err("Regulator enable failed\n");
+
+	pr_debug("MHL SII POWER ON Successful\n");
+	return;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+static int mhl_sii_gpio_setup(int on)
+{
+	int ret;
+	if (on) {
+		if (mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux) {
+			ret = gpio_request(mhl_msm_state->\
+				mhl_data->gpio_hdmi_mhl_mux, "W_MUX");
+			if (ret < 0) {
+				pr_err("GPIO HDMI_MHL MUX req failed:%d\n",
+					ret);
+				return -EBUSY;
+			}
+			ret = gpio_direction_output(
+				mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux, 0);
+			if (ret < 0) {
+				pr_err("SET GPIO HDMI_MHL dir failed:%d\n",
+					ret);
+				gpio_free(mhl_msm_state->\
+					mhl_data->gpio_hdmi_mhl_mux);
+				return -EBUSY;
+			}
+			msleep(50);
+			gpio_set_value(mhl_msm_state->\
+				mhl_data->gpio_hdmi_mhl_mux, 0);
+			pr_debug("SET GPIO HDMI MHL MUX %d to 0\n",
+				mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux);
+		}
+
+		ret = gpio_request(mhl_msm_state->mhl_data->gpio_mhl_reset,
+			"W_RST#");
+		if (ret < 0) {
+			pr_err("GPIO RESET request failed: %d\n", ret);
+			return -EBUSY;
+		}
+		ret = gpio_direction_output(mhl_msm_state->\
+			mhl_data->gpio_mhl_reset, 1);
+		if (ret < 0) {
+			pr_err("SET GPIO RESET direction failed: %d\n", ret);
+			gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset);
+			gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux);
+			return -EBUSY;
+		}
+		ret = gpio_request(mhl_msm_state->mhl_data->gpio_mhl_int,
+			"W_INT");
+		if (ret < 0) {
+			pr_err("GPIO INT request failed: %d\n", ret);
+			gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset);
+			gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux);
+			return -EBUSY;
+		}
+		ret = gpio_direction_input(mhl_msm_state->\
+			mhl_data->gpio_mhl_int);
+		if (ret < 0) {
+			pr_err("SET GPIO INTR direction failed: %d\n", ret);
+			gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset);
+			gpio_free(mhl_msm_state->mhl_data->gpio_mhl_int);
+			gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux);
+			return -EBUSY;
+		}
+	} else {
+		gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset);
+		gpio_free(mhl_msm_state->mhl_data->gpio_mhl_int);
+		gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux);
+		gpio_free(mhl_msm_state->mhl_data->gpio_mhl_power);
+	}
+
+	return 0;
+}
+
 bool mhl_is_connected(void)
 {
 	return true;
@@ -194,6 +351,11 @@
 
 	/* Power up 1.2V core */
 	mhl_i2c_reg_write(TX_PAGE_L1, 0x003D, 0x3F);
+	/*
+	 * Wait for the source power to be enabled
+	 * before enabling pll clocks.
+	 */
+	msleep(50);
 	/* Enable Tx PLL Clock */
 	mhl_i2c_reg_write(TX_PAGE_2, 0x0011, 0x01);
 	/* Enable Tx Clock Path and Equalizer */
@@ -255,7 +417,7 @@
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0017, 0x82);
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0018, 0x24);
 	/* Pull-up resistance off for IDLE state */
-	mhl_i2c_reg_write(TX_PAGE_3, 0x0013, 0x84);
+	mhl_i2c_reg_write(TX_PAGE_3, 0x0013, 0x8C);
 	/* Enable CBUS Discovery */
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0016, 0x20);
@@ -282,9 +444,11 @@
 	pr_debug("MHL: chip rev ID read=[%x]\n", mhl_msm_state->chip_rev_id);
 
 	/* Reset the TX chip */
-	mhl_msm_state->mhl_data->reset_pin(0);
+	mhl_sii_reset_pin(1);
 	msleep(20);
-	mhl_msm_state->mhl_data->reset_pin(1);
+	mhl_sii_reset_pin(0);
+	msleep(20);
+	mhl_sii_reset_pin(1);
 	/* MHL spec requires a 100 ms wait here.  */
 	msleep(100);
 
@@ -305,34 +469,40 @@
 static int mhl_i2c_probe(struct i2c_client *client,
 	const struct i2c_device_id *id)
 {
-	int ret;
+	int ret = -ENODEV;
 	mhl_msm_state->mhl_data = kzalloc(sizeof(struct msm_mhl_platform_data),
 		GFP_KERNEL);
 	if (!(mhl_msm_state->mhl_data)) {
 		ret = -ENOMEM;
+		pr_err("MHL I2C Probe failed - no mem\n");
 		goto probe_exit;
 	}
-	pr_debug("Inside probe\n");
 	mhl_msm_state->i2c_client = client;
 
 	spin_lock_init(&mhl_state_lock);
 
 	i2c_set_clientdata(client, mhl_msm_state);
 	mhl_msm_state->mhl_data = client->dev.platform_data;
+	pr_debug("MHL: mhl_msm_state->mhl_data->irq=[%d]\n",
+		mhl_msm_state->mhl_data->irq);
 
 	/* Init GPIO stuff here */
-	ret = mhl_msm_state->mhl_data->gpio_setup(1);
+	ret = mhl_sii_gpio_setup(1);
 	if (ret == -1) {
 		pr_err("MHL: mhl_gpio_init has failed\n");
 		ret = -ENODEV;
 		goto probe_exit;
 	}
+
+	mhl_sii_power_on();
+
+	pr_debug("I2C PROBE successful\n");
 	return 0;
 
 probe_exit:
 	if (mhl_msm_state->mhl_data) {
 		/* free the gpios */
-		mhl_msm_state->mhl_data->gpio_setup(0);
+		mhl_sii_gpio_setup(0);
 		kfree(mhl_msm_state->mhl_data);
 		mhl_msm_state->mhl_data = NULL;
 	}
@@ -341,8 +511,8 @@
 
 static int mhl_i2c_remove(struct i2c_client *client)
 {
-	pr_debug("inside i2c remove\n");
-	mhl_msm_state->mhl_data->gpio_setup(0);
+	pr_debug("%s\n", __func__);
+	mhl_sii_gpio_setup(0);
 	kfree(mhl_msm_state->mhl_data);
 	return 0;
 }
@@ -351,6 +521,7 @@
 {
 	int32_t     ret;
 
+	pr_debug("%s\n", __func__);
 	mhl_msm_state = kzalloc(sizeof(struct mhl_msm_state_t), GFP_KERNEL);
 	if (!mhl_msm_state) {
 		pr_err("mhl_msm_init FAILED: out of memory\n");
@@ -366,11 +537,11 @@
 		goto init_exit;
 	} else {
 		if (mhl_msm_state->i2c_client == NULL) {
-			pr_err("JSR: I2C driver add failed\n");
+			pr_err("MHL: I2C driver add failed\n");
 			ret = -ENODEV;
 			goto init_exit;
 		}
-		pr_debug("MHL: I2C driver added\n");
+		pr_info("MHL: I2C driver added\n");
 	}
 
 	/* Request IRQ stuff here */
@@ -385,7 +556,8 @@
 			ret);
 		ret = -EACCES; /* Error code???? */
 		goto init_exit;
-	}
+	} else
+		pr_debug("request_threaded_irq succeeded\n");
 
 	mhl_msm_state->cur_state = POWER_STATE_D0_MHL;
 
@@ -482,7 +654,8 @@
 	uint8_t val;
 	unsigned long flags;
 
-	pr_err("%s: cur state = [0x%x]\n", __func__, mhl_msm_state->cur_state);
+	pr_debug("%s: cur state = [0x%x]\n", __func__,
+		mhl_msm_state->cur_state);
 
 	if (mhl_msm_state->cur_state == POWER_STATE_D0_MHL) {
 		/* Already in D0 - MHL power state */
@@ -510,7 +683,6 @@
 static void mhl_msm_disconnection(void)
 {
 	uint8_t reg;
-
 	/* Clear interrupts - REG INTR4 */
 	reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
@@ -594,7 +766,10 @@
 	 * a previous interrupt brought us here,
 	 * do nothing.
 	 */
-	pr_debug("MHL: MRR Interrupt status is = %02X\n", (int) status);
+	if ((0x00 == status) && (mhl_msm_state->cur_state == POWER_STATE_D3)) {
+		mhl_chip_init();
+		return;
+	}
 	if (0xFF != status) {
 		if ((status & BIT0) && (mhl_msm_state->chip_rev_id < 1)) {
 			uint8_t tmds_cstat;
@@ -624,23 +799,25 @@
 		}
 
 		if (status & BIT1)
-			pr_err("MHL: INT4 BIT1 is set\n");
+			pr_debug("MHL: INT4 BIT1 is set\n");
 
 		/* MHL_EST interrupt */
 		if (status & BIT2) {
-			pr_err("MHL: Calling mhl_msm_connection() from ISR\n");
+			pr_debug("mhl_msm_connection() from ISR\n");
+			mhl_connect_api(true);
 			mhl_msm_connection();
-			pr_err("MHL Connect  Drv: INT4 Status = %02X\n",
+			pr_debug("MHL Connect  Drv: INT4 Status = %02X\n",
 				(int) status);
 		} else if (status & BIT3) {
-			pr_err("MHL: uUSB-A type device detected.\n");
+			pr_debug("MHL: uUSB-A type device detected.\n");
 			mhl_i2c_reg_write(TX_PAGE_3, 0x001C, 0x80);
 			switch_mode(POWER_STATE_D3);
 		}
 
 		if (status & BIT5) {
+			mhl_connect_api(false);
 			mhl_msm_disconnection();
-			pr_err("MHL Disconnect Drv: INT4 Status = %02X\n",
+			pr_debug("MHL Disconn Drv: INT4 Status = %02X\n",
 				(int)status);
 		}
 
@@ -734,7 +911,7 @@
 	if (regval)
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x08, regval);
 
-	pr_err("%s: CBUS_INT = %02x\n", __func__, regval);
+	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);
 
 	/* MSC_MSG (RCP/RAP) */
 	if (regval & BIT(3)) {
@@ -755,7 +932,7 @@
 	if (regval)
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x1E, regval);
 
-	pr_err("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
+	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
 
 	/* received SET_INT */
 	if (regval & BIT(2)) {
@@ -774,9 +951,9 @@
 	if (regval & BIT(3)) {
 		uint8_t stat;
 		stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB0);
-		pr_err("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
+		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
 		stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB1);
-		pr_err("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
+		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
 
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB0, 0xFF);
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB1, 0xFF);
@@ -797,12 +974,6 @@
 static irqreturn_t mhl_tx_isr(int irq, void *dev_id)
 {
 	/*
-	 * Check discovery interrupt
-	 * if not yet connected
-	 */
-	pr_debug("MHL: Current POWER state is [0x%x]\n",
-		mhl_msm_state->cur_state);
-	/*
 	 * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
 	 * interrupts. In D3, we get only RGND
 	 */
diff --git a/drivers/video/msm/mhl/mhl_8334.h b/drivers/video/msm/mhl/mhl_8334.h
index c1d9030..eba544a 100644
--- a/drivers/video/msm/mhl/mhl_8334.h
+++ b/drivers/video/msm/mhl/mhl_8334.h
@@ -21,9 +21,6 @@
 #include "mhl_devcap.h"
 #include "mhl_defs.h"
 
-#define GPIO_MHL_RESET       15
-#define GPIO_MHL_INT         4
-
 #define MHL_DEVICE_NAME "sii8334"
 #define MHL_DRIVER_NAME "sii8334"
 
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.c b/drivers/video/msm/mhl/mhl_i2c_utils.c
index 596af2e..aab6e02 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.c
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.c
@@ -15,8 +15,6 @@
 #include "mhl_i2c_utils.h"
 #include "mhl_8334.h"
 
-#define DEBUG
-
 uint8_t slave_addrs[MAX_PAGES] = {
 	DEV_PAGE_TPI_0    ,
 	DEV_PAGE_TX_L0_0  ,
@@ -60,7 +58,7 @@
 		pr_err("I2C READ FAILED=[%d]\n", ret);
 		return -EACCES;
 	}
-	pr_err("Buffer is [%x]\n", buffer);
+	pr_debug("Buffer is [%x]\n", buffer);
 	return buffer;
 }
 
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 3c082e4..0c6aa86 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -310,6 +310,7 @@
 	u32  header_in_start;
 	u32  min_dpb_num;
 	u32  y_cb_cr_size;
+	u32  yuv_size;
 	u32  dynamic_prop_change;
 	u32  dynmic_prop_change_req;
 	u32  flush_pending;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
index 8a33512..db8a777 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
@@ -64,6 +64,7 @@
 #define DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE	(0xCB8)
 #define DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE	(0x2355)
 #define DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE	(0x3500)
+#define DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER  (4)
 
 #define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1))
 
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index d7ebd54..949e5c0 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -1087,8 +1087,39 @@
 void ddl_set_vidc_timeout(struct ddl_client_context *ddl)
 {
 	u32 vidc_time_out = 0;
+	s32 multiplier = 1;
+	u32 temp = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct vcd_frame_data *ip_bitstream = &(ddl->input_frame.vcd_frm);
+
 	if (ddl->codec_data.decoder.idr_only_decoding)
 		vidc_time_out = 2 * DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+	else {
+		vidc_time_out = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+		multiplier = decoder->yuv_size - (ip_bitstream->data_len +
+						(ip_bitstream->data_len / 2));
+		if (multiplier <= 0) {
+			multiplier = decoder->yuv_size - ip_bitstream->data_len;
+			if (multiplier <= 0) {
+				if (ip_bitstream->data_len)
+					multiplier =
+					DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER;
+			}
+		}
+		if (multiplier == DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER)
+			vidc_time_out = vidc_time_out *
+				DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER;
+		else if (multiplier > 1) {
+			temp = (decoder->yuv_size * 1000) / multiplier;
+			temp = (temp * vidc_time_out) / 1000;
+			if (temp > (u32)(vidc_time_out *
+				DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER))
+				vidc_time_out = vidc_time_out *
+					DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER;
+			else
+				vidc_time_out = temp;
+		}
+	}
 	DDL_MSG_HIGH("%s Video core time out value = 0x%x",
 		 __func__, vidc_time_out);
 	vidc_sm_set_video_core_timeout_value(
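
Editor's note: the new ddl_set_vidc_timeout() logic scales the 200 MHz base timeout by how small the compressed input is relative to the frame's YUV size (width * height * 3/2, as computed in the interrupt-handler hunks below), and caps the result at 4x the base value. A standalone re-derivation of that arithmetic is given below; vidc_timeout() is a stand-in name and the frame/bitstream sizes in main() are example numbers only.

#include <stdint.h>
#include <stdio.h>

#define BASE_TIMEOUT	0x3500u	/* DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE */
#define MAX_MULTIPLIER	4	/* DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER */

static uint32_t vidc_timeout(uint32_t yuv_size, uint32_t data_len, int idr_only)
{
	uint32_t timeout = BASE_TIMEOUT;
	uint32_t temp;
	int32_t multiplier;

	if (idr_only)
		return 2 * BASE_TIMEOUT;

	/* Headroom between the decoded frame size and the bitstream size. */
	multiplier = yuv_size - (data_len + data_len / 2);
	if (multiplier <= 0) {
		multiplier = yuv_size - data_len;
		if (multiplier <= 0 && data_len)
			multiplier = MAX_MULTIPLIER;
	}

	if (multiplier == MAX_MULTIPLIER) {
		timeout *= MAX_MULTIPLIER;
	} else if (multiplier > 1) {
		temp = (yuv_size * 1000u) / (uint32_t)multiplier;
		temp = (temp * timeout) / 1000u;
		timeout = (temp > timeout * MAX_MULTIPLIER) ?
				timeout * MAX_MULTIPLIER : temp;
	}
	return timeout;
}

int main(void)
{
	/* Example only: a 1080p 4:2:0 frame (w*h*3/2) and a ~200 KB input. */
	uint32_t yuv = 1920u * 1080u * 3u / 2u;

	printf("timeout = 0x%x (base 0x%x)\n",
	       (unsigned)vidc_timeout(yuv, 200000u, 0), (unsigned)BASE_TIMEOUT);
	return 0;
}
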
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 58d1f23..6571245 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -189,6 +189,9 @@
 			&decoder->frame_size.height);
 		progressive = seq_hdr_info->dec_progressive;
 	}
+	decoder->yuv_size = decoder->frame_size.width *
+				decoder->frame_size.height;
+	decoder->yuv_size += decoder->yuv_size / 2;
 	decoder->min_dpb_num = seq_hdr_info->min_num_dpb;
 	vidc_sm_get_min_yc_dpb_sizes(
 		&ddl->shared_mem[ddl->command_channel],
@@ -1266,6 +1269,9 @@
 			decoder->frame_size =
 				 output_vcd_frm->dec_op_prop.frm_size;
 			decoder->client_frame_size = decoder->frame_size;
+			decoder->yuv_size = decoder->frame_size.width *
+						decoder->frame_size.height;
+			decoder->yuv_size += decoder->yuv_size / 2;
 			decoder->y_cb_cr_size =
 				ddl_get_yuv_buffer_size(&decoder->frame_size,
 					&decoder->buf_format,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index 14e1331..a6001eb 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -1077,6 +1077,7 @@
 		decoder->flush_pending = false;
 	} else
 		dec_param.dpb_flush = false;
+	ddl_set_vidc_timeout(ddl);
 	vidc_sm_set_frame_tag(&ddl->shared_mem[ddl->command_channel],
 		bit_stream->ip_frm_tag);
 	if (ddl_context->pix_cache_enable) {
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 4ff1147..fbffdd2 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -107,7 +107,7 @@
 
 /* This needs to be modified manually now, when we add
  a new RANGE of SSIDs to the msg_mask_tbl */
-#define MSG_MASK_TBL_CNT		23
+#define MSG_MASK_TBL_CNT		24
 #define EVENT_LAST_ID			0x08AD
 
 #define MSG_SSID_0			0
@@ -156,6 +156,8 @@
 #define MSG_SSID_21_LAST		10300
 #define MSG_SSID_22			10350
 #define MSG_SSID_22_LAST		10361
+#define MSG_SSID_23			0xC000
+#define MSG_SSID_23_LAST		0xC063
 
 struct diagpkt_delay_params {
 	void *rsp_ptr;
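
Editor's note: as the comment above warns, MSG_MASK_TBL_CNT has to be bumped by hand (here 23 to 24) whenever a new SSID range such as MSG_SSID_23..MSG_SSID_23_LAST is appended to msg_mask_tbl. Below is a sketch of how the count could instead be derived from the table itself; the entry layout is an assumption, since msg_mask_tbl is not part of this diff, and only the two ranges visible above are filled in.

#include <stdio.h>

/* Assumed shape of one msg_mask_tbl entry; the real table lives in the
 * diag driver and is not shown in this patch. */
struct ssid_range {
	unsigned int ssid_first;
	unsigned int ssid_last;
};

static const struct ssid_range msg_mask_tbl[] = {
	/* ... ranges 0 through 21 elided ... */
	{ 10350,  10361  },	/* MSG_SSID_22 .. MSG_SSID_22_LAST */
	{ 0xC000, 0xC063 },	/* MSG_SSID_23 .. MSG_SSID_23_LAST */
};

/* Derived automatically, so adding a range cannot be forgotten here. */
#define MSG_MASK_TBL_CNT  (sizeof(msg_mask_tbl) / sizeof(msg_mask_tbl[0]))

int main(void)
{
	printf("ranges tracked: %zu\n", MSG_MASK_TBL_CNT);
	return 0;
}
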
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 2186903..fca8700 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -67,6 +67,7 @@
  * @min_voltage:	the voltage (mV) where charging method switches from
  *			trickle to fast. This is also the minimum voltage the
  *			system operates at
+ * @uvd_thresh_voltage:	the USB falling UVD threshold (mV) (PM8917 only)
  * @resume_voltage_delta:	the (mV) drop to wait for before resume charging
  *				after the battery has been fully charged
  * @term_current:	the charger current (mA) at which EOC happens
@@ -121,6 +122,7 @@
 	unsigned int			update_time;
 	unsigned int			max_voltage;
 	unsigned int			min_voltage;
+	unsigned int			uvd_thresh_voltage;
 	unsigned int			resume_voltage_delta;
 	unsigned int			term_current;
 	int				cool_temp;
@@ -301,6 +303,10 @@
 {
 	return -ENXIO;
 }
+static inline int pm8917_set_under_voltage_detection_threshold(int mv)
+{
+	return -ENXIO;
+}
 static inline int pm8921_disable_input_current_limit(bool disable)
 {
 	return -ENXIO;
diff --git a/include/linux/mfd/wcd9xxx/pdata.h b/include/linux/mfd/wcd9xxx/pdata.h
index ba71293..1b7706b 100644
--- a/include/linux/mfd/wcd9xxx/pdata.h
+++ b/include/linux/mfd/wcd9xxx/pdata.h
@@ -16,6 +16,9 @@
 
 #include <linux/slimbus/slimbus.h>
 
+#define MICBIAS_EXT_BYP_CAP 0x00
+#define MICBIAS_NO_EXT_BYP_CAP 0x01
+
 #define SITAR_LDOH_1P95_V 0x0
 #define SITAR_LDOH_2P35_V 0x1
 #define SITAR_LDOH_2P75_V 0x2
@@ -99,10 +102,19 @@
 	u32 cfilt1_mv; /* in mv */
 	u32 cfilt2_mv; /* in mv */
 	u32 cfilt3_mv; /* in mv */
+	/* Different WCD9xxx series codecs may not
+	 * have 4 mic biases. If a codec has fewer
+	 * mic biases, some of these properties will
+	 * not be used.
+	 */
 	u8 bias1_cfilt_sel;
 	u8 bias2_cfilt_sel;
 	u8 bias3_cfilt_sel;
 	u8 bias4_cfilt_sel;
+	u8 bias1_cap_mode;
+	u8 bias2_cap_mode;
+	u8 bias3_cap_mode;
+	u8 bias4_cap_mode;
 };
 
 struct wcd9xxx_ocp_setting {
diff --git a/include/linux/mfd/wcd9xxx/wcd9304_registers.h b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
index 53ae67b..f7c483c 100644
--- a/include/linux/mfd/wcd9xxx/wcd9304_registers.h
+++ b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
@@ -590,6 +590,36 @@
 #define SITAR_A_CDC_IIR1_COEF_B4_CTL__POR		(0x00000000)
 #define SITAR_A_CDC_IIR1_COEF_B5_CTL			(0x34E)
 #define SITAR_A_CDC_IIR1_COEF_B5_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B1_CTL			(0x350)
+#define SITAR_A_CDC_IIR2_GAIN_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B2_CTL			(0x351)
+#define SITAR_A_CDC_IIR2_GAIN_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B3_CTL			(0x352)
+#define SITAR_A_CDC_IIR2_GAIN_B3_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B4_CTL			(0x353)
+#define SITAR_A_CDC_IIR2_GAIN_B4_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B5_CTL			(0x354)
+#define SITAR_A_CDC_IIR2_GAIN_B5_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B6_CTL			(0x355)
+#define SITAR_A_CDC_IIR2_GAIN_B6_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B7_CTL			(0x356)
+#define SITAR_A_CDC_IIR2_GAIN_B7_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B8_CTL			(0x357)
+#define SITAR_A_CDC_IIR2_GAIN_B8_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_CTL				(0x358)
+#define SITAR_A_CDC_IIR2_CTL__POR			(0x00000040)
+#define SITAR_A_CDC_IIR2_GAIN_TIMER_CTL			(0x359)
+#define SITAR_A_CDC_IIR2_GAIN_TIMER_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B1_CTL			(0x35A)
+#define SITAR_A_CDC_IIR2_COEF_B1_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B2_CTL			(0x35B)
+#define SITAR_A_CDC_IIR2_COEF_B2_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B3_CTL			(0x35C)
+#define SITAR_A_CDC_IIR2_COEF_B3_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B4_CTL			(0x35D)
+#define SITAR_A_CDC_IIR2_COEF_B4_CTL__POR		(0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B5_CTL			(0x35E)
+#define SITAR_A_CDC_IIR2_COEF_B5_CTL__POR		(0x00000000)
 #define SITAR_A_CDC_TOP_GAIN_UPDATE			(0x360)
 #define SITAR_A_CDC_TOP_GAIN_UPDATE__POR		(0x00000000)
 #define SITAR_A_CDC_TOP_RDAC_DOUT_CTL			(0x361)
diff --git a/include/linux/msm_rotator.h b/include/linux/msm_rotator.h
index 463e5ce..17ae867 100644
--- a/include/linux/msm_rotator.h
+++ b/include/linux/msm_rotator.h
@@ -31,6 +31,7 @@
 	unsigned char   rotations;
 	int enable;
 	unsigned int	downscale_ratio;
+	unsigned int secure;
 };
 
 struct msm_rotator_data_info {
diff --git a/include/linux/qpnp/pwm.h b/include/linux/qpnp/pwm.h
new file mode 100644
index 0000000..de89a37
--- /dev/null
+++ b/include/linux/qpnp/pwm.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_PWM_H__
+#define __QPNP_PWM_H__
+
+#include <linux/pwm.h>
+
+/* usec: 19.2M, n=6, m=0, pre=2 */
+#define PM_PWM_PERIOD_MIN			7
+/* 1K, n=9, m=7, pre=6 */
+#define PM_PWM_PERIOD_MAX			(384 * USEC_PER_SEC)
+#define PM_PWM_LUT_RAMP_STEP_TIME_MAX		499
+#define PM_PWM_MAX_PAUSE_CNT			8191
+/*
+ * Formula from HSID,
+ * pause_time (hi/lo) = (pause_code - 1)*(duty_ms)
+ */
+#define PM_PWM_LUT_PAUSE_MAX \
+	((PM_PWM_MAX_PAUSE_CNT - 1) * PM_PWM_LUT_RAMP_STEP_TIME_MAX) /* ms */
+
+/* Flags for Look Up Table */
+#define PM_PWM_LUT_LOOP			0x01
+#define PM_PWM_LUT_RAMP_UP		0x02
+#define PM_PWM_LUT_REVERSE		0x04
+#define PM_PWM_LUT_PAUSE_HI_EN		0x08
+#define PM_PWM_LUT_PAUSE_LO_EN		0x10
+
+#define PM_PWM_LUT_NO_TABLE		0x20
+#define PM_PWM_LUT_USE_RAW_VALUE	0x40
+
+/*
+ * PWM frequency/period control
+ *
+ * PWM Frequency = ClockFrequency / (N * T)
+ *   or
+ * PWM Period = Clock Period * (N * T)
+ *   where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, m = 0..7 (exponent)
+ */
+
+/*
+ * enum pm_pwm_size - PWM bit mode selection
+ * %PM_PWM_SIZE_6BIT - Select 6 bit mode; 64 levels
+ * %PM_PWM_SIZE_9BIT - Select 9 bit mode; 512 levels
+ */
+enum pm_pwm_size {
+	PM_PWM_SIZE_6BIT =	6,
+	PM_PWM_SIZE_9BIT =	9,
+};
+
+/*
+ * enum pm_pwm_clk - PWM clock selection
+ * %PM_PWM_CLK_1KHZ - 1KHz clock
+ * %PM_PWM_CLK_32KHZ - 32KHz clock
+ * %PM_PWM_CLK_19P2MHZ - 19.2MHz clock
+ * Note: Here 1KHz = 1024Hz
+ */
+enum pm_pwm_clk {
+	PM_PWM_CLK_1KHZ,
+	PM_PWM_CLK_32KHZ,
+	PM_PWM_CLK_19P2MHZ,
+};
+
+/* PWM pre-divider selection */
+enum pm_pwm_pre_div {
+	PM_PWM_PDIV_2,
+	PM_PWM_PDIV_3,
+	PM_PWM_PDIV_5,
+	PM_PWM_PDIV_6,
+};
+
+/*
+ * struct pwm_period_config - PWM period configuration
+ * @pwm_size: enum pm_pwm_size
+ * @clk: enum pm_pwm_clk
+ * @pre_div: enum pm_pwm_pre_div
+ * @pre_div_exp: exponent of 2 as part of pre-divider: 0..7
+ */
+struct pwm_period_config {
+	enum pm_pwm_size	pwm_size;
+	enum pm_pwm_clk		clk;
+	enum pm_pwm_pre_div	pre_div;
+	int			pre_div_exp;
+};
+
+/*
+ * struct pwm_duty_cycles - PWM duty cycle info
+ * @duty_pcts: pointer to an array of duty percentages for a PWM period
+ * @num_duty_pcts: total entries in the duty_pcts array
+ * @duty_ms: duty cycle time in ms
+ * @start_idx: start index in the LUT
+ */
+struct pwm_duty_cycles {
+	int *duty_pcts;
+	int num_duty_pcts;
+	int duty_ms;
+	int start_idx;
+};
+
+int pwm_config_period(struct pwm_device *pwm,
+			     struct pwm_period_config *pwm_p);
+
+int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value);
+
+/*
+ * lut_params: Lookup table (LUT) parameters
+ * @start_idx: start index in lookup table from 0 to MAX-1
+ * @idx_len: number of indices
+ * @lut_pause_hi: pause time in milliseconds at the high index
+ * @lut_pause_lo: pause time in milliseconds at the low index
+ * @ramp_step_ms: time in milliseconds before loading the next LUT pattern
+ * @flags: control flags
+ */
+struct lut_params {
+	int start_idx;
+	int idx_len;
+	int lut_pause_hi;
+	int lut_pause_lo;
+	int ramp_step_ms;
+	int flags;
+};
+
+int pwm_lut_config(struct pwm_device *pwm, int period_us,
+		int duty_pct[], struct lut_params lut_params);
+
+int pwm_lut_enable(struct pwm_device *pwm, int start);
+
+/* Standard APIs supported */
+/*
+ * pwm_request - request a PWM device
+ * @pwm_id: PWM id or channel
+ * @label: the label to identify the user
+ */
+
+/*
+ * pwm_free - free a PWM device
+ * @pwm: the PWM device
+ */
+
+/*
+ * pwm_config - change a PWM device configuration
+ * @pwm: the PWM device
+ * @period_us: period in microsecond
+ * @duty_us: duty cycle in microsecond
+ */
+
+/*
+ * pwm_enable - start a PWM output toggling
+ * @pwm: the PWM device
+ */
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ * @pwm: the PWM device
+ */
+
+#endif /* __QPNP_PWM_H__ */
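
Editor's note: the period comments at the top of this new header can be checked numerically against PWM Period = Clock Period * N * T, with T = pre-divide * 2^m and the 1 KHz clock meaning 1024 Hz. The small standalone calculator below (not driver code; period_usec() is a made-up helper) reproduces the PM_PWM_PERIOD_MIN and PM_PWM_PERIOD_MAX figures defined above.

#include <stdio.h>

static double period_usec(double clk_hz, int pwm_bits, int pre_div, int m)
{
	double n = (pwm_bits == 9) ? 512.0 : 64.0;	/* 2^9 or 2^6 levels */
	double t = (double)pre_div * (double)(1 << m);	/* pre-divide * 2^m  */

	return (1e6 / clk_hz) * n * t;
}

int main(void)
{
	/* Fastest setting: 19.2 MHz, 6-bit, pre-divide 2, m = 0 -> ~6.7 us,
	 * which the header rounds up to PM_PWM_PERIOD_MIN = 7 us. */
	printf("min period ~ %.1f us\n", period_usec(19.2e6, 6, 2, 0));

	/* Slowest setting: 1024 Hz, 9-bit, pre-divide 6, m = 7 -> 384 s,
	 * matching PM_PWM_PERIOD_MAX = 384 * USEC_PER_SEC. */
	printf("max period ~ %.0f s\n", period_usec(1024.0, 9, 6, 7) / 1e6);
	return 0;
}
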
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 68c1ffc..c0a23a3 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -322,6 +322,7 @@
 	int async_int;
 	unsigned cur_power;
 	struct delayed_work chg_work;
+	struct delayed_work pmic_id_status_work;
 	enum usb_chg_state chg_state;
 	enum usb_chg_type chg_type;
 	u8 dcd_retries;
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 81b6a40..48058e6 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -195,6 +195,24 @@
 #define MSM_CAM_IOCTL_ISPIF_IO_CFG \
 	_IOR(MSM_CAM_IOCTL_MAGIC, 54, struct ispif_cfg_data *)
 
+#define MSM_CAM_IOCTL_STATS_REQBUF \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 55, struct msm_stats_reqbuf *)
+
+#define MSM_CAM_IOCTL_STATS_ENQUEUEBUF \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 56, struct msm_stats_buf_info *)
+
+#define MSM_CAM_IOCTL_STATS_FLUSH_BUFQ \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 57, struct msm_stats_flush_bufq *)
+
+struct msm_stats_reqbuf {
+	int num_buf;		/* how many buffers requested */
+	int stats_type;	/* stats type */
+};
+
+struct msm_stats_flush_bufq {
+	int stats_type;	/* enum msm_stats_enum_type */
+};
+
 struct msm_mctl_pp_cmd {
 	int32_t  id;
 	uint16_t length;
@@ -515,6 +533,36 @@
 #define FRAME_RAW_SNAPSHOT		4
 #define FRAME_MAX			5
 
+enum msm_stats_enum_type {
+	MSM_STATS_TYPE_AEC, /* legacy based AEC */
+	MSM_STATS_TYPE_AF,  /* legacy based AF */
+	MSM_STATS_TYPE_AWB, /* legacy based AWB */
+	MSM_STATS_TYPE_RS,  /* legacy based RS */
+	MSM_STATS_TYPE_CS,  /* legacy based CS */
+	MSM_STATS_TYPE_IHIST,   /* legacy based HIST */
+	MSM_STATS_TYPE_SKIN,    /* legacy based SKIN */
+	MSM_STATS_TYPE_BG,  /* Bayer Grids */
+	MSM_STATS_TYPE_BF,  /* Bayer Focus */
+	MSM_STATS_TYPE_BHIST,   /* Bayer Hist */
+	MSM_STATS_TYPE_AE_AW,   /* legacy stats for vfe 2.x */
+	MSM_STATS_TYPE_MAX  /* MAX */
+};
+
+struct msm_stats_buf_info {
+	int type; /* msm_stats_enum_type */
+	int fd;
+	void *vaddr;
+	uint32_t offset;
+	uint32_t len;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+	uint32_t planar0_off;
+	uint32_t planar1_off;
+	uint32_t planar2_off;
+	uint8_t active;
+	int buf_idx;
+};
+
 struct msm_pmem_info {
 	int type;
 	int fd;
@@ -678,6 +726,7 @@
 	int length;
 	struct ion_handle *handle;
 	uint32_t frame_id;
+	int buf_idx;
 };
 #define MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT 0
 /* video capture mode in VIDIOC_S_PARM */
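
Editor's note: the three stats IOCTLs added to this header let userspace manage VFE stats buffers: MSM_CAM_IOCTL_STATS_REQBUF declares how many buffers of a given msm_stats_enum_type will be used, each buffer is then described with msm_stats_buf_info and handed over via MSM_CAM_IOCTL_STATS_ENQUEUEBUF, and MSM_CAM_IOCTL_STATS_FLUSH_BUFQ drains the queue for one stats type (interpretation based on the struct fields and names above). A hedged userspace sketch of the first step follows; the device-node path is a placeholder, request_bg_stats_buffers() is a made-up name, and the include assumes this header is exported to userspace.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <media/msm_camera.h>	/* assumes the UAPI copy of this header */

int request_bg_stats_buffers(const char *dev_node, int num_buf)
{
	struct msm_stats_reqbuf req = {
		.num_buf    = num_buf,
		.stats_type = MSM_STATS_TYPE_BG,	/* Bayer Grid stats */
	};
	int fd = open(dev_node, O_RDWR);	/* e.g. the camera config node */

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (ioctl(fd, MSM_CAM_IOCTL_STATS_REQBUF, &req) < 0) {
		perror("MSM_CAM_IOCTL_STATS_REQBUF");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
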
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 6547795..1f3527b 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -203,6 +203,9 @@
 #define VFE_CMD_CAPTURE_RAW                             136
 #define VFE_CMD_STOP_LIVESHOT                           137
 #define VFE_CMD_RECONFIG_VFE                            138
+#define VFE_CMD_STATS_REQBUF                            139
+#define VFE_CMD_STATS_ENQUEUEBUF                        140
+#define VFE_CMD_STATS_FLUSH_BUFQ                        141
 
 struct msm_isp_cmd {
 	int32_t  id;
diff --git a/include/sound/q6afe.h b/include/sound/q6afe.h
index f93af1f..a7264e8 100644
--- a/include/sound/q6afe.h
+++ b/include/sound/q6afe.h
@@ -95,8 +95,7 @@
 int afe_unregister_get_events(u16 port_id);
 int afe_rt_proxy_port_write(u32 buf_addr_p, int bytes);
 int afe_rt_proxy_port_read(u32 buf_addr_p, int bytes);
-int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
-	u32 rate);
+int afe_port_start(u16 port_id, union afe_port_config *afe_config, u32 rate);
 int afe_port_stop_nowait(int port_id);
 int afe_apply_gain(u16 port_id, u16 gain);
 int afe_q6_interface_prepare(void);
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index 84e3150..d38dbd5 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -158,6 +158,7 @@
 	void			*priv;
 	uint32_t         io_mode;
 	uint64_t         time_stamp;
+	atomic_t         cmd_response;
 };
 
 void q6asm_audio_client_free(struct audio_client *ac);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2b14423..eb5a0cc 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2084,6 +2084,9 @@
 
 	opcode = __le16_to_cpu(ev->opcode);
 
+	if (test_bit(HCI_RESET, &hdev->flags) && (opcode != HCI_OP_RESET))
+		return;
+
 	switch (opcode) {
 	case HCI_OP_INQUIRY_CANCEL:
 		hci_cc_inquiry_cancel(hdev, skb);
diff --git a/sound/soc/codecs/wcd9304-tables.c b/sound/soc/codecs/wcd9304-tables.c
index 252cb0e..f0d76e8 100644
--- a/sound/soc/codecs/wcd9304-tables.c
+++ b/sound/soc/codecs/wcd9304-tables.c
@@ -288,6 +288,22 @@
 	[SITAR_A_CDC_IIR1_COEF_B3_CTL] = SITAR_A_CDC_IIR1_COEF_B3_CTL__POR,
 	[SITAR_A_CDC_IIR1_COEF_B4_CTL] = SITAR_A_CDC_IIR1_COEF_B4_CTL__POR,
 	[SITAR_A_CDC_IIR1_COEF_B5_CTL] = SITAR_A_CDC_IIR1_COEF_B5_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B1_CTL] = SITAR_A_CDC_IIR2_GAIN_B1_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B2_CTL] = SITAR_A_CDC_IIR2_GAIN_B2_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B3_CTL] = SITAR_A_CDC_IIR2_GAIN_B3_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B4_CTL] = SITAR_A_CDC_IIR2_GAIN_B4_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B5_CTL] = SITAR_A_CDC_IIR2_GAIN_B5_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B6_CTL] = SITAR_A_CDC_IIR2_GAIN_B6_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B7_CTL] = SITAR_A_CDC_IIR2_GAIN_B7_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_B8_CTL] = SITAR_A_CDC_IIR2_GAIN_B8_CTL__POR,
+	[SITAR_A_CDC_IIR2_CTL] = SITAR_A_CDC_IIR2_CTL__POR,
+	[SITAR_A_CDC_IIR2_GAIN_TIMER_CTL] =
+		SITAR_A_CDC_IIR2_GAIN_TIMER_CTL__POR,
+	[SITAR_A_CDC_IIR2_COEF_B1_CTL] = SITAR_A_CDC_IIR2_COEF_B1_CTL__POR,
+	[SITAR_A_CDC_IIR2_COEF_B2_CTL] = SITAR_A_CDC_IIR2_COEF_B2_CTL__POR,
+	[SITAR_A_CDC_IIR2_COEF_B3_CTL] = SITAR_A_CDC_IIR2_COEF_B3_CTL__POR,
+	[SITAR_A_CDC_IIR2_COEF_B4_CTL] = SITAR_A_CDC_IIR2_COEF_B4_CTL__POR,
+	[SITAR_A_CDC_IIR2_COEF_B5_CTL] = SITAR_A_CDC_IIR2_COEF_B5_CTL__POR,
 	[SITAR_A_CDC_TOP_GAIN_UPDATE] = SITAR_A_CDC_TOP_GAIN_UPDATE__POR,
 	[SITAR_A_CDC_TOP_RDAC_DOUT_CTL] = SITAR_A_CDC_TOP_RDAC_DOUT_CTL__POR,
 	[SITAR_A_CDC_DEBUG_B1_CTL] = SITAR_A_CDC_DEBUG_B1_CTL__POR,
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index d9a8ae0..70d9fa9 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -378,9 +378,9 @@
 				int coeff_idx)
 {
 	/* Address does not automatically update if reading */
-	snd_soc_update_bits(codec,
+	snd_soc_write(codec,
 		(SITAR_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		0x1F, band_idx * BAND_MAX + coeff_idx);
+		(band_idx * BAND_MAX + coeff_idx) & 0x1F);
 
 	/* Mask bits top 2 bits since they are reserved */
 	return ((snd_soc_read(codec,
@@ -439,27 +439,27 @@
 {
 	/* Mask top 3 bits, 6-8 are reserved */
 	/* Update address manually each time */
-	snd_soc_update_bits(codec,
+	snd_soc_write(codec,
 		(SITAR_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		0x1F, band_idx * BAND_MAX + coeff_idx);
+		(band_idx * BAND_MAX + coeff_idx) & 0x1F);
 
 	/* Mask top 2 bits, 7-8 are reserved */
-	snd_soc_update_bits(codec,
+	snd_soc_write(codec,
 		(SITAR_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
-		0x3F, (value >> 24) & 0x3F);
+		(value >> 24) & 0x3F);
 
 	/* Isolate 8bits at a time */
-	snd_soc_update_bits(codec,
+	snd_soc_write(codec,
 		(SITAR_A_CDC_IIR1_COEF_B3_CTL + 16 * iir_idx),
-		0xFF, (value >> 16) & 0xFF);
+		(value >> 16) & 0xFF);
 
-	snd_soc_update_bits(codec,
+	snd_soc_write(codec,
 		(SITAR_A_CDC_IIR1_COEF_B4_CTL + 16 * iir_idx),
-		0xFF, (value >> 8) & 0xFF);
+		(value >> 8) & 0xFF);
 
-	snd_soc_update_bits(codec,
+	snd_soc_write(codec,
 		(SITAR_A_CDC_IIR1_COEF_B5_CTL + 16 * iir_idx),
-		0xFF, value & 0xFF);
+		value & 0xFF);
 }
 
 static int sitar_put_iir_band_audio_mixer(
@@ -562,9 +562,6 @@
 	SOC_SINGLE_TLV("ADC2 Volume", SITAR_A_TX_1_2_EN, 1, 3, 0, analog_gain),
 	SOC_SINGLE_TLV("ADC3 Volume", SITAR_A_TX_3_EN, 5, 3, 0, analog_gain),
 
-	SOC_SINGLE("MICBIAS1 CAPLESS Switch", SITAR_A_MICB_1_CTL, 4, 1, 1),
-	SOC_SINGLE("MICBIAS2 CAPLESS Switch", SITAR_A_MICB_2_CTL, 4, 1, 1),
-
 	SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 0, 100, sitar_get_anc_slot,
 				   sitar_put_anc_slot),
 
@@ -689,7 +686,7 @@
 	"ZERO", "EAR_HPH_L", "EAR_LINE_1",
 };
 
-static const char *iir1_inp1_text[] = {
+static const char * const iir_inp1_text[] = {
 	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "ZERO", "ZERO", "ZERO",
 	"ZERO", "ZERO", "ZERO", "RX1", "RX2", "RX3", "RX4", "RX5",
 };
@@ -761,7 +758,10 @@
 	SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
 
 static const struct soc_enum iir1_inp1_mux_enum =
-	SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_EQ1_B1_CTL, 0, 16, iir1_inp1_text);
+	SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_EQ1_B1_CTL, 0, 16, iir_inp1_text);
+
+static const struct soc_enum iir2_inp1_mux_enum =
+	SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_EQ2_B1_CTL, 0, 16, iir_inp1_text);
 
 static const struct snd_kcontrol_new rx_mix1_inp1_mux =
 	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
@@ -823,6 +823,9 @@
 static const struct snd_kcontrol_new iir1_inp1_mux =
 	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
 
+static const struct snd_kcontrol_new iir2_inp1_mux =
+	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+
 static const struct snd_kcontrol_new anc1_mux =
 	SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
 
@@ -1960,6 +1963,8 @@
 	/* Sidetone */
 	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
 	SND_SOC_DAPM_PGA("IIR1", SITAR_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
+	SND_SOC_DAPM_PGA("IIR2", SITAR_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
 
 };
 
@@ -2058,31 +2063,37 @@
 	{"RX1 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX1 MIX1 INP1", "RX4", "SLIM RX4"},
 	{"RX1 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP1", "IIR2", "IIR2"},
 	{"RX1 MIX1 INP2", "RX1", "SLIM RX1"},
 	{"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX1 MIX1 INP2", "RX4", "SLIM RX4"},
 	{"RX1 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP2", "IIR2", "IIR2"},
 	{"RX2 MIX1 INP1", "RX1", "SLIM RX1"},
 	{"RX2 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX2 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX2 MIX1 INP1", "RX4", "SLIM RX4"},
 	{"RX2 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX2 MIX1 INP1", "IIR2", "IIR2"},
 	{"RX2 MIX1 INP2", "RX1", "SLIM RX1"},
 	{"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX2 MIX1 INP2", "RX4", "SLIM RX4"},
 	{"RX2 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX2 MIX1 INP2", "IIR2", "IIR2"},
 	{"RX3 MIX1 INP1", "RX1", "SLIM RX1"},
 	{"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX3 MIX1 INP1", "RX4", "SLIM RX4"},
 	{"RX3 MIX1 INP1", "IIR1", "IIR1"},
+	{"RX3 MIX1 INP1", "IIR2", "IIR2"},
 	{"RX3 MIX1 INP2", "RX1", "SLIM RX1"},
 	{"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX3 MIX1 INP2", "RX4", "SLIM RX4"},
 	{"RX3 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX3 MIX1 INP2", "IIR2", "IIR2"},
 
 
 	/* TX */
@@ -2138,6 +2149,26 @@
 	/* IIR */
 	{"IIR1", NULL, "IIR1 INP1 MUX"},
 	{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP1 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP1 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP1 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP1 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP1 MUX", "RX5", "SLIM RX5"},
+
+	{"IIR2", NULL, "IIR2 INP1 MUX"},
+	{"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP1 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP1 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP1 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP1 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP1 MUX", "RX5", "SLIM RX5"},
+
 	{"MIC BIAS1 Internal1", NULL, "LDO_H"},
 	{"MIC BIAS1 External", NULL, "LDO_H"},
 	{"MIC BIAS2 Internal1", NULL, "LDO_H"},
@@ -4644,6 +4675,12 @@
 	snd_soc_update_bits(codec, SITAR_A_MICB_2_CTL, 0x60,
 		(pdata->micbias.bias2_cfilt_sel << 5));
 
+	/* Set micbias capless mode */
+	snd_soc_update_bits(codec, SITAR_A_MICB_1_CTL, 0x10,
+		(pdata->micbias.bias1_cap_mode << 4));
+	snd_soc_update_bits(codec, SITAR_A_MICB_2_CTL, 0x10,
+		(pdata->micbias.bias2_cap_mode << 4));
+
 	for (i = 0; i < 6; j++, i += 2) {
 		if (flag & (0x01 << i)) {
 			value = (leg_mode & (0x01 << i)) ? 0x10 : 0x00;
diff --git a/sound/soc/msm/msm-dai-q6-hdmi.c b/sound/soc/msm/msm-dai-q6-hdmi.c
index dfb090e..c082ed7 100644
--- a/sound/soc/msm/msm-dai-q6-hdmi.c
+++ b/sound/soc/msm/msm-dai-q6-hdmi.c
@@ -158,54 +158,19 @@
 	int rc = 0;
 
 	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-		/* PORT START should be set if prepare called in active state */
-		rc = afe_q6_interface_prepare();
+		rc = afe_port_start(dai->id, &dai_data->port_config,
+				    dai_data->rate);
 		if (IS_ERR_VALUE(rc))
-			dev_err(dai->dev, "fail to open AFE APR\n");
+			dev_err(dai->dev, "fail to open AFE port %x\n",
+				dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
 	}
+
 	return rc;
 }
 
-static int msm_dai_q6_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
-		struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
-	/* Start/stop port without waiting for Q6 AFE response. Need to have
-	 * native q6 AFE driver propagates AFE response in order to handle
-	 * port start/stop command error properly if error does arise.
-	 */
-	pr_debug("%s:port:%d  cmd:%d dai_data->status_mask = %ld",
-		__func__, dai->id, cmd, *dai_data->status_mask);
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			afe_port_start_nowait(dai->id, &dai_data->port_config,
-					dai_data->rate);
-
-			set_bit(STATUS_PORT_STARTED, dai_data->status_mask);
-		}
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			afe_port_stop_nowait(dai->id);
-			clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
-		}
-		break;
-
-	default:
-		dev_err(dai->dev, "invalid Trigger command = %d\n", cmd);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
 {
 	struct msm_dai_q6_hdmi_dai_data *dai_data;
@@ -253,7 +218,6 @@
 
 static struct snd_soc_dai_ops msm_dai_q6_hdmi_ops = {
 	.prepare	= msm_dai_q6_hdmi_prepare,
-	.trigger	= msm_dai_q6_hdmi_trigger,
 	.hw_params	= msm_dai_q6_hdmi_hw_params,
 	.shutdown	= msm_dai_q6_hdmi_shutdown,
 };
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index 147316e..fb74c0a 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -407,55 +407,21 @@
 		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
 		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+	u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		       MI2S_RX : MI2S_TX);
 	int rc = 0;
 
 	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		/* PORT START should be set if prepare called in active state */
-		rc = afe_q6_interface_prepare();
+		rc = afe_port_start(port_id, &dai_data->port_config,
+				    dai_data->rate);
+
 		if (IS_ERR_VALUE(rc))
-			dev_err(dai->dev, "fail to open AFE APR\n");
-	}
-	return rc;
-}
-
-static int msm_dai_q6_mi2s_trigger(struct snd_pcm_substream *substream, int cmd,
-		struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
-		dev_get_drvdata(dai->dev);
-	struct msm_dai_q6_dai_data *dai_data =
-		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
-		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
-	u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		MI2S_RX : MI2S_TX);
-	int rc = 0;
-
-	dev_dbg(dai->dev, "%s: cmd:%d dai_data->status_mask = %ld",
-		__func__, cmd, *dai_data->status_mask);
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			afe_port_start_nowait(port_id,
-				&dai_data->port_config, dai_data->rate);
+			dev_err(dai->dev, "fail to open AFE port %x\n",
+				port_id);
+		else
 			set_bit(STATUS_PORT_STARTED,
 				dai_data->status_mask);
-		}
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			afe_port_stop_nowait(port_id);
-			clear_bit(STATUS_PORT_STARTED,
-				dai_data->status_mask);
-		}
-		break;
-
-	default:
-		rc = -EINVAL;
 	}
 
 	return rc;
@@ -906,21 +872,20 @@
 
 	/*
 	 * For AUX PCM Interface the below sequence of clk
-	 * settings and afe_open is a strict requirement.
-	 *
-	 * Also using afe_open instead of afe_port_start_nowait
-	 * to make sure the port is open before deasserting the
-	 * clock line. This is required because pcm register is
-	 * not written before clock deassert. Hence the hw does
-	 * not get updated with new setting if the below clock
-	 * assert/deasset and afe_open sequence is not followed.
+	 * settings and opening of the AFE port is a strict requirement.
+	 * afe_port_start is called to make sure the port is open
+	 * before deasserting the clock line. This is required
+	 * because the pcm register is not written before clock
+	 * deassert. Hence the hw does not get updated with the new
+	 * settings if the below clock assert/deassert and
+	 * afe_port_start sequence is not followed.
 	 */
 
 	clk_reset(pcm_clk, CLK_RESET_ASSERT);
 
-	afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
+	afe_port_start(PCM_RX, &dai_data->port_config, dai_data->rate);
 
-	afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
+	afe_port_start(PCM_TX, &dai_data->port_config, dai_data->rate);
 	if (dai_data->rate == 8000) {
 		pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
 	} else if (dai_data->rate == 16000) {
@@ -988,21 +953,22 @@
 
 	/*
 	 * For AUX PCM Interface the below sequence of clk
-	 * settings and afe_open is a strict requirement.
-	 *
-	 * Also using afe_open instead of afe_port_start_nowait
-	 * to make sure the port is open before deasserting the
-	 * clock line. This is required because pcm register is
-	 * not written before clock deassert. Hence the hw does
-	 * not get updated with new setting if the below clock
-	 * assert/deasset and afe_open sequence is not followed.
+	 * settings and opening of the AFE port is a strict requirement.
+	 * afe_port_start is called to make sure the port is open
+	 * before deasserting the clock line. This is required
+	 * because the pcm register is not written before clock
+	 * deassert. Hence the hw does not get updated with the new
+	 * settings if the below clock assert/deassert and
+	 * afe_port_start sequence is not followed.
 	 */
 
 	clk_reset(sec_pcm_clk, CLK_RESET_ASSERT);
 
-	afe_open(SECONDARY_PCM_RX, &dai_data->port_config, dai_data->rate);
+	afe_port_start(SECONDARY_PCM_RX, &dai_data->port_config,
+		       dai_data->rate);
 
-	afe_open(SECONDARY_PCM_TX, &dai_data->port_config, dai_data->rate);
+	afe_port_start(SECONDARY_PCM_TX, &dai_data->port_config,
+		       dai_data->rate);
 	if (dai_data->rate == 8000) {
 		pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
 	} else if (dai_data->rate == 16000) {
@@ -1034,11 +1000,25 @@
 	int rc = 0;
 
 	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-		/* PORT START should be set if prepare called in active state */
-		rc = afe_q6_interface_prepare();
+		switch (dai->id) {
+		case VOICE_PLAYBACK_TX:
+		case VOICE_RECORD_TX:
+		case VOICE_RECORD_RX:
+			rc = afe_start_pseudo_port(dai->id);
+			break;
+		default:
+			rc = afe_port_start(dai->id, &dai_data->port_config,
+					    dai_data->rate);
+		}
+
 		if (IS_ERR_VALUE(rc))
-			dev_err(dai->dev, "fail to open AFE APR\n");
+			dev_err(dai->dev, "fail to open AFE port %x\n",
+				dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
 	}
+
 	return rc;
 }
 
@@ -1071,63 +1050,6 @@
 
 }
 
-static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
-		struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	int rc = 0;
-
-	/* Start/stop port without waiting for Q6 AFE response. Need to have
-	 * native q6 AFE driver propagates AFE response in order to handle
-	 * port start/stop command error properly if error does arise.
-	 */
-	pr_debug("%s:port:%d  cmd:%d dai_data->status_mask = %ld",
-		__func__, dai->id, cmd, *dai_data->status_mask);
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			switch (dai->id) {
-			case VOICE_PLAYBACK_TX:
-			case VOICE_RECORD_TX:
-			case VOICE_RECORD_RX:
-				afe_pseudo_port_start_nowait(dai->id);
-				break;
-			default:
-				afe_port_start_nowait(dai->id,
-					&dai_data->port_config, dai_data->rate);
-				break;
-			}
-			set_bit(STATUS_PORT_STARTED,
-				dai_data->status_mask);
-		}
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			switch (dai->id) {
-			case VOICE_PLAYBACK_TX:
-			case VOICE_RECORD_TX:
-			case VOICE_RECORD_RX:
-				afe_pseudo_port_stop_nowait(dai->id);
-				break;
-			default:
-				afe_port_stop_nowait(dai->id);
-				break;
-			}
-			clear_bit(STATUS_PORT_STARTED,
-				dai_data->status_mask);
-		}
-		break;
-
-	default:
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
 static int msm_dai_q6_dai_auxpcm_probe(struct snd_soc_dai *dai)
 {
 	struct msm_dai_q6_dai_data *dai_data;
@@ -1535,7 +1457,6 @@
 static struct snd_soc_dai_ops msm_dai_q6_mi2s_ops = {
 	.startup	= msm_dai_q6_mi2s_startup,
 	.prepare	= msm_dai_q6_mi2s_prepare,
-	.trigger	= msm_dai_q6_mi2s_trigger,
 	.hw_params	= msm_dai_q6_mi2s_hw_params,
 	.shutdown	= msm_dai_q6_mi2s_shutdown,
 	.set_fmt	= msm_dai_q6_mi2s_set_fmt,
@@ -1543,7 +1464,6 @@
 
 static struct snd_soc_dai_ops msm_dai_q6_ops = {
 	.prepare	= msm_dai_q6_prepare,
-	.trigger	= msm_dai_q6_trigger,
 	.hw_params	= msm_dai_q6_hw_params,
 	.shutdown	= msm_dai_q6_shutdown,
 	.set_fmt	= msm_dai_q6_set_fmt,
diff --git a/sound/soc/msm/msm-pcm-voip.c b/sound/soc/msm/msm-pcm-voip.c
index 570d71c..b18117c 100644
--- a/sound/soc/msm/msm-pcm-voip.c
+++ b/sound/soc/msm/msm-pcm-voip.c
@@ -106,10 +106,9 @@
 	wait_queue_head_t in_wait;
 
 	struct mutex lock;
-	struct mutex in_lock;
-	struct mutex out_lock;
 
 	spinlock_t dsp_lock;
+	spinlock_t dsp_ul_lock;
 
 	uint32_t mode;
 	uint32_t rate_type;
@@ -268,7 +267,7 @@
 		return;
 
 	/* Copy up-link packet into out_queue. */
-	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
 
 	/* discarding UL packets till start is received */
 	if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
@@ -321,10 +320,10 @@
 		pr_debug("ul_pkt: pkt_len =%d, frame.len=%d\n", pkt_len,
 			buf_node->frame.len);
 		prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
-		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
 		snd_pcm_period_elapsed(prtd->capture_substream);
 	} else {
-		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
 		pr_err("UL data dropped\n");
 	}
 
@@ -516,6 +515,7 @@
 	struct voip_buf_node *buf_node = NULL;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct voip_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
 
 	int count = frames_to_bytes(runtime, frames);
 	pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames);
@@ -525,8 +525,8 @@
 				prtd->state == VOIP_STOPPED),
 				1 * HZ);
 	if (ret > 0) {
-		mutex_lock(&prtd->in_lock);
 		if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
 			buf_node =
 				list_first_entry(&prtd->free_in_queue,
 						struct voip_buf_node, list);
@@ -539,13 +539,13 @@
 				ret = copy_from_user(&buf_node->frame,
 							buf, count);
 			list_add_tail(&buf_node->list, &prtd->in_queue);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
 		} else {
 			pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n",
 				__func__, count);
 			ret = -ENOMEM;
 		}
 
-		mutex_unlock(&prtd->in_lock);
 	} else if (ret == 0) {
 		pr_err("%s: No free DL buffs\n", __func__);
 		ret = -ETIMEDOUT;
@@ -564,6 +564,7 @@
 	struct voip_buf_node *buf_node = NULL;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct voip_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
 
 	count = frames_to_bytes(runtime, frames);
 
@@ -575,9 +576,9 @@
 				1 * HZ);
 
 	if (ret > 0) {
-		mutex_lock(&prtd->out_lock);
 
 		if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+			spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
 			buf_node = list_first_entry(&prtd->out_queue,
 					struct voip_buf_node, list);
 			list_del(&buf_node->list);
@@ -596,13 +597,14 @@
 			}
 			list_add_tail(&buf_node->list,
 						&prtd->free_out_queue);
+			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+
 		} else {
 			pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n",
 				__func__, count);
 			ret = -ENOMEM;
 		}
 
-		mutex_unlock(&prtd->out_lock);
 
 	} else if (ret == 0) {
 		pr_err("%s: No UL data available\n", __func__);
@@ -636,6 +638,7 @@
 	struct snd_pcm_substream *p_substream, *c_substream;
 	struct snd_pcm_runtime *runtime;
 	struct voip_drv_info *prtd;
+	unsigned long dsp_flags;
 
 	if (substream == NULL) {
 		pr_err("substream is NULL\n");
@@ -674,7 +677,7 @@
 			goto capt;
 		}
 		if (p_dma_buf->area != NULL) {
-			mutex_lock(&prtd->in_lock);
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
 			list_for_each_safe(ptr, next, &prtd->in_queue) {
 				buf_node = list_entry(ptr,
 						struct voip_buf_node, list);
@@ -685,11 +688,11 @@
 						struct voip_buf_node, list);
 				list_del(&buf_node->list);
 			}
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
 			dma_free_coherent(p_substream->pcm->card->dev,
 				runtime->hw.buffer_bytes_max, p_dma_buf->area,
 				p_dma_buf->addr);
 			p_dma_buf->area = NULL;
-			mutex_unlock(&prtd->in_lock);
 		}
 		/* release out_queue and free_out_queue */
 capt:		c_substream = prtd->capture_substream;
@@ -703,7 +706,7 @@
 			goto done;
 		}
 		if (c_dma_buf->area != NULL) {
-			mutex_lock(&prtd->out_lock);
+			spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
 			list_for_each_safe(ptr, next, &prtd->out_queue) {
 				buf_node = list_entry(ptr,
 						struct voip_buf_node, list);
@@ -714,11 +717,11 @@
 						struct voip_buf_node, list);
 				list_del(&buf_node->list);
 			}
+			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
 			dma_free_coherent(c_substream->pcm->card->dev,
 				runtime->hw.buffer_bytes_max, c_dma_buf->area,
 				c_dma_buf->addr);
 			c_dma_buf->area = NULL;
-			mutex_unlock(&prtd->out_lock);
 		}
 done:
 		prtd->capture_substream = NULL;
@@ -888,19 +891,15 @@
 		for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
 			buf_node = (void *)dma_buf->area + offset;
 
-			mutex_lock(&voip_info.in_lock);
 			list_add_tail(&buf_node->list,
 					&voip_info.free_in_queue);
-			mutex_unlock(&voip_info.in_lock);
 			offset = offset + sizeof(struct voip_buf_node);
 		}
 	} else {
 		for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
 			buf_node = (void *) dma_buf->area + offset;
-			mutex_lock(&voip_info.out_lock);
 			list_add_tail(&buf_node->list,
 					&voip_info.free_out_queue);
-			mutex_unlock(&voip_info.out_lock);
 			offset = offset + sizeof(struct voip_buf_node);
 		}
 	}
@@ -1142,10 +1141,9 @@
 	memset(&voip_info, 0, sizeof(voip_info));
 	voip_info.mode = MODE_PCM;
 	mutex_init(&voip_info.lock);
-	mutex_init(&voip_info.in_lock);
-	mutex_init(&voip_info.out_lock);
 
 	spin_lock_init(&voip_info.dsp_lock);
+	spin_lock_init(&voip_info.dsp_ul_lock);
 
 	init_waitqueue_head(&voip_info.out_wait);
 	init_waitqueue_head(&voip_info.in_wait);
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index d07cdff..a0cad55 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -1222,7 +1222,7 @@
 {
 	int ret;
 
-	if (!cpu_is_msm8930() && !cpu_is_msm8627()) {
+	if (!cpu_is_msm8930() && !cpu_is_msm8930aa() && !cpu_is_msm8627()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return -ENODEV ;
 	}
@@ -1260,7 +1260,7 @@
 
 static void __exit msm8930_audio_exit(void)
 {
-	if (!cpu_is_msm8930() && !cpu_is_msm8627()) {
+	if (!cpu_is_msm8930() && !cpu_is_msm8930aa() && !cpu_is_msm8627()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return ;
 	}
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 7b16adb..2f6772d 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -376,11 +376,10 @@
 	if ((afe_cal_addr[path].cal_paddr != cal_block.cal_paddr) ||
 		(cal_block.cal_size > afe_cal_addr[path].cal_size)) {
 		if (afe_cal_addr[path].cal_paddr != 0)
-			afe_cmd_memory_unmap_nowait(
+			afe_cmd_memory_unmap(
 				afe_cal_addr[path].cal_paddr);
 
-		afe_cmd_memory_map_nowait(cal_block.cal_paddr,
-						cal_block.cal_size);
+		afe_cmd_memory_map(cal_block.cal_paddr, cal_block.cal_size);
 		afe_cal_addr[path].cal_paddr = cal_block.cal_paddr;
 		afe_cal_addr[path].cal_size = cal_block.cal_size;
 	}
@@ -400,12 +399,21 @@
 		"cal size = %d, cal addr = 0x%x\n", __func__,
 		port_id, path, cal_block.cal_size, cal_block.cal_paddr);
 
+	atomic_set(&this_afe.state, 1);
 	result = apr_send_pkt(this_afe.apr, (uint32_t *) &afe_cal);
 	if (result < 0) {
 		pr_err("%s: AFE cal for port %d failed\n",
 			__func__, port_id);
 	}
 
+	result = wait_event_timeout(this_afe.wait,
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: wait_event timeout SET AFE CAL\n", __func__);
+		goto done;
+	}
+
 	pr_debug("%s: AFE cal sent for path %d device!\n", __func__, path);
 done:
 	return;
@@ -421,8 +429,11 @@
 		afe_send_cal_block(RX_CAL, port_id);
 }
 
-int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
-	u32 rate) /* This function is no blocking */
+/* This function sends the multi-channel HDMI configuration command and
+ * AFE calibration, which are only supported by QDSP6 on 8960 and onward.
+ */
+int afe_port_start(u16 port_id, union afe_port_config *afe_config,
+		   u32 rate)
 {
 	struct afe_port_start_command start;
 	struct afe_audioif_config_command config;
@@ -442,11 +453,9 @@
 		(port_id == RT_PROXY_DAI_001_TX))
 		port_id = VIRTUAL_ID_TO_PORTID(port_id);
 
-	if (this_afe.apr == NULL) {
-		pr_err("%s: AFE APR is not registered\n", __func__);
-		ret = -ENODEV;
+	ret = afe_q6_interface_prepare();
+	if (IS_ERR_VALUE(ret))
 		return ret;
-	}
 
 	if (port_id == HDMI_RX) {
 		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -513,6 +522,8 @@
 	config.port_id = port_id;
 	config.port = *afe_config;
 
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
 	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
 	if (ret < 0) {
 		pr_err("%s: AFE enable for port %d failed\n", __func__,
@@ -521,6 +532,21 @@
 		goto fail_cmd;
 	}
 
+	ret = wait_event_timeout(this_afe.wait,
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+
+	if (!ret) {
+		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) != 0) {
+		pr_err("%s: config cmd failed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
 	/* send AFE cal */
 	afe_send_cal(port_id);
 
@@ -535,6 +561,7 @@
 	start.gain = 0x2000;
 	start.sample_rate = rate;
 
+	atomic_set(&this_afe.state, 1);
 	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
 
 	if (IS_ERR_VALUE(ret)) {
@@ -544,6 +571,15 @@
 		goto fail_cmd;
 	}
 
+	ret = wait_event_timeout(this_afe.wait,
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout PORT START\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
 	if (this_afe.task != current)
 		this_afe.task = current;
 
@@ -555,6 +591,7 @@
 	return ret;
 }
 
+/* This function should be used by 8660 exclusively */
 int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
 {
 	struct afe_port_start_command start;
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 9136f93..2993e37 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -445,6 +445,7 @@
 		spin_lock_init(&ac->port[lcnt].dsp_lock);
 	}
 	atomic_set(&ac->cmd_state, 0);
+	atomic_set(&ac->cmd_response, 0);
 
 	pr_debug("%s: session[%d]\n", __func__, ac->session);
 
@@ -863,6 +864,10 @@
 		case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
 			if (atomic_read(&ac->cmd_state)) {
 				atomic_set(&ac->cmd_state, 0);
+				if (payload[1] == ADSP_EUNSUPPORTED)
+					atomic_set(&ac->cmd_response, 1);
+				else
+					atomic_set(&ac->cmd_response, 0);
 				wake_up(&ac->cmd_wait);
 			}
 			if (ac->cb)
@@ -1438,6 +1443,10 @@
 			rc);
 		goto fail_cmd;
 	}
+	if (atomic_read(&ac->cmd_response)) {
+		pr_err("%s: format = %x not supported\n", __func__, format);
+		goto fail_cmd;
+	}
 	return 0;
 fail_cmd:
 	return -EINVAL;