Merge "ARM: dts: msm: Update bus bandwidth votes for sdhci on 8974"
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index 14fe237..787c9fb 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -80,11 +80,18 @@
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
 			<1190400000 7>;
 		qcom,speed2-bin-v2 =
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
+			<1190400000 7>,
+			<1305600000 8>,
+			<1344000000 9>,
 			<1401600000 10>;
 		qcom,speed1-bin-v2 =
 			<         0 0>,
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 2599c46..8d2414c 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -1387,6 +1387,12 @@
 		qcom,irq-is-percpu;
 		interrupts = <1 7 0xf00>;
 	};
+
+	bimc_sharedmem {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0xfc380000 0x00100000>;
+		reg-names = "bimc";
+	};
 };
 
 &gdsc_venus {
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi b/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi
index 53abb95..d731ce0 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi
@@ -96,3 +96,13 @@
 		qcom,master-en = <1>;
 	};
 };
+
+&sdhc_2 {
+	qcom,nonremovable;
+
+	interrupts = <0 1>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 221 0>;
+	interrupt-names = "hc_irq", "pwr_irq";
+	/delete-property/ cd-gpios;
+};
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi b/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi
index 1554575..336553a 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi
@@ -183,3 +183,13 @@
 &dsi_otm8018b_fwvga_vid {
 	qcom,cont-splash-enabled;
 };
+
+&sdhc_2 {
+	qcom,nonremovable;
+
+	interrupts = <0 1>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 221 0>;
+	interrupt-names = "hc_irq", "pwr_irq";
+	/delete-property/ cd-gpios;
+};
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 66b81ac..90dae06 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -1043,6 +1043,12 @@
 		qcom,irq-is-percpu;
 		interrupts = <1 7 0xf00>;
 	};
+
+	bimc_sharedmem {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0xfc380000 0x00100000>;
+		reg-names = "bimc";
+	};
 };
 
 &gdsc_vfe {
diff --git a/arch/arm/boot/dts/msm8926-qrd-skug.dtsi b/arch/arm/boot/dts/msm8926-qrd-skug.dtsi
index c93ea12..dabadda 100644
--- a/arch/arm/boot/dts/msm8926-qrd-skug.dtsi
+++ b/arch/arm/boot/dts/msm8926-qrd-skug.dtsi
@@ -98,3 +98,13 @@
 &dsi_ssd2080m_720_vid {
 	qcom,cont-splash-enabled;
 };
+
+&sdhc_2 {
+	qcom,nonremovable;
+
+	interrupts = <0 1>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 221 0>;
+	interrupt-names = "hc_irq", "pwr_irq";
+	/delete-property/ cd-gpios;
+};
diff --git a/arch/arm/boot/dts/msm8926.dtsi b/arch/arm/boot/dts/msm8926.dtsi
index 394f4a9..1919022 100644
--- a/arch/arm/boot/dts/msm8926.dtsi
+++ b/arch/arm/boot/dts/msm8926.dtsi
@@ -53,11 +53,18 @@
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
 			<1190400000 7>;
 		qcom,speed2-bin-v1 =
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
+			<1190400000 7>,
+			<1305600000 8>,
+			<1344000000 9>,
 			<1401600000 10>;
 		qcom,speed1-bin-v1 =
 			<         0 0>,
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index b8dba3e..63a37f5 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -2454,6 +2454,12 @@
 		interrupts = <0 1 0>;
 	};
 
+	bimc_sharedmem {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0xfc380000 0x00100000>;
+		reg-names = "bimc";
+	};
+
 	qcom,smdtty {
 		compatible = "qcom,smdtty";
 
diff --git a/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts b/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts
index 5a01945..b9f2574 100644
--- a/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts
@@ -25,3 +25,8 @@
 	qcom,pad-pull-on = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 	qcom,pad-pull-off = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 };
+
+&ehci {
+	hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+	qcom,vdd-voltage-level = <1 2 3 5 7>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
index f80551e..99c4ec7 100644
--- a/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
@@ -26,3 +26,10 @@
 	qcom,pad-pull-off = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
 };
+
+&ehci {
+	status = "ok";
+	qcom,usb2-enable-uicc;
+	hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+	qcom,vdd-voltage-level = <1 2 3 5 7>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts
index 76d0121..a25b8ca 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts
@@ -25,3 +25,8 @@
 	qcom,pad-pull-on = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 	qcom,pad-pull-off = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 };
+
+&ehci {
+	status = "ok";
+	qcom,usb2-enable-uicc;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts
index 8a4ad45..0fe8813 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts
@@ -120,3 +120,8 @@
 		qcom,thermal-node;
 	};
 };
+
+&ehci {
+	status = "ok";
+	qcom,usb2-enable-uicc;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
index 89939e6..b502078 100644
--- a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
@@ -71,6 +71,11 @@
 	qcom,utmi-clk-rate = <24000000>;
 };
 
+&ehci {
+	hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+	qcom,vdd-voltage-level = <1 2 3 5 7>;
+};
+
 &krait_regulator_pmic {
 	status = "ok";
 
diff --git a/arch/arm/boot/dts/msm8974pro-pma8084.dtsi b/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
index ab4ffb5..28ddda2 100644
--- a/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
@@ -118,6 +118,8 @@
 		HSUSB_VDDCX-supply = <&pma8084_s2>;
 		HSUSB_1p8-supply = <&pma8084_l6>;
 		HSUSB_3p3-supply = <&pma8084_l24>;
+		hsusb_vdd_dig-supply = <&pma8084_s2_corner>;
+		qcom,vdd-voltage-level = <1 2 3 5 7>;
 	};
 
 	qcom,gdsc@fd8c4024 {
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 7b13bbc..a923846 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -149,8 +149,6 @@
 			"msm-tsens", NULL),
 	OF_DEV_AUXDATA("qcom,qcedev", 0xFD440000, \
 			"qcedev.0", NULL),
-	OF_DEV_AUXDATA("qcom,qcrypto", 0xFD440000, \
-			"qcrypto.0", NULL),
 	OF_DEV_AUXDATA("qcom,hsic-host", 0xF9A00000, \
 			"msm_hsic_host", NULL),
 	OF_DEV_AUXDATA("qcom,hsic-smsc-hub", 0, "msm_smsc_hub",
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index b730091..1847bf4 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4976,10 +4976,16 @@
 	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c,     "qcedev.0"),
 	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,         "qcedev.0"),
 
-	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "qcrypto.0"),
-	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "qcrypto.0"),
-	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "qcrypto.0"),
-	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "qcrypto.0"),
+
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "fd440000.qcom,qcrypto"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "fd440000.qcom,qcrypto"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "fd440000.qcom,qcrypto"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "fd440000.qcom,qcrypto"),
+
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "fd440000.qcom,qcrypto1"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "fd440000.qcom,qcrypto1"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "fd440000.qcom,qcrypto1"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "fd440000.qcom,qcrypto1"),
 
 	CLK_LOOKUP("core_clk",     gcc_ce1_clk.c,         "qseecom"),
 	CLK_LOOKUP("iface_clk",    gcc_ce1_ahb_clk.c,     "qseecom"),
diff --git a/arch/arm/mach-msm/msm_rq_stats.c b/arch/arm/mach-msm/msm_rq_stats.c
index c99f0ec..af4a6d2 100644
--- a/arch/arm/mach-msm/msm_rq_stats.c
+++ b/arch/arm/mach-msm/msm_rq_stats.c
@@ -30,7 +30,6 @@
 #include <linux/kernel_stat.h>
 #include <linux/tick.h>
 #include <asm/smp_plat.h>
-#include "acpuclock.h"
 #include <linux/suspend.h>
 
 #define MAX_LONG_SIZE 24
@@ -199,7 +198,7 @@
 	switch (val) {
 	case CPU_ONLINE:
 		if (!this_cpu->cur_freq)
-			this_cpu->cur_freq = acpuclk_get_rate(cpu);
+			this_cpu->cur_freq = cpufreq_quick_get(cpu);
 	case CPU_ONLINE_FROZEN:
 		this_cpu->avg_load_maxfreq = 0;
 	}
@@ -402,7 +401,7 @@
 		cpufreq_get_policy(&cpu_policy, i);
 		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
 		if (cpu_online(i))
-			pcpu->cur_freq = acpuclk_get_rate(i);
+			pcpu->cur_freq = cpufreq_quick_get(i);
 		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
 	}
 	freq_transition.notifier_call = cpufreq_transition_handler;
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index a7fc204..a91331e 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -62,6 +62,9 @@
 
 #define PRONTO_PMU_CCPU_BOOT_REMAP_ADDR			0x2004
 
+#define PRONTO_PMU_SPARE				0x1088
+#define PRONTO_PMU_SPARE_SSR_BIT			BIT(23)
+
 #define CLK_CTL_WCNSS_RESTART_BIT			BIT(0)
 
 #define AXI_HALTREQ					0x0
@@ -376,7 +379,16 @@
 	struct pronto_data *drv = subsys_to_drv(subsys);
 	struct platform_device *pdev = wcnss_get_platform_device();
 	struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
-	int    ret = -1;
+	void __iomem *base = drv->base;
+	u32 reg;
+	int ret = -1;
+
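+	/* Flag a subsystem restart in the PMU spare register */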
+	if (base) {
+		reg = readl_relaxed(base + PRONTO_PMU_SPARE);
+		reg |= PRONTO_PMU_SPARE_SSR_BIT;
+		writel_relaxed(reg, base + PRONTO_PMU_SPARE);
+	}
 
 	if (pdev && pwlanconfig)
 		ret = wcnss_wlan_power(&pdev->dev, pwlanconfig,
diff --git a/arch/arm/mach-msm/rpm_log.c b/arch/arm/mach-msm/rpm_log.c
index 58e8588..1809cea 100644
--- a/arch/arm/mach-msm/rpm_log.c
+++ b/arch/arm/mach-msm/rpm_log.c
@@ -52,6 +52,7 @@
 	char *data;
 	u32 len;
 	u32 pos;
+	struct mutex mutex;
 	u32 max_len;
 	u32 read_idx;
 	struct msm_rpm_log_platform_data *pdata;
@@ -218,6 +219,7 @@
 	if (!access_ok(VERIFY_WRITE, bufu, count))
 		return -EFAULT;
 
+	mutex_lock(&buf->mutex);
 	/* check for more messages if local buffer empty */
 	if (buf->pos == buf->len) {
 		buf->pos = 0;
@@ -226,6 +228,8 @@
 	}
 
-	if ((file->f_flags & O_NONBLOCK) && buf->len == 0)
+	if ((file->f_flags & O_NONBLOCK) && buf->len == 0) {
+		mutex_unlock(&buf->mutex);
 		return -EAGAIN;
+	}
 
 	/* loop until new messages arrive */
@@ -241,6 +245,7 @@
 
 	remaining = __copy_to_user(bufu, &(buf->data[buf->pos]), out_len);
 	buf->pos += out_len - remaining;
+	mutex_unlock(&buf->mutex);
 
 	return out_len - remaining;
 }
@@ -287,6 +292,7 @@
 	buf->pdata = pdata;
 	buf->len = 0;
 	buf->pos = 0;
+	mutex_init(&buf->mutex);
 	buf->max_len = PRINTED_LENGTH(pdata->log_len);
 	buf->read_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
 					 MSM_RPM_LOG_HEAD);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9d7bb96..f230033 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1137,9 +1137,7 @@
 					uint32_t flags)
 {
 	phys_addr_t pt_val;
-	unsigned int link[230];
-	unsigned int *cmds = &link[0];
-	int sizedwords = 0;
+	unsigned int *link = NULL, *cmds;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int num_iommu_units;
 	struct kgsl_context *context;
@@ -1160,6 +1158,14 @@
 	}
 	adreno_ctx = ADRENO_CONTEXT(context);
 
+	link = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (link == NULL) {
+		result = -ENOMEM;
+		goto done;
+	}
+
+	cmds = link;
+
 	result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
 
 	if (result)
@@ -1179,17 +1185,11 @@
 		cmds += _adreno_iommu_setstate_v1(device, cmds, pt_val,
 						num_iommu_units, flags);
 
-	sizedwords += (cmds - &link[0]);
-	if (sizedwords == 0) {
-		KGSL_DRV_ERR(device, "no commands generated\n");
-		BUG();
-	}
 	/* invalidate all base pointers */
 	*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
 	*cmds++ = 0x7fff;
-	sizedwords += 2;
 
-	if (sizedwords > (ARRAY_SIZE(link))) {
+	if ((unsigned int) (cmds - link) > (PAGE_SIZE / sizeof(unsigned int))) {
 		KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
 		BUG();
 	}
@@ -1198,7 +1198,8 @@
 	 * use the global timestamp for iommu clock disablement
 	 */
 	result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
-			KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
+			KGSL_CMD_FLAGS_PMODE, link,
+			(unsigned int)(cmds - link));
 
 	/*
 	 * On error disable the IOMMU clock right away otherwise turn it off
@@ -1212,6 +1213,7 @@
 						KGSL_IOMMU_CONTEXT_USER);
 
 done:
+	kfree(link);
 	kgsl_context_put(context);
 	return result;
 }
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cf577fd..3d9206b 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -724,6 +724,11 @@
 	*cmds++ = val;
 	*cmds++ = 0xFFFFFFFF;
 	*cmds++ = 0xFFFFFFFF;
+
+	/* WAIT_REG_MEM turns back on protected mode - push it off */
+	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
+	*cmds++ = 0;
+
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	return cmds - start;
 }
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index a535a97..2025d73 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -4150,6 +4150,7 @@
 
 	/* CP registers */
 	adreno_set_protected_registers(device, &index, 0x1C0, 5);
+	adreno_set_protected_registers(device, &index, 0x1EC, 1);
 	adreno_set_protected_registers(device, &index, 0x1F6, 1);
 	adreno_set_protected_registers(device, &index, 0x1F8, 2);
 	adreno_set_protected_registers(device, &index, 0x45E, 2);
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index cef2805..488e5a8 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1048,6 +1048,10 @@
 	*cmds++ = 0x1;
 	*cmds++ = 0x1;
 
+	/* WAIT_REG_MEM turns back on protected mode - push it off */
+	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
+	*cmds++ = 0;
+
 	*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
 	*cmds++ = lock_vars->turn;
 	*cmds++ = 0;
@@ -1062,11 +1066,19 @@
 	*cmds++ = 0x1;
 	*cmds++ = 0x1;
 
+	/* WAIT_REG_MEM turns back on protected mode - push it off */
+	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
+	*cmds++ = 0;
+
 	*cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3);
 	*cmds++ = lock_vars->flag[PROC_APPS];
 	*cmds++ = lock_vars->turn;
 	*cmds++ = 0;
 
+	/* TEST_TWO_MEMS turns back on protected mode - push it off */
+	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
+	*cmds++ = 0;
+
 	cmds += adreno_add_idle_cmds(adreno_dev, cmds);
 
 	return cmds - start;
@@ -1104,6 +1116,10 @@
 	*cmds++ = 0x1;
 	*cmds++ = 0x1;
 
+	/* WAIT_REG_MEM turns back on protected mode - push it off */
+	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
+	*cmds++ = 0;
+
 	cmds += adreno_add_idle_cmds(adreno_dev, cmds);
 
 	return cmds - start;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index d64d0d3..65e607b 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -401,6 +401,10 @@
 	status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
 	if (status)
 		return status;
+
+	/* Mark the setstate memory as read only */
+	mmu->setstate_memory.flags |= KGSL_MEMFLAGS_GPUREADONLY;
+
 	kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
 				mmu->setstate_memory.size);
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 80a0073..d35869c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -52,8 +52,82 @@
 	{}
 };
 
+#define MAX_OVERFLOW_COUNTERS  15
+#define OVERFLOW_LENGTH 512
+#define OVERFLOW_BUFFER_LENGTH 32
 static struct msm_isp_buf_mgr vfe_buf_mgr;
+static int msm_isp_enable_debugfs(struct msm_isp_statistics *stats);
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+	"imgmaster0_overflow_cnt",
+	"imgmaster1_overflow_cnt",
+	"imgmaster2_overflow_cnt",
+	"imgmaster3_overflow_cnt",
+	"imgmaster4_overflow_cnt",
+	"imgmaster5_overflow_cnt",
+	"imgmaster6_overflow_cnt",
+	"be_overflow_cnt",
+	"bg_overflow_cnt",
+	"bf_overflow_cnt",
+	"awb_overflow_cnt",
+	"rs_overflow_cnt",
+	"cs_overflow_cnt",
+	"ihist_overflow_cnt",
+	"skinbhist_overflow_cnt",
+};
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
 
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file,
+	char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char name[OVERFLOW_LENGTH] = {0};
+	int *ptr;
+	char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+	struct msm_isp_statistics  *stats = (struct msm_isp_statistics *)
+		t_file->private_data;
+	ptr = (int *)(stats);
+	for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+		strlcat(name, stats_str[i], sizeof(name));
+		strlcat(name, "     ", sizeof(name));
+		snprintf(buffer, sizeof(buffer), "%d", ptr[i]);
+		strlcat(name, buffer, sizeof(name));
+		strlcat(name, "\r\n", sizeof(name));
+	}
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, name, strlen(name));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct msm_isp_statistics *stats = (struct msm_isp_statistics *)
+		t_file->private_data;
+	memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+	return sizeof(struct msm_isp_statistics);
+}
+
+static const struct file_operations vfe_debugfs_error = {
+	.open = vfe_debugfs_statistics_open,
+	.read = vfe_debugfs_statistics_read,
+	.write = vfe_debugfs_statistics_write,
+};
+
+static int msm_isp_enable_debugfs(struct msm_isp_statistics *stats)
+{
+	struct dentry *debugfs_base;
+	debugfs_base = debugfs_create_dir("msm_isp", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+	if (!debugfs_create_file("stats", S_IRUGO | S_IWUSR, debugfs_base,
+		stats, &vfe_debugfs_error))
+		return -ENOMEM;
+	return 0;
+}
 static int __devinit vfe_probe(struct platform_device *pdev)
 {
 	struct vfe_device *vfe_dev;
@@ -73,7 +147,13 @@
 	};
 
 	vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
 	if (!vfe_dev) {
 		pr_err("%s: no enough memory\n", __func__);
 		return -ENOMEM;
 	}
+	vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
+	if (!vfe_dev->stats) {
+		pr_err("%s: no enough memory\n", __func__);
+		kfree(vfe_dev);
+		return -ENOMEM;
+	}
@@ -144,6 +224,7 @@
 		kfree(vfe_dev);
 		return -EINVAL;
 	}
+	msm_isp_enable_debugfs(vfe_dev->stats);
 	vfe_dev->buf_mgr->ops->register_ctx(vfe_dev->buf_mgr,
 		&vfe_dev->iommu_ctx[0], vfe_dev->hw_info->num_iommu_ctx);
 	vfe_dev->vfe_open_cnt = 0;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 334a293..4700469 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -24,7 +24,6 @@
 #include <media/msmb_isp.h>
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
-
 #include "msm_buf_mgr.h"
 
 #define MAX_IOMMU_CTX 2
@@ -409,6 +408,24 @@
 	uint32_t frame_id;
 };
 
+struct msm_isp_statistics {
+	int32_t imagemaster0_overflow;
+	int32_t imagemaster1_overflow;
+	int32_t imagemaster2_overflow;
+	int32_t imagemaster3_overflow;
+	int32_t imagemaster4_overflow;
+	int32_t imagemaster5_overflow;
+	int32_t imagemaster6_overflow;
+	int32_t be_overflow;
+	int32_t bg_overflow;
+	int32_t bf_overflow;
+	int32_t awb_overflow;
+	int32_t rs_overflow;
+	int32_t cs_overflow;
+	int32_t ihist_overflow;
+	int32_t skinbhist_overflow;
+};
+
 struct vfe_device {
 	struct platform_device *pdev;
 	struct msm_sd_subdev subdev;
@@ -460,6 +477,7 @@
 	void __iomem *p_avtimer_msw;
 	void __iomem *p_avtimer_lsw;
 	uint8_t ignore_error;
+	struct msm_isp_statistics *stats;
 };
 
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index de67fa0..cf76131 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,6 +41,8 @@
 	(~(ping_pong >> (idx + VFE32_STATS_PING_PONG_OFFSET)) & 0x1))
 
 #define VFE32_CLK_IDX 0
+#define MSM_ISP32_TOTAL_WM_UB 792
+
 static struct msm_cam_clk_info msm_vfe32_1_clk_info[] = {
 	/*vfe32 clock info for B-family: 8610 */
 	{"vfe_clk_src", 266670000},
@@ -253,7 +255,6 @@
 static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
 {
 	uint32_t error_status1 = vfe_dev->error_info.error_mask1;
-
 	if (error_status1 & BIT(0))
 		pr_err("%s: camif error status: 0x%x\n",
 			__func__, vfe_dev->error_info.camif_status);
@@ -273,34 +274,62 @@
 		pr_err("%s: violation\n", __func__);
 		msm_vfe32_process_violation_status(vfe_dev);
 	}
-	if (error_status1 & BIT(8))
+	if (error_status1 & BIT(8)) {
+		vfe_dev->stats->imagemaster0_overflow++;
 		pr_err("%s: image master 0 bus overflow\n", __func__);
-	if (error_status1 & BIT(9))
+	}
+	if (error_status1 & BIT(9)) {
+		vfe_dev->stats->imagemaster1_overflow++;
 		pr_err("%s: image master 1 bus overflow\n", __func__);
-	if (error_status1 & BIT(10))
+	}
+	if (error_status1 & BIT(10)) {
+		vfe_dev->stats->imagemaster2_overflow++;
 		pr_err("%s: image master 2 bus overflow\n", __func__);
-	if (error_status1 & BIT(11))
+	}
+	if (error_status1 & BIT(11)) {
+		vfe_dev->stats->imagemaster3_overflow++;
 		pr_err("%s: image master 3 bus overflow\n", __func__);
-	if (error_status1 & BIT(12))
+	}
+	if (error_status1 & BIT(12)) {
+		vfe_dev->stats->imagemaster4_overflow++;
 		pr_err("%s: image master 4 bus overflow\n", __func__);
-	if (error_status1 & BIT(13))
+	}
+	if (error_status1 & BIT(13)) {
+		vfe_dev->stats->imagemaster5_overflow++;
 		pr_err("%s: image master 5 bus overflow\n", __func__);
-	if (error_status1 & BIT(14))
+	}
+	if (error_status1 & BIT(14)) {
+		vfe_dev->stats->imagemaster6_overflow++;
 		pr_err("%s: image master 6 bus overflow\n", __func__);
-	if (error_status1 & BIT(15))
+	}
+	if (error_status1 & BIT(15)) {
+		vfe_dev->stats->bg_overflow++;
 		pr_err("%s: status ae/bg bus overflow\n", __func__);
-	if (error_status1 & BIT(16))
+	}
+	if (error_status1 & BIT(16)) {
+		vfe_dev->stats->bf_overflow++;
 		pr_err("%s: status af/bf bus overflow\n", __func__);
-	if (error_status1 & BIT(17))
+	}
+	if (error_status1 & BIT(17)) {
+		vfe_dev->stats->awb_overflow++;
 		pr_err("%s: status awb bus overflow\n", __func__);
-	if (error_status1 & BIT(18))
+	}
+	if (error_status1 & BIT(18)) {
+		vfe_dev->stats->rs_overflow++;
 		pr_err("%s: status rs bus overflow\n", __func__);
-	if (error_status1 & BIT(19))
+	}
+	if (error_status1 & BIT(19)) {
+		vfe_dev->stats->cs_overflow++;
 		pr_err("%s: status cs bus overflow\n", __func__);
-	if (error_status1 & BIT(20))
+	}
+	if (error_status1 & BIT(20)) {
+		vfe_dev->stats->ihist_overflow++;
 		pr_err("%s: status ihist bus overflow\n", __func__);
-	if (error_status1 & BIT(21))
+	}
+	if (error_status1 & BIT(21)) {
+		vfe_dev->stats->skinbhist_overflow++;
 		pr_err("%s: status skin bhist bus overflow\n", __func__);
+	}
 	if (error_status1 & BIT(22))
 		pr_err("%s: axi error\n", __func__);
 }
@@ -787,7 +816,44 @@
 	msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
 }
 
-static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
+static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t ub_offset = 0;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t total_image_size = 0;
+	uint32_t num_used_wms = 0;
+	uint32_t prop_size = 0;
+	uint32_t wm_ub_size;
+	uint64_t delta;
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (axi_data->free_wm[i] > 0) {
+			num_used_wms++;
+			total_image_size += axi_data->wm_image_size[i];
+		}
+	}
+	prop_size = MSM_ISP32_TOTAL_WM_UB -
+		axi_data->hw_info->min_wm_ub * num_used_wms;
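+	/* Each WM gets min_wm_ub plus an image-size-proportional share */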
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (axi_data->free_wm[i]) {
+			delta =
+				(uint64_t)(axi_data->wm_image_size[i] *
+					prop_size);
+			do_div(delta, total_image_size);
+			wm_ub_size = axi_data->hw_info->min_wm_ub +
+				(uint32_t)delta;
+			msm_camera_io_w(ub_offset << 16 |
+				(wm_ub_size - 1), vfe_dev->vfe_base +
+					VFE32_WM_BASE(i) + 0xC);
+			ub_offset += wm_ub_size;
+		} else
+			msm_camera_io_w(0,
+				vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
+	}
+}
+
+static void msm_vfe32_cfg_axi_ub_equal_slicing(struct vfe_device *vfe_dev)
 {
 	int i;
 	uint32_t ub_offset = 0;
@@ -809,6 +875,16 @@
 	}
 }
 
+static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+	if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+		msm_vfe32_cfg_axi_ub_equal_slicing(vfe_dev);
+	else
+		msm_vfe32_cfg_axi_ub_equal_default(vfe_dev);
+}
+
 static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
 		uint8_t wm_idx, uint32_t pingpong_status, unsigned long paddr)
 {
@@ -1069,6 +1145,7 @@
 	.num_comp_mask = 3,
 	.num_rdi = 3,
 	.num_rdi_master = 3,
+	.min_wm_ub = 64,
 };
 
 static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 827dbb0..04136d0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -480,44 +480,74 @@
 		pr_err_ratelimited("%s: violation\n", __func__);
 		msm_vfe40_process_violation_status(vfe_dev);
 	}
-	if (error_status1 & (1 << 9))
+	if (error_status1 & (1 << 9)) {
+		vfe_dev->stats->imagemaster0_overflow++;
 		pr_err_ratelimited("%s: image master 0 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 10))
+	}
+	if (error_status1 & (1 << 10)) {
+		vfe_dev->stats->imagemaster1_overflow++;
 		pr_err_ratelimited("%s: image master 1 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 11))
+	}
+	if (error_status1 & (1 << 11)) {
+		vfe_dev->stats->imagemaster2_overflow++;
 		pr_err_ratelimited("%s: image master 2 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 12))
+	}
+	if (error_status1 & (1 << 12)) {
+		vfe_dev->stats->imagemaster3_overflow++;
 		pr_err_ratelimited("%s: image master 3 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 13))
+	}
+	if (error_status1 & (1 << 13)) {
+		vfe_dev->stats->imagemaster4_overflow++;
 		pr_err_ratelimited("%s: image master 4 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 14))
+	}
+	if (error_status1 & (1 << 14)) {
+		vfe_dev->stats->imagemaster5_overflow++;
 		pr_err_ratelimited("%s: image master 5 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 15))
+	}
+	if (error_status1 & (1 << 15)) {
+		vfe_dev->stats->imagemaster6_overflow++;
 		pr_err_ratelimited("%s: image master 6 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 16))
+	}
+	if (error_status1 & (1 << 16)) {
+		vfe_dev->stats->be_overflow++;
 		pr_err_ratelimited("%s: status be bus overflow\n", __func__);
-	if (error_status1 & (1 << 17))
+	}
+	if (error_status1 & (1 << 17)) {
+		vfe_dev->stats->bg_overflow++;
 		pr_err_ratelimited("%s: status bg bus overflow\n", __func__);
-	if (error_status1 & (1 << 18))
+	}
+	if (error_status1 & (1 << 18)) {
+		vfe_dev->stats->bf_overflow++;
 		pr_err_ratelimited("%s: status bf bus overflow\n", __func__);
-	if (error_status1 & (1 << 19))
+	}
+	if (error_status1 & (1 << 19)) {
+		vfe_dev->stats->awb_overflow++;
 		pr_err_ratelimited("%s: status awb bus overflow\n", __func__);
-	if (error_status1 & (1 << 20))
+	}
+	if (error_status1 & (1 << 20)) {
+		vfe_dev->stats->rs_overflow++;
 		pr_err_ratelimited("%s: status rs bus overflow\n", __func__);
-	if (error_status1 & (1 << 21))
+	}
+	if (error_status1 & (1 << 21)) {
+		vfe_dev->stats->cs_overflow++;
 		pr_err_ratelimited("%s: status cs bus overflow\n", __func__);
-	if (error_status1 & (1 << 22))
+	}
+	if (error_status1 & (1 << 22)) {
+		vfe_dev->stats->ihist_overflow++;
 		pr_err_ratelimited("%s: status ihist bus overflow\n", __func__);
-	if (error_status1 & (1 << 23))
+	}
+	if (error_status1 & (1 << 23)) {
+		vfe_dev->stats->skinbhist_overflow++;
 		pr_err_ratelimited("%s: status skin bhist bus overflow\n",
 			__func__);
+	}
 }
 
 static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 1ca85bd..a563f68 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -342,12 +342,6 @@
 	case HAL_EXTRADATA_RECOVERY_POINT_SEI:
 		ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
 		break;
-	case HAL_EXTRADATA_CLOSED_CAPTION_UD:
-		ret = HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_AFD_UD:
-		ret = HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA;
-		break;
 	case HAL_EXTRADATA_MULTISLICE_INFO:
 		ret = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO;
 		break;
@@ -372,6 +366,9 @@
 	case HAL_EXTRADATA_METADATA_MBI:
 		ret = HFI_PROPERTY_PARAM_VENC_MBI_DUMPING;
 		break;
+	case HAL_EXTRADATA_STREAM_USERDATA:
+		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
+		break;
 	default:
 		dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
 		break;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index f4ad985..c6fb382 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -49,7 +49,7 @@
 		vidc_err = VIDC_ERR_NOT_SUPPORTED;
 		break;
 	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
-		vidc_err = VIDC_ERR_MAX_CLIENT;
+		vidc_err = VIDC_ERR_MAX_CLIENTS;
 		break;
 	case HFI_ERR_SYS_SESSION_IN_USE:
 		vidc_err = VIDC_ERR_CLIENT_PRESENT;
@@ -75,6 +75,8 @@
 		vidc_err = VIDC_ERR_FAIL;
 		break;
 	}
+	if (vidc_err != VIDC_ERR_NONE)
+		dprintk(VIDC_ERR, "HFI Error: %d\n", vidc_err);
 	return vidc_err;
 }
 
@@ -108,7 +110,8 @@
 	struct hfi_frame_size frame_sz;
 	u8 *data_ptr;
 	int prop_id;
-	dprintk(VIDC_DBG, "RECEIVED:EVENT_NOTIFY");
+	dprintk(VIDC_DBG, "RECEIVED: EVENT_NOTIFY[%u]: %d, 0x%x\n",
+		pkt->session_id, pkt->event_data1, pkt->event_data2);
 	if (sizeof(struct hfi_msg_event_notify_packet)
 		> pkt->size) {
 		dprintk(VIDC_ERR, "hal_process_session_init_done:bad_pkt_size");
@@ -248,27 +251,33 @@
 		hfi_process_sys_error(callback, device_id);
 		break;
 	case HFI_EVENT_SESSION_ERROR:
-		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR");
+		dprintk(VIDC_INFO,
+			"HFI_EVENT_SESSION_ERROR[%u]\n", pkt->session_id);
 		if (!validate_session_pkt(sessions, sess, session_lock))
 			hfi_process_session_error(callback, device_id, pkt);
 		break;
 	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
-		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED");
+		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED[%u]\n",
+			pkt->session_id);
 		if (!validate_session_pkt(sessions, sess, session_lock))
 			hfi_process_sess_evt_seq_changed(callback,
 				device_id, pkt);
 		break;
 	case HFI_EVENT_SESSION_PROPERTY_CHANGED:
-		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_PROPERTY_CHANGED");
+		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_PROPERTY_CHANGED[%u]\n",
+			pkt->session_id);
 		break;
 	case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
-		dprintk(VIDC_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE\n");
+		dprintk(VIDC_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE[%u]\n",
+			pkt->session_id);
 		if (!validate_session_pkt(sessions, sess, session_lock))
 			hfi_process_evt_release_buffer_ref(callback,
 				device_id, pkt);
 		break;
 	default:
-		dprintk(VIDC_WARN, "hal_process_event_notify:unkown_event_id");
+		dprintk(VIDC_WARN,
+			"hal_process_event_notify: unknown_event_id[%u]\n",
+			pkt->session_id);
 		break;
 	}
 }
@@ -758,7 +767,8 @@
 	struct msm_vidc_cb_cmd_done cmd_done;
 	struct buffer_requirements buff_req;
 
-	dprintk(VIDC_DBG, "Received SESSION_PROPERTY_INFO");
+	dprintk(VIDC_DBG, "Received SESSION_PROPERTY_INFO[%u]\n",
+		pkt->session_id);
 
 	if (pkt->size < sizeof(struct hfi_msg_session_property_info_packet)) {
 		dprintk(VIDC_ERR, "hal_process_session_prop_info:bad_pkt_size");
@@ -800,7 +810,8 @@
 	struct msm_vidc_cb_cmd_done cmd_done;
 	struct vidc_hal_session_init_done session_init_done;
 	struct hal_session *sess_close = NULL;
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_INIT_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_INIT_DONE[%u]\n",
+		pkt->session_id);
 	if (sizeof(struct hfi_msg_sys_session_init_done_packet)
 		> pkt->size) {
 		dprintk(VIDC_ERR, "hal_process_session_init_done:bad_pkt_size");
@@ -839,7 +850,8 @@
 		struct hfi_msg_session_load_resources_done_packet *pkt)
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_LOAD_RESOURCES_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_LOAD_RESOURCES_DONE[%u]\n",
+		pkt->session_id);
 
 	if (sizeof(struct hfi_msg_session_load_resources_done_packet) !=
 		pkt->size) {
@@ -865,7 +877,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_FLUSH_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_FLUSH_DONE[%u]\n",
+		pkt->session_id);
 
 	if (sizeof(struct hfi_msg_session_flush_done_packet) != pkt->size) {
 		dprintk(VIDC_ERR, "hal_process_session_flush_done: "
@@ -889,7 +902,8 @@
 {
 	struct msm_vidc_cb_data_done data_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_ETB_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_ETB_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size <
 		sizeof(struct hfi_msg_session_empty_buffer_done_packet)) {
@@ -930,7 +944,8 @@
 
 	session = (struct hal_session *)
 		((struct hal_session *)	pack->session_id)->session_id;
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_FTB_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_FTB_DONE[%u]\n",
+		pack->session_id);
 
 	memset(&data_done, 0, sizeof(struct msm_vidc_cb_data_done));
 
@@ -1028,7 +1043,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_START_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_START_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_session_start_done_packet)) {
@@ -1053,7 +1069,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_STOP_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_STOP_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_session_stop_done_packet)) {
@@ -1078,7 +1095,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_RELEASE_RESOURCES_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_RELEASE_RESOURCES_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_session_release_resources_done_packet)) {
@@ -1102,6 +1120,8 @@
 		struct hfi_msg_session_release_buffers_done_packet *pkt)
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
+	dprintk(VIDC_DBG, "RECEIVED:SESSION_RELEASE_BUFFER_DONE[%u]\n",
+		pkt->session_id);
 	if (!pkt || pkt->size !=
 		sizeof(struct
 			   hfi_msg_session_release_buffers_done_packet)) {
@@ -1129,7 +1149,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_END_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_END_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_sys_session_end_done_packet)) {
@@ -1154,8 +1175,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_ABORT_DONE");
-
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_ABORT_DONE[%u]\n",
+		pkt->session_id);
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_sys_session_abort_done_packet)) {
 		dprintk(VIDC_ERR, "%s: bad packet/packet size: %d",
@@ -1184,6 +1205,8 @@
 		dprintk(VIDC_ERR, "bad packet/packet size: %d", pkt->size);
 		return;
 	}
+	dprintk(VIDC_DBG, "RECEIVED:SESSION_GET_SEQ_HDR_DONE[%u]\n",
+		pkt->session_id);
 	memset(&data_done, 0, sizeof(struct msm_vidc_cb_data_done));
 	data_done.device_id = device_id;
 	data_done.size = sizeof(struct msm_vidc_cb_data_done);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 8a0dfc2..7d27cea 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -211,7 +211,7 @@
 		.name = "Extradata Type",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA,
 		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
 		.menu_skip_mask = ~(
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -224,8 +224,6 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
@@ -234,7 +232,8 @@
 			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 		.step = 0,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 1ba5fcc..9504817 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -657,8 +657,6 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index c8cd75e..cd06108 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -391,8 +391,9 @@
 		&inst->completions[SESSION_MSG_INDEX(cmd)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "Wait interrupted or timeout: %d\n",
-				SESSION_MSG_INDEX(cmd));
+		dprintk(VIDC_ERR,
+			"%s: Wait interrupted or timeout[%u]: %d\n",
+			__func__, (u32)inst->session, SESSION_MSG_INDEX(cmd));
 		msm_comm_recover_from_session_error(inst);
 		rc = -EIO;
 	} else {
@@ -440,6 +441,21 @@
 	mutex_unlock(&inst->lock);
 }
 
+static void msm_comm_generate_max_clients_error(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	mutex_lock(&inst->sync_lock);
+	inst->session = NULL;
+	inst->state = MSM_VIDC_CORE_INVALID;
+	msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_MAX_CLIENTS);
+	dprintk(VIDC_WARN,
+			"%s: Too many clients\n", __func__);
+	mutex_unlock(&inst->sync_lock);
+}
+
 static void handle_session_init_done(enum command_response cmd, void *data)
 {
 	struct msm_vidc_cb_cmd_done *response = data;
@@ -477,7 +493,10 @@
 			dprintk(VIDC_ERR,
 				"Session init response from FW : 0x%x",
 				response->status);
-			msm_comm_generate_session_error(inst);
+			if (response->status == VIDC_ERR_MAX_CLIENTS)
+				msm_comm_generate_max_clients_error(inst);
+			else
+				msm_comm_generate_session_error(inst);
 		}
 		signal_session_msg_receipt(cmd, inst);
 	} else {
@@ -1573,8 +1592,8 @@
 		&core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "Wait interrupted or timeout: %d\n",
-				SYS_MSG_INDEX(RELEASE_RESOURCE_DONE));
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout: %d\n",
+			__func__, SYS_MSG_INDEX(RELEASE_RESOURCE_DONE));
 		rc = -EIO;
 	}
 release_ocmem_failed:
@@ -1596,8 +1615,8 @@
 		&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "Wait interrupted or timeout: %d\n",
-				SYS_MSG_INDEX(SYS_INIT_DONE));
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout: %d\n",
+			__func__, SYS_MSG_INDEX(SYS_INIT_DONE));
 		rc = -EIO;
 		goto exit;
 	} else {
@@ -2543,13 +2562,6 @@
 			mutex_unlock(&inst->sync_lock);
 	} else {
 		int64_t time_usec = timeval_to_ns(&vb->v4l2_buf.timestamp);
-
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto err_no_mem;
-		}
 		do_div(time_usec, NSEC_PER_USEC);
 		memset(&frame_data, 0 , sizeof(struct vidc_frame_data));
 		frame_data.alloc_len = vb->v4l2_planes[0].length;
@@ -2693,7 +2705,8 @@
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
 		dprintk(VIDC_ERR,
-			"Wait interrupted or timeout: %d\n",
+			"%s: Wait interrupted or timeout[%u]: %d\n",
+			__func__, (u32)inst->session,
 			SESSION_MSG_INDEX(SESSION_PROPERTY_INFO));
 		inst->state = MSM_VIDC_CORE_INVALID;
 		msm_comm_recover_from_session_error(inst);
@@ -3240,12 +3253,6 @@
 	case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
 		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
 		break;
-	case V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD:
-		ret = HAL_EXTRADATA_CLOSED_CAPTION_UD;
-		break;
-	case V4L2_MPEG_VIDC_EXTRADATA_AFD_UD:
-		ret = HAL_EXTRADATA_AFD_UD;
-		break;
 	case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
 		ret = HAL_EXTRADATA_MULTISLICE_INFO;
 		break;
@@ -3273,6 +3280,9 @@
 	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
 		ret = HAL_EXTRADATA_METADATA_MBI;
 		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA:
+		ret = HAL_EXTRADATA_STREAM_USERDATA;
+		break;
 	default:
 		dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
 		break;
@@ -3447,19 +3457,25 @@
 				capability->height.min);
 			rc = -ENOTSUPP;
 		}
-		if (!rc) {
-			rc = call_hfi_op(hdev, capability_check,
-				inst->fmts[OUTPUT_PORT]->fourcc,
+		if (msm_vp8_low_tier &&
+			inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
+			capability->width.max = DEFAULT_WIDTH;
+			capability->height.max = DEFAULT_HEIGHT;
+		}
+		if (!rc && (inst->prop.width[CAPTURE_PORT] >
+			capability->width.max)) {
+			dprintk(VIDC_ERR,
+				"Unsupported width = %u supported max width = %u\n",
 				inst->prop.width[CAPTURE_PORT],
-				&capability->width.max,
-				&capability->height.max);
+				capability->width.max);
+			rc = -ENOTSUPP;
 		}
 
 		if (!rc && (inst->prop.height[CAPTURE_PORT]
 			* inst->prop.width[CAPTURE_PORT] >
 			capability->width.max * capability->height.max)) {
 			dprintk(VIDC_ERR,
-			"Unsupported WxH = (%u)x(%u), Max supported is - (%u)x(%u)",
+			"Unsupported WxH = (%u)x(%u), Max supported is - (%u)x(%u)\n",
 			inst->prop.width[CAPTURE_PORT],
 			inst->prop.height[CAPTURE_PORT],
 			capability->width.max, capability->height.max);
@@ -3470,7 +3486,11 @@
 		mutex_lock(&inst->sync_lock);
 		inst->state = MSM_VIDC_CORE_INVALID;
 		mutex_unlock(&inst->sync_lock);
-		msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_SYS_ERROR);
+		msm_vidc_queue_v4l2_event(inst,
+					V4L2_EVENT_MSM_VIDC_HW_OVERLOAD);
+		dprintk(VIDC_WARN,
+			"%s: Hardware is overloaded\n", __func__);
+		wake_up(&inst->kernel_event_queue);
 	}
 	return rc;
 }
@@ -3520,8 +3540,9 @@
 		&inst->completions[SESSION_MSG_INDEX(SESSION_ABORT_DONE)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout: %d\n",
-			__func__, SESSION_MSG_INDEX(SESSION_ABORT_DONE));
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout[%u]: %d\n",
+			__func__, (u32)inst->session,
+			SESSION_MSG_INDEX(SESSION_ABORT_DONE));
 		msm_comm_generate_sys_error(inst);
 	} else
 		change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 0092bcb..37b1e72 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -96,6 +96,12 @@
 
 static int venus_hfi_power_enable(void *dev);
 
+static inline int venus_hfi_prepare_enable_clks(
+	struct venus_hfi_device *device);
+
+static inline void venus_hfi_disable_unprepare_clks(
+	struct venus_hfi_device *device);
+
 static unsigned long venus_hfi_get_clock_rate(struct venus_core_clock *clock,
 		int num_mbs_per_sec);
 
@@ -488,7 +494,7 @@
 	}
 
 	base_addr = device->hal_data->register_base_addr;
-	if (!device->clocks_enabled) {
+	if (device->clk_state != ENABLED_PREPARED) {
 		dprintk(VIDC_WARN,
 			"HFI Write register failed : Clocks are OFF\n");
 		return;
@@ -528,7 +534,7 @@
 	}
 
 	base_addr = device->hal_data->register_base_addr;
-	if (!device->clocks_enabled) {
+	if (device->clk_state != ENABLED_PREPARED) {
 		dprintk(VIDC_WARN,
 			"HFI Read register failed : Clocks are OFF\n");
 		return -EINVAL;
@@ -1062,7 +1068,7 @@
 	}
 	WARN(!mutex_is_locked(&device->clk_pwr_lock),
 				"Clock/power lock must be acquired");
-	if (device->clocks_enabled) {
+	if (device->clk_state == ENABLED_PREPARED) {
 		dprintk(VIDC_DBG, "Clocks already enabled");
 		return 0;
 	}
@@ -1077,7 +1083,7 @@
 			dprintk(VIDC_DBG, "Clock: %s enabled\n", cl->name);
 		}
 	}
-	device->clocks_enabled = 1;
+	device->clk_state = ENABLED_PREPARED;
 	++device->clk_cnt;
 	return 0;
 fail_clk_enable:
@@ -1086,6 +1092,7 @@
 		usleep(100);
 		clk_disable(cl->clk);
 	}
+	device->clk_state = DISABLED_PREPARED;
 	return rc;
 }
 
@@ -1100,7 +1107,7 @@
 	}
 	WARN(!mutex_is_locked(&device->clk_pwr_lock),
 			"Clock/power lock must be acquired");
-	if (!device->clocks_enabled) {
+	if (device->clk_state != ENABLED_PREPARED) {
 		dprintk(VIDC_DBG, "Clocks already disabled");
 		return;
 	}
@@ -1118,7 +1125,7 @@
 		usleep(100);
 		clk_disable(cl->clk);
 	}
-	device->clocks_enabled = 0;
+	device->clk_state = DISABLED_PREPARED;
 	--device->clk_cnt;
 }
 
@@ -1133,9 +1140,11 @@
 		dprintk(VIDC_ERR, "Invalid input: %p\n", device);
 		return -EINVAL;
 	}
+	mutex_lock(&device->clk_pwr_lock);
 	if (venus_hfi_clk_gating_off(device)) {
 		dprintk(VIDC_ERR, "Failed to turn off clk gating\n");
-		return -EIO;
+		rc = -EIO;
+		goto err_clk_gating_off;
 	}
 	/* Halt AXI and AXI OCMEM VBIF Access */
 	reg = venus_hfi_read_register(device, VENUS_VBIF_AXI_HALT_CTRL0);
@@ -1150,6 +1159,8 @@
 			VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US);
 	if (rc)
 		dprintk(VIDC_WARN, "AXI bus port halt timeout\n");
+err_clk_gating_off:
+	mutex_unlock(&device->clk_pwr_lock);
 	return rc;
 }
 
@@ -1177,7 +1188,7 @@
 		venus_hfi_clk_disable(device);
 		return rc;
 	}
-	venus_hfi_clk_disable(device);
+	venus_hfi_disable_unprepare_clks(device);
 	venus_hfi_iommu_detach(device);
 	rc = regulator_disable(device->gdsc);
 	if (rc) {
@@ -1189,7 +1200,7 @@
 	else
 		venus_hfi_unvote_buses(device, DDR_MEM);
 
-	device->power_enabled = 0;
+	device->power_enabled = false;
 	--device->pwr_cnt;
 	dprintk(VIDC_INFO, "entering power collapse\n");
 already_disabled:
@@ -1225,7 +1236,11 @@
 		goto err_iommu_attach;
 	}
 
-	rc = venus_hfi_clk_enable(device);
+	if (device->clk_state == DISABLED_UNPREPARED)
+		rc = venus_hfi_prepare_enable_clks(device);
+	else if (device->clk_state == DISABLED_PREPARED)
+		rc = venus_hfi_clk_enable(device);
+
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to enable clocks");
 		goto err_enable_clk;
@@ -1277,7 +1292,7 @@
 		dprintk(VIDC_ERR, "Failed to allocate OCMEM");
 		goto err_alloc_ocmem;
 	}
-	device->power_enabled = 1;
+	device->power_enabled = true;
 	++device->pwr_cnt;
 	dprintk(VIDC_INFO, "resuming from power collapse\n");
 	return rc;
@@ -1307,11 +1322,21 @@
 		dprintk(VIDC_ERR, "Invalid params: %p\n", device);
 		return -EINVAL;
 	}
-	mutex_lock(&device->clk_pwr_lock);
-	if (!device->power_enabled)
-		rc = venus_hfi_power_on(device);
-	mutex_unlock(&device->clk_pwr_lock);
 
+	mutex_lock(&device->clk_pwr_lock);
+	if (!device->power_enabled) {
+		rc = venus_hfi_power_on(device);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed venus power on");
+			goto fail_power_on;
+		}
+	}
+	rc = venus_hfi_clk_gating_off(device);
+	if (rc)
+		dprintk(VIDC_ERR, "%s : Clock enable failed\n", __func__);
+
+fail_power_on:
+	mutex_unlock(&device->clk_pwr_lock);
 	return rc;
 }
 
@@ -1325,7 +1350,7 @@
 		dprintk(VIDC_ERR, "Invalid params: %p\n", device);
 		return -EINVAL;
 	}
-	if (device->clocks_enabled) {
+	if (device->clk_state == ENABLED_PREPARED) {
 		dprintk(VIDC_DBG, "Clocks are already enabled");
 		goto already_enabled;
 	}
@@ -1347,7 +1372,7 @@
 			        VIDC_WRAPPER_INTR_MASK, VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK, 0);
 	}
 already_enabled:
-	device->clocks_enabled = 1;
+	device->clk_state = ENABLED_PREPARED;
 fail_clk_power_on:
 	return rc;
 }
@@ -1963,7 +1988,7 @@
 		dprintk(VIDC_ERR, "Invalid params: %p\n", device);
 		return;
 	}
-	if (!device->clocks_enabled) {
+	if (device->clk_state != ENABLED_PREPARED) {
 		dprintk(VIDC_DBG, "Clocks are already disabled");
 		goto already_disabled;
 	}
@@ -1980,7 +2005,7 @@
 			msecs_to_jiffies(msm_vidc_pwr_collapse_delay)))
 		dprintk(VIDC_DBG, "PM work already scheduled\n");
 already_disabled:
-	device->clocks_enabled = 0;
+	device->clk_state = DISABLED_PREPARED;
 }
 
 static void venus_hfi_core_clear_interrupt(struct venus_hfi_device *device)
@@ -2781,10 +2806,10 @@
 	struct venus_hfi_device *device = list_first_entry(
 			&hal_ctxt.dev_head, struct venus_hfi_device, list);
 	mutex_lock(&device->clk_pwr_lock);
-	if (device->clocks_enabled || !device->power_enabled) {
+	if (device->clk_state == ENABLED_PREPARED || !device->power_enabled) {
 		dprintk(VIDC_DBG,
 				"Clocks status: %d, Power status: %d, ignore power off\n",
-				device->clocks_enabled, device->power_enabled);
+				device->clk_state, device->power_enabled);
 		goto clks_enabled;
 	}
 	mutex_unlock(&device->clk_pwr_lock);
@@ -2805,7 +2830,7 @@
 	}
 
 	mutex_lock(&device->clk_pwr_lock);
-	if (device->clocks_enabled) {
+	if (device->clk_state == ENABLED_PREPARED) {
 		dprintk(VIDC_ERR,
 				"Clocks are still enabled after PC_PREP_DONE, ignore power off");
 		goto clks_enabled;
@@ -3091,7 +3116,9 @@
 		clk_put(device->resources.clock[i].clk);
 	}
 }
-static inline void venus_hfi_disable_clks(struct venus_hfi_device *device)
+
+static inline void venus_hfi_disable_unprepare_clks(
+	struct venus_hfi_device *device)
 {
 	int i;
 	struct venus_core_clock *cl;
@@ -3099,8 +3126,9 @@
 		dprintk(VIDC_ERR, "Invalid params: %p\n", device);
 		return;
 	}
-	mutex_lock(&device->clk_pwr_lock);
-	if (device->clocks_enabled) {
+
+	WARN_ON(!mutex_is_locked(&device->clk_pwr_lock));
+	if (device->clk_state == ENABLED_PREPARED) {
 		for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
 			if (i == VCODEC_OCMEM_CLK && !device->res->has_ocmem)
 				continue;
@@ -3122,11 +3150,11 @@
 		cl = &device->resources.clock[i];
 		clk_unprepare(cl->clk);
 	}
-	device->clocks_enabled = 0;
+	device->clk_state = DISABLED_UNPREPARED;
 	--device->clk_cnt;
-	mutex_unlock(&device->clk_pwr_lock);
 }
-static inline int venus_hfi_enable_clks(struct venus_hfi_device *device)
+
+static inline int venus_hfi_prepare_enable_clks(struct venus_hfi_device *device)
 {
 	int i = 0;
 	struct venus_core_clock *cl;
@@ -3135,7 +3163,12 @@
 		dprintk(VIDC_ERR, "Invalid params: %p\n", device);
 		return -EINVAL;
 	}
-	mutex_lock(&device->clk_pwr_lock);
+	WARN_ON(!mutex_is_locked(&device->clk_pwr_lock));
+
+	if (device->clk_state == ENABLED_PREPARED) {
+		dprintk(VIDC_DBG, "Clocks already prepared and enabled\n");
+		return 0;
+	}
 	for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
 		if (i == VCODEC_OCMEM_CLK && !device->res->has_ocmem)
 			continue;
@@ -3148,9 +3181,8 @@
 			dprintk(VIDC_DBG, "Clock: %s enabled\n", cl->name);
 		}
 	}
-	device->clocks_enabled = 1;
+	device->clk_state = ENABLED_PREPARED;
 	++device->clk_cnt;
-	mutex_unlock(&device->clk_pwr_lock);
 	return rc;
 fail_clk_enable:
 	for (; i >= 0; i--) {
@@ -3158,9 +3190,10 @@
 		usleep(100);
 		clk_disable_unprepare(cl->clk);
 	}
-	mutex_unlock(&device->clk_pwr_lock);
+	device->clk_state = DISABLED_UNPREPARED;
 	return rc;
 }
+
 static int venus_hfi_register_iommu_domains(struct venus_hfi_device *device,
 					struct msm_vidc_platform_resources *res)
 {
@@ -3519,17 +3552,16 @@
 		mutex_unlock(&device->clk_pwr_lock);
 		goto fail_load_fw;
 	}
-	device->power_enabled = 1;
+	device->power_enabled = true;
 	++device->pwr_cnt;
-	mutex_unlock(&device->clk_pwr_lock);
 	/*Clocks can be enabled only after pil_get since
 	 * gdsc is turned-on in pil_get*/
-	rc = venus_hfi_enable_clks(device);
+	rc = venus_hfi_prepare_enable_clks(device);
+	mutex_unlock(&device->clk_pwr_lock);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to enable clocks: %d\n", rc);
 		goto fail_enable_clks;
 	}
-
 	rc = protect_cp_mem(device);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to protect memory\n");
@@ -3538,14 +3570,16 @@
 
 	return rc;
 fail_protect_mem:
-	venus_hfi_disable_clks(device);
+	mutex_lock(&device->clk_pwr_lock);
+	venus_hfi_disable_unprepare_clks(device);
+	mutex_unlock(&device->clk_pwr_lock);
 fail_enable_clks:
 	subsystem_put(device->resources.fw.cookie);
 fail_load_fw:
 	mutex_lock(&device->clk_pwr_lock);
 	device->resources.fw.cookie = NULL;
 	regulator_disable(device->gdsc);
-	device->power_enabled = 0;
+	device->power_enabled = false;
 	--device->pwr_cnt;
 	mutex_unlock(&device->clk_pwr_lock);
 fail_enable_gdsc:
@@ -3574,10 +3608,10 @@
 		 */
 		if(venus_hfi_halt_axi(device))
 			dprintk(VIDC_WARN, "Failed to halt AXI\n");
-		venus_hfi_disable_clks(device);
 		mutex_lock(&device->clk_pwr_lock);
+		venus_hfi_disable_unprepare_clks(device);
 		regulator_disable(device->gdsc);
-		device->power_enabled = 0;
+		device->power_enabled = false;
 		--device->pwr_cnt;
 		mutex_unlock(&device->clk_pwr_lock);
 		device->resources.fw.cookie = NULL;
@@ -3678,7 +3712,10 @@
 		rc = device->clk_cnt;
 		break;
 	case DEV_CLOCK_ENABLED:
-		rc = device->clocks_enabled;
+		if (device->clk_state == ENABLED_PREPARED)
+			rc = 1;
+		else
+			rc = 0;
 		break;
 	case DEV_PWR_COUNT:
 		rc = device->pwr_cnt;
@@ -3744,28 +3781,6 @@
 	return rc;
 }
 
-int venus_hfi_capability_check(u32 fourcc, u32 width,
-		u32 *max_width, u32 *max_height)
-{
-	int rc = 0;
-	if (!max_width || !max_height) {
-		dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	if (msm_vp8_low_tier && fourcc == V4L2_PIX_FMT_VP8) {
-		*max_width = DEFAULT_WIDTH;
-		*max_height = DEFAULT_HEIGHT;
-	}
-	if (width > *max_width) {
-		dprintk(VIDC_ERR,
-		"Unsupported width = %u supported max width = %u",
-		width, *max_width);
-		rc = -ENOTSUPP;
-	}
-	return rc;
-}
-
 static void *venus_hfi_add_device(u32 device_id,
 			struct msm_vidc_platform_resources *res,
 			hfi_cmd_response_callback callback)
@@ -3793,9 +3808,9 @@
 
 	hdevice->device_id = device_id;
 	hdevice->callback = callback;
-	hdevice->clocks_enabled = 0;
+	hdevice->clk_state = DISABLED_UNPREPARED;
 	hdevice->clk_cnt = 0;
-	hdevice->power_enabled = 0;
+	hdevice->power_enabled = false;
 	hdevice->pwr_cnt = 0;
 
 	hdevice->vidc_workq = create_singlethread_workqueue(
@@ -3927,7 +3942,6 @@
 	hdev->get_fw_info = venus_hfi_get_fw_info;
 	hdev->get_info = venus_hfi_get_info;
 	hdev->get_stride_scanline = venus_hfi_get_stride_scanline;
-	hdev->capability_check = venus_hfi_capability_check;
 	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
 	hdev->power_enable = venus_hfi_power_enable;
 }
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 1c1ee59..23a51ba 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -124,6 +124,12 @@
 	BUS_IDX_MAX
 };
 
+enum clock_state {
+	DISABLED_UNPREPARED,
+	ENABLED_PREPARED,
+	DISABLED_PREPARED
+};
+
 struct vidc_mem_addr {
 	u8 *align_device_addr;
 	u8 *align_virtual_addr;
@@ -190,8 +196,8 @@
 	u32 clk_load;
 	u32 bus_load[MSM_VIDC_MAX_DEVICES];
 	unsigned long ocmem_size;
-	u32 clocks_enabled;
-	u32 power_enabled;
+	enum clock_state clk_state;
+	bool power_enabled;
 	enum vidc_clocks clk_gating_level;
 	struct mutex read_lock;
 	struct mutex write_lock;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 75f583f..5566338 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -79,9 +79,8 @@
 #define HFI_EXTRADATA_FRAME_RATE			0x00000007
 #define HFI_EXTRADATA_PANSCAN_WINDOW		0x00000008
 #define HFI_EXTRADATA_RECOVERY_POINT_SEI	0x00000009
-#define HFI_EXTRADATA_CLOSED_CAPTION_UD		0x0000000A
-#define HFI_EXTRADATA_AFD_UD			0x0000000B
 #define HFI_EXTRADATA_MPEG2_SEQDISP		0x0000000D
+#define HFI_EXTRADATA_STREAM_USERDATA		0x0000000E
 #define HFI_EXTRADATA_FRAME_QP			0x0000000F
 #define HFI_EXTRADATA_FRAME_BITS_INFO		0x00000010
 #define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
@@ -190,10 +189,6 @@
 
 #define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY		\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00E)
-#define HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA	\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00F)
-#define HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA		\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x010)
 #define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA		\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
 #define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA		\
@@ -206,6 +201,8 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x015)
 #define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x016)
+#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x017)
 #define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x018)
 #define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA \
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 309164a..38c5bdb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -63,7 +63,7 @@
 	VIDC_ERR_BAD_HANDLE,
 	VIDC_ERR_NOT_SUPPORTED,
 	VIDC_ERR_BAD_STATE,
-	VIDC_ERR_MAX_CLIENT,
+	VIDC_ERR_MAX_CLIENTS,
 	VIDC_ERR_IFRAME_EXPECTED,
 	VIDC_ERR_HW_FATAL,
 	VIDC_ERR_BITSTREAM_ERR,
@@ -91,8 +91,6 @@
 	HAL_EXTRADATA_FRAME_RATE,
 	HAL_EXTRADATA_PANSCAN_WINDOW,
 	HAL_EXTRADATA_RECOVERY_POINT_SEI,
-	HAL_EXTRADATA_CLOSED_CAPTION_UD,
-	HAL_EXTRADATA_AFD_UD,
 	HAL_EXTRADATA_MULTISLICE_INFO,
 	HAL_EXTRADATA_INDEX,
 	HAL_EXTRADATA_NUM_CONCEALED_MB,
@@ -103,6 +101,7 @@
 	HAL_EXTRADATA_FRAME_BITS_INFO,
 	HAL_EXTRADATA_LTR_INFO,
 	HAL_EXTRADATA_METADATA_MBI,
+	HAL_EXTRADATA_STREAM_USERDATA,
 };
 
 enum hal_property {
@@ -1169,8 +1168,6 @@
 	int (*get_info) (void *dev, enum dev_info info);
 	int (*get_stride_scanline)(int color_fmt, int width,
 		int height,	int *stride, int *scanlines);
-	int (*capability_check)(u32 fourcc, u32 width,
-		u32 *max_width, u32 *max_height);
 	int (*session_clean)(void *sess);
 	int (*get_core_capabilities)(void);
 	int (*power_enable)(void *dev);
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 02441ec..07d3fde 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -3721,7 +3721,6 @@
 		}
 		saved_val = radio->mute_mode.hard_mute;
 		radio->mute_mode.hard_mute = ctrl->value;
-		radio->mute_mode.soft_mute = IOC_SFT_MUTE;
 		retval = hci_set_fm_mute_mode(
 				&radio->mute_mode,
 				radio->fm_hdev);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index b141963..23edc8a 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -641,7 +641,8 @@
 	mutex_lock(&qsee_bw_mutex);
 	qseecom.bw_scale_down_timer.expires = jiffies +
 		msecs_to_jiffies(duration);
-	add_timer(&(qseecom.bw_scale_down_timer));
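+	/* mod_timer() (re)arms the timer even if it is already pending, unlike add_timer() */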
+	mod_timer(&(qseecom.bw_scale_down_timer),
+		qseecom.bw_scale_down_timer.expires);
 	qseecom.timer_running = true;
 	mutex_unlock(&qsee_bw_mutex);
 }
@@ -2291,6 +2292,10 @@
 		if (!qseecom.support_bus_scaling) {
 			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
 			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(handle->dev);
+			mutex_unlock(&qsee_bw_mutex);
 		}
 	}
 	return ret;
@@ -3403,6 +3408,13 @@
 		/* Only one client allowed here at a time */
 		mutex_lock(&app_access_lock);
 		if (qseecom.support_bus_scaling) {
+			/* register bus bw in case the client doesn't do it */
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
 			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
 			if (ret) {
 				pr_err("Failed to set bw.\n");
@@ -3435,6 +3447,12 @@
 		/* Only one client allowed here at a time */
 		mutex_lock(&app_access_lock);
 		if (qseecom.support_bus_scaling) {
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
 			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
 			if (ret) {
 				pr_err("Failed to set bw.\n");
@@ -3607,6 +3625,10 @@
 		if (!qseecom.support_bus_scaling) {
 			qsee_disable_clock_vote(data, CLK_DFAB);
 			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(data);
+			mutex_unlock(&qsee_bw_mutex);
 		}
 		atomic_dec(&data->ioctl_count);
 		break;
@@ -4338,13 +4360,19 @@
 	struct qseecom_clk *qclk;
 	qclk = &qseecom.qsee;
 
-	if (qseecom.cumulative_mode != INACTIVE) {
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+
+	if (qseecom.cumulative_mode != INACTIVE &&
+		qseecom.current_mode != INACTIVE) {
 		ret = msm_bus_scale_client_update_request(
 			qseecom.qsee_perf_client, INACTIVE);
 		if (ret)
 			pr_err("Fail to scale down bus\n");
+		else
+			qseecom.current_mode = INACTIVE;
 	}
-	mutex_lock(&clk_access_lock);
+
 	if (qclk->clk_access_cnt) {
 		if (qclk->ce_clk != NULL)
 			clk_disable_unprepare(qclk->ce_clk);
@@ -4352,12 +4380,14 @@
 			clk_disable_unprepare(qclk->ce_core_clk);
 		if (qclk->ce_bus_clk != NULL)
 			clk_disable_unprepare(qclk->ce_bus_clk);
-		if (qseecom.timer_running) {
-			del_timer_sync(&(qseecom.bw_scale_down_timer));
-			qseecom.timer_running = false;
-		}
 	}
+
+	del_timer_sync(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = false;
+
 	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+
 	return 0;
 }
 
@@ -4368,6 +4398,8 @@
 	struct qseecom_clk *qclk;
 	qclk = &qseecom.qsee;
 
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
 	if (qseecom.cumulative_mode >= HIGH)
 		mode = HIGH;
 	else
@@ -4378,9 +4410,10 @@
 			qseecom.qsee_perf_client, mode);
 		if (ret)
 			pr_err("Fail to scale up bus to %d\n", mode);
+		else
+			qseecom.current_mode = mode;
 	}
 
-	mutex_lock(&clk_access_lock);
 	if (qclk->clk_access_cnt) {
 
 		ret = clk_prepare_enable(qclk->ce_core_clk);
@@ -4403,13 +4436,20 @@
 			qclk->clk_access_cnt = 0;
 			goto ce_bus_clk_err;
 		}
+	}
+
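+	/* keep the scale-down timer armed while clocks are on or a bandwidth vote is outstanding */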
+	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
 		qseecom.bw_scale_down_timer.expires = jiffies +
 			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-		add_timer(&(qseecom.bw_scale_down_timer));
+		mod_timer(&(qseecom.bw_scale_down_timer),
+				qseecom.bw_scale_down_timer.expires);
 		qseecom.timer_running = true;
-
 	}
+
 	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+
 	return 0;
 
 ce_bus_clk_err:
@@ -4418,6 +4458,7 @@
 	clk_disable_unprepare(qclk->ce_core_clk);
 err:
 	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
 	return -EIO;
 }
 static struct of_device_id qseecom_match[] = {
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9d07631..0c70746 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2524,8 +2524,20 @@
 			break;
 		case MMC_BLK_CMD_ERR:
 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
-			if (!mmc_blk_reset(md, card->host, type))
+			if (!mmc_blk_reset(md, card->host, type)) {
+				if (!ret) {
+					/*
+					 * We have successfully completed block
+					 * We have successfully completed the
+					 * block request and notified the upper
+					 * layers. As the reset succeeded,
+					 * assume the h/w is in a clean state
+					 * and proceed with a new request.
+					BUG_ON(card->host->areq);
+					goto start_new_req;
+				}
 				break;
+			}
 			goto cmd_abort;
 		case MMC_BLK_RETRY:
 			if (retry++ < 5)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 062b71b..c1f366e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -66,6 +66,10 @@
 	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
 			      0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
 
+	/* Disable HPI feature for Kingston card */
+	MMC_FIXUP_EXT_CSD_REV("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+			add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+
 	/*
 	 * Some Hynix cards exhibit data corruption over reboots if cache is
 	 * enabled. Disable cache for all versions until a class of cards that
@@ -74,6 +78,9 @@
 	MMC_FIXUP("H8G2d", CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_CACHE_DISABLE),
 
+	MMC_FIXUP("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_CACHE_DISABLE),
+
 	END_FIXUP
 };
 
@@ -1257,8 +1264,7 @@
 		mmc_set_clock(host, (unsigned int) (*freq));
 	}
 
-	if ((mmc_card_hs400(card) || mmc_card_hs200(card))
-		&& card->host->ops->execute_tuning) {
+	if (mmc_card_hs200(card) && card->host->ops->execute_tuning) {
 		/*
 		 * We try to probe host driver for tuning for any
 		 * frequency, it is host driver responsibility to
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9b41807..074b6a8 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2854,7 +2854,7 @@
 	 * max. 1ms for reset completion.
 	 */
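+	/* poll roughly every 10 us, giving up after 1 ms */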
 	ret = readl_poll_timeout(msm_host->core_mem + CORE_POWER,
-			pwr, !(pwr & CORE_SW_RST), 100, 10);
+			pwr, !(pwr & CORE_SW_RST), 10, 1000);
 
 	if (ret) {
 		dev_err(&pdev->dev, "reset failed (%d)\n", ret);
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index b4a2493..e03d5be 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -242,6 +242,13 @@
 	{"LVS1", 0x060},
 };
 
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+				void *ss_handle);
+
+static struct notifier_block wnb = {
+	.notifier_call = wcnss_notif_cb,
+};
+
 #define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
 
 /*
@@ -2359,6 +2366,9 @@
 
 	if (pil_retry >= WCNSS_MAX_PIL_RETRY) {
 		wcnss_reset_intr();
+		if (penv->wcnss_notif_hdle)
+			subsys_notif_unregister_notifier(penv->wcnss_notif_hdle,
+				&wnb);
 		penv->pil = NULL;
 		goto fail_pil;
 	}
@@ -2552,11 +2562,6 @@
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block wnb = {
-	.notifier_call = wcnss_notif_cb,
-};
-
-
 static const struct file_operations wcnss_node_fops = {
 	.owner = THIS_MODULE,
 	.open = wcnss_node_open,
diff --git a/drivers/rtc/alarm-dev.c b/drivers/rtc/alarm-dev.c
index 1e13ce1..b621547 100644
--- a/drivers/rtc/alarm-dev.c
+++ b/drivers/rtc/alarm-dev.c
@@ -49,6 +49,7 @@
 #define ANDROID_ALARM_SET_AND_WAIT_OLD      _IOW('a', 3, time_t)
 
 static int alarm_opened;
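+/* serializes the alarm CLEAR/SET paths; set_power_on_alarm() can sleep, so it runs outside alarm_slock */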
+static DEFINE_MUTEX(alarm_mutex);
 static DEFINE_SPINLOCK(alarm_slock);
 static struct wake_lock alarm_wake_lock;
 static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
@@ -89,6 +90,7 @@
 
 	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
 	case ANDROID_ALARM_CLEAR(0):
+		mutex_lock(&alarm_mutex);
 		spin_lock_irqsave(&alarm_slock, flags);
 		pr_alarm(IO, "alarm %d clear\n", alarm_type);
 		alarm_try_to_cancel(&alarms[alarm_type]);
@@ -98,11 +100,12 @@
 				wake_unlock(&alarm_wake_lock);
 		}
 		alarm_enabled &= ~alarm_type_mask;
+		spin_unlock_irqrestore(&alarm_slock, flags);
 		if (alarm_type == ANDROID_ALARM_RTC_POWEROFF_WAKEUP)
 			if (!copy_from_user(&new_alarm_time,
 				(void __user *)arg, sizeof(new_alarm_time)))
 				set_power_on_alarm(new_alarm_time.tv_sec, 0);
-		spin_unlock_irqrestore(&alarm_slock, flags);
+		mutex_unlock(&alarm_mutex);
 		break;
 
 	case ANDROID_ALARM_SET_OLD:
@@ -122,6 +125,7 @@
 			goto err1;
 		}
 from_old_alarm_set:
+		mutex_lock(&alarm_mutex);
 		spin_lock_irqsave(&alarm_slock, flags);
 		pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
 			new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
@@ -129,11 +133,12 @@
 		alarm_start_range(&alarms[alarm_type],
 			timespec_to_ktime(new_alarm_time),
 			timespec_to_ktime(new_alarm_time));
+		spin_unlock_irqrestore(&alarm_slock, flags);
 		if ((alarm_type == ANDROID_ALARM_RTC_POWEROFF_WAKEUP) &&
 				(ANDROID_ALARM_BASE_CMD(cmd) ==
 				 ANDROID_ALARM_SET(0)))
 			set_power_on_alarm(new_alarm_time.tv_sec, 1);
-		spin_unlock_irqrestore(&alarm_slock, flags);
+		mutex_unlock(&alarm_mutex);
 		if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
 		    && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
 			break;
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
index 2cf1158..8531de9 100644
--- a/drivers/rtc/alarm.c
+++ b/drivers/rtc/alarm.c
@@ -65,15 +65,18 @@
 static struct rtc_device *alarm_rtc_dev;
 static DEFINE_SPINLOCK(alarm_slock);
 static DEFINE_MUTEX(alarm_setrtc_mutex);
+static DEFINE_MUTEX(power_on_alarm_mutex);
 static struct wake_lock alarm_rtc_wake_lock;
 static struct platform_device *alarm_platform_dev;
 struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
 static bool suspended;
 static long power_on_alarm;
 
-static void alarm_shutdown(struct platform_device *dev);
+static int set_alarm_time_to_rtc(const long);
+
 void set_power_on_alarm(long secs, bool enable)
 {
+	mutex_lock(&power_on_alarm_mutex);
 	if (enable) {
 		power_on_alarm = secs;
 	} else {
@@ -85,7 +88,9 @@
 		else
 			power_on_alarm = 0;
 	}
-	alarm_shutdown(NULL);
+
+	set_alarm_time_to_rtc(power_on_alarm);
+	mutex_unlock(&power_on_alarm_mutex);
 }
 
 
@@ -521,28 +526,23 @@
 	return 0;
 }
 
-static void alarm_shutdown(struct platform_device *dev)
+static int set_alarm_time_to_rtc(const long power_on_time)
 {
 	struct timespec wall_time;
 	struct rtc_time rtc_time;
 	struct rtc_wkalrm alarm;
-	unsigned long flags;
 	long rtc_secs, alarm_delta, alarm_time;
-	int rc;
+	int rc = -EINVAL;
 
-	spin_lock_irqsave(&alarm_slock, flags);
-
-	if (!power_on_alarm) {
-		spin_unlock_irqrestore(&alarm_slock, flags);
+	if (power_on_time <= 0) {
 		goto disable_alarm;
 	}
-	spin_unlock_irqrestore(&alarm_slock, flags);
 
 	rtc_read_time(alarm_rtc_dev, &rtc_time);
 	getnstimeofday(&wall_time);
 	rtc_tm_to_time(&rtc_time, &rtc_secs);
 	alarm_delta = wall_time.tv_sec - rtc_secs;
-	alarm_time = power_on_alarm - alarm_delta;
+	alarm_time = power_on_time - alarm_delta;
 
 	/*
 	 * Substract ALARM_DELTA from actual alarm time
@@ -558,16 +558,19 @@
 	rtc_time_to_tm(alarm_time, &alarm.time);
 	alarm.enabled = 1;
 	rc = rtc_set_alarm(alarm_rtc_dev, &alarm);
-	if (rc)
+	if (rc) {
 		pr_alarm(ERROR, "Unable to set power-on alarm\n");
+		goto disable_alarm;
+	}
 	else
 		pr_alarm(FLOW, "Power-on alarm set to %lu\n",
 				alarm_time);
 
-	return;
+	return 0;
 
 disable_alarm:
 	rtc_alarm_irq_enable(alarm_rtc_dev, 0);
+	return rc;
 }
 
 static struct rtc_task alarm_rtc_task = {
@@ -629,7 +632,6 @@
 static struct platform_driver alarm_driver = {
 	.suspend = alarm_suspend,
 	.resume = alarm_resume,
-	.shutdown = alarm_shutdown,
 	.driver = {
 		.name = "alarm"
 	}
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index d670f8b..d8dc6f9 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -50,7 +50,7 @@
 
 static int msm_spi_pm_resume_runtime(struct device *device);
 static int msm_spi_pm_suspend_runtime(struct device *device);
-
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
 
 static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
 					struct platform_device *pdev)
@@ -135,6 +135,40 @@
 	}
 }
 
+static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+	int rc;
+
+	cs_num = dd->cur_msg->spi->chip_select;
+	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
+		(!(dd->cs_gpios[cs_num].valid)) &&
+		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
+		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
+				spi_cs_rsrcs[cs_num]);
+		if (rc) {
+			dev_err(dd->dev,
+				"gpio_request for pin %d failed, error %d\n",
+				dd->cs_gpios[cs_num].gpio_num, rc);
+			return rc;
+		}
+		dd->cs_gpios[cs_num].valid = 1;
+	}
+	return 0;
+}
+
+static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+
+	cs_num = dd->cur_msg->spi->chip_select;
+	if (dd->cs_gpios[cs_num].valid) {
+		gpio_free(dd->cs_gpios[cs_num].gpio_num);
+		dd->cs_gpios[cs_num].valid = 0;
+	}
+}
+
 /**
  * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
  * @clk the clock for which to find nearest lower rate
@@ -785,117 +819,190 @@
 	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
 }
 
+static int
+msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 prod_flags = 0;
+
+	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		prod_flags = (dd->write_buf)
+			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+
+	/*
+	 * enqueue read buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.prod.handle,
+			dd->cur_rx_transfer->rx_dma
+				+ dd->bam.curr_rx_bytes_recvd,
+			data_xfr_size, dd, prod_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue producer BAM transfer",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
+	*bytes_to_send -= data_xfr_size;
+	dd->bam.bam_rx_len -= data_xfr_size;
+
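+	/* current rx transfer fully queued: advance to the next transfer in the message */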
+	if (!(dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd)) {
+		struct spi_transfer *t = dd->cur_rx_transfer;
+		struct spi_transfer *next;
+		if (t->transfer_list.next != &dd->cur_msg->transfers) {
+			next = list_entry(t->transfer_list.next,
+					struct spi_transfer,
+					transfer_list);
+			dd->read_buf  = next->rx_buf;
+			dd->cur_rx_transfer = next;
+			dd->bam.curr_rx_bytes_recvd = 0;
+		}
+	}
+	return data_xfr_size;
+}
+
+static int
+msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 cons_flags = 0;
+
+	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+
+	/*
+	 * enqueue write buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.cons.handle,
+			dd->cur_tx_transfer->tx_dma
+				+ dd->bam.curr_tx_bytes_sent,
+			data_xfr_size, dd, cons_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue consumer BAM transfer",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_tx_bytes_sent += data_xfr_size;
+	*bytes_to_send -= data_xfr_size;
+	dd->bam.bam_tx_len -= data_xfr_size;
+
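+	/* current tx transfer fully queued: advance to the next transfer in the message */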
+	if (!(dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent)) {
+		struct spi_transfer *t = dd->cur_tx_transfer;
+		struct spi_transfer *next;
+		if (t->transfer_list.next != &dd->cur_msg->transfers) {
+			next = list_entry(t->transfer_list.next,
+					struct spi_transfer,
+					transfer_list);
+			dd->write_buf = next->tx_buf;
+			dd->cur_tx_transfer = next;
+			dd->bam.curr_tx_bytes_sent = 0;
+		}
+	}
+	return data_xfr_size;
+}
+
+
 /**
  * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
  * using BAM.
  * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS byte at a single
  * transfer. Between transfer QUP must change to reset state. A loop is
- * issuing a single BAM transfer at a time. If another tsranfer is
- * required, it waits for the trasfer to finish, then moving to reset
- * state, and back to run state to issue the next transfer.
- * The function dose not wait for the last transfer to end, or if only
- * a single transfer is required, the function dose not wait for it to
- * end.
- * @timeout max time in jiffies to wait for a transfer to finish.
+ * issuing a single BAM transfer at a time.
  * @return zero on success
  */
 static int
-msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
+msm_spi_bam_begin_transfer(struct msm_spi *dd)
 {
-	u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
-	int ret;
-	/*
-	 * QUP must move to reset mode every 64K-1 bytes of transfer
-	 * (counter is 16 bit)
-	 */
-	if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
-		/* assert chip select unconditionally */
-		u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
-		if (!(spi_ioc & SPI_IO_C_FORCE_CS))
-			writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
-				dd->base + SPI_IO_CONTROL);
+	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
+	u32 n_words_xfr;
+	s32 ret = 0;
+	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 byte_count = 0;
+
+	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
+				dd->bytes_per_word);
+
+	msm_spi_set_mx_counts(dd, n_words_xfr);
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Failed to set QUP state to run",
+			__func__);
+		goto xfr_err;
 	}
 
-	/* Following flags are required since we are waiting on all transfers */
-	cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
-	/*
-	 * on a balanced transaction, BAM will set the flags on the producer
-	 * pipe based on the flags set on the consumer pipe
-	 */
-	prod_flags = (dd->write_buf) ? 0 : cons_flags;
-
-	while (dd->tx_bytes_remaining > 0) {
-		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
-		bytes_to_send = min_t(u32, dd->tx_bytes_remaining
-						, SPI_MAX_TRFR_BTWN_RESETS);
-		n_words_xfr = DIV_ROUND_UP(bytes_to_send
-						, dd->bytes_per_word);
-
-		msm_spi_set_mx_counts(dd, n_words_xfr);
-
-		ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
-		if (ret < 0) {
-			dev_err(dd->dev,
-				"%s: Failed to set QUP state to run",
-				__func__);
-			goto xfr_err;
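+	/* queue rx and tx descriptors alternately until all bytes are queued or descriptors run out */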
+	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
+		((cons_desc_cnt + prod_desc_cnt) > 0)) {
+		if (dd->read_buf && (prod_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
+							prod_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+			prod_desc_cnt--;
 		}
 
-		/* enqueue read buffer in BAM */
-		if (dd->read_buf) {
-			ret = sps_transfer_one(dd->bam.prod.handle,
-				dd->cur_transfer->rx_dma + bytes_sent,
-				bytes_to_send, dd, prod_flags);
-			if (ret < 0) {
-				dev_err(dd->dev,
-				"%s: Failed to queue producer BAM transfer",
-				__func__);
+		if (dd->write_buf && (cons_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
+							cons_desc_cnt);
+			if (ret < 0)
 				goto xfr_err;
-			}
+			cons_desc_cnt--;
 		}
-
-		/* enqueue write buffer in BAM */
-		if (dd->write_buf) {
-			ret = sps_transfer_one(dd->bam.cons.handle,
-				dd->cur_transfer->tx_dma + bytes_sent,
-				bytes_to_send, dd, cons_flags);
-			if (ret < 0) {
-				dev_err(dd->dev,
-				"%s: Failed to queue consumer BAM transfer",
-				__func__);
-				goto xfr_err;
-			}
-		}
-
-		dd->tx_bytes_remaining -= bytes_to_send;
-
-		/* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
-		if (dd->tx_bytes_remaining > 0) {
-			if (!wait_for_completion_timeout(
-				&dd->transfer_complete, timeout)) {
-				dev_err(dd->dev,
-					"%s: SPI transaction timeout",
-					__func__);
-				dd->cur_msg->status = -EIO;
-				ret = -EIO;
-				goto xfr_err;
-			}
-			ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
-			if (ret < 0) {
-				dev_err(dd->dev,
-					"%s: Failed to set QUP state to reset",
-					__func__);
-				goto xfr_err;
-			}
-			init_completion(&dd->transfer_complete);
-		}
+		byte_count += ret;
 	}
+
+	dd->tx_bytes_remaining -= min_t(u32, byte_count,
+						SPI_MAX_TRFR_BTWN_RESETS);
 	return 0;
-
 xfr_err:
 	return ret;
 }
 
+static int
+msm_spi_bam_next_transfer(struct msm_spi *dd)
+{
+	if (dd->mode != SPI_BAM_MODE)
+		return 0;
+
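+	/* more bytes remain: move QUP back through reset and start the next BAM chunk */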
+	if (dd->tx_bytes_remaining > 0) {
+		init_completion(&dd->transfer_complete);
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+			return 0;
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
 static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
 {
 	dmov_box *box;
@@ -1098,6 +1205,16 @@
 	return 0;
 }
 
+static int msm_spi_dma_send_next(struct msm_spi *dd)
+{
+	int ret = 0;
+	if (dd->mode == SPI_DMOV_MODE)
+		ret = msm_spi_dm_send_next(dd);
+	if (dd->mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_next_transfer(dd);
+	return ret;
+}
+
 static inline void msm_spi_ack_transfer(struct msm_spi *dd)
 {
 	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
@@ -1297,14 +1414,14 @@
 }
 
 /**
- * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
+ * msm_spi_dmov_map_buffers: prepares buffer for DMA transfer
  * @return zero on success or negative error code
  *
  * calls dma_map_single() on the read/write buffers, effectively invalidating
  * their cash entries. for For WR-WR and WR-RD transfers, allocates temporary
  * buffer and copy the data to/from the client buffers
  */
-static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+static int msm_spi_dmov_map_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
 	struct spi_transfer *first_xfr;
@@ -1323,7 +1440,7 @@
 	 * For WR-WR and WR-RD transfers, we allocate our own temporary
 	 * buffer and copy the data to/from the client buffers.
 	 */
-	if (dd->multi_xfr) {
+	if (!dd->qup_ver && dd->multi_xfr) {
 		dd->temp_buf = kzalloc(dd->cur_msg_len,
 				       GFP_KERNEL | __GFP_DMA);
 		if (!dd->temp_buf)
@@ -1384,6 +1501,70 @@
 	return ret;
 }
 
+static int msm_spi_bam_map_buffers(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+	struct device *dev;
+	struct spi_transfer *first_xfr;
+	struct spi_transfer *nxt_xfr;
+	void *tx_buf, *rx_buf;
+	u32 tx_len, rx_len;
+	int num_xfrs_grped = dd->num_xfrs_grped;
+
+	dev = &dd->cur_msg->spi->dev;
+	first_xfr = dd->cur_transfer;
+
+	do {
+		tx_buf = (void *)first_xfr->tx_buf;
+		rx_buf = first_xfr->rx_buf;
+		tx_len = rx_len = first_xfr->len;
+		if (tx_buf != NULL) {
+			first_xfr->tx_dma = dma_map_single(dev, tx_buf,
+							tx_len, DMA_TO_DEVICE);
+			if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
+				ret = -ENOMEM;
+				goto error;
+			}
+		}
+
+		if (rx_buf != NULL) {
+			first_xfr->rx_dma = dma_map_single(dev, rx_buf,	rx_len,
+							DMA_FROM_DEVICE);
+			if (dma_mapping_error(NULL, first_xfr->rx_dma)) {
+				if (tx_buf != NULL)
+					dma_unmap_single(NULL,
+							first_xfr->tx_dma,
+							tx_len, DMA_TO_DEVICE);
+				ret = -ENOMEM;
+				goto error;
+			}
+		}
+
+		nxt_xfr = list_entry(first_xfr->transfer_list.next,
+				struct spi_transfer, transfer_list);
+
+		if (nxt_xfr == NULL)
+			break;
+		num_xfrs_grped--;
+		first_xfr = nxt_xfr;
+	} while (num_xfrs_grped > 0);
+
+	return 0;
+error:
+	msm_spi_dma_unmap_buffers(dd);
+	return ret;
+}
+
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+{
+	int ret = 0;
+	if (dd->mode == SPI_DMOV_MODE)
+		ret = msm_spi_dmov_map_buffers(dd);
+	else if (dd->mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_map_buffers(dd);
+	return ret;
+}
+
 static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
@@ -1455,21 +1636,39 @@
 static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
+	int num_xfrs_grped = dd->num_xfrs_grped;
+	struct spi_transfer *first_xfr;
+	struct spi_transfer *nxt_xfr;
+	void *tx_buf, *rx_buf;
+	u32  tx_len, rx_len;
+
+	dev = &dd->cur_msg->spi->dev;
+	first_xfr = dd->cur_transfer;
 
 	 /* mapped by client */
 	if (dd->cur_msg->is_dma_mapped)
 		return;
 
-	dev = &dd->cur_msg->spi->dev;
-	if (dd->cur_transfer->rx_buf)
-		dma_unmap_single(dev, dd->cur_transfer->rx_dma,
-				dd->cur_transfer->len,
-				DMA_FROM_DEVICE);
+	do {
+		tx_buf = (void *)first_xfr->tx_buf;
+		rx_buf = first_xfr->rx_buf;
+		tx_len = rx_len = first_xfr->len;
+		if (tx_buf != NULL)
+			dma_unmap_single(dev, first_xfr->tx_dma,
+					tx_len, DMA_TO_DEVICE);
 
-	if (dd->cur_transfer->tx_buf)
-		dma_unmap_single(dev, dd->cur_transfer->tx_dma,
-				dd->cur_transfer->len,
-				DMA_TO_DEVICE);
+		if (rx_buf != NULL)
+			dma_unmap_single(dev, first_xfr->rx_dma,
+					rx_len, DMA_FROM_DEVICE);
+
+		nxt_xfr = list_entry(first_xfr->transfer_list.next,
+				struct spi_transfer, transfer_list);
+
+		if (nxt_xfr == NULL)
+			break;
+		num_xfrs_grped--;
+		first_xfr = nxt_xfr;
+	} while (num_xfrs_grped > 0);
 }
 
 static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
@@ -1506,7 +1705,8 @@
 	if (dd->cur_msg_len < 3*dd->input_block_size)
 		return false;
 
-	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
+	if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
+		dd->multi_xfr && !dd->read_len && !dd->write_len)
 		return false;
 
 	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
@@ -1688,11 +1888,17 @@
 
 	msm_spi_set_transfer_mode(dd, bpw, read_count);
 	msm_spi_set_mx_counts(dd, read_count);
-	if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
+	if (dd->mode == SPI_DMOV_MODE) {
 		if (msm_spi_dma_map_buffers(dd) < 0) {
 			pr_err("Mapping DMA buffers\n");
 			return;
+		}
+	} else if (dd->mode == SPI_BAM_MODE) {
+		if (msm_spi_dma_map_buffers(dd) < 0) {
+			pr_err("Mapping DMA buffers\n");
+			return;
 		}
+	}
 	msm_spi_set_qup_io_modes(dd);
 	msm_spi_set_spi_config(dd, bpw);
 	msm_spi_set_qup_config(dd, bpw);
@@ -1712,7 +1918,7 @@
 			goto transfer_end;
 		msm_spi_start_write(dd, read_count);
 	} else if (dd->mode == SPI_BAM_MODE) {
-		if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
+		if ((msm_spi_bam_begin_transfer(dd)) < 0)
 			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
 				__func__);
 	}
@@ -1749,9 +1955,10 @@
 					msm_spi_bam_flush(dd);
 				break;
 		}
-	} while (msm_spi_dm_send_next(dd));
+	} while (msm_spi_dma_send_next(dd));
 
-	msm_spi_udelay(dd->cur_transfer->delay_usecs);
+	msm_spi_udelay(dd->xfrs_delay_usec);
+
 transfer_end:
 	msm_spi_dma_unmap_buffers(dd);
 	dd->mode = SPI_MODE_NONE;
@@ -1803,26 +2010,6 @@
 		dd->multi_xfr = 1;
 }
 
-static inline int combine_transfers(struct msm_spi *dd)
-{
-	struct spi_transfer *t = dd->cur_transfer;
-	struct spi_transfer *nxt;
-	int xfrs_grped = 1;
-
-	dd->cur_msg_len = dd->cur_transfer->len;
-	while (t->transfer_list.next != &dd->cur_msg->transfers) {
-		nxt = list_entry(t->transfer_list.next,
-				 struct spi_transfer,
-				 transfer_list);
-		if (t->cs_change != nxt->cs_change)
-			return xfrs_grped;
-		dd->cur_msg_len += nxt->len;
-		xfrs_grped++;
-		t = nxt;
-	}
-	return xfrs_grped;
-}
-
 static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
 {
 	u32 spi_ioc;
@@ -1839,122 +2026,124 @@
 		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
 }
 
+static inline int combine_transfers(struct msm_spi *dd)
+{
+	struct spi_transfer *t = dd->cur_transfer;
+	struct spi_transfer *nxt;
+	int xfrs_grped = 1;
+	dd->xfrs_delay_usec = 0;
+
+	dd->bam.bam_rx_len = dd->bam.bam_tx_len = 0;
+
+	dd->cur_msg_len = dd->cur_transfer->len;
+
+	if (dd->cur_transfer->tx_buf)
+		dd->bam.bam_tx_len += dd->cur_transfer->len;
+	if (dd->cur_transfer->rx_buf)
+		dd->bam.bam_rx_len += dd->cur_transfer->len;
+
+	while (t->transfer_list.next != &dd->cur_msg->transfers) {
+		nxt = list_entry(t->transfer_list.next,
+				 struct spi_transfer,
+				 transfer_list);
+		if (t->cs_change != nxt->cs_change)
+			return xfrs_grped;
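+		/* a per-transfer delay prevents combining; stop grouping at this transfer */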
+		if (t->delay_usecs) {
+			dd->xfrs_delay_usec = t->delay_usecs;
+			dev_info(dd->dev, "SPI slave requests delay per txn: %d usecs",
+					t->delay_usecs);
+			return xfrs_grped;
+		}
+		if (nxt->tx_buf)
+			dd->bam.bam_tx_len += nxt->len;
+		if (nxt->rx_buf)
+			dd->bam.bam_rx_len += nxt->len;
+
+		dd->cur_msg_len += nxt->len;
+		xfrs_grped++;
+		t = nxt;
+	}
+
+	if (xfrs_grped == 1)
+		dd->xfrs_delay_usec = dd->cur_transfer->delay_usecs;
+
+	return xfrs_grped;
+}
+
 static void msm_spi_process_message(struct msm_spi *dd)
 {
 	int xfrs_grped = 0;
-	int cs_num;
 	int rc;
-	bool xfer_delay = false;
-	struct spi_transfer *tr;
 
+	dd->num_xfrs_grped = 0;
+	dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
 	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
-	cs_num = dd->cur_msg->spi->chip_select;
-	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
-		(!(dd->cs_gpios[cs_num].valid)) &&
-		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
-		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
-				spi_cs_rsrcs[cs_num]);
-		if (rc) {
-			dev_err(dd->dev, "gpio_request for pin %d failed with "
-				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
-				rc);
-			return;
-		}
-		dd->cs_gpios[cs_num].valid = 1;
-	}
+	rc = msm_spi_request_cs_gpio(dd);
+	if (rc)
+		return;
 
-	list_for_each_entry(tr,
-				&dd->cur_msg->transfers,
-				transfer_list) {
-		if (tr->delay_usecs) {
-			dev_info(dd->dev, "SPI slave requests delay per txn :%d",
-					tr->delay_usecs);
-			xfer_delay = true;
-			break;
-		}
-	}
-
-	/* Don't combine xfers if delay is needed after every xfer */
-	if (dd->qup_ver || xfer_delay) {
-		if (dd->qup_ver)
-			write_force_cs(dd, 0);
-		list_for_each_entry(dd->cur_transfer,
-				&dd->cur_msg->transfers,
-				transfer_list) {
-			struct spi_transfer *t = dd->cur_transfer;
-			struct spi_transfer *nxt;
-
-			if (t->transfer_list.next != &dd->cur_msg->transfers) {
-				nxt = list_entry(t->transfer_list.next,
+	dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
 						struct spi_transfer,
 						transfer_list);
 
-				if (dd->qup_ver &&
-					t->cs_change == nxt->cs_change)
-					write_force_cs(dd, 1);
-				else if (dd->qup_ver)
-					write_force_cs(dd, 0);
-			}
+	get_transfer_length(dd);
+	if (dd->qup_ver || (dd->multi_xfr && !dd->read_len && !dd->write_len)) {
 
-			dd->cur_msg_len = dd->cur_transfer->len;
-			msm_spi_process_transfer(dd);
-		}
-	} else {
-		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
-						    struct spi_transfer,
-						    transfer_list);
-		get_transfer_length(dd);
-		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
-			/*
-			 * Handling of multi-transfers.
-			 * FIFO mode is used by default
-			 */
-			list_for_each_entry(dd->cur_transfer,
-					    &dd->cur_msg->transfers,
-					    transfer_list) {
-				if (!dd->cur_transfer->len)
-					goto error;
-				if (xfrs_grped) {
-					xfrs_grped--;
-					continue;
-				} else {
-					dd->read_len = dd->write_len = 0;
-					xfrs_grped = combine_transfers(dd);
-				}
+		if (dd->qup_ver)
+			write_force_cs(dd, 0);
 
-				dd->cur_tx_transfer = dd->cur_transfer;
-				dd->cur_rx_transfer = dd->cur_transfer;
-				msm_spi_process_transfer(dd);
+		/*
+		 * Handling of multi-transfers.
+		 * FIFO mode is used by default
+		 */
+		list_for_each_entry(dd->cur_transfer,
+					&dd->cur_msg->transfers,
+					transfer_list) {
+			if (!dd->cur_transfer->len)
+				goto error;
+			if (xfrs_grped) {
 				xfrs_grped--;
-			}
-		} else {
-			/* Handling of a single transfer or
-			 * WR-WR or WR-RD transfers
-			 */
-			if ((!dd->cur_msg->is_dma_mapped) &&
-			    (msm_spi_use_dma(dd, dd->cur_transfer,
-					dd->cur_transfer->bits_per_word))) {
-				/* Mapping of DMA buffers */
-				int ret = msm_spi_dma_map_buffers(dd);
-				if (ret < 0) {
-					dd->cur_msg->status = ret;
-					goto error;
-				}
+				continue;
+			} else {
+				dd->read_len = dd->write_len = 0;
+				xfrs_grped = combine_transfers(dd);
+				dd->num_xfrs_grped = xfrs_grped;
+				if (dd->qup_ver)
+					write_force_cs(dd, 1);
 			}
 
 			dd->cur_tx_transfer = dd->cur_transfer;
 			dd->cur_rx_transfer = dd->cur_transfer;
 			msm_spi_process_transfer(dd);
+			if (dd->qup_ver && !dd->xfrs_delay_usec)
+				write_force_cs(dd, 0);
+			xfrs_grped--;
 		}
+	} else {
+		/* Handling of a single transfer or
+		 * WR-WR or WR-RD transfers
+		 */
+		if ((!dd->cur_msg->is_dma_mapped) &&
+			(msm_spi_use_dma(dd, dd->cur_transfer,
+				dd->cur_transfer->bits_per_word))) {
+			/* Mapping of DMA buffers */
+			int ret = msm_spi_dma_map_buffers(dd);
+			if (ret < 0) {
+				dd->cur_msg->status = ret;
+				goto error;
+			}
+		}
+
+		dd->cur_tx_transfer = dd->cur_transfer;
+		dd->cur_rx_transfer = dd->cur_transfer;
+		dd->num_xfrs_grped = 1;
+		msm_spi_process_transfer(dd);
 	}
 
 	return;
 
 error:
-	if (dd->cs_gpios[cs_num].valid) {
-		gpio_free(dd->cs_gpios[cs_num].gpio_num);
-		dd->cs_gpios[cs_num].valid = 0;
-	}
+	msm_spi_free_cs_gpio(dd);
 }
 
 /**
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index d538076..a0c399b 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -291,6 +291,10 @@
 	struct msm_spi_bam_pipe  prod;
 	struct msm_spi_bam_pipe  cons;
 	bool                     deregister_required;
+	u32			 curr_rx_bytes_recvd;
+	u32			 curr_tx_bytes_sent;
+	u32			 bam_rx_len;
+	u32			 bam_tx_len;
 };
 
 struct msm_spi {
@@ -388,6 +392,8 @@
 	struct spi_cs_gpio       cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
 	enum msm_spi_qup_version qup_ver;
 	int			 max_trfr_len;
+	int			 num_xfrs_grped;
+	u16			 xfrs_delay_usec;
 };
 
 /* Forward declaration */
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 6e6457c..b4a56ec 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -40,6 +40,7 @@
 #define MAX_CURRENT_UA 1000000
 #define MAX_RAILS 5
 #define MAX_THRESHOLD 2
+#define MONITOR_ALL_TSENS -1
 
 static struct msm_thermal_data msm_thermal_info;
 static struct delayed_work check_temp_work;
@@ -53,10 +54,13 @@
 static struct work_struct timer_work;
 static struct task_struct *hotplug_task;
 static struct task_struct *freq_mitigation_task;
+static struct task_struct *thermal_monitor_task;
 static struct completion hotplug_notify_complete;
 static struct completion freq_mitigation_complete;
+static struct completion thermal_monitor_complete;
 
 static int enabled;
+static int polling_enabled;
 static int rails_cnt;
 static int psm_rails_cnt;
 static int ocr_rail_cnt;
@@ -78,6 +82,8 @@
 static bool ocr_enabled;
 static bool ocr_nodes_called;
 static bool ocr_probed;
+static bool interrupt_mode_enable;
+static bool msm_thermal_probed;
 static int *tsens_id_map;
 static DEFINE_MUTEX(vdd_rstr_mutex);
 static DEFINE_MUTEX(psm_mutex);
@@ -115,6 +121,21 @@
 	bool freq_thresh_clear;
 };
 
+struct threshold_info;
+struct therm_threshold {
+	int32_t sensor_id;
+	struct sensor_threshold threshold[MAX_THRESHOLD];
+	int32_t trip_triggered;
+	void (*notify)(struct therm_threshold *);
+	struct threshold_info *parent;
+};
+
+struct threshold_info {
+	uint32_t thresh_ct;
+	bool thresh_triggered;
+	struct therm_threshold *thresh_list;
+};
+
 struct rail {
 	const char *name;
 	uint32_t freq_req;
@@ -138,10 +159,16 @@
 	struct attribute_group attr_gp;
 };
 
+enum msm_thresh_list {
+	MSM_VDD_RESTRICTION,
+	MSM_LIST_MAX_NR,
+};
+
 static struct psm_rail *psm_rails;
 static struct psm_rail *ocr_rails;
 static struct rail *rails;
 static struct cpu_info cpus[NR_CPUS];
+static struct threshold_info *thresh;
 
 struct vdd_rstr_enable {
 	struct kobj_attribute ko_attr;
@@ -1195,7 +1222,7 @@
 	do_freq_control(temp);
 
 reschedule:
-	if (enabled)
+	if (polling_enabled)
 		schedule_delayed_work(&check_temp_work,
 				msecs_to_jiffies(msm_thermal_info.poll_ms));
 }
@@ -1532,6 +1559,215 @@
 	return ret;
 }
 
+int therm_set_threshold(struct threshold_info *thresh_inp)
+{
+	int ret = 0, i = 0, err = 0;
+	struct therm_threshold *thresh_ptr;
+
+	if (!thresh_inp) {
+		pr_err("%s: %s: Invalid input\n",
+			KBUILD_MODNAME, __func__);
+		ret = -EINVAL;
+		goto therm_set_exit;
+	}
+
+	thresh_inp->thresh_triggered = false;
+	for (i = 0; i < thresh_inp->thresh_ct; i++) {
+		thresh_ptr = &thresh_inp->thresh_list[i];
+		thresh_ptr->trip_triggered = -1;
+		err = set_threshold(thresh_ptr->sensor_id,
+			thresh_ptr->threshold);
+		if (err) {
+			ret = err;
+			err = 0;
+		}
+	}
+
+therm_set_exit:
+	return ret;
+}
+
+static void vdd_restriction_notify(struct therm_threshold *trig_thresh)
+{
+	int ret = 0;
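+	/* one bit per sensor: set while that sensor is below its low trip and vdd restriction is needed */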
+	static uint32_t vdd_sens_status;
+
+	if (!vdd_rstr_enabled)
+		return;
+	if (!trig_thresh) {
+		pr_err("%s:%s Invalid input\n", KBUILD_MODNAME, __func__);
+		return;
+	}
+	if (trig_thresh->trip_triggered < 0)
+		goto set_and_exit;
+
+	mutex_lock(&vdd_rstr_mutex);
+	pr_debug("%s: sensor%d reached %d thresh for Vdd restriction\n",
+		KBUILD_MODNAME, trig_thresh->sensor_id,
+		trig_thresh->trip_triggered);
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		if (vdd_sens_status & BIT(trig_thresh->sensor_id))
+			vdd_sens_status ^= BIT(trig_thresh->sensor_id);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		vdd_sens_status |= BIT(trig_thresh->sensor_id);
+		break;
+	default:
+		pr_err("%s:%s: Unsupported trip type\n",
+			KBUILD_MODNAME, __func__);
+		goto unlock_and_exit;
+		break;
+	}
+
+	ret = vdd_restriction_apply_all((vdd_sens_status) ? 1 : 0);
+	if (ret) {
+		pr_err("%s vdd rstr voltage for all failed\n",
+			(vdd_sens_status) ?
+			"Enable" : "Disable");
+		goto unlock_and_exit;
+	}
+
+unlock_and_exit:
+	mutex_unlock(&vdd_rstr_mutex);
+set_and_exit:
+	set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
+	return;
+}
+
+static __ref int do_thermal_monitor(void *data)
+{
+	int ret = 0, i, j;
+	struct therm_threshold *sensor_list;
+
+	while (!kthread_should_stop()) {
+		wait_for_completion(&thermal_monitor_complete);
+		INIT_COMPLETION(thermal_monitor_complete);
+
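+		/* service each threshold list whose sensors tripped since the last wake-up */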
+		for (i = 0; i < MSM_LIST_MAX_NR; i++) {
+			if (!thresh[i].thresh_triggered)
+				continue;
+			thresh[i].thresh_triggered = false;
+			for (j = 0; j < thresh[i].thresh_ct; j++) {
+				sensor_list = &thresh[i].thresh_list[j];
+				if (sensor_list->trip_triggered < 0)
+					continue;
+				sensor_list->notify(sensor_list);
+				sensor_list->trip_triggered = -1;
+			}
+		}
+	}
+	return ret;
+}
+
+static void thermal_monitor_init(void)
+{
+	if (thermal_monitor_task)
+		return;
+
+	init_completion(&thermal_monitor_complete);
+	thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
+		"msm_thermal:therm_monitor");
+	if (IS_ERR(thermal_monitor_task)) {
+		pr_err("%s: Failed to create thermal monitor thread\n",
+				KBUILD_MODNAME);
+		goto init_exit;
+	}
+
+	if (vdd_rstr_enabled)
+		therm_set_threshold(&thresh[MSM_VDD_RESTRICTION]);
+
+init_exit:
+	return;
+}
+
+static int msm_thermal_notify(enum thermal_trip_type type, int temp, void *data)
+{
+	struct therm_threshold *thresh_data = (struct therm_threshold *)data;
+
+	if (thermal_monitor_task) {
+		thresh_data->trip_triggered = type;
+		thresh_data->parent->thresh_triggered = true;
+		complete(&thermal_monitor_complete);
+	} else {
+		pr_err("%s: Thermal monitor task is not initialized\n",
+			KBUILD_MODNAME);
+	}
+	return 0;
+}
+
+static int init_threshold(enum msm_thresh_list index,
+	int sensor_id, int32_t hi_temp, int32_t low_temp,
+	void (*callback)(struct therm_threshold *))
+{
+	int ret = 0, i;
+	struct therm_threshold *thresh_ptr;
+
+	if (!callback || index >= MSM_LIST_MAX_NR || index < 0
+		|| sensor_id == -ENODEV) {
+		pr_err("%s: Invalid input to init_threshold\n",
+			KBUILD_MODNAME);
+		ret = -EINVAL;
+		goto init_thresh_exit;
+	}
+	if (thresh[index].thresh_list) {
+		pr_err("%s: threshold already initialized\n",
+			KBUILD_MODNAME);
+		ret = -EEXIST;
+		goto init_thresh_exit;
+	}
+
+	thresh[index].thresh_ct = (sensor_id == MONITOR_ALL_TSENS) ?
+						max_tsens_num : 1;
+	thresh[index].thresh_triggered = false;
+	thresh[index].thresh_list = kzalloc(sizeof(struct therm_threshold) *
+					thresh[index].thresh_ct, GFP_KERNEL);
+	if (!thresh[index].thresh_list) {
+		pr_err("%s: kzalloc failed\n", KBUILD_MODNAME);
+		ret = -ENOMEM;
+		goto init_thresh_exit;
+	}
+
+	thresh_ptr = thresh[index].thresh_list;
+	if (sensor_id == MONITOR_ALL_TSENS) {
+		for (i = 0; i < max_tsens_num; i++) {
+			thresh_ptr[i].sensor_id = tsens_id_map[i];
+			thresh_ptr[i].notify = callback;
+			thresh_ptr[i].trip_triggered = -1;
+			thresh_ptr[i].parent = &thresh[index];
+			thresh_ptr[i].threshold[0].temp = hi_temp;
+			thresh_ptr[i].threshold[0].trip =
+				THERMAL_TRIP_CONFIGURABLE_HI;
+			thresh_ptr[i].threshold[1].temp = low_temp;
+			thresh_ptr[i].threshold[1].trip =
+				THERMAL_TRIP_CONFIGURABLE_LOW;
+			thresh_ptr[i].threshold[0].notify =
+			thresh_ptr[i].threshold[1].notify = msm_thermal_notify;
+			thresh_ptr[i].threshold[0].data =
+			thresh_ptr[i].threshold[1].data =
+				(void *)&thresh_ptr[i];
+		}
+	} else {
+		thresh_ptr->sensor_id = sensor_id;
+		thresh_ptr->notify = callback;
+		thresh_ptr->trip_triggered = -1;
+		thresh_ptr->parent = &thresh[index];
+		thresh_ptr->threshold[0].temp = hi_temp;
+		thresh_ptr->threshold[0].trip =
+			THERMAL_TRIP_CONFIGURABLE_HI;
+		thresh_ptr->threshold[1].temp = low_temp;
+		thresh_ptr->threshold[1].trip =
+			THERMAL_TRIP_CONFIGURABLE_LOW;
+		thresh_ptr->threshold[0].notify =
+		thresh_ptr->threshold[1].notify = msm_thermal_notify;
+		thresh_ptr->threshold[0].data =
+		thresh_ptr->threshold[1].data = (void *)thresh_ptr;
+	}
+
+init_thresh_exit:
+	return ret;
+}
+
 /*
  * We will reset the cpu frequencies limits here. The core online/offline
  * status will be carried over to the process stopping the msm_thermal, as
@@ -1542,8 +1778,7 @@
 	uint32_t cpu = 0;
 
 	/* make sure check_temp is no longer running */
-	cancel_delayed_work(&check_temp_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&check_temp_work);
 
 	get_online_cpus();
 	for_each_possible_cpu(cpu) {
@@ -1557,16 +1792,30 @@
 	put_online_cpus();
 }
 
+static void interrupt_mode_init(void)
+{
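+	/* driver not probed yet: remember the request and apply it once probe completes */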
+	if (!msm_thermal_probed) {
+		interrupt_mode_enable = true;
+		return;
+	}
+	if (polling_enabled) {
+		pr_info("%s: Interrupt mode init\n", KBUILD_MODNAME);
+		polling_enabled = 0;
+		disable_msm_thermal();
+		hotplug_init();
+		freq_mitigation_init();
+		thermal_monitor_init();
+	}
+}
+
 static int __ref set_enabled(const char *val, const struct kernel_param *kp)
 {
 	int ret = 0;
 
 	ret = param_set_bool(val, kp);
-	if (!enabled) {
-		disable_msm_thermal();
-		hotplug_init();
-		freq_mitigation_init();
-	} else
+	if (!enabled)
+		interrupt_mode_init();
+	else
 		pr_info("%s: no action for enabled = %d\n",
 			KBUILD_MODNAME, enabled);
 
@@ -1642,7 +1891,7 @@
 		goto done_cc;
 	}
 
-	if (enabled) {
+	if (polling_enabled) {
 		pr_err("%s: Ignoring request; polling thread is enabled.\n",
 				KBUILD_MODNAME);
 		goto done_cc;
@@ -1795,7 +2044,34 @@
 	return ret;
 }
 
-int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
+int msm_thermal_pre_init(void)
+{
+	int ret = 0;
+
+	tsens_get_max_sensor_num(&max_tsens_num);
+	if (create_sensor_id_map()) {
+		ret = -EINVAL;
+		goto pre_init_exit;
+	}
+
+	if (!thresh) {
+		thresh = kzalloc(
+				sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
+				GFP_KERNEL);
+		if (!thresh) {
+			pr_err("%s:%s: kzalloc failed\n",
+				KBUILD_MODNAME, __func__);
+			ret = -ENOMEM;
+			goto pre_init_exit;
+		}
+		memset(thresh, 0, sizeof(struct threshold_info) *
+			MSM_LIST_MAX_NR);
+	}
+pre_init_exit:
+	return ret;
+}
+
+int msm_thermal_init(struct msm_thermal_data *pdata)
 {
 	int ret = 0;
 	uint32_t cpu;
@@ -1813,15 +2089,13 @@
 		cpus[cpu].freq_thresh_clear = false;
 	}
 	BUG_ON(!pdata);
-	tsens_get_max_sensor_num(&max_tsens_num);
 	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
 
-	if (create_sensor_id_map())
-		return -EINVAL;
 	if (check_sensor_id(msm_thermal_info.sensor_id))
 		return -EINVAL;
 
 	enabled = 1;
+	polling_enabled = 1;
 	ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
 			CPUFREQ_POLICY_NOTIFIER);
 	if (ret)
@@ -2271,6 +2545,11 @@
 				__func__);
 			goto read_node_fail;
 		}
+		ret = init_threshold(MSM_VDD_RESTRICTION, MONITOR_ALL_TSENS,
+			data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
+			vdd_restriction_notify);
+		if (ret)
+			goto read_node_fail;
 		vdd_rstr_enabled = true;
 	}
 read_node_fail:
@@ -2547,6 +2826,9 @@
 	struct msm_thermal_data data;
 
 	memset(&data, 0, sizeof(struct msm_thermal_data));
+	ret = msm_thermal_pre_init();
+	if (ret)
+		goto fail;
 
 	key = "qcom,sensor-id";
 	ret = of_property_read_u32(node, key, &data.sensor_id);
@@ -2615,6 +2897,12 @@
 	}
 	msm_thermal_ioctl_init();
 	ret = msm_thermal_init(&data);
+	msm_thermal_probed = true;
+
+	if (interrupt_mode_enable) {
+		interrupt_mode_init();
+		interrupt_mode_enable = false;
+	}
 
 	return ret;
 fail:
@@ -2628,6 +2916,12 @@
 static int msm_thermal_dev_exit(struct platform_device *inp_dev)
 {
 	msm_thermal_ioctl_cleanup();
+	if (thresh) {
+		if (vdd_rstr_enabled)
+			kfree(thresh[MSM_VDD_RESTRICTION].thresh_list);
+		kfree(thresh);
+		thresh = NULL;
+	}
 	return 0;
 }
 
@@ -2663,6 +2957,7 @@
 	INIT_WORK(&timer_work, timer_work_fn);
 	msm_thermal_add_timer_nodes();
 
+	interrupt_mode_init();
 	return 0;
 }
 late_initcall(msm_thermal_late_init);
diff --git a/drivers/usb/class/ccid_bridge.c b/drivers/usb/class/ccid_bridge.c
index 05483fd..a914902 100644
--- a/drivers/usb/class/ccid_bridge.c
+++ b/drivers/usb/class/ccid_bridge.c
@@ -36,6 +36,10 @@
 #define CCID_CONTROL_TIMEOUT 500 /* msec */
 #define CCID_BRIDGE_MSG_TIMEOUT 1000 /* msec */
 
+static unsigned ccid_bulk_msg_timeout = CCID_BRIDGE_MSG_TIMEOUT;
+module_param_named(bulk_msg_timeout, ccid_bulk_msg_timeout, uint, 0644);
+MODULE_PARM_DESC(bulk_msg_timeout, "Bulk message timeout (msecs)");
+
 struct ccid_bridge {
 	struct usb_device *udev;
 	struct usb_interface *intf;
@@ -304,7 +308,7 @@
 
 	ret = wait_event_interruptible_timeout(ccid->write_wq,
 			ccid->write_result != 0,
-			msecs_to_jiffies(CCID_BRIDGE_MSG_TIMEOUT));
+			msecs_to_jiffies(ccid_bulk_msg_timeout));
 	if (!ret || ret == -ERESTARTSYS) { /* timedout or interrupted */
 		usb_kill_urb(ccid->writeurb);
 		if (!ret)
@@ -375,7 +379,7 @@
 
 	ret = wait_event_interruptible_timeout(ccid->read_wq,
 			ccid->read_result != 0,
-			msecs_to_jiffies(CCID_BRIDGE_MSG_TIMEOUT));
+			msecs_to_jiffies(ccid_bulk_msg_timeout));
 	if (!ret || ret == -ERESTARTSYS) { /* timedout or interrupted */
 		usb_kill_urb(ccid->readurb);
 		if (!ret)
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index cba7c22b..ae13322 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -1,14 +1,15 @@
-mdss-mdp3-objs = mdp3.o mdp3_dma.o mdp3_ctrl.o
+mdss-mdp3-objs = mdp3.o mdp3_dma.o mdp3_ctrl.o dsi_status_v2.o
 mdss-mdp3-objs += mdp3_ppp.o mdp3_ppp_hwio.o mdp3_ppp_data.o
 obj-$(CONFIG_FB_MSM_MDSS_MDP3) += mdss-mdp3.o
 
-mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o
+mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o dsi_status_6g.o
 mdss-mdp-objs += mdss_mdp_pp.o
 mdss-mdp-objs += mdss_mdp_intf_video.o
 mdss-mdp-objs += mdss_mdp_intf_cmd.o
 mdss-mdp-objs += mdss_mdp_intf_writeback.o
 mdss-mdp-objs += mdss_mdp_rotator.o
 mdss-mdp-objs += mdss_mdp_overlay.o
+mdss-mdp-objs += mdss_mdp_splash_logo.o
 mdss-mdp-objs += mdss_mdp_wb.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
 
@@ -42,8 +43,4 @@
 
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
 
-ifeq ($(CONFIG_FB_MSM_MDSS_MDP3),y)
-obj-$(CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS) += dsi_status_v2.o
-else
 obj-$(CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS) += mdss_dsi_status.o
-endif
diff --git a/drivers/video/msm/mdss/dsi_status_6g.c b/drivers/video/msm/mdss/dsi_status_6g.c
new file mode 100644
index 0000000..31c9022
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_status_6g.c
@@ -0,0 +1,109 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "mdss_dsi.h"
+#include "mdss_mdp.h"
+
+/*
+ * mdss_check_dsi_ctrl_status() - Check MDP5 DSI controller status periodically.
+ * @work     : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls check_status API on DSI controller to send the BTA
+ * command. If DSI controller fails to acknowledge the BTA command, it sends
+ * the PANEL_ALIVE=0 status to HAL layer.
+ */
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval)
+{
+	struct dsi_status_data *pstatus_data = NULL;
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct mdss_mdp_ctl *ctl = NULL;
+	int ret = 0;
+
+	pstatus_data = container_of(to_delayed_work(work),
+		struct dsi_status_data, check_status);
+
+	pdata = dev_get_platdata(&pstatus_data->mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("%s: Panel data not available\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+							panel_data);
+	if (!ctrl_pdata || !ctrl_pdata->check_status) {
+		pr_err("%s: DSI ctrl or status_check callback not available\n",
+								__func__);
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(pstatus_data->mfd);
+	ctl = mfd_to_ctl(pstatus_data->mfd);
+
+	if (ctl->shared_lock)
+		mutex_lock(ctl->shared_lock);
+	mutex_lock(&mdp5_data->ov_lock);
+
+	if (pstatus_data->mfd->shutdown_pending) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		pr_err("%s: DSI turning off, avoiding BTA status check\n",
+							__func__);
+		return;
+	}
+
+	/*
+	 * For the command mode panels, we return pan display
+	 * IOCTL on vsync interrupt. So, after vsync interrupt comes
+	 * and when DMA_P is in progress, if the panel stops responding
+	 * and if we trigger BTA before DMA_P finishes, then the DSI
+	 * FIFO will not be cleared since the DSI data bus control
+	 * doesn't come back to the host after BTA. This may cause the
+	 * display reset not to be proper. Hence, wait for DMA_P done
+	 * for command mode panels before triggering BTA.
+	 */
+	if (ctl->wait_pingpong)
+		ctl->wait_pingpong(ctl, NULL);
+
+	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	ret = ctrl_pdata->check_status(ctrl_pdata);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	mutex_unlock(&mdp5_data->ov_lock);
+	if (ctl->shared_lock)
+		mutex_unlock(ctl->shared_lock);
+
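+	/* while the panel is on, either re-arm the status check or report a dead panel via uevent */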
+	if (pstatus_data->mfd->panel_power_on) {
+		if (ret > 0) {
+			schedule_delayed_work(&pstatus_data->check_status,
+				msecs_to_jiffies(interval));
+		} else {
+			char *envp[2] = {"PANEL_ALIVE=0", NULL};
+			pdata->panel_info.panel_dead = true;
+			ret = kobject_uevent_env(
+				&pstatus_data->mfd->fbi->dev->kobj,
+							KOBJ_CHANGE, envp);
+			pr_err("%s: Panel has gone bad, sending uevent - %s\n",
+							__func__, envp[0]);
+		}
+	}
+}
diff --git a/drivers/video/msm/mdss/dsi_status_v2.c b/drivers/video/msm/mdss/dsi_status_v2.c
index 565401d..c8915e6 100644
--- a/drivers/video/msm/mdss/dsi_status_v2.c
+++ b/drivers/video/msm/mdss/dsi_status_v2.c
@@ -10,49 +10,26 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/fb.h>
-#include <linux/notifier.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/iopoll.h>
 #include <linux/kobject.h>
 #include <linux/string.h>
 #include <linux/sysfs.h>
 
-#include "mdss_fb.h"
 #include "mdss_dsi.h"
-#include "mdss_panel.h"
 #include "mdp3_ctrl.h"
 
-#define STATUS_CHECK_INTERVAL 5000
-
-/**
- * dsi_status_data - Stores all the data necessary for this module
- * @fb_notif: Used to egister for the fb events
- * @live_status: Delayed worker structure, used to associate the
- * delayed worker function
- * @mfd: Used to store the msm_fb_data_type received when the notifier
- * call back happens
- * @root: Stores the dir created by debuugfs
- * @debugfs_reset_panel: The debugfs variable used to inject errors
+/*
+ * mdp3_check_dsi_ctrl_status() - Check MDP3 DSI controller status periodically.
+ * @work     : dsi controller status data
+ * @interval : time in milliseconds after which the check is rescheduled
+ *
+ * This function calls the check_status API on the DSI controller to send the
+ * BTA command. If the DSI controller fails to acknowledge the BTA command,
+ * it sends the PANEL_ALIVE=0 status to the HAL layer.
  */
-
-struct dsi_status_data {
-	struct notifier_block fb_notifier;
-	struct delayed_work check_status;
-	struct msm_fb_data_type *mfd;
-	uint32_t check_interval;
-};
-struct dsi_status_data *pstatus_data;
-static uint32_t interval = STATUS_CHECK_INTERVAL;
-
-void check_dsi_ctrl_status(struct work_struct *work)
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+				uint32_t interval)
 {
 	struct dsi_status_data *pdsi_status = NULL;
 	struct mdss_panel_data *pdata = NULL;
@@ -61,27 +38,25 @@
 	int ret = 0;
 
 	pdsi_status = container_of(to_delayed_work(work),
-		struct dsi_status_data, check_status);
-	if (!pdsi_status) {
-		pr_err("%s: DSI status data not available\n", __func__);
-		return;
-	}
+	struct dsi_status_data, check_status);
 
 	pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
 	if (!pdata) {
 		pr_err("%s: Panel data not available\n", __func__);
 		return;
 	}
+
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 							panel_data);
+
 	if (!ctrl_pdata || !ctrl_pdata->check_status) {
-		pr_err("%s: DSI ctrl or status_check callback not avilable\n",
+		pr_err("%s: DSI ctrl or status_check callback not available\n",
 								__func__);
 		return;
 	}
+
 	mdp3_session = pdsi_status->mfd->mdp.private1;
 	mutex_lock(&mdp3_session->lock);
-
 	if (!mdp3_session->status) {
 		pr_info("display off already\n");
 		mutex_unlock(&mdp3_session->lock);
@@ -94,107 +69,22 @@
 	if (!ret)
 		ret = ctrl_pdata->check_status(ctrl_pdata);
 	else
-		pr_err("wait_for_dma_done error\n");
-
+		pr_err("%s: wait_for_dma_done error\n", __func__);
 	mutex_unlock(&mdp3_session->lock);
 
 	if ((pdsi_status->mfd->panel_power_on)) {
 		if (ret > 0) {
 			schedule_delayed_work(&pdsi_status->check_status,
-				msecs_to_jiffies(pdsi_status->check_interval));
+						msecs_to_jiffies(interval));
 		} else {
 			char *envp[2] = {"PANEL_ALIVE=0", NULL};
 			pdata->panel_info.panel_dead = true;
 			ret = kobject_uevent_env(
-				&pdsi_status->mfd->fbi->dev->kobj,
-							KOBJ_CHANGE, envp);
+					&pdsi_status->mfd->fbi->dev->kobj,
+					KOBJ_CHANGE, envp);
 			pr_err("%s: Panel has gone bad, sending uevent - %s\n",
 							__func__, envp[0]);
 		}
 	}
 }
 
-/**
- * fb_notifier_callback() - Call back function for the fb_register_client()
- * notifying events
- * @self  : notifier block
- * @event : The event that was triggered
- * @data  : Of type struct fb_event
- *
- * - This function listens for FB_BLANK_UNBLANK and FB_BLANK_POWERDOWN events
- * - Based on the event the delayed work is either scheduled again after
- * PANEL_STATUS_CHECK_INTERVAL or cancelled
- */
-static int fb_event_callback(struct notifier_block *self,
-				unsigned long event, void *data)
-{
-	struct fb_event *evdata = (struct fb_event *)data;
-	struct dsi_status_data *pdata = container_of(self,
-				struct dsi_status_data, fb_notifier);
-	pdata->mfd = (struct msm_fb_data_type *)evdata->info->par;
-
-	if (event == FB_EVENT_BLANK && evdata) {
-		int *blank = evdata->data;
-		switch (*blank) {
-		case FB_BLANK_UNBLANK:
-			schedule_delayed_work(&pdata->check_status,
-			msecs_to_jiffies(STATUS_CHECK_INTERVAL));
-			break;
-		case FB_BLANK_POWERDOWN:
-			cancel_delayed_work(&pdata->check_status);
-			break;
-		}
-	}
-	return 0;
-}
-
-int __init mdss_dsi_status_init(void)
-{
-	int rc;
-
-	pstatus_data = kzalloc(sizeof(struct dsi_status_data),	GFP_KERNEL);
-	if (!pstatus_data) {
-		pr_err("%s: can't alloc mem\n", __func__);
-		rc = -ENOMEM;
-		return rc;
-	}
-
-	memset(pstatus_data, 0, sizeof(struct dsi_status_data));
-
-	pstatus_data->fb_notifier.notifier_call = fb_event_callback;
-
-	rc = fb_register_client(&pstatus_data->fb_notifier);
-	if (rc < 0) {
-		pr_err("%s: fb_register_client failed, returned with rc=%d\n",
-								__func__, rc);
-		kfree(pstatus_data);
-		return -EPERM;
-	}
-
-	pstatus_data->check_interval = interval;
-	pr_info("%s: DSI status check interval:%d\n", __func__, interval);
-
-	INIT_DELAYED_WORK(&pstatus_data->check_status, check_dsi_ctrl_status);
-
-	pr_debug("%s: DSI ctrl status thread initialized\n", __func__);
-
-	return rc;
-}
-
-void __exit mdss_dsi_status_exit(void)
-{
-	fb_unregister_client(&pstatus_data->fb_notifier);
-	cancel_delayed_work_sync(&pstatus_data->check_status);
-	kfree(pstatus_data);
-	pr_debug("%s: DSI ctrl status thread removed\n", __func__);
-}
-
-module_param(interval, uint, 0);
-MODULE_PARM_DESC(interval,
-	"Duration in milliseconds to send BTA command for checking"
-	"DSI status periodically");
-
-module_init(mdss_dsi_status_init);
-module_exit(mdss_dsi_status_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index ad7cb81..8e2ae4d 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -2266,6 +2266,7 @@
 	.panel_register_done = mdp3_panel_register_done,
 	.fb_stride = mdp3_fb_stride,
 	.fb_mem_alloc_fnc = mdp3_alloc,
+	.check_dsi_status = mdp3_check_dsi_ctrl_status,
 	};
 
 	struct mdp3_intr_cb underrun_cb = {
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
index 15aab59..137a1b8 100644
--- a/drivers/video/msm/mdss/mdp3.h
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -209,6 +209,8 @@
 int mdp3_misr_set(struct mdp_misr *misr_req);
 int mdp3_misr_get(struct mdp_misr *misr_resp);
 void mdp3_enable_regulator(int enable);
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+				uint32_t interval);
 
 #define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
 #define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index 9cbff71..8acb9b0 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -27,10 +27,7 @@
 
 #define VSYNC_EXPIRE_TICK	4
 
-static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd,
-					struct mdp_overlay *req,
-					int image_size,
-					int *pipe_ndx);
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd);
 static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx);
 static int mdp3_histogram_stop(struct mdp3_session_data *session,
 					u32 block);
@@ -1071,10 +1068,7 @@
 	return 0;
 }
 
-static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd,
-					struct mdp_overlay *req,
-					int image_size,
-					int *pipe_ndx)
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
 {
 	struct fb_info *fbi;
 	struct mdp3_session_data *mdp3_session;
diff --git a/drivers/video/msm/mdss/mdp3_ppp.h b/drivers/video/msm/mdss/mdp3_ppp.h
index 9753e94..a0ad3c3 100644
--- a/drivers/video/msm/mdss/mdp3_ppp.h
+++ b/drivers/video/msm/mdss/mdp3_ppp.h
@@ -391,7 +391,7 @@
 uint32_t ppp_bpp(uint32_t type);
 uint32_t ppp_src_config(uint32_t type);
 uint32_t ppp_out_config(uint32_t type);
-uint32_t ppp_pack_pattern(uint32_t type);
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb);
 uint32_t ppp_dst_op_reg(uint32_t type);
 uint32_t ppp_src_op_reg(uint32_t type);
 bool ppp_per_p_alpha(uint32_t type);
diff --git a/drivers/video/msm/mdss/mdp3_ppp_data.c b/drivers/video/msm/mdss/mdp3_ppp_data.c
index e562ad3..5748842 100644
--- a/drivers/video/msm/mdss/mdp3_ppp_data.c
+++ b/drivers/video/msm/mdss/mdp3_ppp_data.c
@@ -88,6 +88,35 @@
 		CLR_G, CLR_R, 8),
 };
 
+const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+	[MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+		CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+		CLR_CB, 8),
+	[MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
+		CLR_CB, CLR_Y, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+};
+
 const uint32_t dst_op_reg[MDP_IMGTYPE_LIMIT] = {
 	[MDP_Y_CRCB_H2V2] = PPP_OP_DST_CHROMA_420,
 	[MDP_Y_CBCR_H2V2] = PPP_OP_DST_CHROMA_420,
@@ -1530,10 +1559,13 @@
 	return out_cfg_lut[type];
 }
 
-uint32_t ppp_pack_pattern(uint32_t type)
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb)
 {
 	if (MDP_IS_IMGTYPE_BAD(type))
 		return 0;
+	if (yuv2rgb)
+		return swapped_pack_patt_lut[type];
+
 	return pack_patt_lut[type];
 }
 
diff --git a/drivers/video/msm/mdss/mdp3_ppp_hwio.c b/drivers/video/msm/mdss/mdp3_ppp_hwio.c
index eb01d00..a25c2c7 100644
--- a/drivers/video/msm/mdss/mdp3_ppp_hwio.c
+++ b/drivers/video/msm/mdss/mdp3_ppp_hwio.c
@@ -486,7 +486,7 @@
 	return load_secondary_matrix(csc);
 }
 
-int config_ppp_src(struct ppp_img_desc *src)
+int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb)
 {
 	uint32_t val;
 
@@ -510,12 +510,12 @@
 	val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
 	val |= (src->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
 	PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT);
-	PPP_WRITEL(ppp_pack_pattern(src->color_fmt),
+	PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb),
 		MDP3_PPP_SRC_UNPACK_PATTERN1);
 	return 0;
 }
 
-int config_ppp_out(struct ppp_img_desc *dst)
+int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
 {
 	uint32_t val;
 	bool pseudoplanr_output = false;
@@ -534,7 +534,7 @@
 	if (pseudoplanr_output)
 		val |= PPP_DST_PLANE_PSEUDOPLN;
 	PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT);
-	PPP_WRITEL(ppp_pack_pattern(dst->color_fmt),
+	PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb),
 		MDP3_PPP_OUT_PACK_PATTERN1);
 
 	val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
@@ -573,7 +573,7 @@
 
 	PPP_WRITEL(ppp_src_config(bg->color_fmt),
 		MDP3_PPP_BG_FORMAT);
-	PPP_WRITEL(ppp_pack_pattern(bg->color_fmt),
+	PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, 0),
 		MDP3_PPP_BG_UNPACK_PATTERN1);
 	return 0;
 }
@@ -1108,6 +1108,7 @@
 
 int config_ppp_op_mode(struct ppp_blit_op *blit_op)
 {
+	uint32_t yuv2rgb;
 	uint32_t ppp_operation_reg = 0;
 	int sv_slice, sh_slice;
 	int dv_slice, dh_slice;
@@ -1153,6 +1154,7 @@
 
 	config_ppp_csc(blit_op->src.color_fmt,
 		blit_op->dst.color_fmt, &ppp_operation_reg);
+	yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB;
 
 	if (blit_op->mdp_op & MDPOP_DITHER)
 		ppp_operation_reg |= PPP_OP_DITHER_EN;
@@ -1197,8 +1199,8 @@
 
 	config_ppp_blend(blit_op, &ppp_operation_reg);
 
-	config_ppp_src(&blit_op->src);
-	config_ppp_out(&blit_op->dst);
+	config_ppp_src(&blit_op->src, yuv2rgb);
+	config_ppp_out(&blit_op->dst, yuv2rgb);
 	PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
 	mb();
 	return 0;
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index f6f6cd4..962599d 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -302,6 +302,12 @@
 	struct dsi_buf rx_buf;
 };
 
+struct dsi_status_data {
+	struct notifier_block fb_notifier;
+	struct delayed_work check_status;
+	struct msm_fb_data_type *mfd;
+};
+
 int dsi_panel_device_register(struct device_node *pan_node,
 				struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 3cd014c..95e7c6e 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -878,7 +878,7 @@
 
 	ctrl_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
 
-	if (rlen == 0) {
+	if (rlen <= 2) {
 		short_response = 1;
 		rx_byte = 4;
 	} else {
@@ -1381,11 +1381,11 @@
 		}
 
 		if (todo & DSI_EV_MDP_BUSY_RELEASE) {
-			spin_lock(&ctrl->mdp_lock);
+			spin_lock_irqsave(&ctrl->mdp_lock, flag);
 			ctrl->mdp_busy = false;
 			mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
 			complete(&ctrl->mdp_comp);
-			spin_unlock(&ctrl->mdp_lock);
+			spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
 
 			/* enable dsi error interrupt */
 			mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
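
The spin_lock_irqsave() change above follows the standard rule for a lock that is also taken in interrupt context, as ctrl->mdp_lock is from the DSI ISR: process-context users must disable local interrupts while holding it, otherwise the interrupt handler can end up spinning on a lock its own CPU already holds. A reduced sketch of the rule, with demo_lock and demo_busy as illustrative stand-ins:

```c
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static bool demo_busy;

/* Hard-IRQ context: plain spin_lock() is fine, interrupts are already off. */
static irqreturn_t demo_isr(int irq, void *data)
{
	spin_lock(&demo_lock);
	demo_busy = true;
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* Process/workqueue context: interrupts must be blocked across the section. */
static void demo_release_busy(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_busy = false;
	spin_unlock_irqrestore(&demo_lock, flags);
}
```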
diff --git a/drivers/video/msm/mdss/mdss_dsi_status.c b/drivers/video/msm/mdss/mdss_dsi_status.c
index 1dca08e..116b778 100644
--- a/drivers/video/msm/mdss/mdss_dsi_status.c
+++ b/drivers/video/msm/mdss/mdss_dsi_status.c
@@ -30,36 +30,26 @@
 #include "mdss_panel.h"
 #include "mdss_mdp.h"
 
-#define STATUS_CHECK_INTERVAL 5000
+#define STATUS_CHECK_INTERVAL_MS 5000
+#define STATUS_CHECK_INTERVAL_MIN_MS 200
+#define DSI_STATUS_CHECK_DISABLE 1
 
-struct dsi_status_data {
-	struct notifier_block fb_notifier;
-	struct delayed_work check_status;
-	struct msm_fb_data_type *mfd;
-	uint32_t check_interval;
-};
+static uint32_t interval = STATUS_CHECK_INTERVAL_MS;
+static uint32_t dsi_status_disable = DSI_STATUS_CHECK_DISABLE;
 struct dsi_status_data *pstatus_data;
-static uint32_t interval = STATUS_CHECK_INTERVAL;
 
 /*
- * check_dsi_ctrl_status() - Check DSI controller status periodically.
+ * check_dsi_ctrl_status() - Reads the MFD structure and calls the
+ * platform-specific DSI controller status function.
  * @work  : dsi controller status data
- *
- * This function calls check_status API on DSI controller to send the BTA
- * command. If DSI controller fails to acknowledge the BTA command, it sends
- * the PANEL_ALIVE=0 status to HAL layer.
  */
 static void check_dsi_ctrl_status(struct work_struct *work)
 {
 	struct dsi_status_data *pdsi_status = NULL;
-	struct mdss_panel_data *pdata = NULL;
-	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
-	struct mdss_overlay_private *mdp5_data = NULL;
-	struct mdss_mdp_ctl *ctl = NULL;
-	int ret = 0;
 
 	pdsi_status = container_of(to_delayed_work(work),
 		struct dsi_status_data, check_status);
+
 	if (!pdsi_status) {
 		pr_err("%s: DSI status data not available\n", __func__);
 		return;
@@ -70,73 +60,7 @@
 		return;
 	}
 
-	pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
-	if (!pdata) {
-		pr_err("%s: Panel data not available\n", __func__);
-		return;
-	}
-
-	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
-							panel_data);
-	if (!ctrl_pdata || !ctrl_pdata->check_status) {
-		pr_err("%s: DSI ctrl or status_check callback not available\n",
-								__func__);
-		return;
-	}
-
-	mdp5_data = mfd_to_mdp5_data(pdsi_status->mfd);
-	ctl = mfd_to_ctl(pdsi_status->mfd);
-
-	if (ctl->shared_lock)
-		mutex_lock(ctl->shared_lock);
-	mutex_lock(&mdp5_data->ov_lock);
-
-	if (pdsi_status->mfd->shutdown_pending) {
-		mutex_unlock(&mdp5_data->ov_lock);
-		if (ctl->shared_lock)
-			mutex_unlock(ctl->shared_lock);
-		pr_err("%s: DSI turning off, avoiding BTA status check\n",
-							__func__);
-		return;
-	}
-
-	/*
-	 * For the command mode panels, we return pan display
-	 * IOCTL on vsync interrupt. So, after vsync interrupt comes
-	 * and when DMA_P is in progress, if the panel stops responding
-	 * and if we trigger BTA before DMA_P finishes, then the DSI
-	 * FIFO will not be cleared since the DSI data bus control
-	 * doesn't come back to the host after BTA. This may cause the
-	 * display reset not to be proper. Hence, wait for DMA_P done
-	 * for command mode panels before triggering BTA.
-	 */
-	if (ctl->wait_pingpong)
-		ctl->wait_pingpong(ctl, NULL);
-
-	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
-
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	ret = ctrl_pdata->check_status(ctrl_pdata);
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-
-	mutex_unlock(&mdp5_data->ov_lock);
-	if (ctl->shared_lock)
-		mutex_unlock(ctl->shared_lock);
-
-	if ((pdsi_status->mfd->panel_power_on)) {
-		if (ret > 0) {
-			schedule_delayed_work(&pdsi_status->check_status,
-				msecs_to_jiffies(pdsi_status->check_interval));
-		} else {
-			char *envp[2] = {"PANEL_ALIVE=0", NULL};
-			pdata->panel_info.panel_dead = true;
-			ret = kobject_uevent_env(
-				&pdsi_status->mfd->fbi->dev->kobj,
-							KOBJ_CHANGE, envp);
-			pr_err("%s: Panel has gone bad, sending uevent - %s\n",
-							__func__, envp[0]);
-		}
-	}
+	pdsi_status->mfd->mdp.check_dsi_status(work, interval);
 }
 
 /*
@@ -156,6 +80,19 @@
 	struct fb_event *evdata = data;
 	struct dsi_status_data *pdata = container_of(self,
 				struct dsi_status_data, fb_notifier);
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pdata->mfd = evdata->info->par;
+	ctrl_pdata = container_of(dev_get_platdata(&pdata->mfd->pdev->dev),
+				struct mdss_dsi_ctrl_pdata, panel_data);
+	if (!ctrl_pdata) {
+		pr_err("%s: DSI ctrl not available\n", __func__);
+		return NOTIFY_BAD;
+	}
+	if (dsi_status_disable) {
+		pr_debug("%s: DSI status disabled\n", __func__);
+		return NOTIFY_DONE;
+	}
 
 	if (event == FB_EVENT_BLANK && evdata) {
 		int *blank = evdata->data;
@@ -166,7 +103,7 @@
 		switch (*blank) {
 		case FB_BLANK_UNBLANK:
 			schedule_delayed_work(&pdata->check_status,
-				msecs_to_jiffies(pdata->check_interval));
+				msecs_to_jiffies(interval));
 			break;
 		case FB_BLANK_POWERDOWN:
 			cancel_delayed_work(&pdata->check_status);
@@ -176,6 +113,41 @@
 	return 0;
 }
 
+static int param_dsi_status_disable(const char *val, struct kernel_param *kp)
+{
+	int ret = 0;
+	int int_val;
+
+	ret = kstrtos32(val, 0, &int_val);
+	if (ret)
+		return ret;
+
+	pr_info("%s: Set DSI status disable to %d\n",
+			__func__, int_val);
+	*((int *)kp->arg) = int_val;
+	return ret;
+}
+
+static int param_set_interval(const char *val, struct kernel_param *kp)
+{
+	int ret = 0;
+	int int_val;
+
+	ret = kstrtos32(val, 0, &int_val);
+	if (ret)
+		return ret;
+	if (int_val < STATUS_CHECK_INTERVAL_MIN_MS) {
+		pr_err("%s: Invalid value %d used, ignoring\n",
+						__func__, int_val);
+		ret = -EINVAL;
+	} else {
+		pr_info("%s: Set check interval to %d msecs\n",
+						__func__, int_val);
+		*((int *)kp->arg) = int_val;
+	}
+	return ret;
+}
+
 int __init mdss_dsi_status_init(void)
 {
 	int rc = 0;
@@ -196,7 +168,6 @@
 		return -EPERM;
 	}
 
-	pstatus_data->check_interval = interval;
 	pr_info("%s: DSI status check interval:%d\n", __func__,	interval);
 
 	INIT_DELAYED_WORK(&pstatus_data->check_status, check_dsi_ctrl_status);
@@ -214,11 +185,17 @@
 	pr_debug("%s: DSI ctrl status work queue removed\n", __func__);
 }
 
-module_param(interval, uint, 0);
+module_param_call(interval, param_set_interval, param_get_uint,
+						&interval, 0644);
 MODULE_PARM_DESC(interval,
 		"Duration in milliseconds to send BTA command for checking "
 		"DSI status periodically");
 
+module_param_call(dsi_status_disable, param_dsi_status_disable, param_get_uint,
+						&dsi_status_disable, 0644);
+MODULE_PARM_DESC(dsi_status_disable,
+		"Disable DSI status check");
+
 module_init(mdss_dsi_status_init);
 module_exit(mdss_dsi_status_exit);
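
With module_param_call() the two parameters above gain custom setters: interval is rejected below STATUS_CHECK_INTERVAL_MIN_MS and dsi_status_disable is logged before being stored, and the 0644 mode leaves both writable at runtime (assuming the usual sysfs layout they surface under /sys/module/<module>/parameters/). A minimal sketch of the same pattern, using illustrative names and the non-const kernel_param callback signature this kernel still uses:

```c
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

#define DEMO_MIN_MS 200

static uint demo_interval = 5000;

/* Custom setter: validate before storing, unlike plain module_param(). */
static int demo_set_interval(const char *val, struct kernel_param *kp)
{
	int new_val;
	int ret = kstrtos32(val, 0, &new_val);

	if (ret)
		return ret;
	if (new_val < DEMO_MIN_MS)
		return -EINVAL;		/* reject values below the floor */

	*((uint *)kp->arg) = new_val;
	return 0;
}

module_param_call(demo_interval, demo_set_interval, param_get_uint,
		  &demo_interval, 0644);
MODULE_PARM_DESC(demo_interval, "Check interval in milliseconds (>= 200)");
```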
 
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index b769dcb..08ecf1d 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -52,6 +52,7 @@
 #include <mach/msm_memtypes.h>
 
 #include "mdss_fb.h"
+#include "mdss_mdp_splash_logo.h"
 
 #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
 #define MDSS_FB_NUM 3
@@ -191,47 +192,6 @@
 	return ret;
 }
 
-static int mdss_fb_splash_thread(void *data)
-{
-	struct msm_fb_data_type *mfd = data;
-	int ret = -EINVAL;
-	struct fb_info *fbi = NULL;
-	int ov_index[2];
-
-	if (!mfd || !mfd->fbi || !mfd->mdp.splash_fnc) {
-		pr_err("Invalid input parameter\n");
-		goto end;
-	}
-
-	fbi = mfd->fbi;
-
-	ret = mdss_fb_open(fbi, current->tgid);
-	if (ret) {
-		pr_err("fb_open failed\n");
-		goto end;
-	}
-
-	mfd->bl_updated = true;
-	mdss_fb_set_backlight(mfd, mfd->panel_info->bl_max >> 1);
-
-	ret = mfd->mdp.splash_fnc(mfd, ov_index, MDP_CREATE_SPLASH_OV);
-	if (ret) {
-		pr_err("Splash image failed\n");
-		goto splash_err;
-	}
-
-	do {
-		schedule_timeout_interruptible(SPLASH_THREAD_WAIT_TIMEOUT * HZ);
-	} while (!kthread_should_stop());
-
-	mfd->mdp.splash_fnc(mfd, ov_index, MDP_REMOVE_SPLASH_OV);
-
-splash_err:
-	mdss_fb_release(fbi, current->tgid);
-end:
-	return ret;
-}
-
 static int lcd_backlight_registered;
 
 static void mdss_fb_set_bl_brightness(struct led_classdev *led_cdev,
@@ -310,9 +270,6 @@
 	u32 data[2];
 	struct platform_device *pdev = mfd->pdev;
 
-	mfd->splash_logo_enabled = of_property_read_bool(pdev->dev.of_node,
-				"qcom,mdss-fb-splash-logo-enabled");
-
 	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,mdss-fb-split",
 				       data, 2))
 		return;
@@ -567,15 +524,8 @@
 		break;
 	}
 
-	if (mfd->splash_logo_enabled) {
-		mfd->splash_thread = kthread_run(mdss_fb_splash_thread, mfd,
-				"mdss_fb_splash");
-		if (IS_ERR(mfd->splash_thread)) {
-			pr_err("unable to start splash thread %d\n",
-				mfd->index);
-			mfd->splash_thread = NULL;
-		}
-	}
+	if (mfd->mdp.splash_init_fnc)
+		mfd->mdp.splash_init_fnc(mfd);
 
 	INIT_DELAYED_WORK(&mfd->idle_notify_work, __mdss_fb_idle_notify_work);
 
@@ -1288,6 +1238,7 @@
 	atomic_set(&mfd->mdp_sync_pt_data.commit_cnt, 0);
 	atomic_set(&mfd->commits_pending, 0);
 	atomic_set(&mfd->ioctl_ref_cnt, 0);
+	atomic_set(&mfd->kickoff_pending, 0);
 
 	init_timer(&mfd->no_update.timer);
 	mfd->no_update.timer.function = mdss_fb_no_update_notify_timer_cb;
@@ -1301,6 +1252,7 @@
 	init_waitqueue_head(&mfd->commit_wait_q);
 	init_waitqueue_head(&mfd->idle_wait_q);
 	init_waitqueue_head(&mfd->ioctl_q);
+	init_waitqueue_head(&mfd->kickoff_wait_q);
 
 	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
 	if (ret)
@@ -1381,12 +1333,6 @@
 	pinfo->ref_cnt++;
 	mfd->ref_cnt++;
 
-	/* Stop the splash thread once userspace open the fb node */
-	if (mfd->splash_thread && mfd->ref_cnt > 1) {
-		kthread_stop(mfd->splash_thread);
-		mfd->splash_thread = NULL;
-	}
-
 	return 0;
 
 blank_error:
@@ -1703,6 +1649,25 @@
 	return 0;
 }
 
+static int mdss_fb_wait_for_kickoff(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	ret = wait_event_timeout(mfd->kickoff_wait_q,
+			(!atomic_read(&mfd->kickoff_pending) ||
+			 mfd->shutdown_pending),
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT / 2));
+	if (!ret) {
+		pr_err("wait for kickoff timeout %d pending=%d\n",
+				ret, atomic_read(&mfd->kickoff_pending));
+	} else if (mfd->shutdown_pending) {
+		pr_debug("Shutdown signalled\n");
+		return -EPERM;
+	}
+
+	return 0;
+}
 
 static int mdss_fb_pan_display_ex(struct fb_info *info,
 		struct mdp_display_commit *disp_commit)
@@ -1741,6 +1706,7 @@
 
 	atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
 	atomic_inc(&mfd->commits_pending);
+	atomic_inc(&mfd->kickoff_pending);
 	wake_up_all(&mfd->commit_wait_q);
 	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
 	if (wait_for_finish)
@@ -1781,7 +1747,7 @@
 		(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
 
 	if (mfd->mdp.dma_fnc)
-		mfd->mdp.dma_fnc(mfd, NULL, 0, NULL);
+		mfd->mdp.dma_fnc(mfd);
 	else
 		pr_warn("dma function not set for panel type=%d\n",
 				mfd->panel.type);
@@ -1833,6 +1799,8 @@
 		if (ret)
 			pr_err("pan display failed %x on fb%d\n", ret,
 					mfd->index);
+		atomic_set(&mfd->kickoff_pending, 0);
+		wake_up_all(&mfd->kickoff_wait_q);
 	}
 	if (!ret)
 		mdss_fb_update_backlight(mfd);
@@ -1869,6 +1837,7 @@
 	}
 
 	atomic_set(&mfd->commits_pending, 0);
+	atomic_set(&mfd->kickoff_pending, 0);
 	wake_up_all(&mfd->idle_wait_q);
 
 	return ret;
@@ -2328,6 +2297,27 @@
 	return ret;
 }
 
+static int __ioctl_wait_idle(struct msm_fb_data_type *mfd, u32 cmd)
+{
+	int ret = 0;
+
+	if (mfd->wait_for_kickoff &&
+		((cmd == MSMFB_OVERLAY_PREPARE) ||
+		(cmd == MSMFB_BUFFER_SYNC) ||
+		(cmd == MSMFB_OVERLAY_SET))) {
+		ret = mdss_fb_wait_for_kickoff(mfd);
+	} else if ((cmd != MSMFB_VSYNC_CTRL) &&
+		(cmd != MSMFB_OVERLAY_VSYNC_CTRL) &&
+		(cmd != MSMFB_ASYNC_BLIT) &&
+		(cmd != MSMFB_BLIT) &&
+		(cmd != MSMFB_NOTIFY_UPDATE)) {
+		ret = mdss_fb_pan_idle(mfd);
+	}
+
+	if (ret)
+		pr_debug("Shutdown pending. Aborting operation %x\n", cmd);
+	return ret;
+}
 
 static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
 			 unsigned long arg)
@@ -2353,17 +2343,9 @@
 
 	mdss_fb_power_setting_idle(mfd);
 
-	if ((cmd != MSMFB_VSYNC_CTRL) && (cmd != MSMFB_OVERLAY_VSYNC_CTRL) &&
-			(cmd != MSMFB_ASYNC_BLIT) && (cmd != MSMFB_BLIT) &&
-			(cmd != MSMFB_NOTIFY_UPDATE) &&
-			(cmd != MSMFB_OVERLAY_PREPARE)) {
-		ret = mdss_fb_pan_idle(mfd);
-		if (ret) {
-			pr_debug("Shutdown pending. Aborting operation %x\n",
-				cmd);
-			goto exit;
-		}
-	}
+	ret = __ioctl_wait_idle(mfd, cmd);
+	if (ret)
+		goto exit;
 
 	switch (cmd) {
 	case MSMFB_CURSOR:
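
The kickoff_pending counter added above pairs an atomic with its own wait queue so that latency-sensitive ioctls (overlay set/prepare, buffer sync) only wait until the committed frame has been picked up and kicked off, instead of waiting for full pan-display idle. The handshake, condensed to a sketch with illustrative demo_* names:

```c
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_kickoff_wait_q);
static atomic_t demo_kickoff_pending = ATOMIC_INIT(0);

/* Commit path: mark a kickoff as pending before waking the display thread. */
static void demo_queue_kickoff(void)
{
	atomic_inc(&demo_kickoff_pending);
}

/* Display thread: clear the marker once the frame is handed to hardware. */
static void demo_kickoff_done(void)
{
	atomic_set(&demo_kickoff_pending, 0);
	wake_up_all(&demo_kickoff_wait_q);
}

/* Ioctl path: block only until the kickoff point, not until full idle. */
static int demo_wait_for_kickoff(unsigned int timeout_ms)
{
	long rc = wait_event_timeout(demo_kickoff_wait_q,
				     !atomic_read(&demo_kickoff_pending),
				     msecs_to_jiffies(timeout_ms));

	return rc ? 0 : -ETIMEDOUT;
}
```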
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 7c47a3e..3416b9e 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -21,6 +21,7 @@
 #include <linux/notifier.h>
 
 #include "mdss_panel.h"
+#include "mdss_mdp_splash_logo.h"
 
 #define MSM_FB_DEFAULT_PAGE_SIZE 2
 #define MFD_KEY  0x11161126
@@ -33,8 +34,6 @@
 #define WAIT_DISP_OP_TIMEOUT ((WAIT_FENCE_FIRST_TIMEOUT + \
 		WAIT_FENCE_FINAL_TIMEOUT) * MDP_MAX_FENCE_FD)
 
-#define SPLASH_THREAD_WAIT_TIMEOUT 3
-
 #ifndef MAX
 #define  MAX(x, y) (((x) > (y)) ? (x) : (y))
 #endif
@@ -69,11 +68,6 @@
 	MDP_NOTIFY_FRAME_TIMEOUT,
 };
 
-enum mdp_splash_event {
-	MDP_CREATE_SPLASH_OV = 0,
-	MDP_REMOVE_SPLASH_OV,
-};
-
 struct disp_info_type_suspend {
 	int op_enable;
 	int panel_power_on;
@@ -122,8 +116,7 @@
 	int (*kickoff_fnc)(struct msm_fb_data_type *mfd,
 					struct mdp_display_commit *data);
 	int (*ioctl_handler)(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
-	void (*dma_fnc)(struct msm_fb_data_type *mfd, struct mdp_overlay *req,
-				int image_len, int *pipe_ndx);
+	void (*dma_fnc)(struct msm_fb_data_type *mfd);
 	int (*cursor_update)(struct msm_fb_data_type *mfd,
 				struct fb_cursor *cursor);
 	int (*lut_update)(struct msm_fb_data_type *mfd, struct fb_cmap *cmap);
@@ -132,9 +125,10 @@
 	int (*update_ad_input)(struct msm_fb_data_type *mfd);
 	int (*panel_register_done)(struct mdss_panel_data *pdata);
 	u32 (*fb_stride)(u32 fb_index, u32 xres, int bpp);
-	int (*splash_fnc) (struct msm_fb_data_type *mfd, int *index, int req);
+	int (*splash_init_fnc)(struct msm_fb_data_type *mfd);
 	struct msm_sync_pt_data *(*get_sync_fnc)(struct msm_fb_data_type *mfd,
 				const struct mdp_buf_sync *buf_sync);
+	void (*check_dsi_status)(struct work_struct *work, uint32_t interval);
 	void *private1;
 };
 
@@ -214,12 +208,13 @@
 	/* for non-blocking */
 	struct task_struct *disp_thread;
 	atomic_t commits_pending;
+	atomic_t kickoff_pending;
 	wait_queue_head_t commit_wait_q;
 	wait_queue_head_t idle_wait_q;
+	wait_queue_head_t kickoff_wait_q;
 	bool shutdown_pending;
 
-	struct task_struct *splash_thread;
-	bool splash_logo_enabled;
+	struct msm_fb_splash_info splash_info;
 
 	wait_queue_head_t ioctl_q;
 	atomic_t ioctl_ref_cnt;
@@ -230,6 +225,7 @@
 
 	u32 dcm_state;
 	struct list_head proc_list;
+	u32 wait_for_kickoff;
 };
 
 static inline void mdss_fb_update_notify_update(struct msm_fb_data_type *mfd)
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 667e4b7..97d4dc9 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -68,6 +68,7 @@
 	.fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
 	.panel_register_done = mdss_panel_register_done,
 	.fb_stride = mdss_mdp_fb_stride,
+	.check_dsi_status = mdss_check_dsi_ctrl_status,
 };
 
 #define DEFAULT_TOTAL_RGB_PIPES 3
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index e528219..1e17205 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -19,6 +19,7 @@
 #include <linux/msm_mdp.h>
 #include <linux/platform_device.h>
 #include <linux/notifier.h>
+#include <linux/kref.h>
 
 #include "mdss.h"
 #include "mdss_mdp_hwio.h"
@@ -32,6 +33,7 @@
 #define MDP_CLK_DEFAULT_RATE	200000000
 #define PHASE_STEP_SHIFT	21
 #define MAX_MIXER_WIDTH		2048
+#define MAX_LINE_BUFFER_WIDTH	2048
 #define MAX_MIXER_HEIGHT	0xFFFF
 #define MAX_IMG_WIDTH		0x3FFF
 #define MAX_IMG_HEIGHT		0x3FFF
@@ -375,7 +377,8 @@
 	struct mdss_mdp_shared_reg_ctrl clk_ctrl;
 	struct mdss_mdp_shared_reg_ctrl clk_status;
 
-	atomic_t ref_cnt;
+	struct kref kref;
+
 	u32 play_cnt;
 	int pid;
 	bool is_handed_off;
@@ -503,6 +506,17 @@
 	return readl_relaxed(mixer->pingpong_base + reg);
 }
 
+static inline int mdss_mdp_iommu_dyn_attach_supported(
+	struct mdss_data_type *mdata)
+{
+	return (mdata->mdp_rev >= MDSS_MDP_HW_REV_103);
+}
+
+static inline int mdss_mdp_line_buffer_width(void)
+{
+	return MAX_LINE_BUFFER_WIDTH;
+}
+
 irqreturn_t mdss_mdp_isr(int irq, void *ptr);
 int mdss_iommu_attach(struct mdss_data_type *mdata);
 int mdss_iommu_dettach(struct mdss_data_type *mdata);
@@ -536,6 +550,12 @@
 			     struct msmfb_data *planes,
 			     int num_planes,
 			     u32 flags);
+int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe);
+void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
+							u32 type);
+int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx);
+int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd);
 int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
 		u32 *offsets,  u32 count);
 int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
@@ -692,6 +712,7 @@
 
 int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
 u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval);
 
 int mdss_panel_register_done(struct mdss_panel_data *pdata);
 int mdss_mdp_limited_lut_igc_config(struct mdss_mdp_ctl *ctl);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 9cf745e..65d28a0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -2575,7 +2575,8 @@
 		}
 	}
 
-	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+	if (!ctl->shared_lock)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
 
 	if (ctl->wait_pingpong)
 		ctl->wait_pingpong(ctl, NULL);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index a9595a1..b07aab4 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -16,7 +16,6 @@
 #include <linux/iopoll.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 
 #include "mdss_fb.h"
@@ -446,8 +445,10 @@
 	if (ctx->polling_en) {
 		rc = mdss_mdp_video_pollwait(ctl);
 	} else {
+		mutex_unlock(&ctl->lock);
 		rc = wait_for_completion_timeout(&ctx->vsync_comp,
 				usecs_to_jiffies(VSYNC_TIMEOUT_US));
+		mutex_lock(&ctl->lock);
 		if (rc == 0) {
 			pr_warn("vsync wait timeout %d, fallback to poll mode\n",
 					ctl->num);
@@ -733,7 +734,6 @@
 {
 	struct mdss_panel_data *pdata = ctl->panel_data;
 	int i, ret = 0;
-	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
 	struct mdss_mdp_video_ctx *ctx;
 	struct mdss_data_type *mdata = ctl->mdata;
 
@@ -768,12 +768,6 @@
 
 error:
 	pdata->panel_info.cont_splash_enabled = 0;
-
-	/* Give back the reserved memory to the system */
-	memblock_free(mdp5_data->splash_mem_addr, mdp5_data->splash_mem_size);
-	free_bootmem_late(mdp5_data->splash_mem_addr,
-				 mdp5_data->splash_mem_size);
-
 	return ret;
 }
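
Releasing ctl->lock around the completion wait above keeps other control-path operations from blocking for up to the full vsync timeout while one caller sleeps. The unlock-wait-relock shape, reduced to a sketch with illustrative names (the caller is assumed to hold the mutex on entry, as the driver does):

```c
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_lock);
static DECLARE_COMPLETION(demo_vsync_comp);

/* Called with demo_lock held; drops it only for the duration of the sleep. */
static int demo_wait_vsync(unsigned int timeout_us)
{
	unsigned long rc;

	mutex_unlock(&demo_lock);
	rc = wait_for_completion_timeout(&demo_vsync_comp,
					 usecs_to_jiffies(timeout_us));
	mutex_lock(&demo_lock);

	return rc ? 0 : -ETIMEDOUT;
}
```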
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 70b266d..057483c 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -35,8 +35,6 @@
 #include "mdss_mdp.h"
 #include "mdss_mdp_rotator.h"
 
-#include "splash.h"
-
 #define VSYNC_PERIOD 16
 #define BORDERFILL_NDX	0x0BF000BF
 #define CHECK_BOUNDS(offset, size, max_size) \
@@ -47,7 +45,6 @@
 
 #define MEM_PROTECT_SD_CTRL 0xF
 
-#define INVALID_PIPE_INDEX 0xFFFF
 #define OVERLAY_MAX 10
 
 struct sd_ctrl_req {
@@ -58,7 +55,6 @@
 static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
 static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
 static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
-static int mdss_mdp_overlay_splash_parse_dt(struct msm_fb_data_type *mfd);
 static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
 
 static int mdss_mdp_overlay_sd_ctrl(struct msm_fb_data_type *mfd,
@@ -349,7 +345,7 @@
 		pipe->chroma_sample_v = 0;
 }
 
-static int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
+int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
 				       struct mdp_overlay *req,
 				       struct mdss_mdp_pipe **ppipe)
 {
@@ -823,7 +819,7 @@
 	mutex_unlock(&mfd->lock);
 }
 
-static void __mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
+void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
 	u32 type)
 {
 	u32 i, npipes;
@@ -870,7 +866,7 @@
  * from the splash screen to the Android boot animation when the
  * continuous splash screen feature is enabled.
  */
-static int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
+int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
 {
 	int rc;
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -886,6 +882,7 @@
 			mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
 		if (!mfd->panel_info->cont_splash_enabled)
 			mdss_iommu_attach(mdp5_data->mdata);
+		mdss_mdp_release_splash_pipe(mfd);
 		return 0;
 	}
 
@@ -922,55 +919,7 @@
 		goto error;
 	}
 
-	if (mfd->panel_info->cont_splash_enabled) {
-		if (mdp5_data->handoff) {
-			/*
-			 * Set up border-fill on the handed off pipes.
-			 * This is needed to ensure that there are no memory
-			 * accesses prior to attaching iommu during continuous
-			 * splash screen case. However, for command mode
-			 * displays, this is not necessary since the panels can
-			 * refresh from their internal memory if no data is sent
-			 * out on the dsi lanes.
-			 */
-			if (ctl && ctl->is_video_mode) {
-				rc = mdss_mdp_display_commit(ctl, NULL);
-				if (!IS_ERR_VALUE(rc)) {
-					mdss_mdp_display_wait4comp(ctl);
-				} else {
-					/*
-					 * Since border-fill setup failed, we
-					 * need to ensure that we turn off the
-					 * MDP timing generator before attaching
-					 * iommu
-					 */
-					pr_err("failed to set BF at handoff\n");
-					mdp5_data->handoff = false;
-					rc = 0;
-				}
-			}
-
-			/* Add all the handed off pipes to the cleanup list */
-			__mdss_mdp_handoff_cleanup_pipes(mfd,
-				MDSS_MDP_PIPE_TYPE_RGB);
-			__mdss_mdp_handoff_cleanup_pipes(mfd,
-				MDSS_MDP_PIPE_TYPE_VIG);
-			__mdss_mdp_handoff_cleanup_pipes(mfd,
-				MDSS_MDP_PIPE_TYPE_DMA);
-		}
-		rc = mdss_mdp_ctl_splash_finish(ctl, mdp5_data->handoff);
-		/*
-		 * Remove the vote for footswitch even if above function
-		 * returned error
-		 */
-		mdss_mdp_footswitch_ctrl_splash(0);
-		if (rc)
-			goto error;
-
-		if (!is_mdss_iommu_attached())
-			mdss_iommu_attach(mdss_res);
-	}
-
+	rc = mdss_mdp_splash_cleanup(mfd, true);
 error:
 	if (rc) {
 		mdss_mdp_ctl_destroy(ctl);
@@ -1099,8 +1048,11 @@
 	int ret = 0;
 	int sd_in_pipe = 0;
 
-	if (ctl->shared_lock)
+	if (ctl->shared_lock) {
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
 		mutex_lock(ctl->shared_lock);
+	}
 
 	mutex_lock(&mdp5_data->ov_lock);
 	mutex_lock(&mfd->lock);
@@ -1124,7 +1076,9 @@
 			mdp5_data->sd_enabled = 0;
 	}
 
-	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+	if (!ctl->shared_lock)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 	if (data)
@@ -1146,14 +1100,18 @@
 	else
 		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL);
 
+	atomic_set(&mfd->kickoff_pending, 0);
+	wake_up_all(&mfd->kickoff_wait_q);
 	mutex_unlock(&mfd->lock);
 
 	if (IS_ERR_VALUE(ret))
 		goto commit_fail;
 
+	mutex_unlock(&mdp5_data->ov_lock);
 	mdss_mdp_overlay_update_pm(mdp5_data);
 
 	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
+	mutex_lock(&mdp5_data->ov_lock);
 
 	if (ret == 0) {
 		mutex_lock(&mfd->lock);
@@ -1178,7 +1136,7 @@
 	return ret;
 }
 
-static int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
+int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
 {
 	struct mdss_mdp_pipe *pipe;
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -1402,8 +1360,13 @@
 
 	for (i = 0; i < mdata->ndma_pipes; i++) {
 		pipe = mdata->dma_pipes + i;
-		if (atomic_read(&pipe->ref_cnt) && pipe->mfd)
-			mdss_mdp_overlay_force_cleanup(pipe->mfd);
+
+		if (!mdss_mdp_pipe_map(pipe)) {
+			struct msm_fb_data_type *mfd = pipe->mfd;
+			mdss_mdp_pipe_unmap(pipe);
+			if (mfd)
+				mdss_mdp_overlay_force_cleanup(mfd);
+		}
 	}
 }
 
@@ -1475,8 +1438,7 @@
 
 static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
 					struct mdss_mdp_pipe **ppipe,
-					int mixer_mux,
-					struct mdp_overlay *req_ov)
+					int mixer_mux)
 {
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_mdp_pipe *pipe;
@@ -1486,7 +1448,10 @@
 					 MDSS_MDP_STAGE_BASE);
 
 	if (pipe == NULL) {
+		struct mdp_overlay req;
+		struct fb_info *fbi = mfd->fbi;
 		struct mdss_mdp_mixer *mixer;
+		int bpp;
 
 		mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
 					MDSS_MDP_MIXER_MUX_LEFT);
@@ -1495,73 +1460,46 @@
 			return -ENODEV;
 		}
 
-		if (req_ov == NULL) {
-			struct mdp_overlay req;
-			struct fb_info *fbi = mfd->fbi;
-			int bpp;
+		memset(&req, 0, sizeof(req));
 
-			memset(&req, 0, sizeof(req));
-
-			bpp = fbi->var.bits_per_pixel / 8;
-			req.id = MSMFB_NEW_REQUEST;
-			req.src.format = mfd->fb_imgType;
-			req.src.height = fbi->var.yres;
-			req.src.width = fbi->fix.line_length / bpp;
-			if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
-				if (req.src.width <= mixer->width) {
-					pr_warn("right fb pipe not needed\n");
-					return -EINVAL;
-				}
-
-				req.flags |= MDSS_MDP_RIGHT_MIXER;
-				req.src_rect.x = mixer->width;
-				req.src_rect.w = fbi->var.xres - mixer->width;
-			} else {
-				req.src_rect.x = 0;
-				req.src_rect.w = MIN(fbi->var.xres,
-								mixer->width);
+		bpp = fbi->var.bits_per_pixel / 8;
+		req.id = MSMFB_NEW_REQUEST;
+		req.src.format = mfd->fb_imgType;
+		req.src.height = fbi->var.yres;
+		req.src.width = fbi->fix.line_length / bpp;
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
+			if (req.src.width <= mixer->width) {
+				pr_warn("right fb pipe not needed\n");
+				return -EINVAL;
 			}
 
-			req.src_rect.y = 0;
-			req.src_rect.h = req.src.height;
-			req.dst_rect.x = 0;
-			req.dst_rect.y = 0;
-			req.dst_rect.w = req.src_rect.w;
-			req.dst_rect.h = req.src_rect.h;
-			req.z_order = MDSS_MDP_STAGE_BASE;
-
-			pr_debug("allocating base pipe mux=%d\n", mixer_mux);
-
-			ret = mdss_mdp_overlay_pipe_setup(mfd, &req, &pipe);
-			if (ret)
-				return ret;
+			req.src_rect.x = mixer->width;
+			req.src_rect.w = fbi->var.xres - mixer->width;
 		} else {
-			if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
-				req_ov->id = MSMFB_NEW_REQUEST;
-				req_ov->flags |= MDSS_MDP_RIGHT_MIXER;
-				req_ov->src_rect.w = MIN(mixer->width,
-						req_ov->src_rect.w >> 1);
-				req_ov->dst_rect.w = req_ov->src_rect.w;
-				req_ov->src_rect.x = req_ov->src_rect.w;
-				req_ov->dst_rect.x = 0;
-			}
-
-			ret = mdss_mdp_overlay_pipe_setup(mfd, req_ov, &pipe);
-			if (ret)
-				return ret;
+			req.src_rect.x = 0;
+			req.src_rect.w = MIN(fbi->var.xres,
+							mixer->width);
 		}
-	}
 
+		req.src_rect.y = 0;
+		req.src_rect.h = req.src.height;
+		req.dst_rect.w = req.src_rect.w;
+		req.dst_rect.h = req.src_rect.h;
+		req.z_order = MDSS_MDP_STAGE_BASE;
+
+		pr_debug("allocating base pipe mux=%d\n", mixer_mux);
+
+		ret = mdss_mdp_overlay_pipe_setup(mfd, &req, &pipe);
+		if (ret)
+			return ret;
+	}
 	pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);
 
 	*ppipe = pipe;
 	return 0;
 }
 
-static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd,
-					struct mdp_overlay *req,
-					int image_size,
-					int *pipe_ndx)
+static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_data *buf;
 	struct mdss_mdp_pipe *pipe;
@@ -1611,7 +1549,7 @@
 	}
 
 	ret = mdss_mdp_overlay_get_fb_pipe(mfd, &pipe,
-					MDSS_MDP_MIXER_MUX_LEFT, req);
+					MDSS_MDP_MIXER_MUX_LEFT);
 	if (ret) {
 		pr_err("unable to allocate base pipe\n");
 		goto pan_display_error;
@@ -1621,14 +1559,13 @@
 		pr_err("unable to map base pipe\n");
 		goto pan_display_error;
 	}
-	if (pipe_ndx)
-		pipe_ndx[0] = pipe->ndx;
 
 	buf = &pipe->back_buf;
 	if (is_mdss_iommu_attached()) {
 		if (!mfd->iova) {
 			pr_err("mfd iova is zero\n");
-			goto attach_err;
+			mdss_mdp_pipe_unmap(pipe);
+			goto pan_display_error;
 		}
 		buf->p[0].addr = mfd->iova;
 	} else {
@@ -1636,26 +1573,21 @@
 	}
 
 	buf->p[0].addr += offset;
-	if (image_size)
-		buf->p[0].len = image_size;
-	else
-		buf->p[0].len = fbi->fix.smem_len - offset;
+	buf->p[0].len = fbi->fix.smem_len - offset;
 	buf->num_planes = 1;
 	mdss_mdp_pipe_unmap(pipe);
 
 	if (fbi->var.xres > MAX_MIXER_WIDTH || mfd->split_display) {
 		ret = mdss_mdp_overlay_get_fb_pipe(mfd, &pipe,
-					   MDSS_MDP_MIXER_MUX_RIGHT, req);
+					   MDSS_MDP_MIXER_MUX_RIGHT);
 		if (ret) {
 			pr_err("unable to allocate right base pipe\n");
-			goto attach_err;
+			goto pan_display_error;
 		}
 		if (mdss_mdp_pipe_map(pipe)) {
 			pr_err("unable to map right base pipe\n");
-			goto attach_err;
+			goto pan_display_error;
 		}
-		if (pipe_ndx)
-			pipe_ndx[1] = pipe->ndx;
 
 		pipe->back_buf = *buf;
 		mdss_mdp_pipe_unmap(pipe);
@@ -1669,13 +1601,6 @@
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	return;
 
-attach_err:
-	mutex_unlock(&mdp5_data->ov_lock);
-	mdss_mdp_overlay_unset(mfd, pipe->ndx);
-	if (pipe_ndx)
-		pipe_ndx[0] = INVALID_PIPE_INDEX;
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-	return;
 pan_display_error:
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	mutex_unlock(&mdp5_data->ov_lock);
@@ -2931,9 +2856,9 @@
 
 error:
 	if (rc && ctl) {
-		__mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
-		__mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
-		__mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
 		mdss_mdp_ctl_destroy(ctl);
 		mdp5_data->ctl = NULL;
 		mdp5_data->handoff = false;
@@ -2942,63 +2867,6 @@
 	return rc;
 }
 
-static int mdss_mdp_overlay_splash_image(struct msm_fb_data_type *mfd,
-						int *pipe_ndx, int splash_event)
-{
-	struct mdp_overlay req;
-	int rc = 0;
-	struct fb_info *fbi = NULL;
-	int image_len = 0;
-
-	if (!mfd || !mfd->fbi || !mfd->fbi->screen_base || !pipe_ndx) {
-		pr_err("Invalid input parameter\n");
-		return -EINVAL;
-	}
-
-	fbi = mfd->fbi;
-	image_len = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT * SPLASH_IMAGE_BPP;
-
-	if (SPLASH_IMAGE_WIDTH > fbi->var.xres ||
-			SPLASH_IMAGE_HEIGHT > fbi->var.yres ||
-			SPLASH_IMAGE_BPP > fbi->var.bits_per_pixel / 8 ||
-			image_len > fbi->fix.smem_len) {
-		pr_err("Invalid splash parameter configuration\n");
-		return -EINVAL;
-	}
-
-	if (splash_event == MDP_CREATE_SPLASH_OV) {
-		pipe_ndx[0] = INVALID_PIPE_INDEX;
-		pipe_ndx[1] = INVALID_PIPE_INDEX;
-
-		memset(&req, 0, sizeof(struct mdp_overlay));
-		req.src.width = req.dst_rect.w = req.src_rect.w =
-				SPLASH_IMAGE_WIDTH;
-		req.src.height = req.dst_rect.h = req.src_rect.h =
-				SPLASH_IMAGE_HEIGHT;
-		req.src.format = SPLASH_IMAGE_FORMAT;
-		req.id = MSMFB_NEW_REQUEST;
-		req.z_order = MDSS_MDP_STAGE_0;
-		req.is_fg = 1;
-		req.alpha = 0xff;
-		req.transp_mask = MDP_TRANSP_NOP;
-		req.dst_rect.x =
-			(fbi->var.xres >> 1) - (SPLASH_IMAGE_WIDTH >> 1);
-		req.dst_rect.y =
-			(fbi->var.yres >> 1) - (SPLASH_IMAGE_HEIGHT >> 1);
-
-		memcpy(fbi->screen_base, splash_bgr888_image, image_len);
-		mdss_mdp_overlay_pan_display(mfd, &req, image_len, pipe_ndx);
-
-	} else if (splash_event == MDP_REMOVE_SPLASH_OV) {
-		if (pipe_ndx[0] != INVALID_PIPE_INDEX)
-			mdss_mdp_overlay_unset(mfd, pipe_ndx[0]);
-		if (pipe_ndx[1] != INVALID_PIPE_INDEX)
-			mdss_mdp_overlay_unset(mfd, pipe_ndx[1]);
-	}
-
-	return rc;
-}
-
 static void __vsync_retire_handle_vsync(struct mdss_mdp_ctl *ctl, ktime_t t)
 {
 	struct msm_fb_data_type *mfd = ctl->mfd;
@@ -3117,7 +2985,7 @@
 	mdp5_interface->panel_register_done = mdss_panel_register_done;
 	mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
 	mdp5_interface->get_sync_fnc = mdss_mdp_rotator_sync_pt_get;
-	mdp5_interface->splash_fnc = mdss_mdp_overlay_splash_image;
+	mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
 
 	mdp5_data = kmalloc(sizeof(struct mdss_overlay_private), GFP_KERNEL);
 	if (!mdp5_data) {
@@ -3140,6 +3008,7 @@
 		goto init_fail;
 	}
 	mfd->mdp.private1 = mdp5_data;
+	mfd->wait_for_kickoff = true;
 
 	rc = mdss_mdp_overlay_fb_parse_dt(mfd);
 	if (rc)
@@ -3210,45 +3079,6 @@
 	return rc;
 }
 
-static __ref int mdss_mdp_overlay_splash_parse_dt(struct msm_fb_data_type *mfd)
-{
-	struct platform_device *pdev = mfd->pdev;
-	struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
-	int len = 0, rc = 0;
-	u32 offsets[2];
-
-	of_find_property(pdev->dev.of_node, "qcom,memblock-reserve", &len);
-
-	if (len < 1) {
-		pr_debug("mem reservation for splash screen fb not present\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	len = len/sizeof(u32);
-
-	rc = of_property_read_u32_array(pdev->dev.of_node,
-			"qcom,memblock-reserve", offsets, len);
-	if (rc) {
-		pr_debug("Error reading mem reserve settings for fb\n");
-		goto error;
-	}
-
-	if (!memblock_is_reserved(offsets[0])) {
-		pr_debug("failed to reserve memory for fb splash\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	mdp5_mdata->splash_mem_addr = offsets[0];
-	mdp5_mdata->splash_mem_size = offsets[1];
-	pr_debug("memaddr=%x size=%x\n", mdp5_mdata->splash_mem_addr,
-		mdp5_mdata->splash_mem_size);
-
-error:
-	return rc;
-}
-
 static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
 {
 	int rc = 0;
@@ -3262,13 +3092,5 @@
 			pdev->name);
 	}
 
-	rc = mdss_mdp_overlay_splash_parse_dt(mfd);
-	if (rc && mfd->panel_info->cont_splash_enabled) {
-		pr_err("No rsvd mem found in DT for splash screen\n");
-	} else {
-		pr_debug("Mem reservation not reqd if cont splash diasbled\n");
-		rc = 0;
-	}
-
 	return rc;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 50bee17..c522857 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -37,7 +37,7 @@
 static DEFINE_MUTEX(mdss_mdp_sspp_lock);
 static DEFINE_MUTEX(mdss_mdp_smp_lock);
 
-static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe);
+static void mdss_mdp_pipe_free(struct kref *kref);
 static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp);
 static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write);
 static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
@@ -481,21 +481,17 @@
 
 void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe)
 {
-	int tmp;
-
-	tmp = atomic_dec_return(&pipe->ref_cnt);
-
-	WARN(tmp < 0, "Invalid unmap with ref_cnt=%d", tmp);
-	if (tmp == 0)
-		mdss_mdp_pipe_free(pipe);
+	if (kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		WARN(1, "Unexpected free pipe during unmap");
+		mutex_unlock(&mdss_mdp_sspp_lock);
+	}
 }
 
 int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe)
 {
-	if (!atomic_inc_not_zero(&pipe->ref_cnt)) {
-		pr_err("attempting to map unallocated pipe (%d)", pipe->num);
+	if (!kref_get_unless_zero(&pipe->kref))
 		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -541,7 +537,7 @@
 
 	for (i = off; i < npipes; i++) {
 		pipe = pipe_pool + i;
-		if (atomic_cmpxchg(&pipe->ref_cnt, 0, 1) == 0) {
+		if (atomic_read(&pipe->kref.refcount) == 0) {
 			pipe->mixer = mixer;
 			break;
 		}
@@ -551,7 +547,6 @@
 	if (pipe && mdss_mdp_pipe_fetch_halt(pipe)) {
 		pr_err("%d failed because pipe is in bad state\n",
 			pipe->num);
-		atomic_dec(&pipe->ref_cnt);
 		return NULL;
 	}
 
@@ -559,13 +554,14 @@
 		pr_debug("type=%x   pnum=%d\n", pipe->type, pipe->num);
 		mutex_init(&pipe->pp_res.hist.hist_mutex);
 		spin_lock_init(&pipe->pp_res.hist.hist_lock);
+		kref_init(&pipe->kref);
 	} else if (pipe_share) {
 		/*
 		 * when there is no dedicated wfd blk, DMA pipe can be
 		 * shared as long as its attached to a writeback mixer
 		 */
 		pipe = mdata->dma_pipes + mixer->num;
-		atomic_inc(&pipe->ref_cnt);
+		kref_get(&pipe->kref);
 		pr_debug("pipe sharing for pipe=%d\n", pipe->num);
 	} else {
 		pr_err("no %d type pipes available\n", type);
@@ -587,7 +583,7 @@
 	} else if (pipe != &mdata->dma_pipes[mixer->num]) {
 		pr_err("Requested DMA pnum=%d not available\n",
 			mdata->dma_pipes[mixer->num].num);
-		mdss_mdp_pipe_unmap(pipe);
+		kref_put(&pipe->kref, mdss_mdp_pipe_free);
 		pipe = NULL;
 	} else {
 		pipe->mixer = mixer;
@@ -674,10 +670,13 @@
 	return NULL;
 }
 
-static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe)
+static void mdss_mdp_pipe_free(struct kref *kref)
 {
-	pr_debug("ndx=%x pnum=%d ref_cnt=%d\n", pipe->ndx, pipe->num,
-			atomic_read(&pipe->ref_cnt));
+	struct mdss_mdp_pipe *pipe;
+
+	pipe = container_of(kref, struct mdss_mdp_pipe, kref);
+
+	pr_debug("ndx=%x pnum=%d\n", pipe->ndx, pipe->num);
 
 	if (pipe->play_cnt) {
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
@@ -693,8 +692,6 @@
 	pipe->bwc_mode = 0;
 	pipe->mfd = NULL;
 	memset(&pipe->scale, 0, sizeof(struct mdp_scale_data));
-
-	return 0;
 }
 
 static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe,
@@ -802,19 +799,16 @@
 
 int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe)
 {
-	int tmp;
-
-	tmp = atomic_dec_return(&pipe->ref_cnt);
-
-	if (tmp != 0) {
-		pr_err("unable to free pipe %d while still in use (%d)\n",
-				pipe->num, tmp);
+	if (!kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		pr_err("unable to free pipe %d while still in use\n",
+				pipe->num);
 		return -EBUSY;
 	}
-	mdss_mdp_pipe_free(pipe);
+
+	mutex_unlock(&mdss_mdp_sspp_lock);
 
 	return 0;
-
 }
 
 /**
@@ -873,7 +867,7 @@
 
 	pipe->is_handed_off = true;
 	pipe->play_cnt = 1;
-	atomic_inc(&pipe->ref_cnt);
+	kref_init(&pipe->kref);
 
 error:
 	return rc;
@@ -1136,6 +1130,13 @@
 		pipe->bg_color);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, 0);
+
+	if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA) {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_CONFIG, 0);
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+			mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, 0);
+	}
 
 	return 0;
 }
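
Moving the pipe lifetime onto struct kref means the final mdss_mdp_pipe_free() now runs via kref_put_mutex() with mdss_mdp_sspp_lock held, and lookups that use kref_get_unless_zero() can never take a reference on a pipe whose count has already dropped to zero. A generic sketch of that lifetime pattern; demo_obj and demo_list_lock are illustrative stand-ins:

```c
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
	int id;
};

static DEFINE_MUTEX(demo_list_lock);

static struct demo_obj *demo_obj_create(int id)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj) {
		kref_init(&obj->kref);	/* starts with one reference */
		obj->id = id;
	}
	return obj;
}

/* Runs with demo_list_lock held, so list walkers never see a freed object. */
static void demo_obj_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj);
}

static void demo_obj_put(struct demo_obj *obj)
{
	/* kref_put_mutex() leaves the mutex held when it frees; drop it here. */
	if (kref_put_mutex(&obj->kref, demo_obj_release, &demo_list_lock))
		mutex_unlock(&demo_list_lock);
}

/* Take a reference only if the object is still live (count > 0). */
static int demo_obj_get(struct demo_obj *obj)
{
	return kref_get_unless_zero(&obj->kref) ? 0 : -EINVAL;
}
```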
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 8d6d41d..ee892e2 100755
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -642,7 +642,7 @@
 
 	if (rot_pipe) {
 		struct mdss_mdp_mixer *mixer = rot_pipe->mixer;
-		mdss_mdp_pipe_unmap(rot_pipe);
+		mdss_mdp_pipe_destroy(rot_pipe);
 		tmp = mdss_mdp_ctl_mixer_switch(mixer->ctl,
 				MDSS_MDP_WB_CTL_TYPE_BLOCK);
 		if (!tmp)
diff --git a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
new file mode 100644
index 0000000..77f6554
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
@@ -0,0 +1,623 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+#include <linux/iommu.h>
+#include <linux/fb.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "splash.h"
+#include "mdss_mdp_splash_logo.h"
+
+#define INVALID_PIPE_INDEX 0xFFFF
+#define MAX_FRAME_DONE_COUNT_WAIT 2
+
+static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd,
+							uint32_t size)
+{
+	int rc;
+	struct msm_fb_splash_info *sinfo;
+	unsigned long buf_size = size;
+	struct mdss_data_type *mdata;
+
+	if (!mfd || !size)
+		return -EINVAL;
+
+	mdata = mfd_to_mdata(mfd);
+	sinfo = &mfd->splash_info;
+
+	if (!mdata || !mdata->iclient || sinfo->splash_buffer)
+		return -EINVAL;
+
+	sinfo->ion_handle = ion_alloc(mdata->iclient, size, SZ_4K,
+				ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(sinfo->ion_handle)) {
+		pr_err("ion memory allocation failed\n");
+		rc = PTR_RET(sinfo->ion_handle);
+		goto end;
+	}
+
+	rc = ion_map_iommu(mdata->iclient, sinfo->ion_handle,
+			mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
+			0, SZ_4K, 0, (unsigned long *)&sinfo->iova,
+				(unsigned long *)&buf_size, 0, 0);
+	if (rc) {
+		pr_err("ion memory map failed\n");
+		goto imap_err;
+	}
+
+	sinfo->splash_buffer = ion_map_kernel(mdata->iclient,
+						sinfo->ion_handle);
+	if (IS_ERR_OR_NULL(sinfo->splash_buffer)) {
+		pr_err("ion kernel memory mapping failed\n");
+		rc = IS_ERR(sinfo->splash_buffer);
+		goto kmap_err;
+	}
+
+	return rc;
+
+kmap_err:
+	ion_unmap_iommu(mdata->iclient, sinfo->ion_handle,
+			mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE), 0);
+imap_err:
+	ion_free(mdata->iclient, sinfo->ion_handle);
+end:
+	return rc;
+}
+
+static void mdss_mdp_splash_free_memory(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_splash_info *sinfo;
+	struct mdss_data_type *mdata;
+
+	if (!mfd)
+		return;
+
+	sinfo = &mfd->splash_info;
+	mdata = mfd_to_mdata(mfd);
+
+	if (!mdata || !mdata->iclient || !sinfo->ion_handle)
+		return;
+
+	ion_unmap_kernel(mdata->iclient, sinfo->ion_handle);
+
+	ion_unmap_iommu(mdata->iclient, sinfo->ion_handle,
+			mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE), 0);
+
+	ion_free(mdata->iclient, sinfo->ion_handle);
+	sinfo->splash_buffer = NULL;
+}
+
+static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
+{
+	struct iommu_domain *domain;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int rc;
+
+	/*
+	 * Attempt dynamic iommu attach only when all of the following hold:
+	 * 1. the iommu is not already attached
+	 * 2. continuous splash is enabled on the panel
+	 * 3. the MDP hardware revision supports dynamic attach
+	 * 4. a valid splash buffer address and size are configured
+	if (is_mdss_iommu_attached() ||
+		!mfd->panel_info->cont_splash_enabled ||
+		!mdss_mdp_iommu_dyn_attach_supported(mdp5_data->mdata) ||
+		!mdp5_data->splash_mem_addr ||
+		!mdp5_data->splash_mem_size) {
+		pr_debug("dynamic attach is not supported\n");
+		return -EPERM;
+	}
+
+	domain = msm_get_iommu_domain(mdss_get_iommu_domain(
+						MDSS_IOMMU_DOMAIN_UNSECURE));
+	if (!domain) {
+		pr_debug("mdss iommu domain get failed\n");
+		return -EINVAL;
+	}
+
+	rc = iommu_map(domain, mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size, IOMMU_READ);
+	if (rc) {
+		pr_debug("iommu memory mapping failed rc=%d\n", rc);
+	} else {
+		rc = mdss_iommu_attach(mdss_res);
+		if (rc) {
+			pr_debug("mdss iommu attach failed\n");
+			iommu_unmap(domain, mdp5_data->splash_mem_addr,
+						mdp5_data->splash_mem_size);
+		} else {
+			mfd->splash_info.iommu_dynamic_attached = true;
+		}
+	}
+
+	return rc;
+}
+
+static void mdss_mdp_splash_unmap_splash_mem(struct msm_fb_data_type *mfd)
+{
+	struct iommu_domain *domain;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mfd->splash_info.iommu_dynamic_attached) {
+		domain = msm_get_iommu_domain(mdss_get_iommu_domain(
+						MDSS_IOMMU_DOMAIN_UNSECURE));
+		if (!domain) {
+			pr_err("mdss iommu domain get failed\n");
+			return;
+		}
+
+		iommu_unmap(domain, mdp5_data->splash_mem_addr,
+						mdp5_data->splash_mem_size);
+		mfd->splash_info.iommu_dynamic_attached = false;
+	}
+}
+
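+/* Release the overlay pipes that were used to stage the splash logo. */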
+void mdss_mdp_release_splash_pipe(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd || !mfd->splash_info.splash_pipe_allocated)
+		return;
+
+	sinfo = &mfd->splash_info;
+
+	if (sinfo->pipe_ndx[0] != INVALID_PIPE_INDEX)
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+	if (sinfo->pipe_ndx[1] != INVALID_PIPE_INDEX)
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[1]);
+	sinfo->splash_pipe_allocated = false;
+}
+
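+/*
+ * Tear down the continuous splash handoff: stage border-fill on video
+ * mode panels, clean up the handed-off pipes, return the reserved splash
+ * memory to the system and attach the iommu.
+ */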
+int mdss_mdp_splash_cleanup(struct msm_fb_data_type *mfd,
+					bool use_borderfill)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	int rc = 0;
+
+	if (!mfd)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -EINVAL;
+
+	ctl = mdp5_data->ctl;
+
+	if (mfd->splash_info.iommu_dynamic_attached ||
+			!mfd->panel_info->cont_splash_enabled)
+		goto end;
+
+	if (use_borderfill && mdp5_data->handoff) {
+		/*
+		 * Set up border-fill on the handed off pipes.
+		 * This is needed to ensure that there are no memory
+		 * accesses prior to attaching iommu during continuous
+		 * splash screen case. However, for command mode
+		 * displays, this is not necessary since the panels can
+		 * refresh from their internal memory if no data is sent
+		 * out on the dsi lanes.
+		 */
+		if (mdp5_data->handoff && ctl && ctl->is_video_mode) {
+			rc = mdss_mdp_display_commit(ctl, NULL);
+			if (!IS_ERR_VALUE(rc)) {
+				mdss_mdp_display_wait4comp(ctl);
+			} else {
+				/*
+				 * Since border-fill setup failed, we
+				 * need to ensure that we turn off the
+				 * MDP timing generator before attaching
+				 * iommu
+				 */
+				pr_err("failed to set BF at handoff\n");
+				mdp5_data->handoff = false;
+			}
+		}
+	}
+
+	if (rc || mdp5_data->handoff) {
+		/* Add all the handed off pipes to the cleanup list */
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
+	}
+
+	mdss_mdp_ctl_splash_finish(ctl, mdp5_data->handoff);
+
+	if (mdp5_data->splash_mem_addr) {
+		/* Give back the reserved memory to the system */
+		memblock_free(mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+		free_bootmem_late(mdp5_data->splash_mem_addr,
+				 mdp5_data->splash_mem_size);
+	}
+
+	mdss_mdp_footswitch_ctrl_splash(0);
+	if (!is_mdss_iommu_attached()) {
+		rc = mdss_iommu_attach(mdss_res);
+		if (rc)
+			pr_err("iommu attach failed rc=%d\n", rc);
+	}
+
+end:
+	return rc;
+}
+
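+/* Set up an overlay pipe whose source buffer is the splash image iova. */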
+static struct mdss_mdp_pipe *mdss_mdp_splash_get_pipe(
+					struct msm_fb_data_type *mfd,
+					struct mdp_overlay *req)
+{
+	struct mdss_mdp_pipe *pipe;
+	int ret;
+	struct mdss_mdp_data *buf;
+	uint32_t image_size = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT
+						* SPLASH_IMAGE_BPP;
+
+	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe);
+	if (ret)
+		return NULL;
+
+	if (mdss_mdp_pipe_map(pipe)) {
+		pr_err("unable to map base pipe\n");
+		return NULL;
+	}
+
+	buf = &pipe->back_buf;
+	buf->p[0].addr = mfd->splash_info.iova;
+	buf->p[0].len = image_size;
+	buf->num_planes = 1;
+	mdss_mdp_pipe_unmap(pipe);
+
+	return pipe;
+}
+
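+/*
+ * Stage the splash image through one overlay pipe (or two pipes when the
+ * image spans both halves of a split display) and kick off the frame.
+ */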
+static int mdss_mdp_splash_kickoff(struct msm_fb_data_type *mfd,
+				struct mdss_mdp_img_rect *src_rect,
+				struct mdss_mdp_img_rect *dest_rect)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct fb_info *fbi;
+	struct mdp_overlay req;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_mixer *mixer;
+	int ret;
+	bool use_single_pipe = false;
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	mdata = mfd_to_mdata(mfd);
+	sinfo = &mfd->splash_info;
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
+		return -EINVAL;
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto end;
+	}
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_err("unable to retrieve mixer\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	memset(&req, 0, sizeof(struct mdp_overlay));
+	/*
+	 * Use a single pipe when either:
+	 * 1. split display is disabled, or
+	 * 2. the splash image lies entirely on one side of the panel
+	 */
+	use_single_pipe =
+		!mfd->split_display ||
+		(mfd->split_display &&
+		((dest_rect->x + dest_rect->w) < mfd->split_fb_left ||
+		dest_rect->x > mfd->split_fb_left));
+
+	req.src.width = src_rect->w;
+	if (use_single_pipe)
+		req.src_rect.w = src_rect->w;
+	else
+		req.src_rect.w = min_t(u16, mixer->width, src_rect->w >> 1);
+	req.dst_rect.w = req.src_rect.w;
+	req.src.height = req.dst_rect.h = req.src_rect.h =
+			src_rect->h;
+	req.src.format = SPLASH_IMAGE_FORMAT;
+	req.id = MSMFB_NEW_REQUEST;
+	req.z_order = MDSS_MDP_STAGE_0;
+	req.alpha = 0xff;
+	req.transp_mask = MDP_TRANSP_NOP;
+	req.dst_rect.x = dest_rect->x;
+	req.dst_rect.y = dest_rect->y;
+
+	pipe = mdss_mdp_splash_get_pipe(mfd, &req);
+	if (!pipe) {
+		pr_err("unable to allocate base pipe\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	sinfo->pipe_ndx[0] = pipe->ndx;
+
+	if (!use_single_pipe) {
+		req.id = MSMFB_NEW_REQUEST;
+		req.src_rect.x = src_rect->x + min_t(u16, mixer->width,
+					src_rect->w - req.src_rect.w);
+		req.dst_rect.x = mixer->width;
+		pipe = mdss_mdp_splash_get_pipe(mfd, &req);
+		if (!pipe) {
+			pr_err("unable to allocate right base pipe\n");
+			mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+			ret = -EINVAL;
+			goto end;
+		}
+		sinfo->pipe_ndx[1] = pipe->ndx;
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	ret = mfd->mdp.kickoff_fnc(mfd, NULL);
+	if (ret) {
+		pr_err("error in displaying image\n");
+		/* release only the pipes that were actually allocated */
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+		if (sinfo->pipe_ndx[1] != INVALID_PIPE_INDEX)
+			mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[1]);
+	}
+
+	return ret;
+end:
+	sinfo->pipe_ndx[0] = INVALID_PIPE_INDEX;
+	sinfo->pipe_ndx[1] = INVALID_PIPE_INDEX;
+	mutex_unlock(&mdp5_data->ov_lock);
+	return ret;
+}
+
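+/*
+ * Validate the splash image against the framebuffer geometry, copy the
+ * logo into the ION buffer and display it centered on the panel.
+ */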
+static int mdss_mdp_display_splash_image(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct fb_info *fbi;
+	uint32_t image_len = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT
+						* SPLASH_IMAGE_BPP;
+	struct mdss_mdp_img_rect src_rect, dest_rect;
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd || !mfd->fbi) {
+		pr_err("invalid input parameter\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	fbi = mfd->fbi;
+	sinfo = &mfd->splash_info;
+
+	if (SPLASH_IMAGE_WIDTH > fbi->var.xres ||
+		  SPLASH_IMAGE_HEIGHT > fbi->var.yres ||
+		  SPLASH_IMAGE_BPP > (fbi->var.bits_per_pixel >> 3)) {
+		pr_err("invalid splash parameter configuration\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	sinfo->pipe_ndx[0] = INVALID_PIPE_INDEX;
+	sinfo->pipe_ndx[1] = INVALID_PIPE_INDEX;
+
+	src_rect.x = 0;
+	src_rect.y = 0;
+	dest_rect.w = src_rect.w = SPLASH_IMAGE_WIDTH;
+	dest_rect.h = src_rect.h = SPLASH_IMAGE_HEIGHT;
+	dest_rect.x = (fbi->var.xres >> 1) - (SPLASH_IMAGE_WIDTH >> 1);
+	dest_rect.y = (fbi->var.yres >> 1) - (SPLASH_IMAGE_HEIGHT >> 1);
+
+	rc = mdss_mdp_splash_alloc_memory(mfd, image_len);
+	if (rc) {
+		pr_err("splash buffer allocation failed\n");
+		goto end;
+	}
+
+	memcpy(sinfo->splash_buffer, splash_bgr888_image, image_len);
+
+	rc = mdss_mdp_splash_iommu_attach(mfd);
+	if (rc)
+		pr_debug("iommu dynamic attach failed\n");
+
+	rc = mdss_mdp_splash_kickoff(mfd, &src_rect, &dest_rect);
+	if (rc)
+		pr_err("splash image display failed\n");
+	else
+		sinfo->splash_pipe_allocated = true;
+end:
+	return rc;
+}
+
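+/*
+ * Frame-done notifier: on the first frame done event, release the
+ * continuous splash resources; once enough frames have been pushed by
+ * other clients, signal the splash thread to free the logo buffer.
+ */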
+static int mdss_mdp_splash_ctl_cb(struct notifier_block *self,
+					unsigned long event, void *data)
+{
+	struct msm_fb_splash_info *sinfo = container_of(self,
+					struct msm_fb_splash_info, notifier);
+	struct msm_fb_data_type *mfd;
+
+	if (!sinfo)
+		goto done;
+
+	mfd = container_of(sinfo, struct msm_fb_data_type, splash_info);
+
+	if (!mfd)
+		goto done;
+
+	if (event != MDP_NOTIFY_FRAME_DONE)
+		goto done;
+
+	if (!sinfo->frame_done_count) {
+		mdss_mdp_splash_unmap_splash_mem(mfd);
+		mdss_mdp_splash_cleanup(mfd, false);
+	/* wait for 2 frame done events before releasing memory */
+	} else if (sinfo->frame_done_count > MAX_FRAME_DONE_COUNT_WAIT &&
+			sinfo->splash_thread) {
+		complete(&sinfo->frame_done);
+		sinfo->splash_thread = NULL;
+	}
+
+	/* count frame done events only after the splash pipes are released */
+	if (!sinfo->splash_pipe_allocated)
+		sinfo->frame_done_count++;
+done:
+	return NOTIFY_OK;
+}
+
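+/*
+ * Kernel thread that unblanks the framebuffer, sets an initial backlight
+ * level, displays the splash logo and then waits for the frame-done
+ * notifier before freeing the logo resources.
+ */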
+static int mdss_mdp_splash_thread(void *data)
+{
+	struct msm_fb_data_type *mfd = data;
+	struct mdss_overlay_private *mdp5_data;
+	int ret = -EINVAL;
+
+	if (!mfd) {
+		pr_err("invalid input parameter\n");
+		goto end;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	lock_fb_info(mfd->fbi);
+	ret = fb_blank(mfd->fbi, FB_BLANK_UNBLANK);
+	if (ret) {
+		pr_err("can't turn on fb!\n");
+		unlock_fb_info(mfd->fbi);
+		goto end;
+	}
+	unlock_fb_info(mfd->fbi);
+
+	mfd->bl_updated = true;
+	mutex_lock(&mfd->bl_lock);
+	mdss_fb_set_backlight(mfd, mfd->panel_info->bl_max >> 1);
+	mutex_unlock(&mfd->bl_lock);
+
+	init_completion(&mfd->splash_info.frame_done);
+
+	mfd->splash_info.notifier.notifier_call = mdss_mdp_splash_ctl_cb;
+	mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
+				&mfd->splash_info.notifier);
+
+	ret = mdss_mdp_display_splash_image(mfd);
+	if (ret) {
+		/*
+		 * keep thread alive to release dynamically allocated
+		 * resources
+		 */
+		pr_err("splash image display failed\n");
+	}
+
+	/* wait for second display complete to release splash resources */
+	ret = wait_for_completion_killable(&mfd->splash_info.frame_done);
+
+	mdss_mdp_splash_free_memory(mfd);
+
+	mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
+				&mfd->splash_info.notifier);
+end:
+	return ret;
+}
+
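+/*
+ * Read the splash logo enable flag and the memory reserved by the
+ * bootloader for the continuous splash framebuffer from device tree.
+ */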
+static __ref int mdss_mdp_splash_parse_dt(struct msm_fb_data_type *mfd)
+{
+	struct platform_device *pdev = mfd->pdev;
+	struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
+	int len = 0, rc = 0;
+	u32 offsets[2];
+
+	mfd->splash_info.splash_logo_enabled =
+				of_property_read_bool(pdev->dev.of_node,
+				"qcom,mdss-fb-splash-logo-enabled");
+
+	of_find_property(pdev->dev.of_node, "qcom,memblock-reserve", &len);
+	if (len < 1) {
+		pr_debug("mem reservation for splash screen fb not present\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	len = len / sizeof(u32);
+	if (len != ARRAY_SIZE(offsets)) {
+		pr_debug("invalid mem reservation entry for splash fb\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,memblock-reserve", offsets, len);
+	if (rc) {
+		pr_debug("error reading mem reserve settings for fb\n");
+		goto error;
+	}
+
+	if (!memblock_is_reserved(offsets[0])) {
+		pr_debug("failed to reserve memory for fb splash\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	mdp5_mdata->splash_mem_addr = offsets[0];
+	mdp5_mdata->splash_mem_size = offsets[1];
+	pr_debug("memaddr=%x size=%x\n", mdp5_mdata->splash_mem_addr,
+		mdp5_mdata->splash_mem_size);
+
+error:
+	if (!rc && !mfd->panel_info->cont_splash_enabled &&
+		mdp5_mdata->splash_mem_addr) {
+		pr_debug("mem reservation not reqd if cont splash disabled\n");
+		memblock_free(mdp5_mdata->splash_mem_addr,
+					mdp5_mdata->splash_mem_size);
+		free_bootmem_late(mdp5_mdata->splash_mem_addr,
+				 mdp5_mdata->splash_mem_size);
+	} else if (rc && mfd->panel_info->cont_splash_enabled) {
+		pr_err("no rsvd mem found in DT for splash screen\n");
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+int mdss_mdp_splash_init(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	if (!mfd) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = mdss_mdp_splash_parse_dt(mfd);
+	if (rc) {
+		pr_err("splash memory reserve failed\n");
+		goto end;
+	}
+
+	if (!mfd->splash_info.splash_logo_enabled) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mfd->splash_info.splash_thread = kthread_run(mdss_mdp_splash_thread,
+							mfd, "mdss_fb_splash");
+
+	if (IS_ERR(mfd->splash_info.splash_thread)) {
+		pr_err("unable to start splash thread %d\n", mfd->index);
+		mfd->splash_info.splash_thread = NULL;
+	}
+
+end:
+	return rc;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_splash_logo.h b/drivers/video/msm/mdss/mdss_mdp_splash_logo.h
new file mode 100644
index 0000000..0a4e83b
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_splash_logo.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_SPLASH_LOGO
+#define MDSS_MDP_SPLASH_LOGO
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+
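+/* Per-framebuffer state used while handing over from the boot splash. */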
+struct msm_fb_splash_info {
+	struct task_struct	*splash_thread;
+	bool			splash_logo_enabled;
+	bool			iommu_dynamic_attached;
+	struct notifier_block	notifier;
+	uint32_t		frame_done_count;
+	struct completion	frame_done;
+
+	struct ion_handle	*ion_handle;
+	dma_addr_t		iova;
+	void			*splash_buffer;
+	int			pipe_ndx[2];
+	bool			splash_pipe_allocated;
+};
+
+struct msm_fb_data_type;
+
+void mdss_mdp_release_splash_pipe(struct msm_fb_data_type *mfd);
+int mdss_mdp_splash_cleanup(struct msm_fb_data_type *mfd,
+				 bool use_borderfill);
+int mdss_mdp_splash_init(struct msm_fb_data_type *mfd);
+
+#endif
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 276a48f..0958e07 100755
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -899,6 +899,12 @@
 	vcd_meta_buffer->offset = meta_buffers->offset;
 	vcd_meta_buffer->pmem_fd_iommu = meta_buffers->pmem_fd_iommu;
 
+	if (meta_buffers->count > MAX_META_BUFFERS) {
+		ERR("meta buffers maximum count reached, count = %d",
+			meta_buffers->count);
+		return false;
+	}
+
 	if (!vcd_get_ion_status()) {
 		pr_err("PMEM Not available\n");
 		return false;
@@ -1104,6 +1110,12 @@
 	vcd_h264_mv_buffer->pmem_fd = mv_data->pmem_fd;
 	vcd_h264_mv_buffer->offset = mv_data->offset;
 
+	if (mv_data->count > MAX_MV_BUFFERS) {
+		ERR("MV buffers maximum count reached, count = %d",
+			mv_data->count);
+		return false;
+	}
+
 	if (!vcd_get_ion_status()) {
 		pr_err("PMEM not available\n");
 		return false;
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index b882fe2..d085e03 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/jiffies.h>
+#include <linux/hrtimer.h>
 #include <linux/delay.h>
 #include <asm-generic/errno.h>
 #include <asm/io.h>
@@ -36,13 +36,13 @@
  */
 #define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
 ({ \
-	unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us); \
+	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
 	might_sleep_if(timeout_us); \
 	for (;;) { \
 		(val) = readl(addr); \
 		if (cond) \
 			break; \
-		if (timeout_us && time_after(jiffies, timeout)) { \
+		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
 			(val) = readl(addr); \
 			break; \
 		} \
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 603bec2..04ad123 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -289,6 +289,25 @@
 	return cmp1.tv64 == cmp2.tv64;
 }
 
+/**
+ * ktime_compare - Compares two ktime_t variables for less, greater or equal
+ * @cmp1:	comparable1
+ * @cmp2:	comparable2
+ *
+ * Returns ...
+ *   cmp1  < cmp2: return <0
+ *   cmp1 == cmp2: return 0
+ *   cmp1  > cmp2: return >0
+ */
+static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
+{
+	if (cmp1.tv64 < cmp2.tv64)
+		return -1;
+	if (cmp1.tv64 > cmp2.tv64)
+		return 1;
+	return 0;
+}
+
 static inline s64 ktime_to_us(const ktime_t kt)
 {
 	struct timeval tv = ktime_to_timeval(kt);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 8b604e3..7d51070 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -454,6 +454,7 @@
 #define CID_MANFID_TOSHIBA	0x11
 #define CID_MANFID_MICRON	0x13
 #define CID_MANFID_SAMSUNG	0x15
+#define CID_MANFID_KINGSTON	0x70
 #define CID_MANFID_HYNIX	0x90
 
 #define END_FIXUP { 0 }
diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h
index 35279bf..b09fc2d 100644
--- a/include/linux/msm_vidc_dec.h
+++ b/include/linux/msm_vidc_dec.h
@@ -58,6 +58,8 @@
 #define VDEC_MSG_EVT_HW_ERROR	(VDEC_MSG_BASE + 14)
 #define VDEC_MSG_EVT_INFO_CONFIG_CHANGED	(VDEC_MSG_BASE + 15)
 #define VDEC_MSG_EVT_INFO_FIELD_DROPPED	(VDEC_MSG_BASE + 16)
+#define VDEC_MSG_EVT_HW_OVERLOAD	(VDEC_MSG_BASE + 17)
+#define VDEC_MSG_EVT_MAX_CLIENTS	(VDEC_MSG_BASE + 18)
 
 /*Buffer flags bits masks.*/
 #define VDEC_BUFFERFLAG_EOS	0x00000001
diff --git a/include/linux/msm_vidc_enc.h b/include/linux/msm_vidc_enc.h
index 4ce3db1..36625a7 100644
--- a/include/linux/msm_vidc_enc.h
+++ b/include/linux/msm_vidc_enc.h
@@ -45,7 +45,8 @@
 #define VEN_MSG_RESUME	9
 #define VEN_MSG_STOP_READING_MSG	10
 #define VEN_MSG_LTRUSE_FAILED	    11
-
+#define VEN_MSG_HW_OVERLOAD	12
+#define VEN_MSG_MAX_CLIENTS	13
 
 /*Buffer flags bits masks*/
 #define VEN_BUFFLAG_EOS	0x00000001
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 1531aa4..f760672 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1824,29 +1824,30 @@
 #define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \
 		(V4L2_CID_MPEG_MSM_VIDC_BASE + 25)
 enum v4l2_mpeg_vidc_extradata {
-	V4L2_MPEG_VIDC_EXTRADATA_NONE,
-	V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION,
-	V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO,
-	V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP,
-	V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP,
-	V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP,
-	V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING,
-	V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE,
-	V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW,
-	V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI,
-	V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD,
-	V4L2_MPEG_VIDC_EXTRADATA_AFD_UD,
-	V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO,
-	V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB,
-	V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER,
-	V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP,
-	V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM,
-	V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
-	V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP,
-	V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP,
-	V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO,
-	V4L2_MPEG_VIDC_EXTRADATA_LTR,
-	V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI,
+	V4L2_MPEG_VIDC_EXTRADATA_NONE = 0,
+	V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION = 1,
+	V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO = 2,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP = 3,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP = 4,
+	V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP = 5,
+	V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING = 6,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE = 7,
+	V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW = 8,
+	V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI = 9,
+	V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD = 10,
+	V4L2_MPEG_VIDC_EXTRADATA_AFD_UD = 11,
+	V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO = 12,
+	V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB = 13,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER = 14,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP = 15,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM = 16,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO = 17,
+	V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP = 18,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP = 19,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO = 20,
+	V4L2_MPEG_VIDC_EXTRADATA_LTR = 21,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI = 22,
+	V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA = 23,
 };
 
 #define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
@@ -2512,6 +2513,8 @@
 		(V4L2_EVENT_MSM_VIDC_START + 6)
 #define V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER \
 		(V4L2_EVENT_MSM_VIDC_START + 7)
+#define V4L2_EVENT_MSM_VIDC_HW_OVERLOAD (V4L2_EVENT_MSM_VIDC_START + 8)
+#define V4L2_EVENT_MSM_VIDC_MAX_CLIENTS (V4L2_EVENT_MSM_VIDC_START + 9)
 
 /* Payload for V4L2_EVENT_VSYNC */
 struct v4l2_event_vsync {
diff --git a/include/media/msm/vidc_init.h b/include/media/msm/vidc_init.h
index bcc0370..9c9a270 100644
--- a/include/media/msm/vidc_init.h
+++ b/include/media/msm/vidc_init.h
@@ -20,6 +20,7 @@
 #define VIDC_MAX_NUM_CLIENTS 4
 #define MAX_VIDEO_NUM_OF_BUFF 100
 #define MAX_META_BUFFERS 32
+#define MAX_MV_BUFFERS 32
 
 enum buffer_dir {
 	BUFFER_TYPE_INPUT,
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index a164705..46cec76 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -480,8 +480,8 @@
 	CFG_GET_ACTUATOR_INFO,
 	CFG_SET_ACTUATOR_INFO,
 	CFG_SET_DEFAULT_FOCUS,
-	CFG_SET_POSITION,
 	CFG_MOVE_FOCUS,
+	CFG_SET_POSITION,
 };
 
 enum actuator_type {
diff --git a/include/media/msm_media_info.h b/include/media/msm_media_info.h
index ddf9c8e..663809e 100644
--- a/include/media/msm_media_info.h
+++ b/include/media/msm_media_info.h
@@ -158,8 +158,8 @@
 static inline unsigned int VENUS_BUFFER_SIZE(
 	int color_fmt, int width, int height)
 {
-	unsigned int uv_alignment;
-	unsigned int size = 0;
+	const unsigned int extra_size = 8*1024;
+	unsigned int uv_alignment = 0, size = 0;
 	unsigned int y_plane, uv_plane, y_stride,
 		uv_stride, y_sclines, uv_sclines;
 	if (!width || !height)
@@ -175,7 +175,7 @@
 		uv_alignment = 4096;
 		y_plane = y_stride * y_sclines;
 		uv_plane = uv_stride * uv_sclines + uv_alignment;
-		size = y_plane + uv_plane;
+		size = y_plane + uv_plane + extra_size;
 		size = MSM_MEDIA_ALIGN(size, 4096);
 		break;
 	default:
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 9028b1a..bbde6ef 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -179,6 +179,11 @@
 	unsigned int header_bits;
 };
 
+struct msm_vidc_stream_userdata_payload {
+	unsigned int type;
+	unsigned int data[1];
+};
+
 enum msm_vidc_extradata_type {
 	EXTRADATA_NONE = 0x00000000,
 	EXTRADATA_MB_QUANTIZATION = 0x00000001,
@@ -191,6 +196,7 @@
 	EXTRADATA_PANSCAN_WINDOW = 0x00000008,
 	EXTRADATA_RECOVERY_POINT_SEI = 0x00000009,
 	EXTRADATA_MPEG2_SEQDISP = 0x0000000D,
+	EXTRADATA_STREAM_USERDATA = 0x0000000E,
 	EXTRADATA_FRAME_QP = 0x0000000F,
 	EXTRADATA_FRAME_BITS_INFO = 0x00000010,
 	EXTRADATA_MULTISLICE_INFO = 0x7F100000,
@@ -214,4 +220,9 @@
 	FRAME_RECONSTRUCTION_APPROXIMATELY_CORRECT = 0x02,
 };
 
+enum msm_vidc_userdata_type {
+	MSM_VIDC_USERDATA_TYPE_FRAME = 0x1,
+	MSM_VIDC_USERDATA_TYPE_TOP_FIELD = 0x2,
+	MSM_VIDC_USERDATA_TYPE_BOTTOM_FIELD = 0x3,
+};
 #endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f6e37c4..8b833dd 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3799,6 +3799,15 @@
  * @gfp: context flags
  */
 void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp);
+/**
+ * cfg80211_is_gratuitous_arp_unsolicited_na - packet is grat. ARP/unsol. NA
+ * @skb: the input packet, must be an ethernet frame already
+ *
+ * Return: %true if the packet is a gratuitous ARP or unsolicited NA packet.
+ * This is used to drop packets that shouldn't occur because the AP implements
+ * a proxy service.
+ */
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb);
 
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 1635fc3..07199e0 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -180,6 +180,7 @@
 	int					   stream_id;
 	/* audio cache operations fptr*/
 	int (*fptr_cache_ops)(struct audio_buffer *abuff, int cache_op);
+	atomic_t               unmap_cb_success;
 };
 
 void q6asm_audio_client_free(struct audio_client *ac);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 4e316e1..8715a79 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -141,6 +141,23 @@
 }
 KERNEL_ATTR_RO(fscaps);
 
+int rcu_expedited;
+static ssize_t rcu_expedited_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", rcu_expedited);
+}
+static ssize_t rcu_expedited_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	if (kstrtoint(buf, 0, &rcu_expedited))
+		return -EINVAL;
+
+	return count;
+}
+KERNEL_ATTR_RW(rcu_expedited);
+
 /*
  * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
  */
@@ -182,6 +199,7 @@
 	&kexec_crash_size_attr.attr,
 	&vmcoreinfo_attr.attr,
 #endif
+	&rcu_expedited_attr.attr,
 	NULL
 };
 
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 8ba99cd..20dfba5 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -109,4 +109,6 @@
 	}
 }
 
+extern int rcu_expedited;
+
 #endif /* __LINUX_RCU_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a86f174..5746f18 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,12 +45,15 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/hardirq.h>
+#include <linux/module.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/rcu.h>
 
 #include "rcu.h"
 
+module_param(rcu_expedited, int, 0);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
 struct lockdep_map rcu_lock_map =
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 22ecea0..5a0f324 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -750,7 +750,10 @@
 		return;
 
 	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
-	rcu_barrier();
+	if (rcu_expedited)
+		synchronize_rcu_expedited();
+	else
+		rcu_barrier();
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 4eec66e..e269782 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1926,7 +1926,10 @@
 			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
 	if (rcu_blocking_is_gp())
 		return;
-	wait_rcu_gp(call_rcu_sched);
+	if (rcu_expedited)
+		synchronize_sched_expedited();
+	else
+		wait_rcu_gp(call_rcu_sched);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
@@ -1947,7 +1950,10 @@
 			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
 	if (rcu_blocking_is_gp())
 		return;
-	wait_rcu_gp(call_rcu_bh);
+	if (rcu_expedited)
+		synchronize_rcu_bh_expedited();
+	else
+		wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c023464..beafb9c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -737,7 +737,10 @@
 			   "Illegal synchronize_rcu() in RCU read-side critical section");
 	if (!rcu_scheduler_active)
 		return;
-	wait_rcu_gp(call_rcu);
+	if (rcu_expedited)
+		synchronize_rcu_expedited();
+	else
+		wait_rcu_gp(call_rcu);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b81369b..f6c785c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -419,20 +419,6 @@
 #define REMOVE 1
 #define FIND   0
 
-static inline ktime_t ktime_now(void)
-{
-	struct timespec ts;
-	ktime_get_ts(&ts);
-
-	return timespec_to_ktime(ts);
-}
-
-/* This works even if 32 bit because of careful byte order choice */
-static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
-	return cmp1.tv64 < cmp2.tv64;
-}
-
 static const char version[] =
 	"Packet Generator for packet performance testing. "
 	"Version: " VERSION "\n";
@@ -677,7 +663,7 @@
 	seq_puts(seq, "\n");
 
 	/* not really stopped, more like last-running-at */
-	stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
+	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
 	idle = pkt_dev->idle_acc;
 	do_div(idle, NSEC_PER_USEC);
 
@@ -2141,12 +2127,12 @@
 		return;
 	}
 
-	start_time = ktime_now();
+	start_time = ktime_get();
 	if (remaining < 100000) {
 		/* for small delays (<100us), just loop until limit is reached */
 		do {
-			end_time = ktime_now();
-		} while (ktime_lt(end_time, spin_until));
+			end_time = ktime_get();
+		} while (ktime_compare(end_time, spin_until) < 0);
 	} else {
 		/* see do_nanosleep */
 		hrtimer_init_sleeper(&t, current);
@@ -2162,7 +2148,7 @@
 			hrtimer_cancel(&t.timer);
 		} while (t.task && pkt_dev->running && !signal_pending(current));
 		__set_current_state(TASK_RUNNING);
-		end_time = ktime_now();
+		end_time = ktime_get();
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
@@ -3008,8 +2994,7 @@
 			pktgen_clear_counters(pkt_dev);
 			pkt_dev->running = 1;	/* Cranke yeself! */
 			pkt_dev->skb = NULL;
-			pkt_dev->started_at =
-				pkt_dev->next_tx = ktime_now();
+			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
 
 			set_pkt_overhead(pkt_dev);
 
@@ -3168,7 +3153,7 @@
 
 	kfree_skb(pkt_dev->skb);
 	pkt_dev->skb = NULL;
-	pkt_dev->stopped_at = ktime_now();
+	pkt_dev->stopped_at = ktime_get();
 	pkt_dev->running = 0;
 
 	show_results(pkt_dev, nr_frags);
@@ -3187,7 +3172,7 @@
 			continue;
 		if (best == NULL)
 			best = pkt_dev;
-		else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
+		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
 			best = pkt_dev;
 	}
 	if_unlock(t);
@@ -3272,14 +3257,14 @@
 
 static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
-	ktime_t idle_start = ktime_now();
+	ktime_t idle_start = ktime_get();
 	schedule();
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
 }
 
 static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
-	ktime_t idle_start = ktime_now();
+	ktime_t idle_start = ktime_get();
 
 	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
@@ -3290,7 +3275,7 @@
 		else
 			cpu_relax();
 	}
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
 }
 
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
@@ -3312,7 +3297,7 @@
 	 * "never transmit"
 	 */
 	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
-		pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
+		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
 		return;
 	}
 
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index f6c74c9..6a77ffc 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -266,6 +266,9 @@
 
 country DZ:
 	(2402 - 2482 @ 40), (N/A, 20)
+	(5150 - 5250 @ 80), (N/A, 23)
+	(5250 - 5350 @ 80), (N/A, 23), DFS
+	(5470 - 5670 @ 80), (N/A, 20), DFS
 
 country EC:
 	(2402 - 2482 @ 40), (N/A, 20)
@@ -416,8 +419,8 @@
 
 country ID:
 	# ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
-	(2402 - 2482 @ 20), (N/A, 20)
-	(5735 - 5815 @ 20), (N/A, 23)
+	(2402 - 2482 @ 20), (N/A, 30)
+	(5735 - 5815 @ 20), (N/A, 30)
 
 country IE: DFS-ETSI
 	(2402 - 2482 @ 40), (N/A, 20)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1924758..1f547f9 100755
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -345,6 +345,9 @@
 	struct reg_regdb_search_request *request;
 	const struct ieee80211_regdomain *curdom, *regdom;
 	int i, r;
+	bool set_reg = false;
+
+	mutex_lock(&cfg80211_mutex);
 
 	mutex_lock(&reg_regdb_search_mutex);
 	while (!list_empty(&reg_regdb_search_list)) {
@@ -360,9 +363,7 @@
 				r = reg_copy_regd(&regdom, curdom);
 				if (r)
 					break;
-				mutex_lock(&cfg80211_mutex);
-				set_regdom(regdom);
-				mutex_unlock(&cfg80211_mutex);
+				set_reg = true;
 				break;
 			}
 		}
@@ -370,6 +371,11 @@
 		kfree(request);
 	}
 	mutex_unlock(&reg_regdb_search_mutex);
+
+	if (set_reg)
+		set_regdom(regdom);
+
+	mutex_unlock(&cfg80211_mutex);
 }
 
 static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1ba7232..b83c5b2 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -10,6 +10,8 @@
 #include <net/cfg80211.h>
 #include <net/ip.h>
 #include <net/dsfield.h>
+#include <net/ndisc.h>
+#include <linux/if_arp.h>
 #include "core.h"
 
 struct ieee80211_rate *
@@ -1150,3 +1152,54 @@
 const unsigned char bridge_tunnel_header[] __aligned(2) =
 	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
 EXPORT_SYMBOL(bridge_tunnel_header);
+
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
+{
+	const struct ethhdr *eth = (void *)skb->data;
+	const struct {
+		struct arphdr hdr;
+		u8 ar_sha[ETH_ALEN];
+		u8 ar_sip[4];
+		u8 ar_tha[ETH_ALEN];
+		u8 ar_tip[4];
+	} __packed *arp;
+	const struct ipv6hdr *ipv6;
+	const struct icmp6hdr *icmpv6;
+
+	switch (eth->h_proto) {
+	case cpu_to_be16(ETH_P_ARP):
+		/* can't say - but will probably be dropped later anyway */
+		if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*arp)))
+			return false;
+
+		arp = (void *)(eth + 1);
+
+		if ((arp->hdr.ar_op == cpu_to_be16(ARPOP_REPLY) ||
+		     arp->hdr.ar_op == cpu_to_be16(ARPOP_REQUEST)) &&
+		    !memcmp(arp->ar_sip, arp->ar_tip, sizeof(arp->ar_sip)))
+			return true;
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		/* can't say - but will probably be dropped later anyway */
+		if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*ipv6) +
+					sizeof(*icmpv6)))
+			return false;
+
+		ipv6 = (void *)(eth + 1);
+		icmpv6 = (void *)(ipv6 + 1);
+
+		if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
+		    !memcmp(&ipv6->saddr, &ipv6->daddr, sizeof(ipv6->saddr)))
+			return true;
+		break;
+	default:
+		/*
+		 * no need to support other protocols, proxy service isn't
+		 * specified for any others
+		 */
+		break;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(cfg80211_is_gratuitous_arp_unsolicited_na);
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 34cb21a..d405667 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -3499,6 +3499,7 @@
 	pr_debug("%s: enter\n", __func__);
 
 	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+	mutex_lock(&mbhc->mbhc_lock);
 	mbhc_status = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_STATUS) & 0x3E;
 
 	if (mbhc->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) {
@@ -3671,6 +3672,7 @@
 
  done:
 	pr_debug("%s: leave\n", __func__);
+	mutex_unlock(&mbhc->mbhc_lock);
 	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
 	return IRQ_HANDLED;
 }
@@ -4518,6 +4520,7 @@
 	pr_debug("%s: enter event %s(%d)\n", __func__,
 		 wcd9xxx_get_event_string(event), event);
 
+	mutex_lock(&mbhc->mbhc_lock);
 	switch (event) {
 	/* MICBIAS usage change */
 	case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
@@ -4711,6 +4714,7 @@
 		WARN(1, "Unknown event %d\n", event);
 		ret = -EINVAL;
 	}
+	mutex_unlock(&mbhc->mbhc_lock);
 
 	pr_debug("%s: leave\n", __func__);
 
@@ -4920,17 +4924,19 @@
 				  wcd9xxx_mbhc_insert_work);
 	}
 
+	mutex_init(&mbhc->mbhc_lock);
+
 	/* Register event notifier */
 	mbhc->nblock.notifier_call = wcd9xxx_event_notify;
 	ret = wcd9xxx_resmgr_register_notifier(mbhc->resmgr, &mbhc->nblock);
 	if (ret) {
 		pr_err("%s: Failed to register notifier %d\n", __func__, ret);
+		mutex_destroy(&mbhc->mbhc_lock);
 		return ret;
 	}
 
 	wcd9xxx_init_debugfs(mbhc);
 
-
 	/* Disable Impedance detection by default for certain codec types */
 	if (mbhc->mbhc_cb &&
 	    mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_HELICON)
@@ -5015,6 +5021,8 @@
 err_insert_irq:
 	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
 
+	mutex_destroy(&mbhc->mbhc_lock);
+
 	pr_debug("%s: leave ret %d\n", __func__, ret);
 	return ret;
 }
@@ -5036,6 +5044,7 @@
 	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
 	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_right_ocp, mbhc);
 
+	mutex_destroy(&mbhc->mbhc_lock);
 	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
 	wcd9xxx_cleanup_debugfs(mbhc);
 }
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
index cf25798..91edaca 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ b/sound/soc/codecs/wcd9xxx-mbhc.h
@@ -368,6 +368,8 @@
 	struct dentry *debugfs_poke;
 	struct dentry *debugfs_mbhc;
 #endif
+
+	struct mutex mbhc_lock;
 };
 
 #define WCD9XXX_MBHC_CAL_SIZE(buttons, rload) ( \
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index c2cc499..a5c8f8d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -73,7 +73,7 @@
 static int srs_alsa_ctrl_ever_called;
 static int lsm_mux_slim_port;
 static int slim0_rx_aanc_fb_port;
-static int msm_route_ec_ref_rx = 4; /* NONE */
+static int msm_route_ec_ref_rx = 7; /* NONE */
 static uint32_t voc_session_id = ALL_SESSION_VSID;
 static int msm_route_ext_ec_ref = AFE_PORT_INVALID;
 
@@ -1431,11 +1431,23 @@
 		break;
 	case 2:
 		msm_route_ec_ref_rx = 2;
-		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_RX;
+		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
+		break;
+	case 3:
+		msm_route_ec_ref_rx = 3;
+		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+		break;
+	case 4:
+		msm_route_ec_ref_rx = 4;
+		ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		break;
+	case 5:
+		msm_route_ec_ref_rx = 5;
+		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
 		break;
 	default:
-		msm_route_ec_ref_rx = 3; /* NONE */
-		ec_ref_port_id = -1;
+		msm_route_ec_ref_rx = 6; /* NONE */
+		ec_ref_port_id = AFE_PORT_INVALID;
 		break;
 	}
 	adm_ec_ref_rx_id(ec_ref_port_id);
@@ -1445,16 +1457,46 @@
 	return 0;
 }
 
-static const char *const ec_ref_rx[] = { "SLIM_RX", "I2S_RX", "SEC_I2S_RX",
-	"PROXY_RX", "NONE" };
+static const char *const ec_ref_rx[] = { "SLIM_RX", "I2S_RX", "PRI_MI2S_TX",
+	"SEC_MI2S_TX", "TERT_MI2S_TX", "QUAT_MI2S_TX", "PROXY_RX", "NONE"};
 static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
-	SOC_ENUM_SINGLE_EXT(5, ec_ref_rx),
+	SOC_ENUM_SINGLE_EXT(8, ec_ref_rx),
 };
 
-static const struct snd_kcontrol_new ec_ref_rx_mixer_controls[] = {
-	SOC_ENUM_EXT("EC_REF_RX", msm_route_ec_ref_rx_enum[0],
-		     msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put),
-};
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul1 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL1 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul2 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL2 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul4 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL4 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul5 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL5 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul6 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL6 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul8 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL8 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul9 =
+	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL9 MUX Mux",
+		msm_route_ec_ref_rx_enum[0],
+		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
 
 static int msm_routing_ext_ec_get(struct snd_kcontrol *kcontrol,
 				  struct snd_ctl_elem_value *ucontrol)
@@ -3577,7 +3619,20 @@
 				&slim0_rx_vi_fb_lch_mux),
 	SND_SOC_DAPM_MUX("VOC_EXT_EC MUX", SND_SOC_NOPM, 0, 0,
 			 &voc_ext_ec_mux),
-
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL1 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul1),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL2 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul2),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL4 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul4),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL5 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul5),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL6 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul6),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL8 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul8),
+	SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0,
+		&ext_ec_ref_mux_ul9),
 };
 
 static const struct snd_soc_dapm_route intercon[] = {
@@ -3899,6 +3954,63 @@
 	{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
 
+	{"AUDIO_REF_EC_UL1 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL1 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"AUDIO_REF_EC_UL2 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL2 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"AUDIO_REF_EC_UL4 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL4 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"AUDIO_REF_EC_UL5 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL5 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"AUDIO_REF_EC_UL6 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL6 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"AUDIO_REF_EC_UL8 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL8 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"AUDIO_REF_EC_UL9 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "I2S_RX" , "PRI_I2S_TX"},
+	{"AUDIO_REF_EC_UL9 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
+
+	{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
+	{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},
+	{"MM_UL4", NULL, "AUDIO_REF_EC_UL4 MUX"},
+	{"MM_UL5", NULL, "AUDIO_REF_EC_UL5 MUX"},
+	{"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"},
+	{"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"},
+	{"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"},
+
 	{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
 	{"Voice_Tx Mixer", "PRI_MI2S_TX_Voice", "PRI_MI2S_TX"},
 	{"Voice_Tx Mixer", "MI2S_TX_Voice", "MI2S_TX"},
@@ -4427,10 +4539,6 @@
 			ARRAY_SIZE(dolby_dap_param_end_point_controls));
 
 	snd_soc_add_platform_controls(platform,
-				ec_ref_rx_mixer_controls,
-			ARRAY_SIZE(ec_ref_rx_mixer_controls));
-
-	snd_soc_add_platform_controls(platform,
 				get_rms_controls,
 			ARRAY_SIZE(get_rms_controls));
 
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 0d19657..4b9d079 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -676,8 +676,11 @@
 
 		while (cnt >= 0) {
 			if (port->buf[cnt].data) {
-				msm_audio_ion_free(port->buf[cnt].client,
-						   port->buf[cnt].handle);
+				if (!rc)
+					msm_audio_ion_free(
+						port->buf[cnt].client,
+						port->buf[cnt].handle);
+
 				port->buf[cnt].client = NULL;
 				port->buf[cnt].handle = NULL;
 				port->buf[cnt].data = NULL;
@@ -723,7 +726,9 @@
 			(void *)&port->buf[0].phys,
 			(void *)port->buf[0].client,
 			(void *)port->buf[0].handle);
-		msm_audio_ion_free(port->buf[0].client, port->buf[0].handle);
+		if (!rc)
+			msm_audio_ion_free(port->buf[0].client,
+					   port->buf[0].handle);
 		port->buf[0].client = NULL;
 		port->buf[0].handle = NULL;
 	}
@@ -1171,6 +1176,13 @@
 			if (payload[1] != 0) {
 				pr_err("%s: cmd = 0x%x returned error = 0x%x sid:%d\n",
 					__func__, payload[0], payload[1], sid);
+				if (payload[0] ==
+				    ASM_CMD_SHARED_MEM_UNMAP_REGIONS)
+					atomic_set(&ac->unmap_cb_success, 0);
+			} else {
+				if (payload[0] ==
+				    ASM_CMD_SHARED_MEM_UNMAP_REGIONS)
+					atomic_set(&ac->unmap_cb_success, 1);
 			}
 
 			if (atomic_read(&ac->cmd_state)) {
@@ -3184,6 +3196,7 @@
 			TRUE, ((ac->session << 8) | dir));
 	atomic_set(&ac->cmd_state, 1);
 	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mem_unmap.mem_map_handle = 0;
 	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
 		buf_node = list_entry(ptr, struct asm_buffer_node,
 						list);
@@ -3195,6 +3208,12 @@
 	}
 	pr_debug("%s: mem_unmap-mem_map_handle: 0x%x",
 		__func__, mem_unmap.mem_map_handle);
+
+	if (mem_unmap.mem_map_handle == 0) {
+		pr_err("%s Do not send null mem handle to DSP\n", __func__);
+		rc = 0;
+		goto fail_cmd;
+	}
 	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
 	if (rc < 0) {
 		pr_err("mem_unmap op[0x%x]rc[%d]\n",
@@ -3206,7 +3225,13 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
 	if (!rc) {
-		pr_err("timeout. waited for memory_unmap\n");
+		pr_err("%s timeout. waited for memory_unmap of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	} else if (atomic_read(&ac->unmap_cb_success) == 0) {
+		pr_err("%s Error in mem unmap callback of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
 		rc = -EINVAL;
 		goto fail_cmd;
 	}
@@ -3366,6 +3391,7 @@
 	port = &ac->port[dir];
 	buf_add = (uint32_t)port->buf->phys;
 	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mem_unmap.mem_map_handle = 0;
 	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
 		buf_node = list_entry(ptr, struct asm_buffer_node,
 						list);
@@ -3378,6 +3404,12 @@
 
 	pr_debug("%s: mem_unmap-mem_map_handle: 0x%x",
 			__func__, mem_unmap.mem_map_handle);
+
+	if (mem_unmap.mem_map_handle == 0) {
+		pr_err("%s Do not send null mem handle to DSP\n", __func__);
+		rc = 0;
+		goto fail_cmd;
+	}
 	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
 	if (rc < 0) {
 		pr_err("mmap_regions op[0x%x]rc[%d]\n",
@@ -3388,7 +3420,14 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
 	if (!rc) {
-		pr_err("timeout. waited for memory_unmap\n");
+		pr_err("%s timeout. waited for memory_unmap of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	} else if (atomic_read(&ac->unmap_cb_success) == 0) {
+		pr_err("%s Error in mem unmap callback of handle 0x%x\n",
+			__func__, mem_unmap.mem_map_handle);
+		rc = -EINVAL;
 		goto fail_cmd;
 	}
 	rc = 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 56efb97..796d6b4 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2102,8 +2102,10 @@
 {
 	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
 
+	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_PCM);
 	if (!w) {
 		dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
+		mutex_unlock(&dapm->card->dapm_mutex);
 		return -EINVAL;
 	}
 
@@ -2112,6 +2114,7 @@
 		w->force = 0;
 	dapm_mark_dirty(w, "pin configuration");
 
+	mutex_unlock(&dapm->card->dapm_mutex);
 	return 0;
 }
 
@@ -3101,7 +3104,7 @@
 
 	dev_dbg(rtd->dev, "rtd stream %d event %d\n", stream, event);
 
-	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_PCM);
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		widget_stream_event(pdapm, rtd->cpu_dai->playback_aif, event);
 		widget_stream_event(cdapm, rtd->codec_dai->playback_aif, event);
@@ -3186,8 +3189,10 @@
 {
 	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
 
+	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_PCM);
 	if (!w) {
 		dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
+		mutex_unlock(&dapm->card->dapm_mutex);
 		return -EINVAL;
 	}
 
@@ -3195,6 +3200,7 @@
 	w->connected = 1;
 	w->force = 1;
 	dapm_mark_dirty(w, "force enable");
+	mutex_unlock(&dapm->card->dapm_mutex);
 
 	return 0;
 }