Merge "diag: Update last event and last log message values"
diff --git a/Documentation/devicetree/bindings/arm/msm/jtag-mm.txt b/Documentation/devicetree/bindings/arm/msm/jtag-mm.txt
new file mode 100644
index 0000000..21dead3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/jtag-mm.txt
@@ -0,0 +1,21 @@
+* JTAG-MM
+
+The jtag-mm entry specifies the memory mapped addresses for the debug and ETM
+registers. The jtag-mm driver uses these to save and restore the registers
+using memory mapped access during power collapse so as to retain their state
+across power collapse. This is necessary in case cp14 access to the registers
+is not permitted.
+
+Required Properties:
+compatible: component name used for driver matching, should be "qcom,jtag-mm"
+reg: physical base address and length of the register set
+reg-names: should be "etm-base" for etm register set and "debug-base" for debug
+	register set.
+
+Example:
+jtag_mm: jtagmm@fc332000 {
+	compatible = "qcom,jtag-mm";
+	reg = <0xfc332000 0x1000>,
+		<0xfc333000 0x1000>;
+	reg-names = "etm-base", "debug-base";
+};
diff --git a/arch/arm/boot/dts/msm9625-coresight.dtsi b/arch/arm/boot/dts/msm9625-coresight.dtsi
index f01fe63..9a49c32 100644
--- a/arch/arm/boot/dts/msm9625-coresight.dtsi
+++ b/arch/arm/boot/dts/msm9625-coresight.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -107,11 +107,23 @@
 		coresight-child-ports = <7>;
 	};
 
+	etm: etm@fc332000 {
+		compatible = "arm,coresight-etm";
+		reg = <0xfc332000 0x1000>;
+
+		coresight-id = <8>;
+		coresight-name = "coresight-etm";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_in0>;
+		coresight-child-ports = <4>;
+	};
+
 	csr: csr@fc302000 {
 		compatible = "qcom,coresight-csr";
 		reg = <0xfc302000 0x1000>;
 
-		coresight-id = <8>;
+		coresight-id = <9>;
 		coresight-name = "coresight-csr";
 		coresight-nr-inports = <0>;
 	};
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 2fbff6d..d374b59 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -602,6 +602,12 @@
 		qcom,bam-pipe-pair = <2>;
 	};
 
+	jtag_mm: jtagmm@fc332000 {
+		compatible = "qcom,jtag-mm";
+		reg = <0xfc332000 0x1000>,
+			<0xfc330000 0x1000>;
+		reg-names = "etm-base", "debug-base";
+	};
 };
 
 /include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 136cb5a..079177e 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -329,6 +329,7 @@
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
 CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index eb3c315..0738f2d 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -330,6 +330,7 @@
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
 CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index afb9ab6..83a5fb0 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -184,6 +184,7 @@
 	select ARM_USE_USER_ACCESSIBLE_TIMERS
 	select MSM_USE_USER_ACCESSIBLE_TIMERS
 	select MSM_CPU_PWRCTL
+	select MSM_LPM_TEST
 
 config ARCH_MSM8930
 	bool "MSM8930"
@@ -374,6 +375,7 @@
 	select MSM_QDSP6V2_CODECS
 	select MSM_AUDIO_QDSP6V2 if SND_SOC
 	select CPU_HAS_L2_PMU
+	select MSM_JTAG_MM if MSM_QDSS
 
 config ARCH_MSM8910
 	bool "MSM8910"
@@ -522,6 +524,17 @@
 	  enables the MPM driver that supports initialization from a device
 	  tree
 
+config MSM_LPM_TEST
+	bool "Low Power Mode test framework"
+	depends on MSM_RPM
+	depends on MSM_PM8X60
+	help
+	  LPM_TEST is a test framework that assists in exercising the low
+	  power mode algorithm on MSM targets. This test framework tracks
+	  notifications sent during entry/exit of the low power modes and
+	  processes them to measure various statistics, including
+	  latency.
+
 config MSM_XO
 	bool
 
@@ -2260,6 +2273,19 @@
 	  For production builds, you should probably say 'N' here to avoid
 	  potential power, performance and memory penalty.
 
+config MSM_JTAG_MM
+	bool "ETM trace and debug support across power collapse using memory mapped access"
+	help
+	   Enables support for kernel debugging (specifically breakpoints) and
+	   processor tracing using ETM across power collapse both for JTAG and
+	   OS hosted software running on the target. Enabling this will ensure
+	   debug and ETM registers are saved and restored across power collapse.
+	   Needed on targets on which cp14 access to debug and ETM registers is
+	   not permitted and so memory mapped access is necessary.
+
+	   For production builds, you should probably say 'N' here to avoid
+	   potential power, performance and memory penalty.
+
 config MSM_ETM
 	tristate "Enable MSM ETM and ETB"
 	depends on ARCH_MSM8X60
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 2d617a9..e92e4de 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -60,6 +60,7 @@
 obj-$(CONFIG_CPU_V6) += idle-v6.o
 obj-$(CONFIG_CPU_V7) += idle-v7.o
 obj-$(CONFIG_MSM_JTAG) += jtag.o
+obj-$(CONFIG_MSM_JTAG_MM) += jtag-mm.o
 
 msm-etm-objs := etm.o
 obj-$(CONFIG_MSM_ETM) += msm-etm.o
@@ -328,6 +329,7 @@
 	obj-$(CONFIG_ARCH_MSM8960) += rpm_resources.o
 	obj-$(CONFIG_ARCH_MSM8X60) += rpm_resources.o
 	obj-$(CONFIG_ARCH_MSM9615) += rpm_resources.o
+	obj-$(CONFIG_MSM_LPM_TEST) += test-lpm.o
 endif
 ifdef CONFIG_MSM_RPM_SMD
 	obj-$(CONFIG_ARCH_MSM8974) += lpm_levels.o lpm_resources.o
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index e8c7680..359a156 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -239,6 +239,118 @@
 	{ 0, { 0 } }
 };
 
+static struct acpu_level tbl_PVS0_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   950000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   950000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   962500 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),  1000000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1025000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1037500 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1075000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1087500 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1125000 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1150000 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1162500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS1_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   950000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   950000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   962500 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   975000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1000000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1012500 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1037500 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1050000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1087500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1125000 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS2_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   925000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   925000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   925000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   925000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   937500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   950000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   975000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1000000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1012500 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1037500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1075000 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1087500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS3_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   900000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   900000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   900000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   900000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   925000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   950000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  975000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  987500 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1000000 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1037500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1050000 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS4_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   875000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   875000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   875000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   887500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   900000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   925000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  950000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  962500 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15),  975000 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1000000 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1012500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS5_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   875000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   875000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   875000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   887500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   900000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   925000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  937500 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  950000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15),  962500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15),  987500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1000000 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS6_1512MHz[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   875000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   875000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   875000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   887500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   900000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   925000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  937500 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  950000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15),  962500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15),  975000 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15),  987500 },
+	{ 0, { 0 } }
+};
+
 static struct acpu_level tbl_PVS0_1700MHz[] __initdata = {
 	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
 	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   950000 },
@@ -519,6 +631,14 @@
 	[2][4] = { tbl_PVS4_2000MHz, sizeof(tbl_PVS4_2000MHz),     25000 },
 	[2][5] = { tbl_PVS5_2000MHz, sizeof(tbl_PVS5_2000MHz),     25000 },
 	[2][6] = { tbl_PVS6_2000MHz, sizeof(tbl_PVS6_2000MHz),     25000 },
+
+	[14][0] = { tbl_PVS0_1512MHz, sizeof(tbl_PVS0_1512MHz),     0 },
+	[14][1] = { tbl_PVS1_1512MHz, sizeof(tbl_PVS1_1512MHz),     25000 },
+	[14][2] = { tbl_PVS2_1512MHz, sizeof(tbl_PVS2_1512MHz),     25000 },
+	[14][3] = { tbl_PVS3_1512MHz, sizeof(tbl_PVS3_1512MHz),     25000 },
+	[14][4] = { tbl_PVS4_1512MHz, sizeof(tbl_PVS4_1512MHz),     25000 },
+	[14][5] = { tbl_PVS5_1512MHz, sizeof(tbl_PVS5_1512MHz),     25000 },
+	[14][6] = { tbl_PVS6_1512MHz, sizeof(tbl_PVS6_1512MHz),     25000 },
 };
 
 static struct acpuclk_krait_params acpuclk_8064_params __initdata = {
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 2f75470..b39dc27 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012-2013, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -142,34 +142,6 @@
 		},
 	},
 	{
-		.gpio	= 16,		/* Sec mi2s ws */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
-			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
-		},
-	},
-	{
-		.gpio	= 17,		/* Sec mi2s din */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
-			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
-		},
-	},
-	{
-		.gpio	= 18,		/* Sec mi2s dout */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
-			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
-		},
-	},
-	{
-		.gpio	= 19,		/* Sec mi2s clk */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
-			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
-		},
-	},
-	{
 		.gpio	= 71,		/* mi2s mclk */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 43a03a0..be20ebd 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -662,9 +662,9 @@
 
 #define D0_ID		 1
 #define D1_ID		 2
-#define A0_ID		 3
-#define A1_ID		 4
-#define A2_ID		 5
+#define A0_ID		 4
+#define A1_ID		 5
+#define A2_ID		 6
 #define DIFF_CLK_ID	 7
 #define DIV_CLK1_ID	11
 #define DIV_CLK2_ID	12
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 151f192..9f8c79b 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2175,6 +2175,7 @@
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31a000.funnel"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc321000.stm"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc332000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc332000.jtagmm"),
 
 	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc322000.tmc"),
 	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc318000.tpiu"),
@@ -2185,6 +2186,7 @@
 	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc31a000.funnel"),
 	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc321000.stm"),
 	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc332000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc332000.jtagmm"),
 
 };
 
diff --git a/arch/arm/mach-msm/cpuidle.c b/arch/arm/mach-msm/cpuidle.c
index dd2dc1d..2932b7e 100644
--- a/arch/arm/mach-msm/cpuidle.c
+++ b/arch/arm/mach-msm/cpuidle.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -80,8 +80,7 @@
 	cpu_pm_enter();
 #endif
 
-	pm_mode = msm_pm_idle_prepare(dev, drv, index);
-	dev->last_residency = msm_pm_idle_enter(pm_mode);
+	pm_mode = msm_pm_idle_enter(dev, drv, index);
 	for (i = 0; i < dev->state_count; i++) {
 		st_usage = &dev->states_usage[i];
 		if ((enum msm_pm_sleep_mode) cpuidle_get_statedata(st_usage)
diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S
index 9a22996..6840f1c 100644
--- a/arch/arm/mach-msm/idle-v7.S
+++ b/arch/arm/mach-msm/idle-v7.S
@@ -2,7 +2,7 @@
  * Idle processing for ARMv7-based Qualcomm SoCs.
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2009, 2011-2012 Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2007-2009, 2011-2013 The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -97,7 +97,7 @@
 	mrc     p15, 0, ip, c13, c0, 1 /* context ID */
 	stmia   r0!, {r1-r9, ip}
 
-#ifdef CONFIG_MSM_JTAG
+#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
 	bl      msm_jtag_save_state
 #endif
 
@@ -185,7 +185,7 @@
 	blxne	r1
 	dmb
 
-#ifdef CONFIG_MSM_JTAG
+#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
 	bl	msm_jtag_restore_state
 #endif
 	ldr     r0, =msm_saved_state	/* address of msm_saved_state ptr */
@@ -286,7 +286,7 @@
 	stmfd   sp!, {lr}
 	blxne	r1
 	dmb
-#ifdef CONFIG_MSM_JTAG
+#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
 	bl      msm_jtag_restore_state
 #endif
 	ldmfd   sp!, {lr}
diff --git a/arch/arm/mach-msm/include/mach/jtag.h b/arch/arm/mach-msm/include/mach/jtag.h
index 3850eff..2131be6 100644
--- a/arch/arm/mach-msm/include/mach/jtag.h
+++ b/arch/arm/mach-msm/include/mach/jtag.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,7 @@
 #ifndef __MACH_JTAG_H
 #define __MACH_JTAG_H
 
-#ifdef CONFIG_MSM_JTAG
+#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
 extern void msm_jtag_save_state(void);
 extern void msm_jtag_restore_state(void);
 #else
diff --git a/arch/arm/mach-msm/jtag-mm.c b/arch/arm/mach-msm/jtag-mm.c
new file mode 100644
index 0000000..af05995
--- /dev/null
+++ b/arch/arm/mach-msm/jtag-mm.c
@@ -0,0 +1,747 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <mach/scm.h>
+#include <mach/jtag.h>
+
+/* Coresight management registers */
+#define CORESIGHT_ITCTRL	(0xF00)
+#define CORESIGHT_CLAIMSET	(0xFA0)
+#define CORESIGHT_CLAIMCLR	(0xFA4)
+#define CORESIGHT_LAR		(0xFB0)
+#define CORESIGHT_LSR		(0xFB4)
+#define CORESIGHT_AUTHSTATUS	(0xFB8)
+#define CORESIGHT_DEVID		(0xFC8)
+#define CORESIGHT_DEVTYPE	(0xFCC)
+
+#define CORESIGHT_UNLOCK	(0xC5ACCE55)
+
+#define TIMEOUT_US		(100)
+
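+/* Helpers to build bit masks and extract bit fields from register values */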
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
+
+/* Trace registers */
+#define ETMCR			(0x000)
+#define ETMCCR			(0x004)
+#define ETMTRIGGER		(0x008)
+#define ETMASICCTLR		(0x00C)
+#define ETMSR			(0x010)
+#define ETMSCR			(0x014)
+#define ETMTSSCR		(0x018)
+#define ETMTECR2		(0x01C)
+#define ETMTEEVR		(0x020)
+#define ETMTECR1		(0x024)
+#define ETMFFLR			(0x02C)
+#define ETMVDEVR		(0x030)
+#define ETMVDCR1		(0x034)
+#define ETMVDCR3		(0x03C)
+#define ETMACVRn(n)		(0x040 + (n * 4))
+#define ETMACTRn(n)		(0x080 + (n * 4))
+#define ETMDCVRn(n)		(0x0C0 + (n * 8))
+#define ETMDCMRn(n)		(0x100 + (n * 8))
+#define ETMCNTRLDVRn(n)		(0x140 + (n * 4))
+#define ETMCNTENRn(n)		(0x150 + (n * 4))
+#define ETMCNTRLDEVRn(n)	(0x160 + (n * 4))
+#define ETMCNTVRn(n)		(0x170 + (n * 4))
+#define ETMSQ12EVR		(0x180)
+#define ETMSQ21EVR		(0x184)
+#define ETMSQ23EVR		(0x188)
+#define ETMSQ31EVR		(0x18C)
+#define ETMSQ32EVR		(0x190)
+#define ETMSQ13EVR		(0x194)
+#define ETMSQR			(0x19C)
+#define ETMEXTOUTEVRn(n)	(0x1A0 + (n * 4))
+#define ETMCIDCVRn(n)		(0x1B0 + (n * 4))
+#define ETMCIDCMR		(0x1BC)
+#define ETMIMPSPEC0		(0x1C0)
+#define ETMIMPSPEC1		(0x1C4)
+#define ETMIMPSPEC2		(0x1C8)
+#define ETMIMPSPEC3		(0x1CC)
+#define ETMIMPSPEC4		(0x1D0)
+#define ETMIMPSPEC5		(0x1D4)
+#define ETMIMPSPEC6		(0x1D8)
+#define ETMIMPSPEC7		(0x1DC)
+#define ETMSYNCFR		(0x1E0)
+#define ETMIDR			(0x1E4)
+#define ETMCCER			(0x1E8)
+#define ETMEXTINSELR		(0x1EC)
+#define ETMTESSEICR		(0x1F0)
+#define ETMEIBCR		(0x1F4)
+#define ETMTSEVR		(0x1F8)
+#define ETMAUXCR		(0x1FC)
+#define ETMTRACEIDR		(0x200)
+#define ETMIDR2			(0x208)
+#define ETMVMIDCVR		(0x240)
+#define ETMCLAIMSET		(0xFA0)
+#define ETMCLAIMCLR		(0xFA4)
+/* ETM Management registers */
+#define ETMOSLAR		(0x300)
+#define ETMOSLSR		(0x304)
+#define ETMOSSRR		(0x308)
+#define ETMPDCR			(0x310)
+#define ETMPDSR			(0x314)
+
+#define ETM_MAX_ADDR_CMP	(16)
+#define ETM_MAX_CNTR		(4)
+#define ETM_MAX_CTXID_CMP	(3)
+
+/* DBG Registers */
+#define DBGDIDR			(0x0)
+#define DBGWFAR			(0x18)
+#define DBGVCR			(0x1C)
+#define DBGDTRRXext		(0x80)
+#define DBGDSCRext		(0x88)
+#define DBGDTRTXext		(0x8C)
+#define DBGDRCR			(0x90)
+#define DBGBVRn(n)		(0x100 + (n * 4))
+#define DBGBCRn(n)		(0x140 + (n * 4))
+#define DBGWVRn(n)		(0x180 + (n * 4))
+#define DBGWCRn(n)		(0x1C0 + (n * 4))
+#define DBGPRCR			(0x310)
+#define DBGITMISCOUT		(0xEF8)
+#define DBGITMISCIN		(0xEFC)
+#define DBGCLAIMSET		(0xFA0)
+#define DBGCLAIMCLR		(0xFA4)
+
+#define DBGDSCR_MASK		(0x6C30FC3C)
+
+#define MAX_DBG_STATE_SIZE	(90)
+#define MAX_ETM_STATE_SIZE	(78)
+
+#define TZ_DBG_ETM_FEAT_ID	(0x8)
+#define TZ_DBG_ETM_VER		(0x400000)
+
+#define ARCH_V3_5		(0x25)
+#define ARM_DEBUG_ARCH_V7B	(0x3)
+
+#define etm_write(etm, val, off)	\
+			__raw_writel(val, etm->base + off)
+#define etm_read(etm, off)	\
+			__raw_readl(etm->base + off)
+
+#define dbg_write(dbg, val, off)	\
+			__raw_writel(val, dbg->base + off)
+#define dbg_read(dbg, off)	\
+			__raw_readl(dbg->base + off)
+
+#define ETM_LOCK(base)						\
+do {									\
+	/* recommended by spec to ensure ETM writes are committed prior
+	 * to resuming execution
+	 */								\
+	mb();								\
+	etm_write(base, 0x0, CORESIGHT_LAR);			\
+} while (0)
+
+#define ETM_UNLOCK(base)						\
+do {									\
+	etm_write(base, CORESIGHT_UNLOCK, CORESIGHT_LAR);	\
+	/* ensure unlock and any pending writes are committed prior to
+	 * programming ETM registers
+	 */								\
+	mb();								\
+} while (0)
+
+#define DBG_LOCK(base)						\
+do {									\
+	/* recommended by spec to ensure debug register writes are committed
+	 * prior to resuming execution
+	 */								\
+	mb();								\
+	dbg_write(base, 0x0, CORESIGHT_LAR);			\
+} while (0)
+
+#define DBG_UNLOCK(base)						\
+do {									\
+	dbg_write(base, CORESIGHT_UNLOCK, CORESIGHT_LAR);	\
+	/* ensure unlock and any pending writes are committed prior to
+	 * programming debug registers
+	 */								\
+	mb();								\
+} while (0)
+
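+/* Per-cpu counters used to detect save/restore imbalance */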
+uint32_t msm_jtag_save_cntr[NR_CPUS];
+uint32_t msm_jtag_restore_cntr[NR_CPUS];
+
+struct dbg_cpu_ctx {
+	void __iomem		*base;
+	uint32_t		*state;
+};
+
+struct dbg_ctx {
+	uint8_t			arch;
+	uint8_t			nr_wp;
+	uint8_t			nr_bp;
+	uint8_t			nr_ctx_cmp;
+	struct dbg_cpu_ctx	*cpu_ctx[NR_CPUS];
+	bool			save_restore_enabled[NR_CPUS];
+};
+static struct dbg_ctx dbg;
+
+struct etm_cpu_ctx {
+	void __iomem		*base;
+	struct device		*dev;
+	uint32_t		*state;
+};
+
+struct etm_ctx {
+	uint8_t			arch;
+	uint8_t			nr_addr_cmp;
+	uint8_t			nr_data_cmp;
+	uint8_t			nr_cntr;
+	uint8_t			nr_ext_inp;
+	uint8_t			nr_ext_out;
+	uint8_t			nr_ctxid_cmp;
+	struct etm_cpu_ctx	*cpu_ctx[NR_CPUS];
+	bool			save_restore_enabled[NR_CPUS];
+};
+
+static struct etm_ctx etm;
+
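+/* Per-cpu QDSS core clock handles, one per jtag-mm device instance */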
+static struct clk *clock[NR_CPUS];
+
+static void etm_set_pwrdwn(struct etm_cpu_ctx *etmdata)
+{
+	uint32_t etmcr;
+
+	/* ensure all writes are complete before setting pwrdwn */
+	mb();
+	etmcr = etm_read(etmdata, ETMCR);
+	etmcr |= BIT(0);
+	etm_write(etmdata, etmcr, ETMCR);
+}
+
+static void etm_clr_pwrdwn(struct etm_cpu_ctx *etmdata)
+{
+	uint32_t etmcr;
+
+	etmcr = etm_read(etmdata, ETMCR);
+	etmcr &= ~BIT(0);
+	etm_write(etmdata, etmcr, ETMCR);
+	/* ensure pwrup completes before subsequent register accesses */
+	mb();
+}
+
+static void etm_set_prog(struct etm_cpu_ctx *etmdata)
+{
+	uint32_t etmcr;
+	int count;
+
+	etmcr = etm_read(etmdata, ETMCR);
+	etmcr |= BIT(10);
+	etm_write(etmdata, etmcr, ETMCR);
+	for (count = TIMEOUT_US; BVAL(etm_read(etmdata, ETMSR), 1) != 1
+				&& count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
+	     etm_read(etmdata, ETMSR));
+}
+
+static inline void etm_save_state(struct etm_cpu_ctx *etmdata)
+{
+	int i, j;
+
+	i = 0;
+	ETM_UNLOCK(etmdata);
+
+	switch (etm.arch) {
+	case ETM_ARCH_V3_5:
+		etmdata->state[i++] = etm_read(etmdata, ETMTRIGGER);
+		etmdata->state[i++] = etm_read(etmdata, ETMASICCTLR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSR);
+		etmdata->state[i++] = etm_read(etmdata, ETMTSSCR);
+		etmdata->state[i++] = etm_read(etmdata, ETMTECR2);
+		etmdata->state[i++] = etm_read(etmdata, ETMTEEVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMTECR1);
+		etmdata->state[i++] = etm_read(etmdata, ETMFFLR);
+		etmdata->state[i++] = etm_read(etmdata, ETMVDEVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMVDCR1);
+		etmdata->state[i++] = etm_read(etmdata, ETMVDCR3);
+		for (j = 0; j < etm.nr_addr_cmp; j++) {
+			etmdata->state[i++] = etm_read(etmdata,
+								ETMACVRn(j));
+			etmdata->state[i++] = etm_read(etmdata,
+								ETMACTRn(j));
+		}
+		for (j = 0; j < etm.nr_data_cmp; j++) {
+			etmdata->state[i++] = etm_read(etmdata,
+								ETMDCVRn(j));
+			etmdata->state[i++] = etm_read(etmdata,
+								ETMDCMRn(j));
+		}
+		for (j = 0; j < etm.nr_cntr; j++) {
+			etmdata->state[i++] = etm_read(etmdata,
+							ETMCNTRLDVRn(j));
+			etmdata->state[i++] = etm_read(etmdata,
+							ETMCNTENRn(j));
+			etmdata->state[i++] = etm_read(etmdata,
+							ETMCNTRLDEVRn(j));
+			etmdata->state[i++] = etm_read(etmdata,
+							ETMCNTVRn(j));
+		}
+		etmdata->state[i++] = etm_read(etmdata, ETMSQ12EVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSQ21EVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSQ23EVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSQ31EVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSQ32EVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSQ13EVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSQR);
+		for (j = 0; j < etm.nr_ext_out; j++)
+			etmdata->state[i++] = etm_read(etmdata,
+							ETMEXTOUTEVRn(j));
+		for (j = 0; j < etm.nr_ctxid_cmp; j++)
+			etmdata->state[i++] = etm_read(etmdata,
+							ETMCIDCVRn(j));
+		etmdata->state[i++] = etm_read(etmdata, ETMCIDCMR);
+		etmdata->state[i++] = etm_read(etmdata, ETMSYNCFR);
+		etmdata->state[i++] = etm_read(etmdata, ETMEXTINSELR);
+		etmdata->state[i++] = etm_read(etmdata, ETMTSEVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMAUXCR);
+		etmdata->state[i++] = etm_read(etmdata, ETMTRACEIDR);
+		etmdata->state[i++] = etm_read(etmdata, ETMVMIDCVR);
+		etmdata->state[i++] = etm_read(etmdata, ETMCLAIMCLR);
+		etmdata->state[i++] = etm_read(etmdata, ETMCR);
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n", etm.arch,
+								__func__);
+	}
+
+	ETM_LOCK(etmdata);
+}
+
+static inline void etm_restore_state(struct etm_cpu_ctx *etmdata)
+{
+	int i, j;
+
+	i = 0;
+	ETM_UNLOCK(etmdata);
+
+	switch (etm.arch) {
+	case ETM_ARCH_V3_5:
+		etm_clr_pwrdwn(etmdata);
+		etm_write(etmdata, etmdata->state[i++], ETMTRIGGER);
+		etm_write(etmdata, etmdata->state[i++], ETMASICCTLR);
+		etm_write(etmdata, etmdata->state[i++], ETMSR);
+		etm_write(etmdata, etmdata->state[i++], ETMTSSCR);
+		etm_write(etmdata, etmdata->state[i++], ETMTECR2);
+		etm_write(etmdata, etmdata->state[i++], ETMTEEVR);
+		etm_write(etmdata, etmdata->state[i++], ETMTECR1);
+		etm_write(etmdata, etmdata->state[i++], ETMFFLR);
+		etm_write(etmdata, etmdata->state[i++], ETMVDEVR);
+		etm_write(etmdata, etmdata->state[i++], ETMVDCR1);
+		etm_write(etmdata, etmdata->state[i++], ETMVDCR3);
+		for (j = 0; j < etm.nr_addr_cmp; j++) {
+			etm_write(etmdata, etmdata->state[i++],
+								ETMACVRn(j));
+			etm_write(etmdata, etmdata->state[i++],
+								ETMACTRn(j));
+		}
+		for (j = 0; j < etm.nr_data_cmp; j++) {
+			etm_write(etmdata, etmdata->state[i++],
+								ETMDCVRn(j));
+			etm_write(etmdata, etmdata->state[i++],
+								ETMDCMRn(j));
+		}
+		for (j = 0; j < etm.nr_cntr; j++) {
+			etm_write(etmdata, etmdata->state[i++],
+							ETMCNTRLDVRn(j));
+			etm_write(etmdata, etmdata->state[i++],
+							ETMCNTENRn(j));
+			etm_write(etmdata, etmdata->state[i++],
+							ETMCNTRLDEVRn(j));
+			etm_write(etmdata, etmdata->state[i++],
+							ETMCNTVRn(j));
+		}
+		etm_write(etmdata, etmdata->state[i++], ETMSQ12EVR);
+		etm_write(etmdata, etmdata->state[i++], ETMSQ21EVR);
+		etm_write(etmdata, etmdata->state[i++], ETMSQ23EVR);
+		etm_write(etmdata, etmdata->state[i++], ETMSQ31EVR);
+		etm_write(etmdata, etmdata->state[i++], ETMSQ32EVR);
+		etm_write(etmdata, etmdata->state[i++], ETMSQ13EVR);
+		etm_write(etmdata, etmdata->state[i++], ETMSQR);
+		for (j = 0; j < etm.nr_ext_out; j++)
+			etm_write(etmdata, etmdata->state[i++],
+							ETMEXTOUTEVRn(j));
+		for (j = 0; j < etm.nr_ctxid_cmp; j++)
+			etm_write(etmdata, etmdata->state[i++],
+							ETMCIDCVRn(j));
+		etm_write(etmdata, etmdata->state[i++], ETMCIDCMR);
+		etm_write(etmdata, etmdata->state[i++], ETMSYNCFR);
+		etm_write(etmdata, etmdata->state[i++], ETMEXTINSELR);
+		etm_write(etmdata, etmdata->state[i++], ETMTSEVR);
+		etm_write(etmdata, etmdata->state[i++], ETMAUXCR);
+		etm_write(etmdata, etmdata->state[i++], ETMTRACEIDR);
+		etm_write(etmdata, etmdata->state[i++], ETMVMIDCVR);
+		etm_write(etmdata, etmdata->state[i++], ETMCLAIMSET);
+		/*
+		 * Restore ETMCR last since we don't know the saved state of
+		 * the pwrdwn bit
+		 */
+		etm_write(etmdata, etmdata->state[i++], ETMCR);
+		break;
+	default:
+		pr_err_ratelimited("unsupported etm arch %d in %s\n", etm.arch,
+								__func__);
+	}
+
+	ETM_LOCK(etmdata);
+}
+
+static inline void dbg_save_state(struct dbg_cpu_ctx *dbgdata)
+{
+	int i, j;
+
+	i = 0;
+	DBG_UNLOCK(dbgdata);
+
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGWFAR);
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGVCR);
+	for (j = 0; j < dbg.nr_bp; j++) {
+		dbgdata->state[i++] =  dbg_read(dbgdata, DBGBVRn(j));
+		dbgdata->state[i++] =  dbg_read(dbgdata, DBGBCRn(j));
+	}
+	for (j = 0; j < dbg.nr_wp; j++) {
+		dbgdata->state[i++] =  dbg_read(dbgdata, DBGWVRn(j));
+		dbgdata->state[i++] =  dbg_read(dbgdata, DBGWCRn(j));
+	}
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGPRCR);
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGCLAIMSET);
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGCLAIMCLR);
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGDTRTXext);
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGDTRRXext);
+	dbgdata->state[i++] =  dbg_read(dbgdata, DBGDSCRext);
+
+	DBG_LOCK(dbgdata);
+}
+
+static inline void dbg_restore_state(struct dbg_cpu_ctx *dbgdata)
+{
+	int i, j;
+
+	i = 0;
+	DBG_UNLOCK(dbgdata);
+
+	dbg_write(dbgdata, dbgdata->state[i++], DBGWFAR);
+	dbg_write(dbgdata, dbgdata->state[i++], DBGVCR);
+	for (j = 0; j < dbg.nr_bp; j++) {
+		dbg_write(dbgdata, dbgdata->state[i++], DBGBVRn(j));
+		dbg_write(dbgdata, dbgdata->state[i++], DBGBCRn(j));
+	}
+	for (j = 0; j < dbg.nr_wp; j++) {
+		dbg_write(dbgdata, dbgdata->state[i++], DBGWVRn(j));
+		dbg_write(dbgdata, dbgdata->state[i++], DBGWCRn(j));
+	}
+	dbg_write(dbgdata, dbgdata->state[i++], DBGPRCR);
+	dbg_write(dbgdata, dbgdata->state[i++], DBGCLAIMSET);
+	dbg_write(dbgdata, dbgdata->state[i++], DBGCLAIMCLR);
+	dbg_write(dbgdata, dbgdata->state[i++], DBGDTRTXext);
+	dbg_write(dbgdata, dbgdata->state[i++], DBGDTRRXext);
+	dbg_write(dbgdata, dbgdata->state[i++] & DBGDSCR_MASK,
+								DBGDSCRext);
+
+	DBG_LOCK(dbgdata);
+}
+
+void msm_jtag_save_state(void)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	msm_jtag_save_cntr[cpu]++;
+	/* ensure counter is updated before moving forward */
+	mb();
+
+	if (dbg.save_restore_enabled[cpu])
+		dbg_save_state(dbg.cpu_ctx[cpu]);
+	if (etm.save_restore_enabled[cpu])
+		etm_save_state(etm.cpu_ctx[cpu]);
+}
+EXPORT_SYMBOL(msm_jtag_save_state);
+
+void msm_jtag_restore_state(void)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	/* Attempt restore only if save has been done. If power collapse
+	 * is disabled, hotplug off of non-boot core will result in WFI
+	 * and hence msm_jtag_save_state will not occur. Subsequently,
+	 * during hotplug on of non-boot core when msm_jtag_restore_state
+	 * is called via msm_platform_secondary_init, this check will help
+	 * bail us out without restoring.
+	 */
+	if (msm_jtag_save_cntr[cpu] == msm_jtag_restore_cntr[cpu])
+		return;
+	else if (msm_jtag_save_cntr[cpu] != msm_jtag_restore_cntr[cpu] + 1)
+		pr_err_ratelimited("jtag imbalance, save:%lu, restore:%lu\n",
+				   (unsigned long)msm_jtag_save_cntr[cpu],
+				   (unsigned long)msm_jtag_restore_cntr[cpu]);
+
+	msm_jtag_restore_cntr[cpu]++;
+	/* ensure counter is updated before moving forward */
+	mb();
+
+	if (dbg.save_restore_enabled[cpu])
+		dbg_restore_state(dbg.cpu_ctx[cpu]);
+	if (etm.save_restore_enabled[cpu])
+		etm_restore_state(etm.cpu_ctx[cpu]);
+}
+EXPORT_SYMBOL(msm_jtag_restore_state);
+
+static inline bool etm_arch_supported(uint8_t arch)
+{
+	switch (arch) {
+	case ETM_ARCH_V3_5:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static void __devinit etm_init_arch_data(void *info)
+{
+	uint32_t etmidr;
+	uint32_t etmccr;
+	struct etm_cpu_ctx  *etmdata = info;
+
+	/*
+	 * Clear the power down bit; while it is set, writes to certain
+	 * registers might be ignored.
+	 */
+	ETM_UNLOCK(etmdata);
+
+	etm_clr_pwrdwn(etmdata);
+	/* Set the prog bit. It should already be set out of reset, but set
+	 * it here explicitly to be sure.
+	 */
+	etm_set_prog(etmdata);
+
+	/* find all capabilities */
+	etmidr = etm_read(etmdata, ETMIDR);
+	etm.arch = BMVAL(etmidr, 4, 11);
+
+	etmccr = etm_read(etmdata, ETMCCR);
+	etm.nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+	etm.nr_data_cmp = BMVAL(etmccr, 4, 7);
+	etm.nr_cntr = BMVAL(etmccr, 13, 15);
+	etm.nr_ext_inp = BMVAL(etmccr, 17, 19);
+	etm.nr_ext_out = BMVAL(etmccr, 20, 22);
+	etm.nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+	etm_set_pwrdwn(etmdata);
+
+	ETM_LOCK(etmdata);
+}
+
+static int __devinit jtag_mm_etm_probe(struct platform_device *pdev,
+								uint32_t cpu)
+{
+	struct etm_cpu_ctx *etmdata;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+
+	/* Allocate memory per cpu */
+	etmdata = devm_kzalloc(dev, sizeof(struct etm_cpu_ctx), GFP_KERNEL);
+	if (!etmdata)
+		return -ENOMEM;
+
+	etm.cpu_ctx[cpu] = etmdata;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!etmdata->base)
+		return -EINVAL;
+
+	/* Allocate etm state save space per core */
+	etmdata->state = devm_kzalloc(dev,
+			(MAX_ETM_STATE_SIZE * sizeof(uint32_t)), GFP_KERNEL);
+	if (!etmdata->state)
+		return -ENOMEM;
+
+	smp_call_function_single(0, etm_init_arch_data, etmdata, 1);
+
+	if (etm_arch_supported(etm.arch)) {
+		if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER)
+			etm.save_restore_enabled[cpu] = true;
+		else
+			pr_info("etm save-restore supported by TZ\n");
+	} else
+		pr_info("etm arch %u not supported\n", etm.arch);
+	return 0;
+}
+
+static inline bool dbg_arch_supported(uint8_t arch)
+{
+	switch (arch) {
+	case ARM_DEBUG_ARCH_V7B:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static void __devinit dbg_init_arch_data(void *info)
+{
+	uint32_t dbgdidr;
+	struct dbg_cpu_ctx *dbgdata = info;
+
+	/* This will run on core0 so use it to populate parameters */
+	dbgdidr = dbg_read(dbgdata, DBGDIDR);
+	dbg.arch = BMVAL(dbgdidr, 16, 19);
+	dbg.nr_ctx_cmp = BMVAL(dbgdidr, 20, 23) + 1;
+	dbg.nr_bp = BMVAL(dbgdidr, 24, 27) + 1;
+	dbg.nr_wp = BMVAL(dbgdidr, 28, 31) + 1;
+}
+
+
+
+static int __devinit jtag_mm_dbg_probe(struct platform_device *pdev,
+								uint32_t cpu)
+{
+	struct dbg_cpu_ctx *dbgdata;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+
+	/* Allocate memory per cpu */
+	dbgdata = devm_kzalloc(dev, sizeof(struct dbg_cpu_ctx), GFP_KERNEL);
+	if (!dbgdata)
+		return -ENOMEM;
+
+	dbg.cpu_ctx[cpu] = dbgdata;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+	dbgdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!dbgdata->base)
+		return -EINVAL;
+
+	/* Allocate dbg state save space per core */
+	dbgdata->state = devm_kzalloc(dev,
+			(MAX_DBG_STATE_SIZE * sizeof(uint32_t)), GFP_KERNEL);
+	if (!dbgdata->state)
+		return -ENOMEM;
+
+	smp_call_function_single(0, dbg_init_arch_data, dbgdata, 1);
+
+	if (dbg_arch_supported(dbg.arch)) {
+		if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER)
+			dbg.save_restore_enabled[cpu] = true;
+		else
+			pr_info("dbg save-restore supported by TZ\n");
+	} else
+		pr_info("dbg arch %u not supported\n", dbg.arch);
+	return 0;
+}
+
+static int __devinit jtag_mm_probe(struct platform_device *pdev)
+{
+	int etm_ret, dbg_ret, ret;
+	static uint32_t cpu;
+	static uint32_t count;
+	struct device *dev = &pdev->dev;
+
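+	/* Each probed device instance maps to the next cpu in probe order */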
+	cpu = count;
+	count++;
+
+	clock[cpu] = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(clock[cpu])) {
+		ret = PTR_ERR(clock[cpu]);
+		return ret;
+	}
+
+	ret = clk_set_rate(clock[cpu], CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(clock[cpu]);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, clock[cpu]);
+
+	etm_ret  = jtag_mm_etm_probe(pdev, cpu);
+
+	dbg_ret = jtag_mm_dbg_probe(pdev, cpu);
+
+	/* The probe succeeds even when only one of the etm and dbg probes
+	 * succeeds. This allows us to save-restore etm and dbg registers
+	 * independently.
+	 */
+	if (etm_ret && dbg_ret) {
+		clk_disable_unprepare(clock[cpu]);
+		ret = etm_ret;
+	} else
+		ret = 0;
+	return ret;
+}
+
+static int __devexit jtag_mm_remove(struct platform_device *pdev)
+{
+	struct clk *clock = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(clock);
+	return 0;
+}
+
+static struct of_device_id msm_qdss_mm_match[] = {
+	{ .compatible = "qcom,jtag-mm"},
+	{}
+};
+
+static struct platform_driver jtag_mm_driver = {
+	.probe          = jtag_mm_probe,
+	.remove         = __devexit_p(jtag_mm_remove),
+	.driver         = {
+		.name   = "msm-jtag-mm",
+		.owner	= THIS_MODULE,
+		.of_match_table	= msm_qdss_mm_match,
+		},
+};
+
+static int __init jtag_mm_init(void)
+{
+	return platform_driver_register(&jtag_mm_driver);
+}
+module_init(jtag_mm_init);
+
+static void __exit jtag_mm_exit(void)
+{
+	platform_driver_unregister(&jtag_mm_driver);
+}
+module_exit(jtag_mm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Coresight debug and ETM save-restore driver");
diff --git a/arch/arm/mach-msm/no-pm.c b/arch/arm/mach-msm/no-pm.c
index d460c70..7602981 100644
--- a/arch/arm/mach-msm/no-pm.c
+++ b/arch/arm/mach-msm/no-pm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,14 +37,9 @@
 
 void msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls) {}
 
-int msm_pm_idle_prepare(struct cpuidle_device *dev,
+enum msm_pm_sleep_mode msm_pm_idle_enter(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv, int index)
 {
 	return -ENOSYS;
 }
 
-int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
-{
-	return -ENOSYS;
-}
-
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index cfcf5dc..9c89f2d 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -235,6 +235,12 @@
 	return pas_init_image(PAS_WCNSS, metadata, size);
 }
 
+static int pil_pronto_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	return pas_mem_setup(PAS_WCNSS, addr, size);
+}
+
 static int pil_pronto_reset_trusted(struct pil_desc *pil)
 {
 	return pas_auth_and_reset(PAS_WCNSS);
@@ -247,6 +253,7 @@
 
 static struct pil_reset_ops pil_pronto_ops_trusted = {
 	.init_image = pil_pronto_init_image_trusted,
+	.mem_setup = pil_pronto_mem_setup_trusted,
 	.auth_and_reset = pil_pronto_reset_trusted,
 	.shutdown = pil_pronto_shutdown_trusted,
 	.proxy_vote = pil_pronto_make_proxy_vote,
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index 5e03aa8..5c498ec 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -153,6 +153,12 @@
 	return pas_init_image(PAS_Q6, metadata, size);
 }
 
+static int pil_lpass_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	return pas_mem_setup(PAS_Q6, addr, size);
+}
+
 static int pil_lpass_reset_trusted(struct pil_desc *pil)
 {
 	return pas_auth_and_reset(PAS_Q6);
@@ -165,6 +171,7 @@
 
 static struct pil_reset_ops pil_lpass_ops_trusted = {
 	.init_image = pil_lpass_init_image_trusted,
+	.mem_setup = pil_lpass_mem_setup_trusted,
 	.proxy_vote = pil_q6v5_make_proxy_votes,
 	.proxy_unvote = pil_q6v5_remove_proxy_votes,
 	.auth_and_reset = pil_lpass_reset_trusted,
diff --git a/arch/arm/mach-msm/pil-venus.c b/arch/arm/mach-msm/pil-venus.c
index eb222e3..1fcd3ba 100644
--- a/arch/arm/mach-msm/pil-venus.c
+++ b/arch/arm/mach-msm/pil-venus.c
@@ -401,6 +401,12 @@
 	return pas_init_image(PAS_VIDC, metadata, size);
 }
 
+static int pil_venus_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	return pas_mem_setup(PAS_VIDC, addr, size);
+}
+
 static int pil_venus_reset_trusted(struct pil_desc *pil)
 {
 	int rc;
@@ -442,6 +448,7 @@
 
 static struct pil_reset_ops pil_venus_ops_trusted = {
 	.init_image = pil_venus_init_image_trusted,
+	.mem_setup = pil_venus_mem_setup_trusted,
 	.auth_and_reset = pil_venus_reset_trusted,
 	.shutdown = pil_venus_shutdown_trusted,
 	.proxy_vote = pil_venus_make_proxy_vote,
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index b42ad94..7817533 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -474,8 +474,6 @@
 	}
 }
 
-static void *msm_pm_idle_rs_limits;
-
 static void msm_pm_swfi(void)
 {
 	msm_pm_config_hw_before_swfi();
@@ -774,12 +772,13 @@
 	}
 }
 
-int msm_pm_idle_prepare(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static int msm_pm_idle_prepare(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index,
+		void **msm_pm_idle_rs_limits)
 {
 	int i;
 	unsigned int power_usage = -1;
-	int ret = 0;
+	int ret = MSM_PM_SLEEP_MODE_NOT_SELECTED;
 	uint32_t modified_time_us = 0;
 	struct msm_pm_time_params time_param;
 
@@ -802,7 +801,6 @@
 		struct cpuidle_state_usage *st_usage = &dev->states_usage[i];
 		enum msm_pm_sleep_mode mode;
 		bool allow;
-		void *rs_limits = NULL;
 		uint32_t power;
 		int idx;
 
@@ -841,17 +839,20 @@
 			/* fall through */
 
 			if (pm_sleep_ops.lowest_limits)
-				rs_limits = pm_sleep_ops.lowest_limits(true,
-						mode, &time_param, &power);
+				*msm_pm_idle_rs_limits =
+					pm_sleep_ops.lowest_limits(
+						true, mode,
+						&time_param, &power);
 
 			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
 				pr_info("CPU%u: %s: %s, latency %uus, "
 					"sleep %uus, limit %p\n",
 					dev->cpu, __func__, state->desc,
 					time_param.latency_us,
-					time_param.sleep_us, rs_limits);
+					time_param.sleep_us,
+					*msm_pm_idle_rs_limits);
 
-			if (!rs_limits)
+			if (!*msm_pm_idle_rs_limits)
 				allow = false;
 			break;
 
@@ -871,8 +872,6 @@
 				ret = mode;
 			}
 
-			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
-				msm_pm_idle_rs_limits = rs_limits;
 		}
 	}
 
@@ -886,11 +885,27 @@
 	return ret;
 }
 
-int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
+enum msm_pm_sleep_mode msm_pm_idle_enter(struct cpuidle_device *dev,
+	struct cpuidle_driver *drv, int index)
 {
 	int64_t time;
-	int exit_stat;
 	bool collapsed = 1;
+	int exit_stat = -1;
+	enum msm_pm_sleep_mode sleep_mode;
+	void *msm_pm_idle_rs_limits = NULL;
+	int sleep_delay = 1;
+	int ret = -ENODEV;
+	int64_t timer_expiration = 0;
+	int notify_rpm = false;
+	bool timer_halted = false;
+
+	sleep_mode = msm_pm_idle_prepare(dev, drv, index,
+		&msm_pm_idle_rs_limits);
+
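+	/* Bail out if no low power mode resource limits were selected */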
+	if (!msm_pm_idle_rs_limits) {
+		sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
+		goto cpuidle_enter_bail;
+	}
 
 	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
 		pr_info("CPU%u: %s: mode %d\n",
@@ -898,71 +913,80 @@
 
 	time = ktime_to_ns(ktime_get());
 
-	switch (sleep_mode) {
-	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
-		msm_pm_swfi();
-		exit_stat = MSM_PM_STAT_IDLE_WFI;
-		break;
-
-	case MSM_PM_SLEEP_MODE_RETENTION:
-		msm_pm_retention();
-		exit_stat = MSM_PM_STAT_RETENTION;
-		break;
-
-	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
-		collapsed = msm_pm_power_collapse_standalone(true);
-		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
-		break;
-
-	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
-		int64_t timer_expiration = 0;
-		bool timer_halted = false;
-		uint32_t sleep_delay;
-		int ret = -ENODEV;
-		int notify_rpm =
-			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
+	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
+		notify_rpm = true;
 		timer_expiration = msm_pm_timer_enter_idle();
 
 		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
 			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
 		if (sleep_delay == 0) /* 0 would mean infinite time */
 			sleep_delay = 1;
+	}
 
-		if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
-			clock_debug_print_enabled();
+	if (pm_sleep_ops.enter_sleep)
+		ret = pm_sleep_ops.enter_sleep(sleep_delay,
+			msm_pm_idle_rs_limits,
+			true, notify_rpm);
+	if (!ret) {
 
-		if (pm_sleep_ops.enter_sleep)
-			ret = pm_sleep_ops.enter_sleep(sleep_delay,
-					msm_pm_idle_rs_limits,
-					true, notify_rpm);
-		if (!ret) {
+		switch (sleep_mode) {
+		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
+			msm_pm_swfi();
+			exit_stat = MSM_PM_STAT_IDLE_WFI;
+			break;
+
+		case MSM_PM_SLEEP_MODE_RETENTION:
+			msm_pm_retention();
+			exit_stat = MSM_PM_STAT_RETENTION;
+			break;
+
+		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
+			collapsed = msm_pm_power_collapse_standalone(true);
+			exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
+			break;
+
+		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+			if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
+				clock_debug_print_enabled();
+
 			collapsed = msm_pm_power_collapse(true);
 			timer_halted = true;
 
-			if (pm_sleep_ops.exit_sleep)
-				pm_sleep_ops.exit_sleep(msm_pm_idle_rs_limits,
-						true, notify_rpm, collapsed);
+			exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
+			msm_pm_timer_exit_idle(timer_halted);
+			break;
+
+		case MSM_PM_SLEEP_MODE_NOT_SELECTED:
+			goto cpuidle_enter_bail;
+			break;
+
+		default:
+			__WARN();
+			goto cpuidle_enter_bail;
+			break;
 		}
+		if (pm_sleep_ops.exit_sleep)
+			pm_sleep_ops.exit_sleep(msm_pm_idle_rs_limits,
+					true, notify_rpm, collapsed);
+
+		time = ktime_to_ns(ktime_get()) - time;
+		msm_pm_ftrace_lpm_exit(smp_processor_id(), sleep_mode,
+					collapsed);
+		if (exit_stat >= 0)
+			msm_pm_add_stat(exit_stat, time);
+		do_div(time, 1000);
+		dev->last_residency = (int) time;
+		return sleep_mode;
+
+	} else if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
 		msm_pm_timer_exit_idle(timer_halted);
-		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
-		break;
-	}
-
-	default:
-		__WARN();
-		goto cpuidle_enter_bail;
-	}
-
-	time = ktime_to_ns(ktime_get()) - time;
-	msm_pm_add_stat(exit_stat, time);
-	msm_pm_ftrace_lpm_exit(smp_processor_id(), sleep_mode,
-				collapsed);
-
-	do_div(time, 1000);
-	return (int) time;
+		sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
+	} else
+		sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
 
 cpuidle_enter_bail:
-	return 0;
+	dev->last_residency = 0;
+	return sleep_mode;
 }
 
 void msm_pm_cpu_enter_lowpower(unsigned int cpu)
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index bd61feb..61ee441 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -52,7 +52,8 @@
 	MSM_PM_SLEEP_MODE_RETENTION = MSM_PM_SLEEP_MODE_APPS_SLEEP,
 	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND = 5,
 	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN = 6,
-	MSM_PM_SLEEP_MODE_NR
+	MSM_PM_SLEEP_MODE_NR = 7,
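+	/* Not a real sleep mode; returned when no low power mode is selected */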
+	MSM_PM_SLEEP_MODE_NOT_SELECTED,
 };
 
 #define MSM_PM_MODE(cpu, mode_nr)  ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
@@ -107,10 +108,9 @@
 };
 
 void msm_pm_set_platform_data(struct msm_pm_platform_data *data, int count);
-int msm_pm_idle_prepare(struct cpuidle_device *dev,
+enum msm_pm_sleep_mode msm_pm_idle_enter(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv, int index);
 void msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls);
-int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode);
 void msm_pm_cpu_enter_lowpower(unsigned int cpu);
 void __init msm_pm_set_tz_retention_flag(unsigned int flag);
 
diff --git a/arch/arm/mach-msm/pmu.c b/arch/arm/mach-msm/pmu.c
index cb191fc..21f65f8 100644
--- a/arch/arm/mach-msm/pmu.c
+++ b/arch/arm/mach-msm/pmu.c
@@ -181,7 +181,7 @@
 	 * and point to the appropriate 'struct resource'.
 	 */
 #ifdef CONFIG_ARCH_MSM8625
-	if (cpu_is_msm8625()) {
+	if (cpu_is_msm8625() || cpu_is_msm8625q()) {
 		pmu_devices[0] = &msm8625_cpu_pmu_device;
 		pmu_devices[1] = &msm8625_l2_pmu_device;
 		msm8625_cpu_pmu_device.dev.platform_data = &multicore_data;
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index 43073d3..78c5ae0 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
 #include <linux/spinlock.h>
 #include <linux/cpu.h>
 #include <linux/hrtimer.h>
+#include <linux/platform_device.h>
 #include <mach/rpm.h>
 #include <mach/msm_iomap.h>
 #include <asm/mach-types.h>
@@ -71,6 +72,10 @@
 static int vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_LAST];
 static int vdd_mask;
 
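+/* Per-cpu LPM test state: sleep time, permitted level and notifier chain */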
+static DEFINE_PER_CPU(uint32_t, msm_lpm_sleep_time);
+static DEFINE_PER_CPU(int, lpm_permitted_level);
+static DEFINE_PER_CPU(struct atomic_notifier_head, lpm_notify_head);
+
 #define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2
 
 #define RPMRS_ATTR(_name) \
@@ -869,6 +874,13 @@
 	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
 }
 
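+/* A permitted level of (level count + 1) allows all low power levels */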
+static bool lpm_level_permitted(int cur_level_count)
+{
+	if (__get_cpu_var(lpm_permitted_level) == msm_rpmrs_level_count + 1)
+		return true;
+	return (__get_cpu_var(lpm_permitted_level) == cur_level_count);
+}
+
 s32 msm_cpuidle_get_deep_idle_latency(void)
 {
 	int i;
@@ -904,6 +916,7 @@
 	uint32_t pwr;
 	uint32_t next_wakeup_us = time_param->sleep_us;
 	bool modify_event_timer;
+	int best_level_iter = msm_rpmrs_level_count + 1;
 
 	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
 		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
@@ -968,6 +981,7 @@
 			level->rs_limits.latency_us[cpu] = level->latency_us;
 			level->rs_limits.power[cpu] = pwr;
 			best_level = level;
+			best_level_iter = i;
 			if (power)
 				*power = pwr;
 			if (modify_event_timer && best_level->latency_us > 1)
@@ -978,6 +992,12 @@
 				time_param->modified_time_us = 0;
 		}
 	}
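+	/* Drop the chosen level if the LPM test framework does not permit it */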
+	if (best_level && !lpm_level_permitted(best_level_iter))
+		best_level = NULL;
+	else
+		per_cpu(msm_lpm_sleep_time, cpu) =
+			time_param->modified_time_us ?
+			time_param->modified_time_us : time_param->sleep_us;
 
 	return best_level ? &best_level->rs_limits : NULL;
 }
@@ -986,6 +1006,12 @@
 		bool from_idle, bool notify_rpm)
 {
 	int rc = 0;
+	struct msm_lpm_sleep_data sleep_data;
+
+	sleep_data.limits = limits;
+	sleep_data.kernel_sleep = __get_cpu_var(msm_lpm_sleep_time);
+	atomic_notifier_call_chain(&__get_cpu_var(lpm_notify_head),
+		MSM_LPM_STATE_ENTER, &sleep_data);
 
 	if (notify_rpm) {
 		rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
@@ -1009,6 +1035,9 @@
 
 	if (msm_rpmrs_use_mpm(limits))
 		msm_mpm_exit_sleep(from_idle);
+
+	atomic_notifier_call_chain(&__get_cpu_var(lpm_notify_head),
+			MSM_LPM_STATE_EXIT, NULL);
 }
 
 static int rpmrs_cpu_callback(struct notifier_block *nfb,
@@ -1033,6 +1062,16 @@
 	return NOTIFY_OK;
 }
 
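+/* Device used to pass the low power levels to the lpm_test driver */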
+static struct lpm_test_platform_data lpm_test_pdata;
+
+static struct platform_device msm_lpm_test_device = {
+	.name		= "lpm_test",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &lpm_test_pdata,
+	},
+};
+
 static struct notifier_block __refdata rpmrs_cpu_notifier = {
 	.notifier_call = rpmrs_cpu_callback,
 };
@@ -1041,6 +1080,7 @@
 {
 	int i, k;
 	struct msm_rpmrs_level *levels = data->levels;
+	unsigned int m_cpu = 0;
 
 	msm_rpmrs_level_count = data->num_levels;
 
@@ -1052,6 +1092,16 @@
 	memcpy(msm_rpmrs_levels, levels,
 			msm_rpmrs_level_count * sizeof(struct msm_rpmrs_level));
 
+	lpm_test_pdata.use_qtimer = 0;
+	lpm_test_pdata.msm_lpm_test_levels = msm_rpmrs_levels;
+	lpm_test_pdata.msm_lpm_test_level_count = msm_rpmrs_level_count;
+
+	for_each_possible_cpu(m_cpu)
+		per_cpu(lpm_permitted_level, m_cpu) =
+				msm_rpmrs_level_count + 1;
+
+	platform_device_register(&msm_lpm_test_device);
+
 	memcpy(vdd_dig_vlevels, data->vdd_dig_levels,
 		(MSM_RPMRS_VDD_DIG_MAX + 1) * sizeof(vdd_dig_vlevels[0]));
 
@@ -1087,6 +1137,41 @@
 	return 0;
 }
 
+uint32_t msm_pm_get_pxo(struct msm_rpmrs_limits *limits)
+{
+	return limits->pxo;
+}
+
+uint32_t msm_pm_get_l2_cache(struct msm_rpmrs_limits *limits)
+{
+	return limits->l2_cache;
+}
+
+uint32_t msm_pm_get_vdd_mem(struct msm_rpmrs_limits *limits)
+{
+	return limits->vdd_mem;
+}
+
+uint32_t msm_pm_get_vdd_dig(struct msm_rpmrs_limits *limits)
+{
+	return limits->vdd_dig;
+}
+
+int msm_lpm_register_notifier(int cpu, int level_iter,
+			struct notifier_block *nb, bool is_latency_measure)
+{
+	per_cpu(lpm_permitted_level, cpu) = level_iter;
+	return atomic_notifier_chain_register(&per_cpu(lpm_notify_head,
+			cpu), nb);
+}
+
+int msm_lpm_unregister_notifier(int cpu, struct notifier_block *nb)
+{
+	per_cpu(lpm_permitted_level, cpu) = msm_rpmrs_level_count + 1;
+	return atomic_notifier_chain_unregister(&per_cpu(lpm_notify_head, cpu),
+				nb);
+}
+
 static int __init msm_rpmrs_init(void)
 {
 	struct msm_rpm_iv_pair req;
diff --git a/arch/arm/mach-msm/rpm_resources.h b/arch/arm/mach-msm/rpm_resources.h
index 46d6d94..0a180fb 100644
--- a/arch/arm/mach-msm/rpm_resources.h
+++ b/arch/arm/mach-msm/rpm_resources.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,7 +15,9 @@
 #define __ARCH_ARM_MACH_MSM_RPM_RESOURCES_H
 
 #include <mach/rpm.h>
+#include <linux/notifier.h>
 #include "pm.h"
+#include "test-lpm.h"
 
 enum {
 	MSM_RPMRS_ID_PXO_CLK = 0,
@@ -102,6 +104,94 @@
 	unsigned int rpmrs_target_id[MSM_RPMRS_ID_LAST];
 };
 
+enum {
+	MSM_LPM_STATE_ENTER = 0,
+	MSM_LPM_STATE_EXIT = 1,
+};
+
+/**
+ * struct msm_lpm_sleep_data - abstraction to get sleep data
+ * @limits:	pointer to the msm_rpmrs_limits structure
+ * @kernel_sleep:	kernel sleep time as decided by the power calculation
+ *			algorithm
+ *
+ * This structure is an abstraction to pass the limits and the kernel sleep
+ * time when entering sleep.
+ */
+
+struct msm_lpm_sleep_data {
+	struct msm_rpmrs_limits *limits;
+	uint32_t kernel_sleep;
+};
+
+#define MSM_PM(field) MSM_RPMRS_##field
+
+/**
+ * msm_pm_get_pxo() - get the limit for the PXO resource
+ * @limits:            pointer to the msm_rpmrs_limits structure
+ *
+ * This function returns the limit for the PXO resource on
+ * 8960.
+ */
+
+uint32_t msm_pm_get_pxo(struct msm_rpmrs_limits *limits);
+
+/**
+ * msm_pm_get_l2_cache() - get the limit for the L2 cache resource
+ * @limits:            pointer to the msm_rpmrs_limits structure
+ *
+ * This function returns the limit for the L2 cache resource
+ * on 8960.
+ */
+
+uint32_t msm_pm_get_l2_cache(struct msm_rpmrs_limits *limits);
+
+/**
+ * msm_pm_get_vdd_mem() - get the limit for the vdd mem resource
+ * @limits:            pointer to the msm_rpmrs_limits structure
+ *
+ * This function returns the limit for the vdd mem resource
+ * on 8960.
+ */
+
+uint32_t msm_pm_get_vdd_mem(struct msm_rpmrs_limits *limits);
+
+/**
+ * msm_pm_get_vdd_dig() - get the limit for the vdd dig resource
+ * @limits:            pointer to the msm_rpmrs_limits structure
+ *
+ * This function returns the limit for the vdd dig resource
+ * on 8960.
+ */
+
+uint32_t msm_pm_get_vdd_dig(struct msm_rpmrs_limits *limits);
+
+/**
+ * msm_lpm_register_notifier() - register for low power mode notifications
+ * @cpu:               cpu to debug
+ * @level_iter:        low power level index to debug
+ * @nb:                notifier block called back on notifications
+ * @is_latency_measure: true if this is a latency measurement run
+ *
+ * This function sets the permitted level to the index of the
+ * level under test and registers the notifier for callbacks.
+ */
+
+int msm_lpm_register_notifier(int cpu, int level_iter,
+		struct notifier_block *nb, bool is_latency_measure);
+
+/**
+ * msm_lpm_unregister_notifier() - unregister from low power mode notifications
+ * @cpu:               cpu to debug
+ * @nb:                notifier block called back on notifications
+ *
+ * This function sets the permitted level to one more than the number of
+ * available levels, which indicates that all levels are permitted, and
+ * unregisters the notifier.
+ */
+
+int msm_lpm_unregister_notifier(int cpu, struct notifier_block *nb);
+
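The register/unregister pair above is intended to be driven by a test client such as the test-lpm driver added later in this series. A minimal sketch of such a caller, assuming a hypothetical callback my_lpm_cb, CPU 0 and level index 2 (illustrative only, not part of the patch):

static int my_lpm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct msm_lpm_sleep_data *sleep = data;

	if (cmd == MSM_LPM_STATE_ENTER)
		pr_debug("entering, kernel sleep %u\n", sleep->kernel_sleep);
	else if (cmd == MSM_LPM_STATE_EXIT)
		pr_debug("exited low power level\n");	/* data is NULL here */
	return 0;
}

static struct notifier_block my_lpm_nb = {
	.notifier_call = my_lpm_cb,
};

static void my_lpm_test_run(void)
{
	/* Put level index 2 under test on CPU 0 and get callbacks around it */
	msm_lpm_register_notifier(0, 2, &my_lpm_nb, false);
	/* ... let the cpuidle/suspend path exercise the level ... */
	msm_lpm_unregister_notifier(0, &my_lpm_nb);	/* re-permit all levels */
}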
 #if defined(CONFIG_MSM_RPM)
 
 int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count);
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 47d311c..1ab1f71 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -1141,6 +1141,7 @@
 		return MSM_CPU_8064;
 
 	case 0x511F06F1:
+	case 0x511F06F2:
 	case 0x512F06F0:
 		return MSM_CPU_8974;
 
@@ -1192,6 +1193,7 @@
 	case 0x512F04D0:
 	case 0x511F06F0:
 	case 0x511F06F1:
+	case 0x511F06F2:
 	case 0x510F05D0:
 		return 1;
 
diff --git a/arch/arm/mach-msm/test-lpm.c b/arch/arm/mach-msm/test-lpm.c
new file mode 100644
index 0000000..dbc8100
--- /dev/null
+++ b/arch/arm/mach-msm/test-lpm.c
@@ -0,0 +1,696 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <mach/socinfo.h>
+#if defined(CONFIG_MSM_RPM)
+#include "rpm_resources.h"
+#endif
+#include "timer.h"
+#include "test-lpm.h"
+
+#define LPM_STATS_RESET "reset"
+#define LPM_TEST_ALL_LEVELS "lpm"
+#define LPM_TEST_LATENCIES "latency"
+#define LPM_TEST_CLEAR "clear"
+#define BUF_SIZE 200
+#define STAT_BUF_EXTRA_SIZE 500
+#define WAIT_FOR_XO 1
+#define COMM_BUF_SIZE 15
+#define INPUT_COUNT_BUF 10
+#define LPM_DEFAULT_CPU 0
+
+#define SNPRINTF(buf, size, format, ...) \
+{ \
+	if (size > 0) { \
+		int ret; \
+		ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+		if (ret > size) { \
+			buf += size; \
+			size = 0; \
+		} else { \
+			buf += ret; \
+			size -= ret; \
+		} \
+	} \
+} \
+
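The macro keeps a running cursor: each call appends at buf, advances buf by what was written, and shrinks the remaining size, so a sequence of calls builds one buffer without the caller re-checking for truncation. A small sketch of that usage (illustrative only):

static void snprintf_example(void)
{
	char page[BUF_SIZE];
	char *buf = page;
	int remaining = sizeof(page);

	SNPRINTF(buf, remaining, "Levels: %d\n", lpm_level_count);
	SNPRINTF(buf, remaining, "CPU: %u\n", cpu_to_debug);
	/* page now holds both lines; 'remaining' is the space left over */
}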
+static DEFINE_MUTEX(lpm_stats_mutex);
+
+struct lpm_level_stat {
+	char level_name[BUF_SIZE];
+	int64_t min_time;
+	int64_t max_time;
+	int64_t avg_time;
+	int64_t exit_early;
+	int64_t count;
+	unsigned long min_threshold;
+	uint32_t kernel_sleep_time;
+	bool entered;
+};
+
+static DEFINE_PER_CPU(struct lpm_level_stat *, lpm_levels);
+
+static struct dentry *lpm_stat;
+static struct dentry *lpm_ext_comm;
+static struct msm_rpmrs_level *lpm_supp_level;
+static int lpm_level_count;
+static int lpm_level_iter;
+static bool msm_lpm_use_qtimer;
+static unsigned long lpm_sleep_time;
+static bool lpm_latency_test;
+
+static unsigned int timer_interval = 5000;
+module_param_named(lpm_timer_interval_msec, timer_interval, uint,
+	S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned int latency_test_interval = 50;
+module_param_named(lpm_latency_timer_interval_usec, latency_test_interval, uint,
+	S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned int cpu_to_debug = LPM_DEFAULT_CPU;
+static int lpm_cpu_update(const char *val, const struct kernel_param *kp)
+{
+	int ret = 0;
+	unsigned int debug_val;
+
+	ret = kstrtouint(val, 10, &debug_val);
+	if ((ret < 0) || (debug_val >= num_possible_cpus()))
+		return -EINVAL;
+	cpu_to_debug = debug_val;
+	return ret;
+}
+
+static struct kernel_param_ops cpu_debug_events = {
+	.set = lpm_cpu_update,
+};
+
+module_param_cb(cpu_to_debug, &cpu_debug_events, &cpu_to_debug,
+			S_IRUGO | S_IWUSR | S_IWGRP);
+
+static void lpm_populate_name(struct lpm_level_stat *stat,
+		struct msm_rpmrs_level *supp)
+{
+	char nm[BUF_SIZE] = {0};
+	char default_buf[20];
+
+	switch (supp->sleep_mode) {
+	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
+		strlcat(nm, "WFI ", BUF_SIZE);
+		break;
+	case MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT:
+		strlcat(nm, "WFI voltage Rampdown ", BUF_SIZE);
+		break;
+	case MSM_PM_SLEEP_MODE_RETENTION:
+		strlcat(nm, "Retention ", BUF_SIZE);
+		break;
+	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
+		strlcat(nm, "Standalone Power collapse ", BUF_SIZE);
+		break;
+	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+		strlcat(nm, "Idle Power collapse ", BUF_SIZE);
+		break;
+	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
+		strlcat(nm, "Suspend Power collapse ", BUF_SIZE);
+		break;
+	default:
+		strlcat(nm, "Invalid Mode ", BUF_SIZE);
+		break;
+	}
+
+	switch (msm_pm_get_pxo(&(supp->rs_limits))) {
+	case MSM_PM(PXO_OFF):
+		strlcat(nm, "XO: OFF ", BUF_SIZE);
+		break;
+	case MSM_PM(PXO_ON):
+		strlcat(nm, "XO: ON ", BUF_SIZE);
+		break;
+	default:
+		snprintf(default_buf, sizeof(default_buf),
+			"XO : %d ", msm_pm_get_pxo(&(supp->rs_limits)));
+		strlcat(nm, default_buf, BUF_SIZE);
+		break;
+	}
+
+	switch (msm_pm_get_l2_cache(&(supp->rs_limits))) {
+	case MSM_PM(L2_CACHE_HSFS_OPEN):
+		strlcat(nm, "L2: HSFS ", BUF_SIZE);
+		break;
+	case MSM_PM(L2_CACHE_GDHS):
+		strlcat(nm, "L2: GDHS ", BUF_SIZE);
+		break;
+	case MSM_PM(L2_CACHE_RETENTION):
+		strlcat(nm, "L2: Retention ", BUF_SIZE);
+		break;
+	case MSM_PM(L2_CACHE_ACTIVE):
+		strlcat(nm, "L2: Active ", BUF_SIZE);
+		break;
+	default:
+		snprintf(default_buf, sizeof(default_buf),
+			"L2 : %d ", msm_pm_get_l2_cache(&(supp->rs_limits)));
+		strlcat(nm, default_buf, BUF_SIZE);
+		break;
+	}
+
+	snprintf(default_buf, sizeof(default_buf),
+		"Vdd_mem : %d ", msm_pm_get_vdd_mem(&(supp->rs_limits)));
+	strlcat(nm, default_buf, BUF_SIZE);
+
+	snprintf(default_buf, sizeof(default_buf),
+		"Vdd_dig : %d ", msm_pm_get_vdd_dig(&(supp->rs_limits)));
+	strlcat(nm, default_buf, BUF_SIZE);
+
+	strlcpy(stat->level_name, nm, strnlen(nm, BUF_SIZE));
+}
+
+static int64_t msm_lpm_get_time(void)
+{
+	if (msm_lpm_use_qtimer)
+		return ktime_to_ns(ktime_get());
+
+	return msm_timer_get_sclk_time(NULL);
+}
+
+static bool lpm_get_level(void *v, unsigned int *ct)
+{
+	bool ret = false;
+	int it;
+	struct msm_rpmrs_level *level_enter;
+
+	level_enter = container_of(((struct msm_lpm_sleep_data *)v)->limits,
+			struct msm_rpmrs_level, rs_limits);
+	if (level_enter) {
+		for (it = 0; it < lpm_level_count; it++)
+			if (!memcmp(level_enter , lpm_supp_level + it,
+					sizeof(struct msm_rpmrs_level))) {
+				*ct = it;
+				ret = true;
+				break;
+			}
+	}
+	return ret;
+}
+
+static int lpm_callback(struct notifier_block *self, unsigned long cmd,
+				void *sleep_data)
+{
+	static int64_t time;
+	unsigned int ct;
+	struct lpm_level_stat *stats;
+	stats = per_cpu(lpm_levels, cpu_to_debug);
+	/* Update the stats and get the start/stop time */
+	if (cmd == MSM_LPM_STATE_ENTER && !lpm_latency_test) {
+		time = msm_lpm_get_time();
+		stats[lpm_level_iter].entered = true;
+	} else if ((cmd == MSM_LPM_STATE_EXIT) && (time)
+			&& (!lpm_latency_test)) {
+		int64_t time1;
+		time1 = msm_lpm_get_time();
+		time = time1 - time;
+
+		if ((time < stats[lpm_level_iter].min_time) ||
+			(!stats[lpm_level_iter].min_time))
+			stats[lpm_level_iter].min_time = time;
+
+		if (time > stats[lpm_level_iter].max_time)
+			stats[lpm_level_iter].max_time = time;
+
+		time1 = stats[lpm_level_iter].avg_time *
+			stats[lpm_level_iter].count + time;
+		do_div(time1, ++(stats[lpm_level_iter].count));
+
+		stats[lpm_level_iter].avg_time = time1;
+		do_div(time, NSEC_PER_USEC);
+		if (time < lpm_supp_level[lpm_level_iter].
+				time_overhead_us)
+			stats[lpm_level_iter].exit_early++;
+		time = 0;
+	} else if (cmd == MSM_LPM_STATE_ENTER && lpm_latency_test) {
+
+		struct msm_lpm_sleep_data *data = sleep_data;
+		if ((lpm_get_level(sleep_data, &ct)) &&
+		(stats[ct].min_threshold == 0) &&
+		data->kernel_sleep <= lpm_sleep_time) {
+
+			stats[ct].min_threshold = lpm_sleep_time;
+			stats[ct].kernel_sleep_time =
+				data->kernel_sleep;
+		}
+	}
+	return 0;
+}
+
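For clarity, the running average maintained in the MSM_LPM_STATE_EXIT branch above follows new_avg = (old_avg * n + sample) / (n + 1). A standalone restatement of that update (illustrative only; 'sample_ns' is a hypothetical measured residency):

static void lpm_update_avg(struct lpm_level_stat *stat, int64_t sample_ns)
{
	/* do_div() divides the 64-bit value in place and returns the
	 * remainder, which is discarded here.
	 */
	int64_t tmp = stat->avg_time * stat->count + sample_ns;

	do_div(tmp, stat->count + 1);
	stat->avg_time = tmp;
	stat->count++;
}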
+static struct notifier_block lpm_idle_nb = {
+	.notifier_call = lpm_callback,
+};
+
+static void lpm_test_initiate(int lpm_level_test)
+{
+	int test_ret;
+
+	/* Signal the 'stat' debugfs read to skip latency printing */
+	lpm_sleep_time = 0;
+	lpm_latency_test = false;
+	/* Unregister any level still registered from a previous run */
+	msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
+
+	/* Register/Unregister for Notification */
+	while (lpm_level_iter < lpm_level_count) {
+		test_ret = msm_lpm_register_notifier(cpu_to_debug,
+				lpm_level_iter, &lpm_idle_nb, false);
+		if (test_ret < 0) {
+			pr_err("%s: Registering notifier failed\n", __func__);
+			return;
+		}
+		if (!timer_interval)
+			break;
+		msleep(timer_interval);
+		msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
+		if (lpm_level_test == lpm_level_count)
+			lpm_level_iter++;
+		else
+			break;
+	}
+}
+
+static void lpm_latency_test_initiate(unsigned long max_time)
+{
+	int test_ret;
+	lpm_latency_test = true;
+	lpm_sleep_time = latency_test_interval;
+
+	msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
+	if (max_time > lpm_sleep_time) {
+
+		do {
+			test_ret = msm_lpm_register_notifier(cpu_to_debug,
+					lpm_level_count + 1,
+					&lpm_idle_nb, true);
+			if (test_ret) {
+				pr_err("%s: Registering notifier failed\n",
+						__func__);
+				return;
+			}
+			usleep(lpm_sleep_time);
+			/* Unregister to ensure that we don't update the
+			 * latency during the timer value transition
+			 */
+			msm_lpm_unregister_notifier(cpu_to_debug,
+				&lpm_idle_nb);
+			lpm_sleep_time += latency_test_interval;
+		} while (lpm_sleep_time < max_time);
+	} else
+		pr_err("%s: Invalid time interval specified\n", __func__);
+
+	lpm_latency_test = false;
+}
+
+static ssize_t lpm_test_comm_read(struct file *fp, char __user *user_buffer,
+				size_t buffer_length, loff_t *position)
+{
+	int i = 0;
+	int count = buffer_length;
+	int alloc_size = 100 * lpm_level_count;
+	char *temp_buf;
+	char *comm_buf;
+	ssize_t ret;
+
+	comm_buf = kzalloc(alloc_size, GFP_KERNEL);
+	if (!comm_buf) {
+		pr_err("%s:Memory alloc failed\n", __func__);
+		ret = 0;
+		goto com_read_failed;
+	}
+	temp_buf = comm_buf;
+
+	SNPRINTF(temp_buf, count, "Low power modes available:\n");
+
+	for (i = 0; i < lpm_level_count; i++)
+		SNPRINTF(temp_buf, count, "%d. %s\n", i,
+			per_cpu(lpm_levels, cpu_to_debug)[i].level_name);
+
+	SNPRINTF(temp_buf, count, "%d. MSM test all lpm\n", i++);
+	SNPRINTF(temp_buf, count, "%d. MSM determine latency\n", i);
+
+	ret = simple_read_from_buffer(user_buffer, buffer_length - count,
+					position, comm_buf, alloc_size);
+	kfree(comm_buf);
+
+com_read_failed:
+	return ret;
+}
+
+char *trimspaces(char *time_buf)
+{
+	int len;
+	char *tail;
+
+	len = strnlen(time_buf, INPUT_COUNT_BUF);
+	tail = time_buf + len;
+	while (isspace(*time_buf) && (time_buf != tail))
+		time_buf++;
+	if (time_buf == tail) {
+		time_buf = NULL;
+		goto exit_trim_spaces;
+	}
+	len = strnlen(time_buf, INPUT_COUNT_BUF);
+	tail = time_buf + len - 1;
+	while (isspace(*tail) && tail != time_buf) {
+		*tail = '\0';
+		tail--;
+	}
+exit_trim_spaces:
+	return time_buf;
+}
+
+static ssize_t lpm_test_comm_write(struct file *fp, const char __user
+			*user_buffer, size_t count, loff_t *position)
+{
+	ssize_t ret;
+	int str_ret;
+	int lpm_level_test;
+	char *new_ptr;
+	char *comm_buf;
+
+	comm_buf = kzalloc(COMM_BUF_SIZE, GFP_KERNEL);
+	if (!comm_buf) {
+		pr_err("\'%s\': kzalloc failed\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(comm_buf, '\0', COMM_BUF_SIZE);
+
+	ret = simple_write_to_buffer(comm_buf, COMM_BUF_SIZE, position,
+					user_buffer, count);
+	new_ptr = trimspaces(comm_buf);
+	if (!new_ptr) {
+		pr_err("%s: Test case number input invalid\n", __func__);
+		goto write_com_failed;
+	}
+
+	if (!memcmp(comm_buf, LPM_TEST_ALL_LEVELS,
+			sizeof(LPM_TEST_ALL_LEVELS) - 1)) {
+		lpm_level_test = lpm_level_count;
+		lpm_level_iter = 0;
+		lpm_test_initiate(lpm_level_test);
+		goto write_com_success;
+	} else if (!memcmp(comm_buf, LPM_TEST_LATENCIES,
+			sizeof(LPM_TEST_LATENCIES) - 1)) {
+		lpm_level_test = lpm_level_count + 1;
+		lpm_latency_test_initiate(timer_interval * USEC_PER_MSEC);
+		goto write_com_success;
+	} else if (!memcmp(comm_buf, LPM_TEST_CLEAR,
+			sizeof(LPM_TEST_CLEAR) - 1)) {
+		msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
+		goto write_com_success;
+	}
+
+	str_ret = kstrtoint(new_ptr, 10, &lpm_level_test);
+	if ((str_ret) || (lpm_level_test > (lpm_level_count + 1)) ||
+		(lpm_level_test < 0))
+		goto write_com_failed;
+
+	lpm_level_iter = lpm_level_test;
+	lpm_test_initiate(lpm_level_test);
+	goto write_com_success;
+
+write_com_failed:
+	ret = -EINVAL;
+write_com_success:
+	kfree(comm_buf);
+	return ret;
+}
+
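The 'comm' node written above accepts "lpm", "latency", "clear" or a single level index. A hypothetical userspace sketch, assuming debugfs is mounted at /sys/kernel/debug (illustrative only, not part of the patch):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/msm_lpm_debug/comm", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Run every low power level on the CPU selected through the
	 * cpu_to_debug module parameter; results show up in 'stat'.
	 */
	write(fd, "lpm", 3);
	close(fd);
	return 0;
}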
+static ssize_t lpm_test_stat_read(struct file *fp, char __user *user_buffer,
+				size_t buffer_length, loff_t *position)
+{
+	int i = 0;
+	int j = 0;
+	int count = buffer_length;
+	char *stat_buf;
+	char *stat_buf_start;
+	size_t stat_buf_size;
+	ssize_t ret;
+	int64_t min_ns;
+	int64_t max_ns;
+	int64_t avg_ns;
+	uint32_t min_ms;
+	uint32_t max_ms;
+	uint32_t avg_ms;
+
+	stat_buf_size = ((sizeof(struct lpm_level_stat) * lpm_level_count) +
+				STAT_BUF_EXTRA_SIZE);
+	stat_buf = kzalloc(stat_buf_size, GFP_KERNEL);
+	if (!stat_buf) {
+		pr_err("\'%s\': kzalloc failed\n", __func__);
+		return -EINVAL;
+	}
+	stat_buf_start = stat_buf;
+	mutex_lock(&lpm_stats_mutex);
+	memset(stat_buf, '\0', stat_buf_size);
+	SNPRINTF(stat_buf, count, "\n\nStats for CPU: %d\nTotal Levels: %d\n",
+			cpu_to_debug, lpm_level_count);
+	if (!lpm_sleep_time) {
+		SNPRINTF(stat_buf, count, "Level(s) failed: ");
+		for (i = 0 ; i < lpm_level_count; i++) {
+			if (per_cpu(lpm_levels, cpu_to_debug)[i].entered)
+				continue;
+			else {
+				SNPRINTF(stat_buf, count,
+					"\n%d. %s", ++j, per_cpu(lpm_levels,
+					cpu_to_debug)[i].level_name);
+			}
+		}
+		SNPRINTF(stat_buf, count, "\n\nSTATS:");
+		for (i = 0; i < lpm_level_count; i++) {
+			min_ns = per_cpu(lpm_levels, cpu_to_debug)[i].min_time;
+			min_ms = do_div(min_ns, NSEC_PER_MSEC);
+			max_ns = per_cpu(lpm_levels, cpu_to_debug)[i].max_time;
+			max_ms = do_div(max_ns, NSEC_PER_MSEC);
+			avg_ns = per_cpu(lpm_levels, cpu_to_debug)[i].avg_time;
+			avg_ms = do_div(avg_ns, NSEC_PER_MSEC);
+			SNPRINTF(stat_buf, count, "\nLEVEL: %s\n"
+				"Entered : %lld\n"
+				"Early wakeup : %lld\n"
+				"Min Time (mSec): %lld.%06u\n"
+				"Max Time (mSec): %lld.%06u\n"
+				"Avg Time (mSec): %lld.%06u\n",
+				per_cpu(lpm_levels, cpu_to_debug)[i].level_name,
+				per_cpu(lpm_levels, cpu_to_debug)[i].count,
+				per_cpu(lpm_levels, cpu_to_debug)[i].exit_early,
+				min_ns, min_ms,
+				max_ns, max_ms,
+				avg_ns, avg_ms);
+		}
+	} else {
+		for (i = 0; i < lpm_level_count; i++) {
+			SNPRINTF(stat_buf, count, "\nLEVEL: %s\n"
+				"Min Timer value (uSec): %lu\n"
+				"Kernel sleep time (uSec): %u\n",
+				per_cpu(lpm_levels, cpu_to_debug)[i].level_name,
+				per_cpu(lpm_levels, cpu_to_debug)[i].
+				min_threshold,
+				per_cpu(lpm_levels,
+				cpu_to_debug)[i].kernel_sleep_time);
+		}
+	}
+
+	ret = simple_read_from_buffer(user_buffer, buffer_length - count,
+				position, stat_buf_start, stat_buf_size);
+
+	mutex_unlock(&lpm_stats_mutex);
+	kfree(stat_buf_start);
+	return ret;
+}
+
+static ssize_t lpm_test_stat_write(struct file *fp, const char __user
+				*user_buffer, size_t count, loff_t *position)
+{
+	char buf[sizeof(LPM_STATS_RESET)];
+	int ret;
+	int i;
+	struct lpm_level_stat *stats;
+
+	if (count > sizeof(LPM_STATS_RESET)) {
+		ret = -EINVAL;
+		goto write_debug_failed;
+	}
+
+	simple_write_to_buffer(buf, sizeof(LPM_STATS_RESET), position,
+				user_buffer, count);
+
+	if (memcmp(buf, LPM_STATS_RESET, sizeof(LPM_STATS_RESET) - 1)) {
+		ret = -EINVAL;
+		goto write_debug_failed;
+	}
+
+	mutex_lock(&lpm_stats_mutex);
+	stats = per_cpu(lpm_levels, cpu_to_debug);
+	for (i = 0 ; i < lpm_level_count; i++) {
+		stats[i].entered = 0;
+		stats[i].min_time = 0;
+		stats[i].max_time = 0;
+		stats[i].avg_time = 0;
+		stats[i].count = 0;
+		stats[i].exit_early = 0;
+		stats[i].min_threshold = 0;
+		stats[i].kernel_sleep_time = 0;
+	}
+	mutex_unlock(&lpm_stats_mutex);
+	return count;
+write_debug_failed:
+	return ret;
+}
+
+static void lpm_init_rpm_levels(int test_lpm_level_count,
+		struct msm_rpmrs_level *test_levels)
+{
+	int i = 0;
+	unsigned int m_cpu = 0;
+	struct lpm_level_stat *stat_levels = NULL;
+
+	if (test_lpm_level_count < 0)
+		return;
+
+	lpm_level_count = test_lpm_level_count;
+
+	lpm_supp_level = test_levels;
+	for_each_possible_cpu(m_cpu) {
+		stat_levels = kzalloc(sizeof(struct lpm_level_stat) *
+				lpm_level_count, GFP_KERNEL);
+		if (!stat_levels) {
+			for (i = m_cpu - 1; i >= 0; i--)
+				kfree(per_cpu(lpm_levels, i));
+			return;
+		}
+
+		for (i = 0; i < lpm_level_count; i++)
+			lpm_populate_name(&stat_levels[i], &lpm_supp_level[i]);
+
+		per_cpu(lpm_levels, m_cpu) = stat_levels;
+	}
+}
+
+static const struct file_operations fops_stat = {
+	.read = lpm_test_stat_read,
+	.write = lpm_test_stat_write,
+};
+
+static const struct file_operations fops_comm = {
+	.read = lpm_test_comm_read,
+	.write = lpm_test_comm_write,
+};
+
+static int __devinit lpm_test_init(int test_lpm_level_count,
+		struct msm_rpmrs_level *test_levels)
+{
+	int filevalue;
+	int lpm_comm;
+	int ret = -EINVAL;
+	struct dentry *parent_dir = NULL;
+
+	parent_dir = debugfs_create_dir("msm_lpm_debug", NULL);
+	if (!parent_dir) {
+		pr_err("%s: debugfs directory creation failed\n",
+				__func__);
+		goto init_err;
+	}
+
+	lpm_stat = debugfs_create_file("stat",
+			S_IRUGO | S_IWUSR | S_IWGRP, parent_dir,
+			&filevalue, &fops_stat);
+	if (!lpm_stat) {
+		pr_err("%s: lpm_stats debugfs creation failed\n",
+				__func__);
+		goto init_err;
+	}
+
+	lpm_ext_comm = debugfs_create_file("comm",
+			S_IRUGO | S_IWUSR | S_IWGRP, parent_dir, &lpm_comm,
+			&fops_comm);
+	if (!lpm_ext_comm) {
+		pr_err("%s: lpm_comm debugfs creation failed\n",
+			__func__);
+		debugfs_remove(lpm_stat);
+		goto init_err;
+	}
+
+	/* Query RPM resources and allocate the data structures */
+	lpm_init_rpm_levels(test_lpm_level_count, test_levels);
+	ret = 0;
+
+init_err:
+	return ret;
+}
+
+static int __devexit lpm_test_exit(struct platform_device *pdev)
+{
+	unsigned int m_cpu = 0;
+
+	kfree(lpm_supp_level);
+	for_each_possible_cpu(m_cpu)
+		kfree(per_cpu(lpm_levels, m_cpu));
+	debugfs_remove(lpm_stat);
+	debugfs_remove(lpm_ext_comm);
+	return 0;
+}
+
+static int __devinit lpm_test_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct lpm_test_platform_data *pdata;
+	struct msm_rpmrs_level *test_levels;
+	int test_lpm_level_count;
+
+	pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(dev, "no platform data specified\n");
+		return -EINVAL;
+	}
+
+	test_levels = pdata->msm_lpm_test_levels;
+	test_lpm_level_count = pdata->msm_lpm_test_level_count;
+
+	if (pdata->use_qtimer)
+		msm_lpm_use_qtimer = true;
+
+	lpm_test_init(test_lpm_level_count, test_levels);
+
+	return 0;
+}
+
+static struct platform_driver lpm_test_driver = {
+	.probe = lpm_test_probe,
+	.remove = lpm_test_exit,
+	.driver = {
+		.name = "lpm_test",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init lpm_test_platform_driver_init(void)
+{
+	return platform_driver_register(&lpm_test_driver);
+}
+
+late_initcall(lpm_test_platform_driver_init);
diff --git a/arch/arm/mach-msm/test-lpm.h b/arch/arm/mach-msm/test-lpm.h
new file mode 100644
index 0000000..1486f88
--- /dev/null
+++ b/arch/arm/mach-msm/test-lpm.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_TEST_LPM_H
+#define __ARCH_ARM_MACH_MSM_TEST_LPM_H
+
+struct lpm_test_platform_data {
+	struct msm_rpmrs_level *msm_lpm_test_levels;
+	int msm_lpm_test_level_count;
+	bool use_qtimer;
+};
+#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index f6c26c3..8a3a645 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1273,6 +1273,13 @@
 		ret = -ENOMEM;
 		goto fail_free_hdlc;
 	}
+	if (HDLC_OUT_BUF_SIZE < (2*payload_size) + 3) {
+		pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+				((2*payload_size) + 3));
+		driver->dropped_count++;
+		ret = -EBADMSG;
+		goto fail_free_hdlc;
+	}
 	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
 		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
 		if (err) {
@@ -1343,8 +1350,8 @@
 		driver->used = 0;
 	}
 
-	mutex_unlock(&driver->diagchar_mutex);
 	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+	mutex_unlock(&driver->diagchar_mutex);
 	if (!timer_in_progress)	{
 		timer_in_progress = 1;
 		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index 9f96b19..b569aed 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -91,75 +91,87 @@
  */
 
 /* Trace registers (0x000-0x2FC) */
-#define ETMCR			(0x000)
-#define ETMCCR			(0x004)
-#define ETMTRIGGER		(0x008)
-#define ETMSR			(0x010)
-#define ETMSCR			(0x014)
-#define ETMTSSCR		(0x018)
-#define ETMTEEVR		(0x020)
-#define ETMTECR1		(0x024)
-#define ETMFFLR			(0x02C)
-#define ETMACVRn(n)		(0x040 + (n * 4))
-#define ETMACTRn(n)		(0x080 + (n * 4))
-#define ETMCNTRLDVRn(n)		(0x140 + (n * 4))
-#define ETMCNTENRn(n)		(0x150 + (n * 4))
-#define ETMCNTRLDEVRn(n)	(0x160 + (n * 4))
-#define ETMCNTVRn(n)		(0x170 + (n * 4))
-#define ETMSQ12EVR		(0x180)
-#define ETMSQ21EVR		(0x184)
-#define ETMSQ23EVR		(0x188)
-#define ETMSQ31EVR		(0x18C)
-#define ETMSQ32EVR		(0x190)
-#define ETMSQ13EVR		(0x194)
-#define ETMSQR			(0x19C)
-#define ETMEXTOUTEVRn(n)	(0x1A0 + (n * 4))
-#define ETMCIDCVRn(n)		(0x1B0 + (n * 4))
-#define ETMCIDCMR		(0x1BC)
-#define ETMIMPSPEC0		(0x1C0)
-#define ETMIMPSPEC1		(0x1C4)
-#define ETMIMPSPEC2		(0x1C8)
-#define ETMIMPSPEC3		(0x1CC)
-#define ETMIMPSPEC4		(0x1D0)
-#define ETMIMPSPEC5		(0x1D4)
-#define ETMIMPSPEC6		(0x1D8)
-#define ETMIMPSPEC7		(0x1DC)
-#define ETMSYNCFR		(0x1E0)
-#define ETMIDR			(0x1E4)
-#define ETMCCER			(0x1E8)
-#define ETMEXTINSELR		(0x1EC)
-#define ETMTESSEICR		(0x1F0)
-#define ETMEIBCR		(0x1F4)
-#define ETMTSEVR		(0x1F8)
-#define ETMAUXCR		(0x1FC)
-#define ETMTRACEIDR		(0x200)
-#define ETMVMIDCVR		(0x240)
+#define ETMCR				(0x000)
+#define ETMCCR				(0x004)
+#define ETMTRIGGER			(0x008)
+#define ETMASSICCTLR			(0x00C)
+#define ETMSR				(0x010)
+#define ETMSCR				(0x014)
+#define ETMTSSCR			(0x018)
+#define ETMTECR2			(0x01C)
+#define ETMTEEVR			(0x020)
+#define ETMTECR1			(0x024)
+#define ETMFFLR				(0x02C)
+#define ETMVDEVR			(0x030)
+#define ETMVDCR1			(0x034)
+#define ETMVDCR3			(0x03C)
+#define ETMACVRn(n)			(0x040 + (n * 4))
+#define ETMACTRn(n)			(0x080 + (n * 4))
+#define ETMDCVRn(n)			(0x0C0 + (n * 8))
+#define ETMDCMRn(n)			(0x100 + (n * 8))
+#define ETMCNTRLDVRn(n)			(0x140 + (n * 4))
+#define ETMCNTENRn(n)			(0x150 + (n * 4))
+#define ETMCNTRLDEVRn(n)		(0x160 + (n * 4))
+#define ETMCNTVRn(n)			(0x170 + (n * 4))
+#define ETMSQ12EVR			(0x180)
+#define ETMSQ21EVR			(0x184)
+#define ETMSQ23EVR			(0x188)
+#define ETMSQ31EVR			(0x18C)
+#define ETMSQ32EVR			(0x190)
+#define ETMSQ13EVR			(0x194)
+#define ETMSQR				(0x19C)
+#define ETMEXTOUTEVRn(n)		(0x1A0 + (n * 4))
+#define ETMCIDCVRn(n)			(0x1B0 + (n * 4))
+#define ETMCIDCMR			(0x1BC)
+#define ETMIMPSPEC0			(0x1C0)
+#define ETMIMPSPEC1			(0x1C4)
+#define ETMIMPSPEC2			(0x1C8)
+#define ETMIMPSPEC3			(0x1CC)
+#define ETMIMPSPEC4			(0x1D0)
+#define ETMIMPSPEC5			(0x1D4)
+#define ETMIMPSPEC6			(0x1D8)
+#define ETMIMPSPEC7			(0x1DC)
+#define ETMSYNCFR			(0x1E0)
+#define ETMIDR				(0x1E4)
+#define ETMCCER				(0x1E8)
+#define ETMEXTINSELR			(0x1EC)
+#define ETMTESSEICR			(0x1F0)
+#define ETMEIBCR			(0x1F4)
+#define ETMTSEVR			(0x1F8)
+#define ETMAUXCR			(0x1FC)
+#define ETMTRACEIDR			(0x200)
+#define ETMIDR2				(0x208)
+#define ETMVMIDCVR			(0x240)
 /* Management registers (0x300-0x314) */
-#define ETMOSLAR		(0x300)
-#define ETMOSLSR		(0x304)
-#define ETMOSSRR		(0x308)
-#define ETMPDCR			(0x310)
-#define ETMPDSR			(0x314)
+#define ETMOSLAR			(0x300)
+#define ETMOSLSR			(0x304)
+#define ETMOSSRR			(0x308)
+#define ETMPDCR				(0x310)
+#define ETMPDSR				(0x314)
 
-#define ETM_MAX_ADDR_CMP	(16)
-#define ETM_MAX_CNTR		(4)
-#define ETM_MAX_CTXID_CMP	(3)
+#define ETM_MAX_ADDR_CMP		(16)
+#define ETM_MAX_CNTR			(4)
+#define ETM_MAX_CTXID_CMP		(3)
 
-#define ETM_MODE_EXCLUDE	BIT(0)
-#define ETM_MODE_CYCACC		BIT(1)
-#define ETM_MODE_STALL		BIT(2)
-#define ETM_MODE_TIMESTAMP	BIT(3)
-#define ETM_MODE_CTXID		BIT(4)
-#define ETM_MODE_ALL		(0x1F)
+#define ETM_MODE_EXCLUDE		BIT(0)
+#define ETM_MODE_CYCACC			BIT(1)
+#define ETM_MODE_STALL			BIT(2)
+#define ETM_MODE_TIMESTAMP		BIT(3)
+#define ETM_MODE_CTXID			BIT(4)
+#define ETM_MODE_DATA_TRACE_VAL		BIT(5)
+#define ETM_MODE_DATA_TRACE_ADDR	BIT(6)
+#define ETM_MODE_ALL			(0x7F)
 
-#define ETM_EVENT_MASK		(0x1FFFF)
-#define ETM_SYNC_MASK		(0xFFF)
-#define ETM_ALL_MASK		(0xFFFFFFFF)
+#define ETM_DATACMP_ENABLE		(0x2)
 
-#define ETM_SEQ_STATE_MAX_VAL	(0x2)
+#define ETM_EVENT_MASK			(0x1FFFF)
+#define ETM_SYNC_MASK			(0xFFF)
+#define ETM_ALL_MASK			(0xFFFFFFFF)
 
-#define ETM_REG_DUMP_VER_OFF	(4)
-#define ETM_REG_DUMP_VER	(1)
+#define ETM_SEQ_STATE_MAX_VAL		(0x2)
+
+#define ETM_REG_DUMP_VER_OFF		(4)
+#define ETM_REG_DUMP_VER		(1)
 
 enum etm_addr_type {
 	ETM_ADDR_TYPE_NONE,
@@ -203,6 +215,7 @@
 	uint8_t				nr_ext_inp;
 	uint8_t				nr_ext_out;
 	uint8_t				nr_ctxid_cmp;
+	uint8_t				nr_data_cmp;
 	uint8_t				reset;
 	uint32_t			mode;
 	uint32_t			ctrl;
@@ -210,11 +223,18 @@
 	uint32_t			startstop_ctrl;
 	uint32_t			enable_event;
 	uint32_t			enable_ctrl1;
+	uint32_t			enable_ctrl2;
 	uint32_t			fifofull_level;
 	uint8_t				addr_idx;
 	uint32_t			addr_val[ETM_MAX_ADDR_CMP];
 	uint32_t			addr_acctype[ETM_MAX_ADDR_CMP];
 	uint32_t			addr_type[ETM_MAX_ADDR_CMP];
+	bool				data_trace_support;
+	uint32_t			data_val[ETM_MAX_ADDR_CMP];
+	uint32_t			data_mask[ETM_MAX_ADDR_CMP];
+	uint32_t			viewdata_event;
+	uint32_t			viewdata_ctrl1;
+	uint32_t			viewdata_ctrl3;
 	uint8_t				cntr_idx;
 	uint32_t			cntr_rld_val[ETM_MAX_CNTR];
 	uint32_t			cntr_event[ETM_MAX_CNTR];
@@ -247,8 +267,10 @@
  */
 static void etm_os_unlock(void *info)
 {
-	etm_writel_cp14(0x0, ETMOSLAR);
-	isb();
+	if (cpu_is_krait()) {
+		etm_writel_cp14(0x0, ETMOSLAR);
+		isb();
+	}
 }
 
 /*
@@ -382,6 +404,14 @@
 	ETM_LOCK(drvdata);
 }
 
+static bool etm_version_gte(uint8_t arch, uint8_t base_arch)
+{
+	if (arch >= base_arch && ((arch & PFT_ARCH_MAJOR) != PFT_ARCH_MAJOR))
+		return true;
+	else
+		return false;
+}
+
 static void __etm_enable(void *info)
 {
 	int i;
@@ -409,13 +439,24 @@
 	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
 	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
 	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
+	if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
+		etm_writel(drvdata, drvdata->enable_ctrl2, ETMTECR2);
 	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
 	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
 	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
+	if (drvdata->data_trace_support == true) {
+		etm_writel(drvdata, drvdata->viewdata_event, ETMVDEVR);
+		etm_writel(drvdata, drvdata->viewdata_ctrl1, ETMVDCR1);
+		etm_writel(drvdata, drvdata->viewdata_ctrl3, ETMVDCR3);
+	}
 	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
 		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
 		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
 	}
+	for (i = 0; i < drvdata->nr_data_cmp; i++) {
+		etm_writel(drvdata, drvdata->data_val[i], ETMDCVRn(i));
+		etm_writel(drvdata, drvdata->data_mask[i], ETMDCMRn(i));
+	}
 	for (i = 0; i < drvdata->nr_cntr; i++) {
 		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
 		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
@@ -594,21 +635,37 @@
 	if (val) {
 		drvdata->mode = ETM_MODE_EXCLUDE;
 		drvdata->ctrl = 0x0;
+		if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0))
+			drvdata->ctrl |= BIT(11);
 		if (cpu_is_krait_v1()) {
 			drvdata->mode |= ETM_MODE_CYCACC;
 			drvdata->ctrl |= BIT(12);
 		}
 		drvdata->trigger_event = 0x406F;
 		drvdata->startstop_ctrl = 0x0;
+		if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
+			drvdata->enable_ctrl2 = 0x0;
 		drvdata->enable_event = 0x6F;
 		drvdata->enable_ctrl1 = 0x1000000;
 		drvdata->fifofull_level = 0x28;
+		if (drvdata->data_trace_support == true) {
+			drvdata->mode |= (ETM_MODE_DATA_TRACE_VAL |
+						ETM_MODE_DATA_TRACE_ADDR);
+			drvdata->ctrl |= BIT(2) | BIT(3);
+			drvdata->viewdata_event = 0x6F;
+			drvdata->viewdata_ctrl1 = 0x0;
+			drvdata->viewdata_ctrl3 = 0x10000;
+		}
 		drvdata->addr_idx = 0x0;
 		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
 			drvdata->addr_val[i] = 0x0;
 			drvdata->addr_acctype[i] = 0x0;
 			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
 		}
+		for (i = 0; i < drvdata->nr_data_cmp; i++) {
+			drvdata->data_val[i] = 0;
+			drvdata->data_mask[i] = ~(0);
+		}
 		drvdata->cntr_idx = 0x0;
 		for (i = 0; i < drvdata->nr_cntr; i++) {
 			drvdata->cntr_rld_val[i] = 0x0;
@@ -684,6 +741,17 @@
 		drvdata->ctrl |= (BIT(14) | BIT(15));
 	else
 		drvdata->ctrl &= ~(BIT(14) | BIT(15));
+	if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0)) {
+		if (drvdata->mode & ETM_MODE_DATA_TRACE_VAL)
+			drvdata->ctrl |= BIT(2);
+		else
+			drvdata->ctrl &= ~(BIT(2));
+
+		if (drvdata->mode & ETM_MODE_DATA_TRACE_ADDR)
+			drvdata->ctrl |= (BIT(3));
+		else
+			drvdata->ctrl &= ~(BIT(3));
+	}
 	spin_unlock(&drvdata->spinlock);
 
 	return size;
@@ -839,6 +907,8 @@
 
 	drvdata->addr_val[idx] = val;
 	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+	if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
+		drvdata->enable_ctrl2 |= (1 << idx);
 	spin_unlock(&drvdata->spinlock);
 	return size;
 }
@@ -1039,6 +1109,138 @@
 static DEVICE_ATTR(addr_acctype, S_IRUGO | S_IWUSR, etm_show_addr_acctype,
 		   etm_store_addr_acctype);
 
+static ssize_t etm_show_data_val(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	uint8_t idx;
+
+	spin_lock(&drvdata->spinlock);
+	idx = drvdata->addr_idx;
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	idx = idx >> 1;
+	if (idx >= drvdata->nr_data_cmp) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val = drvdata->data_val[idx];
+	spin_unlock(&drvdata->spinlock);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_data_val(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	uint8_t idx, data_idx;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	idx = drvdata->addr_idx;
+	/* Adjust index to use the correct data comparator */
+	data_idx = idx >> 1;
+	/* Only idx = 0, 2, 4, 6... are valid */
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (data_idx >= drvdata->nr_data_cmp) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (!BVAL(drvdata->addr_acctype[idx], ETM_DATACMP_ENABLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE) {
+		if (!BVAL(drvdata->addr_acctype[idx + 1], ETM_DATACMP_ENABLE)) {
+			spin_unlock(&drvdata->spinlock);
+			return -EPERM;
+		}
+	}
+
+	drvdata->data_val[data_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR(data_val, S_IRUGO | S_IWUSR, etm_show_data_val,
+			etm_store_data_val);
+
+static ssize_t etm_show_data_mask(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long mask;
+	uint8_t idx;
+
+	spin_lock(&drvdata->spinlock);
+	idx = drvdata->addr_idx;
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	idx = idx >> 1;
+	if (idx >= drvdata->nr_data_cmp) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	mask = drvdata->data_mask[idx];
+	spin_unlock(&drvdata->spinlock);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", mask);
+}
+
+static ssize_t etm_store_data_mask(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long mask;
+	uint8_t idx, data_idx;
+
+	if (sscanf(buf, "%lx", &mask) != 1)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	idx = drvdata->addr_idx;
+	/* Adjust index to use the correct data comparator */
+	data_idx = idx >> 1;
+	/* Only idx = 0, 2, 4, 6... are valid */
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (data_idx >= drvdata->nr_data_cmp) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (!BVAL(drvdata->addr_acctype[idx], ETM_DATACMP_ENABLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE) {
+		if (!BVAL(drvdata->addr_acctype[idx + 1], ETM_DATACMP_ENABLE)) {
+			spin_unlock(&drvdata->spinlock);
+			return -EPERM;
+		}
+	}
+
+	drvdata->data_mask[data_idx] = mask;
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR(data_mask, S_IRUGO | S_IWUSR, etm_show_data_mask,
+			etm_store_data_mask);
+
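The two attributes above program the data comparator paired with the currently selected address comparator: only even addr_idx values are accepted and data comparator addr_idx/2 is written. A hypothetical userspace sketch; the sysfs directory name is an assumption and depends on how the coresight device is registered on the target (illustrative only):

#include <stdio.h>

/* The selected address comparator's addr_acctype must already have its
 * data-value-compare enable bit set, or the data_val/data_mask stores
 * below return -EPERM.
 */
static void etm_attr_write(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/coresight/devices/coresight-etm0/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return;
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	etm_attr_write("addr_idx", "2");	/* even index -> data comparator 1 */
	etm_attr_write("data_val", "0xcafebabe");
	etm_attr_write("data_mask", "0x0");	/* example value */
	return 0;
}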
 static ssize_t etm_show_cntr_idx(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -1604,6 +1806,8 @@
 	&dev_attr_addr_start.attr,
 	&dev_attr_addr_stop.attr,
 	&dev_attr_addr_acctype.attr,
+	&dev_attr_data_val.attr,
+	&dev_attr_data_mask.attr,
 	&dev_attr_cntr_idx.attr,
 	&dev_attr_cntr_rld_val.attr,
 	&dev_attr_cntr_event.attr,
@@ -1681,6 +1885,8 @@
 	switch (arch) {
 	case PFT_ARCH_V1_1:
 		break;
+	case ETM_ARCH_V3_5:
+		break;
 	default:
 		return false;
 	}
@@ -1691,6 +1897,7 @@
 {
 	uint32_t etmidr;
 	uint32_t etmccr;
+	uint32_t etmcr;
 	struct etm_drvdata *drvdata = info;
 
 	ETM_UNLOCK(drvdata);
@@ -1721,6 +1928,19 @@
 	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
 	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
 	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+	drvdata->nr_data_cmp = BMVAL(etmccr, 4, 7);
+
+	if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0)) {
+		etmcr = etm_readl(drvdata, ETMCR);
+		etmcr |= (BIT(2) | BIT(3));
+		etm_writel(drvdata, etmcr, ETMCR);
+		etmcr = etm_readl(drvdata, ETMCR);
+		if (BVAL(etmcr, 2) || BVAL(etmcr, 3))
+			drvdata->data_trace_support = true;
+		else
+			drvdata->data_trace_support = false;
+	} else
+		drvdata->data_trace_support = false;
 
 	etm_set_pwrdwn(drvdata);
 	ETM_LOCK(drvdata);
@@ -1734,6 +1954,8 @@
 	drvdata->nr_ext_inp = etmdrvdata[0]->nr_ext_inp;
 	drvdata->nr_ext_out = etmdrvdata[0]->nr_ext_out;
 	drvdata->nr_ctxid_cmp = etmdrvdata[0]->nr_ctxid_cmp;
+	drvdata->nr_data_cmp = etmdrvdata[0]->nr_data_cmp;
+	drvdata->data_trace_support = etmdrvdata[0]->data_trace_support;
 }
 
 static void __devinit etm_init_default_data(struct etm_drvdata *drvdata)
@@ -1749,6 +1971,10 @@
 		drvdata->addr_val[1] = (uint32_t) _etext;
 		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
 		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+		if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0)) {
+			drvdata->addr_acctype[0] = 0x19;
+			drvdata->addr_acctype[1] = 0x19;
+		}
 	}
 	for (i = 0; i < drvdata->nr_cntr; i++) {
 		drvdata->cntr_event[i] = 0x406F;
@@ -1781,6 +2007,23 @@
 			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
 		}
 	}
+
+	if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_0))
+		drvdata->ctrl |= BIT(11);
+	if (etm_version_gte(drvdata->arch, ETM_ARCH_V1_2))
+		drvdata->enable_ctrl2 = 0x0;
+	if (drvdata->data_trace_support == true) {
+		drvdata->mode |= (ETM_MODE_DATA_TRACE_VAL |
+						ETM_MODE_DATA_TRACE_ADDR);
+		drvdata->ctrl |= BIT(2) | BIT(3);
+		drvdata->viewdata_ctrl1 = 0x0;
+		drvdata->viewdata_ctrl3 = 0x10000;
+		drvdata->viewdata_event = 0x6F;
+	}
+	for (i = 0; i < drvdata->nr_data_cmp; i++) {
+		drvdata->data_val[i] = 0;
+		drvdata->data_mask[i] = ~(0);
+	}
 }
 
 static int __devinit etm_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index d96b755..8f5addd 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_cp_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -469,6 +469,7 @@
 {
 	unsigned long offset;
 	unsigned long secure_allocation = flags & ION_SECURE;
+	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;
 
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
@@ -481,7 +482,8 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
-	if (!secure_allocation && cp_heap->disallow_non_secure_allocation) {
+	if (!force_contig && !secure_allocation &&
+	     cp_heap->disallow_non_secure_allocation) {
 		mutex_unlock(&cp_heap->lock);
 		pr_debug("%s: non-secure allocation disallowed from this heap\n",
 			__func__);
diff --git a/drivers/video/msm/mdss/Kconfig b/drivers/video/msm/mdss/Kconfig
index 56eb90c..7682a49 100644
--- a/drivers/video/msm/mdss/Kconfig
+++ b/drivers/video/msm/mdss/Kconfig
@@ -12,7 +12,7 @@
 	The MDSS HDMI Panel provides support for transmitting TMDS signals of
 	MDSS frame buffer data to connected hdmi compliant TVs, monitors etc.
 
-config FB_MSM_MDSS_HDMI_MHL_8334
+config FB_MSM_MDSS_HDMI_MHL_SII8334
 	depends on FB_MSM_MDSS_HDMI_PANEL
 	bool 'MHL SII8334 support '
 	default n
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 4deaa8c..17987d4 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -19,7 +19,7 @@
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_util.o
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_edid.o
-obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_8334) += mhl_sii8334.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
 
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mhl_msc.c b/drivers/video/msm/mdss/mhl_msc.c
new file mode 100644
index 0000000..94f6d2b
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_msc.c
@@ -0,0 +1,489 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mhl_8334.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include "mhl_msc.h"
+
+static struct mhl_tx_ctrl *mhl_ctrl;
+static DEFINE_MUTEX(msc_send_workqueue_mutex);
+
+const char *devcap_reg_name[] = {
+	"DEV_STATE       ",
+	"MHL_VERSION     ",
+	"DEV_CAT         ",
+	"ADOPTER_ID_H    ",
+	"ADOPTER_ID_L    ",
+	"VID_LINK_MODE   ",
+	"AUD_LINK_MODE   ",
+	"VIDEO_TYPE      ",
+	"LOG_DEV_MAP     ",
+	"BANDWIDTH       ",
+	"FEATURE_FLAG    ",
+	"DEVICE_ID_H     ",
+	"DEVICE_ID_L     ",
+	"SCRATCHPAD_SIZE ",
+	"INT_STAT_SIZE   ",
+	"Reserved        ",
+};
+
+static void mhl_print_devcap(u8 offset, u8 devcap)
+{
+	switch (offset) {
+	case DEVCAP_OFFSET_DEV_CAT:
+		pr_debug("DCAP: %02X %s: %02X DEV_TYPE=%X POW=%s\n",
+			offset, devcap_reg_name[offset], devcap,
+			devcap & 0x0F, (devcap & 0x10) ? "y" : "n");
+		break;
+	case DEVCAP_OFFSET_FEATURE_FLAG:
+		pr_debug("DCAP: %02X %s: %02X RCP=%s RAP=%s SP=%s\n",
+			offset, devcap_reg_name[offset], devcap,
+			(devcap & 0x01) ? "y" : "n",
+			(devcap & 0x02) ? "y" : "n",
+			(devcap & 0x04) ? "y" : "n");
+		break;
+	default:
+		pr_debug("DCAP: %02X %s: %02X\n",
+			offset, devcap_reg_name[offset], devcap);
+		break;
+	}
+}
+
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
+{
+	if (ctrl)
+		mhl_ctrl = ctrl;
+}
+
+void mhl_msc_send_work(struct work_struct *work)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(work, struct mhl_tx_ctrl, mhl_msc_send_work);
+	struct msc_cmd_envelope *cmd_env;
+	int ret;
+	/*
+	 * Dequeue commands one at a time
+	 * and send them out
+	 */
+	mutex_lock(&msc_send_workqueue_mutex);
+	while (!list_empty(&mhl_ctrl->list_cmd)) {
+		cmd_env = list_first_entry(&mhl_ctrl->list_cmd,
+					   struct msc_cmd_envelope,
+					   msc_queue_envelope);
+		list_del(&cmd_env->msc_queue_envelope);
+		mutex_unlock(&msc_send_workqueue_mutex);
+
+		ret = mhl_send_msc_command(mhl_ctrl, &cmd_env->msc_cmd_msg);
+		if (ret == -EAGAIN) {
+			int retry = 2;
+			while (retry--) {
+				ret = mhl_send_msc_command(
+					mhl_ctrl,
+					&cmd_env->msc_cmd_msg);
+				if (ret != -EAGAIN)
+					break;
+			}
+		}
+		if (ret == -EAGAIN)
+			pr_err("%s: send_msc_command retries exhausted\n", __func__);
+
+		vfree(cmd_env);
+		mutex_lock(&msc_send_workqueue_mutex);
+	}
+	mutex_unlock(&msc_send_workqueue_mutex);
+}
+
+int mhl_queue_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			  struct msc_command_struct *req,
+			  int priority_send)
+{
+	struct msc_cmd_envelope *cmd_env;
+
+	mutex_lock(&msc_send_workqueue_mutex);
+	cmd_env = vmalloc(sizeof(struct msc_cmd_envelope));
+	if (!cmd_env) {
+		pr_err("%s: out of memory!\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(&cmd_env->msc_cmd_msg, req,
+	       sizeof(struct msc_command_struct));
+
+	if (priority_send)
+		list_add(&cmd_env->msc_queue_envelope,
+			 &mhl_ctrl->list_cmd);
+	else
+		list_add_tail(&cmd_env->msc_queue_envelope,
+			      &mhl_ctrl->list_cmd);
+	mutex_unlock(&msc_send_workqueue_mutex);
+	queue_work(mhl_ctrl->msc_send_workqueue, &mhl_ctrl->mhl_msc_send_work);
+
+	return 0;
+}
+
+static int mhl_update_devcap(struct mhl_tx_ctrl *mhl_ctrl,
+	int offset, u8 devcap)
+{
+	if (!mhl_ctrl)
+		return -EFAULT;
+	if (offset < 0 || offset > 15)
+		return -EFAULT;
+	mhl_ctrl->devcap[offset] = devcap;
+	mhl_print_devcap(offset, mhl_ctrl->devcap[offset]);
+
+	return 0;
+}
+
+
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req)
+{
+	switch (req->command) {
+	case MHL_WRITE_STAT:
+		if (req->offset == MHL_STATUS_REG_LINK_MODE) {
+			if (req->payload.data[0]
+			    & MHL_STATUS_PATH_ENABLED)
+				/* Enable TMDS output */
+				mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+			else
+				/* Disable TMDS output */
+				mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+		}
+		break;
+	case MHL_READ_DEVCAP:
+		mhl_update_devcap(mhl_ctrl,
+			req->offset, req->retval);
+		mhl_ctrl->devcap_state |= BIT(req->offset);
+		switch (req->offset) {
+		case MHL_DEV_CATEGORY_OFFSET:
+			if (req->retval & MHL_DEV_CATEGORY_POW_BIT)
+				pr_debug("%s: devcap pow bit set\n",
+					 __func__);
+			else
+				pr_debug("%s: devcap pow bit unset\n",
+					 __func__);
+			break;
+		case DEVCAP_OFFSET_MHL_VERSION:
+		case DEVCAP_OFFSET_INT_STAT_SIZE:
+			break;
+		}
+
+		break;
+	}
+	return 0;
+}
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 mask)
+{
+	struct msc_command_struct req;
+	req.command = MHL_SET_INT;
+	req.offset = offset;
+	req.payload.data[0] = mask;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value)
+{
+	struct msc_command_struct req;
+	req.command = MHL_WRITE_STAT;
+	req.offset = offset;
+	req.payload.data[0] = value;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+/*
+ * Certain MSC msgs such as RCPK, RCPE and RAPK
+ * must be transmitted at high priority because
+ * they have to be sent within 1000ms of receiving
+ * an RCP/RAP. Such msgs are therefore added to the
+ * head of the msc cmd queue.
+ */
+static int mhl_msc_send_prior_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+				      u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
+}
+
+
+int mhl_msc_read_devcap(struct mhl_tx_ctrl *mhl_ctrl, u8 offset)
+{
+	struct msc_command_struct req;
+	if (offset < 0 || offset > 15)
+		return -EFAULT;
+	req.command = MHL_READ_DEVCAP;
+	req.offset = offset;
+	req.payload.data[0] = 0;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+int mhl_msc_read_devcap_all(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int offset;
+	int ret;
+
+	for (offset = 0; offset < DEVCAP_SIZE; offset++) {
+		ret = mhl_msc_read_devcap(mhl_ctrl, offset);
+		if (ret == -EBUSY)
+			pr_err("%s: queue busy!\n", __func__);
+	}
+	return ret;
+}
+
+
+static void mhl_handle_input(struct mhl_tx_ctrl *mhl_ctrl,
+			     u8 key_code, u16 input_key_code)
+{
+	int key_press = (key_code & 0x80) == 0;
+
+	pr_debug("%s: send key events[%x][%d]\n",
+		 __func__, key_code, key_press);
+	input_report_key(mhl_ctrl->input, input_key_code, key_press);
+	input_sync(mhl_ctrl->input);
+}
+
+
+
+int mhl_rcp_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 key_code)
+{
+	u8 index = key_code & 0x7f;
+	u16 input_key_code;
+
+	if (!mhl_ctrl->rcp_key_code_tbl) {
+		pr_err("%s: RCP Key Code Table not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	input_key_code = mhl_ctrl->rcp_key_code_tbl[index];
+
+	if ((index < mhl_ctrl->rcp_key_code_tbl_len) &&
+	    (input_key_code > 0)) {
+		/* prior send rcpk */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+
+		if (mhl_ctrl->input)
+			mhl_handle_input(mhl_ctrl, key_code, input_key_code);
+	} else {
+		/* prior send rcpe */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPE,
+			MHL_RCPE_INEFFECTIVE_KEY_CODE);
+
+		/* send rcpk after rcpe send */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+	}
+	return 0;
+}
+
+
+static int mhl_rap_action(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
+{
+	switch (action_code) {
+	case MHL_RAP_CONTENT_ON:
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+		break;
+	case MHL_RAP_CONTENT_OFF:
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int mhl_rap_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
+{
+	u8 error_code;
+
+	switch (action_code) {
+	case MHL_RAP_POLL:
+		if (mhl_ctrl->tmds_enabled())
+			error_code = MHL_RAPK_NO_ERROR;
+		else
+			error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
+		break;
+	case MHL_RAP_CONTENT_ON:
+	case MHL_RAP_CONTENT_OFF:
+		mhl_rap_action(mhl_ctrl, action_code);
+		error_code = MHL_RAPK_NO_ERROR;
+		break;
+	default:
+		error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE;
+		break;
+	}
+	/* prior send rapk */
+	return mhl_msc_send_prior_msc_msg(
+		mhl_ctrl,
+		MHL_MSC_MSG_RAPK,
+		error_code);
+}
+
+
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data)
+{
+	int rc = 0;
+	switch (sub_cmd) {
+	case MHL_MSC_MSG_RCP:
+		pr_debug("MHL: receive RCP(0x%02x)\n", cmd_data);
+		rc = mhl_rcp_recv(mhl_ctrl, cmd_data);
+		break;
+	case MHL_MSC_MSG_RCPK:
+		pr_debug("MHL: receive RCPK(0x%02x)\n", cmd_data);
+		break;
+	case MHL_MSC_MSG_RCPE:
+		pr_debug("MHL: receive RCPE(0x%02x)\n", cmd_data);
+		break;
+	case MHL_MSC_MSG_RAP:
+		pr_debug("MHL: receive RAP(0x%02x)\n", cmd_data);
+		rc = mhl_rap_recv(mhl_ctrl, cmd_data);
+		break;
+	case MHL_MSC_MSG_RAPK:
+		pr_debug("MHL: receive RAPK(0x%02x)\n", cmd_data);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 set_int)
+{
+	if (offset >= 2)
+		return -EFAULT;
+
+	switch (offset) {
+	case 0:
+		if (set_int & MHL_INT_DCAP_CHG) {
+			/* peer dcap has changed */
+			mhl_ctrl->devcap_state = 0;
+			mhl_msc_read_devcap_all(mhl_ctrl);
+		}
+		if (set_int & MHL_INT_DSCR_CHG)
+			pr_debug("%s: dscr chg\n", __func__);
+		if (set_int & MHL_INT_REQ_WRT) {
+			/* SET_INT: GRT_WRT */
+			mhl_msc_send_set_int(
+				mhl_ctrl,
+				MHL_RCHANGE_INT,
+				MHL_INT_GRT_WRT);
+		}
+		if (set_int & MHL_INT_GRT_WRT)
+			pr_debug("%s: recvd req to permit/grant write",
+				 __func__);
+		break;
+	case 1:
+		if (set_int & MHL_INT_EDID_CHG) {
+			/* peer EDID has changed
+			 * toggle HPD to read EDID
+			 */
+			pr_debug("%s: EDID CHG\n", __func__);
+			mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+			msleep(110);
+			mhl_drive_hpd(mhl_ctrl, HPD_UP);
+		}
+	}
+	return 0;
+}
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value)
+{
+	if (offset >= 2)
+		return -EFAULT;
+
+	switch (offset) {
+	case 0:
+		/*
+		 * DCAP_RDY bit of the connected
+		 * device has changed
+		 */
+		if (((value ^ mhl_ctrl->devcap_state) &
+		     MHL_STATUS_DCAP_RDY)) {
+			if (value & MHL_STATUS_DCAP_RDY) {
+				mhl_ctrl->devcap_state = 0;
+				mhl_msc_read_devcap_all(mhl_ctrl);
+			} else {
+				/*
+				 * peer devcap is no longer ready;
+				 * keep the old devcap state
+				 */
+				pr_debug("%s: DCAP RDY bit cleared\n",
+					 __func__);
+			}
+		}
+		break;
+	case 1:
+		/*
+		 * PATH_ENABLED bit of the connected
+		 * device has changed
+		 */
+		if ((value ^ mhl_ctrl->path_en_state)
+		    & MHL_STATUS_PATH_ENABLED) {
+			if (value & MHL_STATUS_PATH_ENABLED) {
+				if (mhl_ctrl->tmds_enabled() &&
+				    (mhl_ctrl->devcap[offset] &
+				     MHL_FEATURE_RAP_SUPPORT)) {
+					mhl_msc_send_msc_msg(
+						mhl_ctrl,
+						MHL_MSC_MSG_RAP,
+						MHL_RAP_CONTENT_ON);
+				}
+				mhl_ctrl->path_en_state
+					|= (MHL_STATUS_PATH_ENABLED |
+					    MHL_STATUS_CLK_MODE_NORMAL);
+				mhl_msc_send_write_stat(
+					mhl_ctrl,
+					MHL_STATUS_REG_LINK_MODE,
+					mhl_ctrl->path_en_state);
+			} else {
+				mhl_ctrl->path_en_state
+					&= ~(MHL_STATUS_PATH_ENABLED |
+					     MHL_STATUS_CLK_MODE_NORMAL);
+				mhl_msc_send_write_stat(
+					mhl_ctrl,
+					MHL_STATUS_REG_LINK_MODE,
+					mhl_ctrl->path_en_state);
+			}
+		}
+		break;
+	}
+	mhl_ctrl->path_en_state = value;
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mhl_msc.h b/drivers/video/msm/mdss/mhl_msc.h
new file mode 100644
index 0000000..9a7b3d6
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_msc.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MHL_MSC_H__
+#define __MHL_MSC_H__
+#include <linux/mhl_8334.h>
+
+#define MAX_RCP_KEYS_SUPPORTED 256
+
+#define MSC_NORMAL_SEND 0
+#define MSC_PRIORITY_SEND 1
+
+#define TMDS_ENABLE 1
+#define TMDS_DISABLE 0
+
+/******************************************************************/
+/* the below APIs are implemented by the MSC functionality */
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req);
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 mask);
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value);
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data);
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl, u8 offset, u8 set_int);
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value);
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data);
+void mhl_msc_send_work(struct work_struct *work);
+
+/******************************************************************/
+/* Tx should implement these APIs */
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req);
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state);
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *ctrl, uint8_t on);
+/******************************************************************/
+/* MHL driver registers ctrl with MSC */
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl);
+
+#endif /* __MHL_MSC_H__ */
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index f3be983..7baeef5 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,8 @@
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
 #include <linux/usb/msm_hsusb.h>
 #include <linux/mhl_8334.h>
 
@@ -27,6 +29,7 @@
 #include "mdss.h"
 #include "mdss_panel.h"
 #include "mdss_io_util.h"
+#include "mhl_msc.h"
 
 #define MHL_DRIVER_NAME "sii8334"
 #define COMPATIBLE_NAME "qcom,mhl-sii8334"
@@ -34,39 +37,142 @@
 
 #define pr_debug_intr(...) pr_debug("\n")
 
-enum mhl_gpio_type {
-	MHL_TX_RESET_GPIO,
-	MHL_TX_INTR_GPIO,
-	MHL_TX_PMIC_PWR_GPIO,
-	MHL_TX_MAX_GPIO,
-};
+#define MSC_START_BIT_MSC_CMD		(0x01 << 0)
+#define MSC_START_BIT_VS_CMD		(0x01 << 1)
+#define MSC_START_BIT_READ_REG		(0x01 << 2)
+#define MSC_START_BIT_WRITE_REG		(0x01 << 3)
+#define MSC_START_BIT_WRITE_BURST	(0x01 << 4)
 
-enum mhl_vreg_type {
-	MHL_TX_3V_VREG,
-	MHL_TX_MAX_VREG,
-};
-
-struct mhl_tx_platform_data {
-	/* Data filled from device tree nodes */
-	struct dss_gpio *gpios[MHL_TX_MAX_GPIO];
-	struct dss_vreg *vregs[MHL_TX_MAX_VREG];
-	int irq;
-};
-
-struct mhl_tx_ctrl {
-	struct platform_device *pdev;
-	struct mhl_tx_platform_data *pdata;
-	struct i2c_client *i2c_handle;
-	uint8_t cur_state;
-	uint8_t chip_rev_id;
-	int mhl_mode;
-	struct completion rgnd_done;
-	void (*notify_usb_online)(int online);
-	struct usb_ext_notification *mhl_info;
-	bool disc_enabled;
-	struct power_supply mhl_psy;
-	bool vbus_active;
-	int current_val;
+/* supported RCP key code */
+u16 support_rcp_key_code_tbl[] = {
+	KEY_ENTER,		/* 0x00 Select */
+	KEY_UP,			/* 0x01 Up */
+	KEY_DOWN,		/* 0x02 Down */
+	KEY_LEFT,		/* 0x03 Left */
+	KEY_RIGHT,		/* 0x04 Right */
+	KEY_UNKNOWN,		/* 0x05 Right-up */
+	KEY_UNKNOWN,		/* 0x06 Right-down */
+	KEY_UNKNOWN,		/* 0x07 Left-up */
+	KEY_UNKNOWN,		/* 0x08 Left-down */
+	KEY_MENU,		/* 0x09 Root Menu */
+	KEY_OPTION,		/* 0x0A Setup Menu */
+	KEY_UNKNOWN,		/* 0x0B Contents Menu */
+	KEY_UNKNOWN,		/* 0x0C Favorite Menu */
+	KEY_EXIT,		/* 0x0D Exit */
+	KEY_RESERVED,		/* 0x0E */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x1F */
+	KEY_NUMERIC_0,		/* 0x20 NUMERIC_0 */
+	KEY_NUMERIC_1,		/* 0x21 NUMERIC_1 */
+	KEY_NUMERIC_2,		/* 0x22 NUMERIC_2 */
+	KEY_NUMERIC_3,		/* 0x23 NUMERIC_3 */
+	KEY_NUMERIC_4,		/* 0x24 NUMERIC_4 */
+	KEY_NUMERIC_5,		/* 0x25 NUMERIC_5 */
+	KEY_NUMERIC_6,		/* 0x26 NUMERIC_6 */
+	KEY_NUMERIC_7,		/* 0x27 NUMERIC_7 */
+	KEY_NUMERIC_8,		/* 0x28 NUMERIC_8 */
+	KEY_NUMERIC_9,		/* 0x29 NUMERIC_9 */
+	KEY_DOT,		/* 0x2A Dot */
+	KEY_ENTER,		/* 0x2B Enter */
+	KEY_ESC,		/* 0x2C Clear */
+	KEY_RESERVED,		/* 0x2D */
+	KEY_RESERVED,		/* 0x2E */
+	KEY_RESERVED,		/* 0x2F */
+	KEY_UNKNOWN,		/* 0x30 Channel Up */
+	KEY_UNKNOWN,		/* 0x31 Channel Down */
+	KEY_UNKNOWN,		/* 0x32 Previous Channel */
+	KEY_UNKNOWN,		/* 0x33 Sound Select */
+	KEY_UNKNOWN,		/* 0x34 Input Select */
+	KEY_UNKNOWN,		/* 0x35 Show Information */
+	KEY_UNKNOWN,		/* 0x36 Help */
+	KEY_UNKNOWN,		/* 0x37 Page Up */
+	KEY_UNKNOWN,		/* 0x38 Page Down */
+	KEY_RESERVED,		/* 0x39 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x3F */
+	KEY_RESERVED,		/* 0x40 */
+	KEY_VOLUMEUP,		/* 0x41 Volume Up */
+	KEY_VOLUMEDOWN,		/* 0x42 Volume Down */
+	KEY_MUTE,		/* 0x43 Mute */
+	KEY_PLAY,		/* 0x44 Play */
+	KEY_STOP,		/* 0x45 Stop */
+	KEY_PAUSE,		/* 0x46 Pause */
+	KEY_UNKNOWN,		/* 0x47 Record */
+	KEY_REWIND,		/* 0x48 Rewind */
+	KEY_FASTFORWARD,	/* 0x49 Fast Forward */
+	KEY_UNKNOWN,		/* 0x4A Eject */
+	KEY_FORWARD,		/* 0x4B Forward */
+	KEY_BACK,		/* 0x4C Backward */
+	KEY_RESERVED,		/* 0x4D */
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x4F */
+	KEY_UNKNOWN,		/* 0x50 Angle */
+	KEY_UNKNOWN,		/* 0x51 Subtitle */
+	KEY_RESERVED,		/* 0x52 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x5F */
+	KEY_PLAYPAUSE,		/* 0x60 Play Function */
+	KEY_PLAYPAUSE,		/* 0x61 Pause_Play Function */
+	KEY_UNKNOWN,		/* 0x62 Record Function */
+	KEY_PAUSE,		/* 0x63 Pause Record Function */
+	KEY_STOP,		/* 0x64 Stop Function  */
+	KEY_MUTE,		/* 0x65 Mute Function */
+	KEY_UNKNOWN,		/* 0x66 Restore Volume Function */
+	KEY_UNKNOWN,		/* 0x67 Tune Function */
+	KEY_UNKNOWN,		/* 0x68 Select Media Function */
+	KEY_RESERVED,		/* 0x69 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x70 */
+	KEY_BLUE,			/* 0x71 F1 */
+	KEY_RED,			/* 0x72 F2 */
+	KEY_GREEN,			/* 0x73 F3 */
+	KEY_YELLOW,			/* 0x74 F4 */
+	KEY_UNKNOWN,		/* 0x75 F5 */
+	KEY_RESERVED,		/* 0x76 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x7D */
+	KEY_VENDOR,		/* Vendor Specific */
+	KEY_RESERVED,		/* 0x7F */
 };
 
 
@@ -84,13 +190,12 @@
 static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
 static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
 			enum mhl_st_type to_mode);
-static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl,
-			  uint8_t to_state);
-
 static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
-	bool mhl_disc_en);
+				  bool mhl_disc_en);
 
-static int mhl_i2c_reg_read(struct i2c_client *client,
+static uint8_t store_tmds_state;
+
+int mhl_i2c_reg_read(struct i2c_client *client,
 			    uint8_t slave_addr_index, uint8_t reg_offset)
 {
 	int rc = -1;
@@ -107,7 +212,7 @@
 }
 
 
-static int mhl_i2c_reg_write(struct i2c_client *client,
+int mhl_i2c_reg_write(struct i2c_client *client,
 			     uint8_t slave_addr_index, uint8_t reg_offset,
 			     uint8_t value)
 {
@@ -115,7 +220,7 @@
 				 reg_offset, &value);
 }
 
-static void mhl_i2c_reg_modify(struct i2c_client *client,
+void mhl_i2c_reg_modify(struct i2c_client *client,
 			       uint8_t slave_addr_index, uint8_t reg_offset,
 			       uint8_t mask, uint8_t val)
 {
@@ -350,11 +455,11 @@
 	MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
 
 	/* Unmask CBUS1 Intrs */
-	MHL_SII_CBUS_WR(0x0009,
+	MHL_SII_REG_NAME_WR(REG_CBUS_INTR_ENABLE,
 		BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
 
 	/* Unmask CBUS2 Intrs */
-	MHL_SII_CBUS_WR(0x001F, BIT2 | BIT3);
+	MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_ENABLE, BIT2 | BIT3);
 
 	for (i = 0; i < 4; i++) {
 		/*
@@ -369,7 +474,6 @@
 		 */
 		MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
 	}
-	return;
 }
 
 static void init_cbus_regs(struct i2c_client *client)
@@ -576,6 +680,7 @@
 
 		/* Force HPD to 0 when not in MHL mode.  */
 		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
 		/*
 		 * Change TMDS termination to high impedance
 		 * on disconnection.
@@ -592,7 +697,31 @@
 	}
 }
 
-static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+uint8_t check_tmds_enabled(void)
+{
+	return store_tmds_state;
+}
+
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	if (on) {
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
+		mhl_drive_hpd(mhl_ctrl, HPD_UP);
+		/*
+		 * Store the TMDS state so it can be
+		 * checked before responding to RAP
+		 * messages; ideally this state should
+		 * be obtained from the HDMI driver.
+		 */
+		store_tmds_state = 1;
+	} else {
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+		store_tmds_state = 0;
+	}
+}
+
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
 {
 	struct i2c_client *client = mhl_ctrl->i2c_handle;
 
@@ -600,15 +729,6 @@
 	if (to_state == HPD_UP) {
 		/*
 		 * Drive HPD to UP state
-		 *
-		 * The below two reg configs combined
-		 * enable TMDS output.
-		 */
-
-		/* Enable TMDS on TMDS_CCTRL */
-		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
-
-		/*
 		 * Set HPD_OUT_OVR_EN = HPD State
 		 * EDID read and Un-force HPD (from low)
 		 * propogate to src let HPD float by clearing
@@ -616,15 +736,9 @@
 		 */
 		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0x00);
 	} else {
-		/*
-		 * Drive HPD to DOWN state
-		 * Disable TMDS Output on REG_TMDS_CCTRL
-		 * Enable/Disable TMDS output (MHL TMDS output only)
-		 */
+		/* Drive HPD to DOWN state */
 		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, BIT4);
-		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
 	}
-	return;
 }
 
 static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
@@ -655,7 +769,19 @@
 	val = MHL_SII_PAGE3_RD(0x10);
 	MHL_SII_PAGE3_WR(0x10, val | BIT0);
 
-	return;
+	/*
+	 * Indicate DCAP_RDY and DCAP_CHG to the
+	 * peer only after the MSM connection has
+	 * been established.
+	 */
+	mhl_msc_send_write_stat(mhl_ctrl,
+				MHL_STATUS_REG_CONNECTED_RDY,
+				MHL_STATUS_DCAP_RDY);
+
+	mhl_msc_send_set_int(mhl_ctrl,
+			     MHL_RCHANGE_INT,
+			     MHL_INT_DCAP_CHG);
+
 }
 
 static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
@@ -668,7 +794,6 @@
 	MHL_SII_PAGE3_WR(0x30, 0xD0);
 
 	switch_mode(mhl_ctrl, POWER_STATE_D3);
-	return;
 }
 
 static int  mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
@@ -676,8 +801,8 @@
 	uint8_t rgnd_imp;
 	struct i2c_client *client = mhl_ctrl->i2c_handle;
 	/* DISC STATUS REG 2 */
-	rgnd_imp = (mhl_i2c_reg_read(client,
-				     TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
+	rgnd_imp = (mhl_i2c_reg_read(client, TX_PAGE_3, 0x001C) &
+		    (BIT1 | BIT0));
 	pr_debug("imp range read=%02X\n", (int)rgnd_imp);
 
 	if (0x02 == rgnd_imp) {
@@ -820,8 +945,6 @@
 		release_usb_switch_open(mhl_ctrl);
 	}
 	MHL_SII_REG_NAME_WR(REG_INTR4, status);
-
-	return;
 }
 
 static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
@@ -861,9 +984,225 @@
 		 */
 		cbus_stat = MHL_SII_CBUS_RD(0x0D);
 		if (BIT6 & cbus_stat)
-			mhl_drive_hpd(mhl_ctrl, HPD_UP);
+			mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
 	}
-	return;
+}
+
+static void mhl_sii_cbus_process_errors(struct i2c_client *client,
+					u8 int_status)
+{
+	u8 abort_reason = 0;
+
+	if (int_status & BIT2) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_DDC_ABORT_REASON);
+		pr_debug("%s: CBUS DDC Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+	}
+	if (int_status & BIT5) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_PRI_XFR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Requestor Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_PRI_XFR_ABORT_REASON, 0xFF);
+	}
+	if (int_status & BIT6) {
+		abort_reason = MHL_SII_REG_NAME_RD(
+			REG_CBUS_PRI_FWR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Responder Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_FWR_ABORT_REASON, 0xFF);
+	}
+}
+
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req)
+{
+	int timeout;
+	u8 start_bit = 0x00;
+	u8 *burst_data;
+	int i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (mhl_ctrl->cur_state != POWER_STATE_D0_MHL) {
+		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
+			 __func__,
+			 mhl_ctrl->cur_state,
+			 MHL_SII_REG_NAME_RD(REG_CBUS_BUS_STATUS));
+		return -EFAULT;
+	}
+
+	if (!req)
+		return -EFAULT;
+
+	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x\n",
+		 __func__,
+		 req->command,
+		 req->offset,
+		 req->payload.data[0],
+		 req->payload.data[1]);
+
+	/* REG_CBUS_PRI_ADDR_CMD = REQ CBUS CMD or OFFSET */
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->offset);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_1ST,
+			    req->payload.data[0]);
+
+	switch (req->command) {
+	case MHL_SET_INT:
+	case MHL_WRITE_STAT:
+		start_bit = MSC_START_BIT_WRITE_REG;
+		break;
+	case MHL_READ_DEVCAP:
+		start_bit = MSC_START_BIT_READ_REG;
+		break;
+	case MHL_GET_STATE:
+	case MHL_GET_VENDOR_ID:
+	case MHL_SET_HPD:
+	case MHL_CLR_HPD:
+	case MHL_GET_SC1_ERRORCODE:
+	case MHL_GET_DDC_ERRORCODE:
+	case MHL_GET_MSC_ERRORCODE:
+	case MHL_GET_SC3_ERRORCODE:
+		start_bit = MSC_START_BIT_MSC_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_MSC_MSG:
+		start_bit = MSC_START_BIT_VS_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_2ND,
+				    req->payload.data[1]);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_WRITE_BURST:
+		start_bit = MSC_START_BIT_WRITE_BURST;
+		MHL_SII_REG_NAME_WR(REG_MSC_WRITE_BURST_LEN, req->length - 1);
+		if (!(req->payload.burst_data)) {
+			pr_err("%s: burst data is null!\n", __func__);
+			goto cbus_send_fail;
+		}
+		burst_data = req->payload.burst_data;
+		for (i = 0; i < req->length; i++, burst_data++)
+			MHL_SII_CBUS_WR(0xC0 + i, *burst_data);
+		break;
+	default:
+		pr_err("%s: unknown command! (%02x)\n",
+		       __func__, req->command);
+		goto cbus_send_fail;
+	}
+
+	INIT_COMPLETION(mhl_ctrl->msc_cmd_done);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_START, start_bit);
+	timeout = wait_for_completion_interruptible_timeout
+		(&mhl_ctrl->msc_cmd_done, msecs_to_jiffies(T_ABORT_NEXT));
+	if (!timeout) {
+		pr_err("%s: cbus_command_send timed out!\n", __func__);
+		goto cbus_send_fail;
+	}
+
+	switch (req->command) {
+	case MHL_READ_DEVCAP:
+		req->retval = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_RD_DATA_1ST);
+		break;
+	case MHL_MSC_MSG:
+		/* check if MSC_MSG NACKed */
+		if (MHL_SII_REG_NAME_RD(REG_MSC_WRITE_BURST_LEN) & BIT6)
+			return -EAGAIN;
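+		/* not NACKed: fall through and report success */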
+	default:
+		req->retval = 0;
+		break;
+	}
+	mhl_msc_command_done(mhl_ctrl, req);
+	pr_debug("%s: msc cmd done\n", __func__);
+	return 0;
+
+cbus_send_fail:
+	return -EFAULT;
+}
+
+static void mhl_cbus_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t regval;
+	int req_done = 0;
+	uint8_t sub_cmd = 0x0;
+	uint8_t cmd_data = 0x0;
+	int msc_msg_recved = 0;
+	int rc = -1;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	regval = MHL_SII_REG_NAME_RD(REG_CBUS_INTR_STATUS);
+	if (regval == 0xff)
+		return;
+
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_INTR_STATUS, regval);
+
+	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);
+
+	/* MSC_MSG (RCP/RAP) */
+	if (regval & BIT3) {
+		sub_cmd = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_CMD);
+		cmd_data = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_DATA);
+		msc_msg_recved = 1;
+	}
+	/* MSC_MT_ABRT/MSC_MR_ABRT/DDC_ABORT */
+	if (regval & (BIT6 | BIT5 | BIT2))
+		mhl_sii_cbus_process_errors(client, regval);
+
+	/* MSC_REQ_DONE */
+	if (regval & BIT4)
+		req_done = 1;
+
+	/* look for interrupts on CBUS_MSC_INT2 */
+	regval  = MHL_SII_REG_NAME_RD(REG_CBUS_MSC_INT2_STATUS);
+
+	/* clear all interrupts */
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_STATUS, regval);
+
+	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
+
+	/* received SET_INT */
+	if (regval & BIT2) {
+		uint8_t intr;
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_0);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_0, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 0, intr);
+
+		pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr);
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_1);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_1, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 1, intr);
+
+		pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_3, 0xFF);
+	}
+
+	/* received WRITE_STAT */
+	if (regval & BIT3) {
+		uint8_t stat;
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_0);
+		mhl_msc_recv_write_stat(mhl_ctrl, 0, stat);
+
+		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_1);
+		mhl_msc_recv_write_stat(mhl_ctrl, 1, stat);
+		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
+
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_0, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_1, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_3, 0xFF);
+	}
+
+	/* received MSC_MSG */
+	if (msc_msg_recved) {
+		/* dispatch the received MSC_MSG (RCP/RAP) to the MSC layer */
+		rc = mhl_msc_recv_msc_msg(mhl_ctrl, sub_cmd, cmd_data);
+		if (rc)
+			pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc);
+	}
+	/* complete last command */
+	if (req_done)
+		complete_all(&mhl_ctrl->msc_cmd_done);
+
 }
 
 static void clear_all_intrs(struct i2c_client *client)
@@ -1002,11 +1341,11 @@
 		mhl_misc_isr(mhl_ctrl);
 
 		/*
-		 * Check for any peer messages for DCAP_CHG etc
+		 * Check for any peer messages for DCAP_CHG, MSC etc
 		 * Dispatch to have the CBUS module working only
 		 * once connected.
-		mhl_cbus_isr(mhl_ctrl);
 		 */
+		mhl_cbus_isr(mhl_ctrl);
 		mhl_hpd_stat_isr(mhl_ctrl);
 	}
 
@@ -1296,6 +1635,59 @@
 	 * such tx specific
 	 */
 	mhl_ctrl->disc_enabled = false;
+	INIT_WORK(&mhl_ctrl->mhl_msc_send_work, mhl_msc_send_work);
+	mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+	INIT_LIST_HEAD(&mhl_ctrl->list_cmd);
+	init_completion(&mhl_ctrl->msc_cmd_done);
+	mhl_ctrl->msc_send_workqueue = create_singlethread_workqueue
+		("mhl_msc_cmd_queue");
+
+	mhl_ctrl->input = input_allocate_device();
+	if (mhl_ctrl->input) {
+		int i;
+		struct input_dev *input = mhl_ctrl->input;
+
+		mhl_ctrl->rcp_key_code_tbl = vmalloc(
+			sizeof(support_rcp_key_code_tbl));
+		if (!mhl_ctrl->rcp_key_code_tbl) {
+			pr_err("%s: failed to alloc rcp keycode table\n",
+			       __func__);
+			return -ENOMEM;
+		}
+
+		memcpy(mhl_ctrl->rcp_key_code_tbl,
+		       &support_rcp_key_code_tbl[0],
+		       sizeof(support_rcp_key_code_tbl));
+		mhl_ctrl->rcp_key_code_tbl_len = ARRAY_SIZE(
+			support_rcp_key_code_tbl);
+
+		input->phys = "cbus/input0";
+		input->id.bustype = BUS_VIRTUAL;
+		input->id.vendor  = 0x1095;
+		input->id.product = 0x8334;
+		input->id.version = 0xA;
+
+		input->name = "mhl-rcp";
+
+		input->keycode = support_rcp_key_code_tbl;
+		input->keycodesize = sizeof(u16);
+		input->keycodemax = ARRAY_SIZE(support_rcp_key_code_tbl);
+
+		input->evbit[0] = EV_KEY;
+		for (i = 0; i < ARRAY_SIZE(support_rcp_key_code_tbl); i++) {
+			if (support_rcp_key_code_tbl[i] > 1)
+				input_set_capability(input, EV_KEY,
+					support_rcp_key_code_tbl[i]);
+		}
+
+		if (input_register_device(input) < 0) {
+			pr_warn("%s: failed to register input device\n",
+				__func__);
+			input_free_device(input);
+			mhl_ctrl->input = NULL;
+		}
+	}
+
 	rc = mhl_tx_chip_init(mhl_ctrl);
 	if (rc) {
 		pr_err("%s: tx chip init failed [%d]\n",
@@ -1315,7 +1707,7 @@
 				 client->dev.driver->name, mhl_ctrl);
 	if (rc) {
 		pr_err("request_threaded_irq failed, status: %d\n",
-			rc);
+		       rc);
 		goto failed_probe;
 	} else {
 		pr_debug("request_threaded_irq succeeded\n");
@@ -1353,6 +1745,8 @@
 		goto failed_probe;
 	}
 	mhl_ctrl->mhl_info = mhl_info;
+	mhl_register_msc(mhl_ctrl);
+	mhl_ctrl->tmds_enabled = check_tmds_enabled;
 	return 0;
 failed_probe:
 	mhl_gpio_config(mhl_ctrl, 0);
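The probe path above registers an "mhl-rcp" input device and caches a copy of
the RCP key code table; the actual RCP dispatch lives in the MSC core and is
not part of this hunk. A hedged sketch of how a received key code could be
mapped through that table onto input events (example_report_rcp_key() is a
hypothetical helper, and the press/release sequencing is simplified):

	#include <linux/input.h>
	#include <linux/mhl_8334.h>

	/* hypothetical helper, not part of the patch */
	static void example_report_rcp_key(struct mhl_tx_ctrl *mhl_ctrl, u8 rcp_code)
	{
		u16 key;

		if (!mhl_ctrl->input)
			return;

		/* the key code table is indexed by the low 7 bits of the RCP code */
		key = mhl_ctrl->rcp_key_code_tbl[rcp_code & 0x7f];
		if (key == KEY_RESERVED || key == KEY_UNKNOWN)
			return;

		/* report a press immediately followed by a release */
		input_report_key(mhl_ctrl->input, key, 1);
		input_sync(mhl_ctrl->input);
		input_report_key(mhl_ctrl->input, key, 0);
		input_sync(mhl_ctrl->input);
	}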
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 0bdacffa..b167b44 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,7 +30,11 @@
 #define CORESIGHT_COMPIDR2	(0xFF8)
 #define CORESIGHT_COMPIDR3	(0xFFC)
 
+#define ETM_ARCH_V1_0		(0x00)
+#define ETM_ARCH_V1_2		(0x02)
 #define ETM_ARCH_V3_3		(0x23)
+#define ETM_ARCH_V3_5		(0x25)
+#define PFT_ARCH_MAJOR		(0x30)
 #define PFT_ARCH_V1_1		(0x31)
 
 enum coresight_clk_rate {
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index d3597dc..cb74b73 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <mach/board.h>
 #include <linux/mhl_devcap.h>
+#include <linux/power_supply.h>
 #include <linux/mhl_defs.h>
 
 #define MHL_DEVICE_NAME "sii8334"
@@ -96,6 +97,65 @@
 	struct msc_command_struct* (*msc_command_get_work) (void);
 };
 
+#ifdef CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334
+enum mhl_gpio_type {
+	MHL_TX_RESET_GPIO,
+	MHL_TX_INTR_GPIO,
+	MHL_TX_PMIC_PWR_GPIO,
+	MHL_TX_MAX_GPIO,
+};
+
+enum mhl_vreg_type {
+	MHL_TX_3V_VREG,
+	MHL_TX_MAX_VREG,
+};
+
+
+struct mhl_tx_platform_data {
+	/* Data filled from device tree nodes */
+	struct dss_gpio *gpios[MHL_TX_MAX_GPIO];
+	struct dss_vreg *vregs[MHL_TX_MAX_VREG];
+	int irq;
+};
+
+struct mhl_tx_ctrl {
+	struct platform_device *pdev;
+	struct mhl_tx_platform_data *pdata;
+	struct i2c_client *i2c_handle;
+	uint8_t cur_state;
+	uint8_t chip_rev_id;
+	int mhl_mode;
+	struct completion rgnd_done;
+	void (*notify_usb_online)(int online);
+	struct usb_ext_notification *mhl_info;
+	bool disc_enabled;
+	struct power_supply mhl_psy;
+	bool vbus_active;
+	int current_val;
+	struct completion msc_cmd_done;
+	uint8_t devcap[16];
+	uint8_t devcap_state;
+	uint8_t path_en_state;
+	uint8_t (*tmds_enabled)(void);
+	struct work_struct mhl_msc_send_work;
+	struct list_head list_cmd;
+	struct input_dev *input;
+	struct workqueue_struct *msc_send_workqueue;
+	u16 *rcp_key_code_tbl;
+	size_t rcp_key_code_tbl_len;
+};
+
+int mhl_i2c_reg_read(struct i2c_client *client,
+		     uint8_t slave_addr_index, uint8_t reg_offset);
+int mhl_i2c_reg_write(struct i2c_client *client,
+		      uint8_t slave_addr_index, uint8_t reg_offset,
+		      uint8_t value);
+void mhl_i2c_reg_modify(struct i2c_client *client,
+			uint8_t slave_addr_index, uint8_t reg_offset,
+			uint8_t mask, uint8_t val);
+
+#endif /* CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334 */
+
 enum {
 	TX_PAGE_TPI          = 0x00,
 	TX_PAGE_L0           = 0x01,
@@ -204,6 +264,7 @@
 
 #define REG_TMDS_CSTAT	((TX_PAGE_3 << 16) | 0x0040)
 
+#define REG_CBUS_INTR_STATUS            ((TX_PAGE_CBUS << 16) | 0x0008)
 #define REG_CBUS_INTR_ENABLE            ((TX_PAGE_CBUS << 16) | 0x0009)
 
 #define REG_DDC_ABORT_REASON            ((TX_PAGE_CBUS << 16) | 0x000B)
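The REG_* constants above pack the I2C slave page index into the upper 16 bits
and the register offset into the low byte; the MHL_SII_* accessor macros are
assumed to unpack them before calling mhl_i2c_reg_read()/mhl_i2c_reg_write().
The helpers below are hypothetical equivalents, shown only to document the
encoding:

	/* hypothetical helpers, not part of the patch */
	#define EXAMPLE_REG_PAGE(reg)	((uint8_t)((reg) >> 16))	/* I2C slave page index */
	#define EXAMPLE_REG_OFFSET(reg)	((uint8_t)((reg) & 0xff))	/* offset within the page */

	/*
	 * e.g. REG_CBUS_INTR_STATUS decomposes to page TX_PAGE_CBUS and offset
	 * 0x08, so a raw read would look roughly like:
	 *	mhl_i2c_reg_read(client, EXAMPLE_REG_PAGE(REG_CBUS_INTR_STATUS),
	 *			 EXAMPLE_REG_OFFSET(REG_CBUS_INTR_STATUS));
	 */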
diff --git a/include/linux/mhl_defs.h b/include/linux/mhl_defs.h
index 062bdf9..f23be79 100644
--- a/include/linux/mhl_defs.h
+++ b/include/linux/mhl_defs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -149,6 +149,7 @@
 
 #define MHL_RCPE_NO_ERROR			0x00
 #define MHL_RCPE_UNSUPPORTED_KEY_CODE		0x01
+#define MHL_RCPE_INEFFECTIVE_KEY_CODE		0x01
 #define MHL_RCPE_BUSY				0x02
 
 #define MHL_RAPK_NO_ERROR			0x00
@@ -156,6 +157,8 @@
 #define MHL_RAPK_UNSUPPORTED_ACTION_CODE	0x02
 #define MHL_RAPK_BUSY				0x03
 
+#define T_ABORT_NEXT			(2050)
+
 /* MHL spec related defines*/
 enum {
 	/* Command or Data byte acknowledge */
@@ -196,6 +199,8 @@
 	MHL_GET_SC3_ERRORCODE		= 0x6D,
 };
 
+/* Polling. */
+#define MHL_RAP_POLL			0x00
 /* Turn content streaming ON. */
 #define	MHL_RAP_CONTENT_ON		0x10
 /* Turn content streaming OFF. */
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index d423b26..14492ea 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -75,6 +75,13 @@
 #define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
 
 /**
+ * Flag for clients to force contiguous memory allocation
+ *
+ * Use of this flag is carefully monitored!
+ */
+#define ION_FORCE_CONTIGUOUS (1 << 30)
+
+/**
  * Macro should be used with ion_heap_ids defined above.
  */
 #define ION_HEAP(bit) (1 << (bit))
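A client that needs guaranteed physical contiguity would combine the new flag
with a heap mask built via ION_HEAP(). A minimal sketch (the heap id and the
helper below are assumptions for illustration; the exact ion_alloc() parameter
that carries the flag depends on the kernel version):

	#include <linux/msm_ion.h>

	/* hypothetical helper: build the request for a contiguous MM-heap buffer */
	static void example_ion_request(unsigned int *heap_mask, unsigned int *flags)
	{
		*heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);	/* assumed heap id */
		*flags = ION_FORCE_CONTIGUOUS;			/* force contiguous pages */
	}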