Merge "msm: mdm: Add support for remote MDM image upgrade" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
new file mode 100644
index 0000000..068e256
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
@@ -0,0 +1,49 @@
+* Memory reservations for MSM targets
+
+Large contiguous allocations (generally sizes greater than 64KB) must be
+allocated from a carved out memory pool. The size of the carved out pools
+is based on the sizes drivers need. To properly size the pools, devices
+must specify the size and type of the memory needed. Any driver wanting to
+allocate contiguous memory should indicate this via device tree bindings:
+
+Required parameters:
+- qcom,memory-reservation-type: type of memory to be reserved. This is a
+string defined in arch/arm/mach-msm/memory.c
+- qcom,memory-reservation-size: size of memory to be reserved
+
+Example:
+
+	qcom,a-driver {
+		compatible = "qcom,a-driver";
+		qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+		qcom,memory-reservation-size = <0x400000>; /* size 4MB */
+	};
+
+Under some circumstances, it may be necessary to remove a chunk of memory
+from the kernel completely using memblock remove. Note this is different
+than adjusting the memory tags passed in via the bootloader as the virtual
+range is not affected. Any driver needing to remove a block of memory should
+add the appropriate binding:
+
+Required parameters:
+- qcom,memblock-remove: base and size of block to be removed
+
+	qcom,a-driver {
+		compatible = "qcom,a-driver";
+		/* Remove 4MB at 0x200000 */
+		qcom,memblock-remove = <0x200000 0x400000>;
+	};
+
+In order to ensure memory is only reserved when a driver is actually enabled,
+drivers are required to add EXPORT_COMPAT(<name of compatible string>)
+somewhere in the driver, similar to EXPORT_SYMBOL. For the examples above,
+the driver must add EXPORT_COMPAT("qcom,a-driver"). EXPORT_COMPAT ensures
+that memory is only carved out if the driver is actually enabled; otherwise
+the memory will not be used.
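+
+A minimal sketch, assuming a hypothetical source file for the "qcom,a-driver"
+example above (only the EXPORT_COMPAT() line itself is mandated by this
+binding):
+
+	/* at the end of the hypothetical a-driver source file */
+	EXPORT_COMPAT("qcom,a-driver");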
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
new file mode 100644
index 0000000..7b8642b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
@@ -0,0 +1,21 @@
+Register Trace Buffer (RTB)
+
+The RTB is used to log discrete events in the system in an uncached buffer that
+can be post-processed from RAM dumps. The RTB must reserve memory using
+the msm specific memory reservation bindings (see
+Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties:
+
+- compatible: "qcom,msm-rtb"
+- qcom,memory-reservation-size: size of reserved memory for the RTB buffer
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+
+Example:
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,memory-reservation-type = "EBI1";
+		qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
index 82935ed..93b5144 100644
--- a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -140,12 +140,28 @@
 				corner values supported on MSM8974 for PMIC
 				PM8841 SMPS 2 (VDD_Dig); nominal voltages for
 				these corners are also shown:
-					0 = Retention    (0.5000 V)
-					1 = SVS Krait    (0.7250 V)
-					2 = SVS SOC      (0.8125 V)
-					3 = Normal       (0.9000 V)
-					4 = Turbo        (0.9875 V)
-					5 = Super Turbo  (1.0500 V)
+					0 = None         (don't care)
+					1 = Retention    (0.5000 V)
+					2 = SVS Krait    (0.7250 V)
+					3 = SVS SOC      (0.8125 V)
+					4 = Normal       (0.9000 V)
+					5 = Turbo        (0.9875 V)
+					6 = Super Turbo  (1.0500 V)
+- qcom,init-disallow-bypass:   Specify that bypass mode should not be used for a
+				given LDO regulator.  When in bypass mode, an
+				LDO performs no regulation and acts as a simple
+				switch.  The RPM can utilize this mode for an
+				LDO that is subregulated from an SMPS when it is
+				possible to reduce the SMPS voltage to the
+				desired LDO output level.  Bypass mode may be
+				disallowed if lower LDO output noise is
+				required.  Supported values are:
+					0 = Allow RPM to utilize LDO bypass mode
+						if possible
+					1 = Disallow LDO bypass mode
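+				For example, a hypothetical second-level LDO
+				node would disallow bypass mode with:
+					qcom,init-disallow-bypass = <1>;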
 
 All properties specified within the core regulator framework can also be used in
 second level nodes.  These bindings can be found in:
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
new file mode 100644
index 0000000..2e7f9c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
@@ -0,0 +1,44 @@
+Qualcomm QPNP power-on
+
+qpnp-power-on is a driver that supports the power-on (PON)
+peripheral on Qualcomm PMICs. The supported functionality includes
+reporting the power-on/off reason, power-key press/release detection
+and other PON features. This peripheral is connected to the host
+processor via the SPMI interface.
+
+Required properties:
+- compatible:	Must be "qcom,qpnp-power-on"
+- reg:		Specifies the SPMI address and size for this PON (power-on) peripheral
+- interrupts:	Specifies the interrupt associated with the power-key.
+
+Optional properties:
+- qcom,pon-key-enable:		Enable power-key detection. It enables monitoring
+				of the KPDPWR_N line (connected to the power-key).
+- qcom,pon-key-dbc-delay:	The debounce delay for the power-key interrupt,
+				specified in us. The value ranges from 2 seconds
+				to 1/64 of a second. Possible values are -
+				- 2, 1, 1/2, 1/4, 1/8, 1/16, 1/32, 1/64
+				- Intermediate value is rounded down to the
+				nearest valid value.
+- qcom,pon-key-pull-up:		The initial state of the KPDPWR_N pin
+				(connected to the power-key)
+				0 = No pull-up
+				1 = pull-up enabled
+
+If any of the above optional properties is not defined, the driver will
+continue with the default hardware state.
+
+Example:
+	qcom,power-on@800 {
+		compatible = "qcom,qpnp-power-on";
+		reg = <0x800 0x100>;
+		interrupts = <0x0 0x8 0x1>;
+		qcom,pon-key-enable = <1>;
+		qcom,pon-key-pull-up = <1>;
+		qcom,pon-key-dbc-delay = <15625>;
+	};
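+
+As a worked example of the debounce value used above (derived from the
+property description; an assumption, not additional hardware behavior):
+1/64 of a second is 1000000/64 = 15625 us. An intermediate value such as
+20000 us would be rounded down to 15625 us.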
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 87864fd..f462a1e 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -22,6 +22,16 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0>;
+			interrupt-names = "power-key";
+			qcom,pon-key-enable = <1>;
+			qcom,pon-key-dbc-delay = <15625>;
+			qcom,pon-key-pull-up = <1>;
+		};
+
 		pm8941_gpios {
 			spmi-dev-container;
 			compatible = "qcom,qpnp-pin";
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index f0c635e..91894de 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -109,7 +109,7 @@
 			regulator-name = "8841_s2_corner";
 			qcom,set = <3>;
 			regulator-min-microvolt = <1>;
-			regulator-max-microvolt = <6>;
+			regulator-max-microvolt = <7>;
 			qcom,use-voltage-corner;
 			compatible = "qcom,rpm-regulator-smd";
 			qcom,consumer-supplies = "vdd_dig", "";
@@ -118,7 +118,7 @@
 			regulator-name = "8841_s2_corner_ao";
 			qcom,set = <1>;
 			regulator-min-microvolt = <1>;
-			regulator-max-microvolt = <6>;
+			regulator-max-microvolt = <7>;
 			qcom,use-voltage-corner;
 			compatible = "qcom,rpm-regulator-smd";
 		};
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 813824e..5aea0ed 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -1912,7 +1912,7 @@
 	ul_powerdown_finish();
 	a2_pc_disabled = 0;
 	a2_pc_disabled_wakelock_skipped = 0;
-	disconnect_ack = 0;
+	disconnect_ack = 1;
 
 	/* Cleanup Channel States */
 	mutex_lock(&bam_pdev_mutexlock);
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 930de81..63c89f6 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -3968,6 +3968,20 @@
 	},
 };
 
+struct rcg_clk audio_core_lpaif_pcmoe_clk_src = {
+	.cmd_rcgr_reg = LPAIF_PCMOE_CMD_RCGR,
+	.set_rate = set_rate_mnd,
+	.freq_tbl = ftbl_audio_core_lpaif_clock,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[LPASS_BASE],
+	.c = {
+		.dbg_name = "audio_core_lpaif_pcmoe_clk_src",
+		.ops = &clk_ops_rcg_mnd,
+		VDD_DIG_FMAX_MAP1(LOW, 12290000),
+		CLK_INIT(audio_core_lpaif_pcmoe_clk_src.c),
+	},
+};
+
 static struct branch_clk audio_core_lpaif_codec_spkr_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_OSR_CBCR,
 	.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
@@ -4195,6 +4209,17 @@
 	},
 };
 
+struct branch_clk audio_core_lpaif_pcmoe_clk = {
+	.cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
+	.parent = &audio_core_lpaif_pcmoe_clk_src.c,
+	.base = &virt_bases[LPASS_BASE],
+	.c = {
+		.dbg_name = "audio_core_lpaif_pcmoe_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(audio_core_lpaif_pcmoe_clk.c),
+	},
+};
+
 static struct branch_clk q6ss_ahb_lfabif_clk = {
 	.cbcr_reg = LPASS_Q6SS_AHB_LFABIF_CBCR,
 	.has_sibling = 1,
@@ -4391,6 +4416,7 @@
 	{&audio_core_lpaif_quad_clk_src.c,	LPASS_BASE, 0x0014},
 	{&audio_core_lpaif_pcm0_clk_src.c,	LPASS_BASE, 0x0013},
 	{&audio_core_lpaif_pcm1_clk_src.c,	LPASS_BASE, 0x0012},
+	{&audio_core_lpaif_pcmoe_clk_src.c,	LPASS_BASE, 0x000f},
 	{&audio_core_slimbus_core_clk.c,	LPASS_BASE, 0x003d},
 	{&audio_core_slimbus_lfabif_clk.c,	LPASS_BASE, 0x003e},
 	{&q6ss_xo_clk.c,			LPASS_BASE, 0x002b},
@@ -4794,6 +4820,8 @@
 	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
+	CLK_LOOKUP("core_clk_src", audio_core_lpaif_pcmoe_clk_src.c, ""),
+	CLK_LOOKUP("core_clk", audio_core_lpaif_pcmoe_clk.c, ""),
 
 	CLK_LOOKUP("core_clk",       mss_xo_q6_clk.c, "pil-q6v5-mss"),
 	CLK_LOOKUP("bus_clk",       mss_bus_q6_clk.c, "pil-q6v5-mss"),
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
index 12bf2b7..a32e168 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -29,8 +29,8 @@
  * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
  */
 enum rpm_regulator_voltage_corner {
-	RPM_REGULATOR_CORNER_RETENTION = 1,
-	RPM_REGULATOR_CORNER_NONE = RPM_REGULATOR_CORNER_RETENTION,
+	RPM_REGULATOR_CORNER_NONE = 1,
+	RPM_REGULATOR_CORNER_RETENTION,
 	RPM_REGULATOR_CORNER_SVS_KRAIT,
 	RPM_REGULATOR_CORNER_SVS_SOC,
 	RPM_REGULATOR_CORNER_NORMAL,
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 9110632..2c3d395 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -94,7 +94,9 @@
 int __init socinfo_init(void) __must_check;
 const int read_msm_cpu_type(void);
 const int get_core_count(void);
+const int cpu_is_krait(void);
 const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
 
 static inline int cpu_is_msm7x01(void)
 {
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index ab62c20..924577f 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -2,7 +2,9 @@
 # Makefile for msm-bus driver specific files
 #
 obj-y += msm_bus_core.o msm_bus_fabric.o msm_bus_config.o msm_bus_arb.o
-obj-y += msm_bus_rpm.o msm_bus_bimc.o msm_bus_noc.o
+obj-y += msm_bus_bimc.o msm_bus_noc.o
+obj-$(CONFIG_MSM_RPM) += msm_bus_rpm.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
 obj-$(CONFIG_ARCH_MSM8X60) += msm_bus_board_8660.o
 obj-$(CONFIG_ARCH_MSM8960) += msm_bus_board_8960.o
 obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 823f14d..2072cb1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -1817,44 +1817,45 @@
 		info->node_info->id, info->node_info->priv_id, add_bw);
 
 	binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
-	if (!info->node_info->qport) {
-		MSM_BUS_DBG("No qos ports to update!\n");
-		return;
-	}
 
 	if (info->node_info->num_mports == 0) {
 		MSM_BUS_DBG("BIMC: Skip Master BW\n");
 		goto skip_mas_bw;
 	}
 
+	ports = info->node_info->num_mports;
 	bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
-	ports = INTERLEAVED_VAL(fab_pdata, ports);
 
 	for (i = 0; i < ports; i++) {
-		MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
 		sel_cd->mas[info->node_info->masterp[i]].bw += bw;
 		sel_cd->mas[info->node_info->masterp[i]].hw_id =
 			info->node_info->mas_hw_id;
-		qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
-		qbw.ws = info->node_info->ws;
-		/* Threshold low = 90% of bw */
-		qbw.thl = (90 * bw) / 100;
-		/* Threshold medium = bw */
-		qbw.thm = bw;
-		/* Threshold high = 10% more than bw */
-		qbw.thh = (110 * bw) / 100;
-		/* Check if info is a shared master.
-		 * If it is, mark it dirty
-		 * If it isn't, then set QOS Bandwidth
-		 **/
-		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %ld\n",
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
 			info->node_info->priv_id,
 			sel_cd->mas[info->node_info->masterp[i]].bw);
 		if (info->node_info->hw_sel == MSM_BUS_RPM)
 			sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
-		else
+		else {
+			if (!info->node_info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+			MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
+			qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
+			qbw.ws = info->node_info->ws;
+			/* Threshold low = 90% of bw */
+			qbw.thl = (90 * bw) / 100;
+			/* Threshold medium = bw */
+			qbw.thm = bw;
+			/* Threshold high = 10% more than bw */
+			qbw.thh = (110 * bw) / 100;
+			/* Check if info is a shared master.
+			 * If it is, mark it dirty
+			 * If it isn't, then set QOS Bandwidth
+			 **/
 			msm_bus_bimc_set_qos_bw(binfo,
 				info->node_info->qport[i], &qbw);
+		}
 	}
 
 skip_mas_bw:
@@ -1870,7 +1871,7 @@
 		sel_cd->slv[hop->node_info->slavep[i]].bw += bw;
 		sel_cd->slv[hop->node_info->slavep[i]].hw_id =
 			hop->node_info->slv_hw_id;
-		MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %ld\n",
+		MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n",
 			hop->node_info->priv_id,
 			sel_cd->slv[hop->node_info->slavep[i]].bw);
 		MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n",
@@ -1893,6 +1894,7 @@
 	*fab_pdata, void *hw_data, void **cdata)
 {
 	MSM_BUS_DBG("\nReached BIMC Commit\n");
+	msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index 264afbd..333fe4b 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -123,7 +123,7 @@
 struct msm_bus_node_hw_info {
 	bool dirty;
 	unsigned int hw_id;
-	unsigned long bw;
+	uint64_t bw;
 };
 
 struct msm_bus_hw_algorithm {
@@ -202,6 +202,8 @@
 	int curr;
 };
 
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata);
 int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
 void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
 struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index 5179d2a..2597e27 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -518,14 +518,12 @@
 		return;
 	}
 
-	if (!info->node_info->qport) {
-		MSM_BUS_DBG("NOC: No QoS Ports to update bw\n");
-		return;
+	if (info->node_info->num_mports == 0) {
+		MSM_BUS_DBG("NOC: Skip Master BW\n");
+		goto skip_mas_bw;
 	}
 
 	ports = info->node_info->num_mports;
-	qos_bw.ws = info->node_info->ws;
-
 	bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
 
 	MSM_BUS_DBG("NOC: Update bw for: %d: %ld\n",
@@ -534,26 +532,36 @@
 		sel_cd->mas[info->node_info->masterp[i]].bw += bw;
 		sel_cd->mas[info->node_info->masterp[i]].hw_id =
 			info->node_info->mas_hw_id;
-		qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
-		MSM_BUS_DBG("NOC: Update mas_bw for ID: %d, BW: %ld, QoS: %u\n",
+		MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n",
 			info->node_info->priv_id,
 			sel_cd->mas[info->node_info->masterp[i]].bw,
-			qos_bw.ws);
+			ports);
 		/* Check if info is a shared master.
 		 * If it is, mark it dirty
 		 * If it isn't, then set QOS Bandwidth
 		 **/
 		if (info->node_info->hw_sel == MSM_BUS_RPM)
 			sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
-		else
+		else {
+			if (!info->node_info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+			qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].
+				bw;
+			qos_bw.ws = info->node_info->ws;
 			msm_bus_noc_set_qos_bw(ninfo,
 				info->node_info->qport[i],
 				info->node_info->perm_mode, &qos_bw);
+			MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+				qos_bw.ws);
+		}
 	}
 
+skip_mas_bw:
 	ports = hop->node_info->num_sports;
 	if (ports == 0) {
-		MSM_BUS_ERR("\nDIVIDE BY 0, hop: %d\n",
+		MSM_BUS_DBG("\nDIVIDE BY 0, hop: %d\n",
 			hop->node_info->priv_id);
 		return;
 	}
@@ -562,7 +570,7 @@
 		sel_cd->slv[hop->node_info->slavep[i]].bw += bw;
 		sel_cd->slv[hop->node_info->slavep[i]].hw_id =
 			hop->node_info->slv_hw_id;
-		MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %ld\n",
+		MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n",
 			hop->node_info->priv_id,
 			sel_cd->slv[hop->node_info->slavep[i]].bw);
 		MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n",
@@ -581,6 +589,7 @@
 	*fab_pdata, void *hw_data, void **cdata)
 {
 	MSM_BUS_DBG("\nReached NOC Commit\n");
+	msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
index 4653431..2213132 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
@@ -946,6 +946,12 @@
 	return status;
 }
 
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	return 0;
+}
+
 int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
 	struct msm_bus_hw_algorithm *hw_algo)
 {
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000..88fab96
--- /dev/null
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+	return true;
+}
+
+struct commit_data {
+	struct msm_bus_node_hw_info *mas_arb;
+	struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+	int c;
+	struct commit_data *cd = (struct commit_data *)cdata;
+
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+	for (c = 0; c < nmasters; c++)
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+			"%d: %llu\t", cd->mas_arb[c].hw_id,
+			cd->mas_arb[c].bw);
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+	for (c = 0; c < nslaves; c++) {
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+		"%d: %llu\t", cd->slv_arb[c].hw_id,
+		cd->slv_arb[c].bw);
+	}
+}
+#endif
+
+static int msm_bus_rpm_compare_cdata(
+	struct msm_bus_fabric_registration *fab_pdata,
+	struct commit_data *cd1, struct commit_data *cd2)
+{
+	size_t n;
+	int ret;
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+	ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+	ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Slave Arb Data not equal\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+	struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+	struct msm_rpm_request *rpm_req;
+	int ret = 0, msg_id;
+
+	if (ctx == ACTIVE_CTX)
+		ctx = MSM_RPM_CTX_ACTIVE_SET;
+	else if (ctx == DUAL_CTX)
+		ctx = MSM_RPM_CTX_SLEEP_SET;
+
+	rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+	if (rpm_req == NULL) {
+		MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+		return -ENXIO;
+	}
+
+	if (valid) {
+		ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+			&hw_info->bw, (int)(sizeof(uint64_t)));
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			return ret;
+		}
+
+		MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %d\n", key,
+			hw_info->bw, sizeof(uint64_t));
+	} else {
+		/* Invalidate RPM requests */
+		ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			return ret;
+		}
+	}
+
+	msg_id = msm_rpm_send_request(rpm_req);
+	if (!msg_id) {
+		MSM_BUS_WARN("RPM: No message ID for req\n");
+		return -ENXIO;
+	}
+
+	ret = msm_rpm_wait_for_ack(msg_id);
+	if (ret) {
+		MSM_BUS_WARN("RPM: Ack failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+	*fab_pdata, int ctx, void *rpm_data,
+	struct commit_data *cd, bool valid)
+{
+	int i, status = 0, rsc_type, key;
+
+	MSM_BUS_DBG("Context: %d\n", ctx);
+	rsc_type = RPM_BUS_MASTER_REQ;
+	key = RPM_MASTER_FIELD_BW;
+	for (i = 0; i < fab_pdata->nmasters; i++) {
+		if (cd->mas_arb[i].dirty) {
+			MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+				cd->mas_arb[i].hw_id,
+				cd->mas_arb[i].bw,
+				cd->mas_arb[i].dirty);
+			status = msm_bus_rpm_req(ctx, rsc_type, key,
+				&cd->mas_arb[i], valid);
+			if (status) {
+				MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+					cd->mas_arb[i].hw_id,
+					cd->mas_arb[i].bw);
+				break;
+			} else {
+				cd->mas_arb[i].dirty = false;
+			}
+		}
+	}
+
+	rsc_type = RPM_BUS_SLAVE_REQ;
+	key = RPM_SLAVE_FIELD_BW;
+	for (i = 0; i < fab_pdata->nslaves; i++) {
+		if (cd->slv_arb[i].dirty) {
+			MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+				cd->slv_arb[i].hw_id,
+				cd->slv_arb[i].bw,
+				cd->slv_arb[i].dirty);
+			status = msm_bus_rpm_req(ctx, rsc_type, key,
+				&cd->slv_arb[i], valid);
+			if (status) {
+				MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+					cd->slv_arb[i].hw_id,
+					cd->slv_arb[i].bw);
+				break;
+			} else {
+				cd->slv_arb[i].dirty = false;
+			}
+		}
+	}
+
+	return status;
+}
+
+/**
+* msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+* @fabric: Fabric for which the data should be committed
+**/
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+
+	int ret;
+	bool valid;
+	struct commit_data *dual_cd, *act_cd;
+	void *rpm_data = hw_data;
+
+	MSM_BUS_DBG("\nReached RPM Commit\n");
+	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+	/*
+	 * If the arb data for active set and sleep set is
+	 * different, commit both sets.
+	 * If the arb data for active set and sleep set is
+	 * the same, invalidate the sleep set.
+	 */
+	ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+	if (!ret)
+		/* Invalidate sleep set.*/
+		valid = false;
+	else
+		valid = true;
+
+	ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+		dual_cd, valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, DUAL_CTX);
+
+	valid = true;
+	ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+		valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, ACTIVE_CTX);
+
+	return ret;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	if (!pdata->ahb)
+		pdata->rpm_enabled = 1;
+	return 0;
+}
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index fdff231..a8af9e7 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -68,6 +68,7 @@
 	RPM_REGULATOR_PARAM_QUIET_MODE,
 	RPM_REGULATOR_PARAM_FREQ_REASON,
 	RPM_REGULATOR_PARAM_CORNER,
+	RPM_REGULATOR_PARAM_BYPASS,
 	RPM_REGULATOR_PARAM_MAX,
 };
 
@@ -111,7 +112,8 @@
 	PARAM(HEAD_ROOM,       1,  0,  0,  1, "hr",   0, 0x7FFFFFFF, "qcom,init-head-room"),
 	PARAM(QUIET_MODE,      0,  1,  0,  0, "qm",   0, 2,          "qcom,init-quiet-mode"),
 	PARAM(FREQ_REASON,     0,  1,  0,  1, "resn", 0, 8,          "qcom,init-freq-reason"),
-	PARAM(CORNER,          0,  1,  0,  0, "corn", 0, 5,          "qcom,init-voltage-corner"),
+	PARAM(CORNER,          0,  1,  0,  0, "corn", 0, 6,          "qcom,init-voltage-corner"),
+	PARAM(BYPASS,          1,  0,  0,  0, "bypa", 0, 1,          "qcom,init-disallow-bypass"),
 };
 
 struct rpm_vreg_request {
@@ -440,6 +442,7 @@
 	RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
 	RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
 	RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
 }
 
 static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
@@ -682,7 +685,7 @@
 	 * regulator_set_voltage function to the actual corner values
 	 * sent to the RPM.
 	 */
-	corner = min_uV - RPM_REGULATOR_CORNER_RETENTION;
+	corner = min_uV - RPM_REGULATOR_CORNER_NONE;
 
 	if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
 	    || corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
@@ -716,7 +719,7 @@
 	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
 
 	return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
-		+ RPM_REGULATOR_CORNER_RETENTION;
+		+ RPM_REGULATOR_CORNER_NONE;
 }
 
 static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 281e7b8..817c2dc 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -798,6 +798,11 @@
 	};
 }
 
+const int cpu_is_krait(void)
+{
+	return ((read_cpuid_id() & 0xFF00FC00) == 0x51000400);
+}
+
 const int cpu_is_krait_v1(void)
 {
 	switch (read_cpuid_id()) {
@@ -810,3 +815,22 @@
 		return 0;
 	};
 }
+
+const int cpu_is_krait_v2(void)
+{
+	switch (read_cpuid_id()) {
+	case 0x511F04D0:
+	case 0x511F04D1:
+	case 0x511F04D2:
+	case 0x511F04D3:
+	case 0x511F04D4:
+
+	case 0x510F06F0:
+	case 0x510F06F1:
+	case 0x510F06F2:
+		return 1;
+
+	default:
+		return 0;
+	};
+}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 5798c94..785ba6c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -994,7 +994,6 @@
 			rc = input_register_handler(&dbs_input_handler);
 		mutex_unlock(&dbs_mutex);
 
-		mutex_init(&this_dbs_info->timer_mutex);
 
 		if (!ondemand_powersave_bias_setspeed(
 					this_dbs_info->cur_policy,
@@ -1071,6 +1070,9 @@
 		return -EFAULT;
 	}
 	for_each_possible_cpu(i) {
+		struct cpu_dbs_info_s *this_dbs_info =
+			&per_cpu(od_cpu_dbs_info, i);
+		mutex_init(&this_dbs_info->timer_mutex);
 		INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
 	}
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 43c52f6..b72c847 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -796,133 +796,290 @@
 	return 0;
 }
 
+static void adreno_mark_context_status(struct kgsl_device *device,
+					int recovery_status)
+{
+	struct kgsl_context *context;
+	int next = 0;
+	/*
+	 * Set the reset status of all contexts to
+	 * INNOCENT_CONTEXT_RESET_EXT except for the bad context
+	 * since thats the guilty party, if recovery failed then
+	 * mark all as guilty
+	 */
+	while ((context = idr_get_next(&device->context_idr, &next))) {
+		struct adreno_context *adreno_context = context->devctxt;
+		if (recovery_status) {
+			context->reset_status =
+					KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+			adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+		} else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
+			context->reset_status) {
+			if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
+				CTXT_FLAGS_GPU_HANG_RECOVERED))
+				context->reset_status =
+				KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+			else
+				context->reset_status =
+				KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
+		}
+		next = next + 1;
+	}
+}
+
+static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	struct kgsl_context *context;
+	struct adreno_context *temp_adreno_context;
+	int next = 0;
+
+	while ((context = idr_get_next(&device->context_idr, &next))) {
+		temp_adreno_context = context->devctxt;
+		if (temp_adreno_context->flags & CTXT_FLAGS_GPU_HANG) {
+			kgsl_sharedmem_writel(&device->memstore,
+				KGSL_MEMSTORE_OFFSET(context->id,
+				soptimestamp),
+				rb->timestamp[context->id]);
+			kgsl_sharedmem_writel(&device->memstore,
+				KGSL_MEMSTORE_OFFSET(context->id,
+				eoptimestamp),
+				rb->timestamp[context->id]);
+		}
+		next = next + 1;
+	}
+}
+
+static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data)
+{
+	vfree(rec_data->rb_buffer);
+	vfree(rec_data->bad_rb_buffer);
+}
+
+static int adreno_setup_recovery_data(struct kgsl_device *device,
+					struct adreno_recovery_data *rec_data)
+{
+	int ret = 0;
+	unsigned int ib1_sz, ib2_sz;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	memset(rec_data, 0, sizeof(*rec_data));
+
+	adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz);
+	adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz);
+	if (ib1_sz || ib2_sz)
+		adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1);
+
+	kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+			current_context));
+
+	kgsl_sharedmem_readl(&device->memstore,
+				&rec_data->global_eop,
+				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+				eoptimestamp));
+
+	rec_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!rec_data->rb_buffer) {
+		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+				rb->buffer_desc.size);
+		return -ENOMEM;
+	}
+
+	rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!rec_data->bad_rb_buffer) {
+		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+				rb->buffer_desc.size);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+done:
+	if (ret) {
+		vfree(rec_data->rb_buffer);
+		vfree(rec_data->bad_rb_buffer);
+	}
+	return ret;
+}
+
 static int
-adreno_recover_hang(struct kgsl_device *device)
+_adreno_recover_hang(struct kgsl_device *device,
+			struct adreno_recovery_data *rec_data,
+			bool try_bad_commands)
 {
 	int ret;
-	unsigned int *rb_buffer;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	struct kgsl_context *context;
+	struct adreno_context *adreno_context = NULL;
+	struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+
+	context = idr_find(&device->context_idr, rec_data->context_id);
+	if (context == NULL) {
+		KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
+			rec_data->context_id);
+	} else {
+		adreno_context = context->devctxt;
+		adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+	}
+
+	/* Extract valid contents from rb which can still be executed after
+	 * hang */
+	ret = adreno_ringbuffer_extract(rb, rec_data);
+	if (ret)
+		goto done;
+
+	/* restart device */
+	ret = adreno_stop(device);
+	if (ret) {
+		KGSL_DRV_ERR(device, "Device stop failed in recovery\n");
+		goto done;
+	}
+
+	ret = adreno_start(device, true);
+	if (ret) {
+		KGSL_DRV_ERR(device, "Device start failed in recovery\n");
+		goto done;
+	}
+
+	if (context)
+		kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
+			KGSL_MEMSTORE_GLOBAL);
+
+	/* Do not try the bad commands if recovery has failed bad commands
+	 * once already */
+	if (!try_bad_commands)
+		rec_data->bad_rb_size = 0;
+
+	if (rec_data->bad_rb_size) {
+		int idle_ret;
+		/* submit the bad and good context commands and wait for
+		 * them to pass */
+		adreno_ringbuffer_restore(rb, rec_data->bad_rb_buffer,
+					rec_data->bad_rb_size);
+		idle_ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+		if (idle_ret) {
+			ret = adreno_stop(device);
+			if (ret) {
+				KGSL_DRV_ERR(device,
+				"Device stop failed in recovery\n");
+				goto done;
+			}
+			ret = adreno_start(device, true);
+			if (ret) {
+				KGSL_DRV_ERR(device,
+				"Device start failed in recovery\n");
+				goto done;
+			}
+			ret = idle_ret;
+			KGSL_DRV_ERR(device,
+			"Bad context commands hung in recovery\n");
+		} else {
+			KGSL_DRV_ERR(device,
+			"Bad context commands succeeded in recovery\n");
+			if (adreno_context)
+				adreno_context->flags = (adreno_context->flags &
+					~CTXT_FLAGS_GPU_HANG) |
+					CTXT_FLAGS_GPU_HANG_RECOVERED;
+			adreno_dev->drawctxt_active = last_active_ctx;
+		}
+	}
+	/* If either the bad command sequence failed or we did not play it */
+	if (ret || !rec_data->bad_rb_size) {
+		adreno_ringbuffer_restore(rb, rec_data->rb_buffer,
+				rec_data->rb_size);
+		ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+		if (ret) {
+			/* If we fail here we can try to invalidate another
+			 * context and try recovering again */
+			ret = -EAGAIN;
+			goto done;
+		}
+		/* ringbuffer now has data from the last valid context id,
+		 * so restore the active_ctx to the last valid context */
+		if (rec_data->last_valid_ctx_id) {
+			struct kgsl_context *last_ctx =
+					idr_find(&device->context_idr,
+					rec_data->last_valid_ctx_id);
+			if (last_ctx)
+				adreno_dev->drawctxt_active = last_ctx->devctxt;
+		}
+	}
+done:
+	return ret;
+}
+
+static int
+adreno_recover_hang(struct kgsl_device *device,
+			struct adreno_recovery_data *rec_data)
+{
+	int ret = 0;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 	unsigned int timestamp;
-	unsigned int num_rb_contents;
-	unsigned int reftimestamp;
-	unsigned int enable_ts;
-	unsigned int soptimestamp;
-	unsigned int eoptimestamp;
-	unsigned int context_id;
-	struct kgsl_context *context;
-	struct adreno_context *adreno_context;
-	int next = 0;
 
-	KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n");
-	rb_buffer = vmalloc(rb->buffer_desc.size);
-	if (!rb_buffer) {
-		KGSL_MEM_ERR(device,
-			"Failed to allocate memory for recovery: %x\n",
-			rb->buffer_desc.size);
-		return -ENOMEM;
-	}
-	/* Extract valid contents from rb which can stil be executed after
-	 * hang */
-	ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents);
-	if (ret)
-		goto done;
-	kgsl_sharedmem_readl(&device->memstore, &context_id,
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-					current_context));
-	context = idr_find(&device->context_idr, context_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
-				context_id);
-		context_id = KGSL_MEMSTORE_GLOBAL;
-	}
+	KGSL_DRV_ERR(device,
+	"Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, "
+	"Bad context_id: %u, global_eop: 0x%x\n",
+	rec_data->ib1, rec_data->context_id, rec_data->global_eop);
 
 	timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
 	KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
 
-	kgsl_sharedmem_readl(&device->memstore, &reftimestamp,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					ref_wait_ts));
-	kgsl_sharedmem_readl(&device->memstore, &enable_ts,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					ts_cmp_enable));
-	kgsl_sharedmem_readl(&device->memstore, &soptimestamp,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					soptimestamp));
-	kgsl_sharedmem_readl(&device->memstore, &eoptimestamp,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					eoptimestamp));
-	/* Make sure memory is synchronized before restarting the GPU */
-	mb();
-	KGSL_CTXT_ERR(device,
-		"Context id that caused a GPU hang: %d\n", context_id);
-	/* restart device */
-	ret = adreno_stop(device);
-	if (ret)
-		goto done;
-	ret = adreno_start(device, true);
-	if (ret)
-		goto done;
-	KGSL_DRV_ERR(device, "Device has been restarted after hang\n");
-	/* Restore timestamp states */
-	kgsl_sharedmem_writel(&device->memstore,
-			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp),
-			soptimestamp);
-	kgsl_sharedmem_writel(&device->memstore,
-			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp),
-			eoptimestamp);
+	/* We may need to replay commands multiple times based on whether
+	 * multiple contexts hang the GPU */
+	while (true) {
+		if (!ret)
+			ret = _adreno_recover_hang(device, rec_data, true);
+		else
+			ret = _adreno_recover_hang(device, rec_data, false);
 
-	if (num_rb_contents) {
-		kgsl_sharedmem_writel(&device->memstore,
-			KGSL_MEMSTORE_OFFSET(context_id, ref_wait_ts),
-			reftimestamp);
-		kgsl_sharedmem_writel(&device->memstore,
-			KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable),
-			enable_ts);
-	}
-	/* Make sure all writes are posted before the GPU reads them */
-	wmb();
-	/* Mark the invalid context so no more commands are accepted from
-	 * that context */
-
-	adreno_context = context->devctxt;
-
-	KGSL_CTXT_ERR(device,
-		"Context that caused a GPU hang: %d\n", adreno_context->id);
-
-	adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
-
-	/*
-	 * Set the reset status of all contexts to
-	 * INNOCENT_CONTEXT_RESET_EXT except for the bad context
-	 * since thats the guilty party
-	 */
-	while ((context = idr_get_next(&device->context_idr, &next))) {
-		if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
-			context->reset_status) {
-			if (context->id != context_id)
-				context->reset_status =
-				KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
-			else
-				context->reset_status =
-				KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+		if (-EAGAIN == ret) {
+			/* setup new recovery parameters and retry, this
+			 * means more than 1 contexts are causing hang */
+			adreno_destroy_recovery_data(rec_data);
+			adreno_setup_recovery_data(device, rec_data);
+			KGSL_DRV_ERR(device,
+			"Retry recovery from 3D GPU hang. Recovery parameters: "
+			"IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
+			rec_data->ib1, rec_data->context_id,
+			rec_data->global_eop);
+		} else {
+			break;
 		}
-		next = next + 1;
 	}
 
-	/* Restore valid commands in ringbuffer */
-	adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents);
+	if (ret)
+		goto done;
+
+	/* Restore correct states after recovery */
+	if (adreno_dev->drawctxt_active)
+		device->mmu.hwpagetable =
+			adreno_dev->drawctxt_active->pagetable;
+	else
+		device->mmu.hwpagetable = device->mmu.defaultpagetable;
 	rb->timestamp[KGSL_MEMSTORE_GLOBAL] = timestamp;
-	/* wait for idle */
-	ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+			eoptimestamp),
+			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 done:
-	vfree(rb_buffer);
+	adreno_set_max_ts_for_bad_ctxs(device);
+	adreno_mark_context_status(device, ret);
+	if (!ret)
+		KGSL_DRV_ERR(device, "Recovery succeeded\n");
+	else
+		KGSL_DRV_ERR(device, "Recovery failed\n");
 	return ret;
 }
 
-int adreno_dump_and_recover(struct kgsl_device *device)
+int
+adreno_dump_and_recover(struct kgsl_device *device)
 {
 	int result = -ETIMEDOUT;
+	struct adreno_recovery_data rec_data;
 
 	if (device->state == KGSL_STATE_HUNG)
 		goto done;
@@ -937,7 +1094,8 @@
 		INIT_COMPLETION(device->recovery_gate);
 		/* Detected a hang */
 
-
+		/* Get the recovery data as soon as hang is detected */
+		result = adreno_setup_recovery_data(device, &rec_data);
 		/*
 		 * Trigger an automatic dump of the state to
 		 * the console
@@ -950,11 +1108,14 @@
 		 */
 		kgsl_device_snapshot(device, 1);
 
-		result = adreno_recover_hang(device);
-		if (result)
+		result = adreno_recover_hang(device, &rec_data);
+		adreno_destroy_recovery_data(&rec_data);
+		if (result) {
 			kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
-		else
+		} else {
 			kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+			mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+		}
 		complete_all(&device->recovery_gate);
 	}
 done:
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d1899d8..57f4859 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -33,7 +33,6 @@
 #define KGSL_CMD_FLAGS_NONE             0x00000000
 #define KGSL_CMD_FLAGS_PMODE		0x00000001
 #define KGSL_CMD_FLAGS_NO_TS_CMP	0x00000002
-#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD	0x00000004
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
@@ -115,6 +114,30 @@
 	unsigned int (*busy_cycles)(struct adreno_device *);
 };
 
+/*
+ * struct adreno_recovery_data - Structure that contains all information to
+ * perform gpu recovery from hangs
+ * @ib1 - IB1 that the GPU was executing when hang happened
+ * @context_id - Context which caused the hang
+ * @global_eop - eoptimestamp at time of hang
+ * @rb_buffer - Buffer that holds the commands from good contexts
+ * @rb_size - Number of valid dwords in rb_buffer
+ * @bad_rb_buffer - Buffer that holds commands from the hanging context
+ * bad_rb_size - Number of valid dwords in bad_rb_buffer
+ * @last_valid_ctx_id - The last context from which commands were placed in
+ * ringbuffer before the GPU hung
+ */
+struct adreno_recovery_data {
+	unsigned int ib1;
+	unsigned int context_id;
+	unsigned int global_eop;
+	unsigned int *rb_buffer;
+	unsigned int rb_size;
+	unsigned int *bad_rb_buffer;
+	unsigned int bad_rb_size;
+	unsigned int last_valid_ctx_id;
+};
+
 extern struct adreno_gpudev adreno_a2xx_gpudev;
 extern struct adreno_gpudev adreno_a3xx_gpudev;
 
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 3eb1aba..5b14a69 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -44,6 +44,8 @@
 #define CTXT_FLAGS_TRASHSTATE		0x00020000
 /* per context timestamps enabled */
 #define CTXT_FLAGS_PER_CONTEXT_TS	0x00040000
+/* Context has caused a GPU hang and recovered properly */
+#define CTXT_FLAGS_GPU_HANG_RECOVERED	0x00008000
 
 struct kgsl_device;
 struct adreno_device;
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 7bb65ca..3cc4bcf 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -699,6 +699,10 @@
 
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
+	struct kgsl_memdesc **reg_map;
+	void *reg_map_array;
+	int num_iommu_units = 0;
+
 	mb();
 
 	if (adreno_is_a2xx(adreno_dev))
@@ -780,6 +784,10 @@
 	/* extract the latest ib commands from the buffer */
 	ib_list.count = 0;
 	i = 0;
+	/* get the register mapped array in case we are using IOMMU */
+	num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
+							&reg_map_array);
+	reg_map = reg_map_array;
 	for (read_idx = 0; read_idx < num_item; ) {
 		uint32_t this_cmd = rb_copy[read_idx++];
 		if (adreno_cmd_is_ib(this_cmd)) {
@@ -792,7 +800,10 @@
 					ib_list.offsets[i],
 					ib_list.bases[i],
 					ib_list.sizes[i], 0);
-		} else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
+		} else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1) ||
+			(num_iommu_units && this_cmd == (reg_map[0]->gpuaddr +
+			(KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
+			KGSL_IOMMU_TTBR0))) {
 
 			KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
 				"pagetable base: %x\n",
@@ -808,6 +819,8 @@
 				cur_pt_base);
 		}
 	}
+	if (num_iommu_units)
+		kfree(reg_map_array);
 
 	/* Restore cur_pt_base back to the pt_base of
 	   the process in whose context the GPU hung */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index afcceee..d54ce6b 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -493,7 +493,8 @@
 	*/
 	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
 	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
-	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
+	/* 2 dwords to store the start of command sequence */
+	total_sizedwords += 2;
 
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
@@ -521,10 +522,9 @@
 	rcmd_gpu = rb->buffer_desc.gpuaddr
 		+ sizeof(uint)*(rb->wptr-total_sizedwords);
 
-	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
-	}
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+
 	if (flags & KGSL_CMD_FLAGS_PMODE) {
 		/* disable protected mode error checking */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -926,8 +926,7 @@
 	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
 
 	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
-					drawctxt,
-					KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
+					drawctxt, 0,
 					&link[0], (cmds - link));
 
 	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
@@ -943,187 +942,347 @@
 	 */
 	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
 #endif
+	/* If context hung and recovered then return error so that the
+	 * application may handle it */
+	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED)
+		return -EDEADLK;
+	else
+		return 0;
 
-	return 0;
 }
 
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
-				unsigned int *temp_rb_buffer,
-				int *rb_size)
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+					unsigned int *ptr,
+					bool inc)
 {
-	struct kgsl_device *device = rb->device;
-	unsigned int rb_rptr;
-	unsigned int retired_timestamp;
-	unsigned int temp_idx = 0;
-	unsigned int value;
+	int status = -EINVAL;
 	unsigned int val1;
-	unsigned int val2;
-	unsigned int val3;
-	unsigned int copy_rb_contents = 0;
-	struct kgsl_context *context;
-	unsigned int context_id;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int start_ptr = *ptr;
 
-	GSL_RB_GET_READPTR(rb, &rb->rptr);
-
-	/* current_context is the context that is presently active in the
-	 * GPU, i.e the context in which the hang is caused */
-	kgsl_sharedmem_readl(&device->memstore, &context_id,
-		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-		current_context));
-	KGSL_DRV_ERR(device, "Last context id: %d\n", context_id);
-	context = idr_find(&device->context_idr, context_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(device,
-			"GPU recovery from hang not possible because last"
-			" context id is invalid.\n");
-		return -EINVAL;
-	}
-	retired_timestamp = kgsl_readtimestamp(device, context,
-					       KGSL_TIMESTAMP_RETIRED);
-	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
-			retired_timestamp);
-	/*
-	 * We need to go back in history by 4 dwords from the current location
-	 * of read pointer as 4 dwords are read to match the end of a command.
-	 * Also, take care of wrap around when moving back
-	 */
-	if (rb->rptr >= 4)
-		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
-	else
-		rb_rptr = rb->buffer_desc.size -
-			((4 - rb->rptr) * sizeof(unsigned int));
-	/* Read the rb contents going backwards to locate end of last
-	 * sucessfully executed command */
-	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
-		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
-		if (value == retired_timestamp) {
-			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
-			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
-			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
-			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
-			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
-			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
-			/* match the pattern found at the end of a command */
-			if ((val1 == 2 &&
-				val2 == cp_type3_packet(CP_INTERRUPT, 1)
-				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
-				(val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
-				&& val2 == CACHE_FLUSH_TS &&
-				val3 == (rb->device->memstore.gpuaddr +
-				KGSL_MEMSTORE_OFFSET(context_id,
-					eoptimestamp)))) {
-				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
-				KGSL_DRV_ERR(device,
-					"Found end of last executed "
-					"command at offset: %x\n",
-					rb_rptr / sizeof(unsigned int));
+	while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+		if (inc)
+			start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+									size);
+		else
+			start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+									size);
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+		if (KGSL_CMD_IDENTIFIER == val1) {
+			if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+				start_ptr = adreno_ringbuffer_dec_wrapped(
+							start_ptr, size);
+			*ptr = start_ptr;
+			status = 0;
+			break;
-			} else {
-				if (rb_rptr < (3 * sizeof(unsigned int)))
-					rb_rptr = rb->buffer_desc.size -
-						(3 * sizeof(unsigned int))
-							+ rb_rptr;
-				else
-					rb_rptr -= (3 * sizeof(unsigned int));
+		}
+	}
+	return status;
+}
+
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+					unsigned int *rb_rptr,
+					unsigned int global_eop,
+					bool inc)
+{
+	int status = -EINVAL;
+	unsigned int temp_rb_rptr = *rb_rptr;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int val[3];
+	int i = 0;
+	bool check = false;
+
+	if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+		return status;
+
+	do {
+		/* when decrementing we need to decrement first and
+		 * then read make sure we cover all the data */
+		if (!inc)
+			temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+					temp_rb_rptr, size);
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+					temp_rb_rptr);
+
+		if (check && ((inc && val[i] == global_eop) ||
+			(!inc && (val[i] ==
+			cp_type3_packet(CP_MEM_WRITE, 2) ||
+			val[i] == CACHE_FLUSH_TS)))) {
+			/* decrement i, i.e i = (i - 1 + 3) % 3 if
+			 * we are going forward, else increment i */
+			i = (i + 2) % 3;
+			if (val[i] == rb->device->memstore.gpuaddr +
+				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+						eoptimestamp)) {
+				int j = ((i + 2) % 3);
+				if ((inc && (val[j] == CACHE_FLUSH_TS ||
+						val[j] == cp_type3_packet(
+							CP_MEM_WRITE, 2))) ||
+					(!inc && val[j] == global_eop)) {
+						/* Found the global eop */
+						status = 0;
+						break;
+				}
 			}
+			/* if no match found then increment i again
+			 * since we decremented before matching */
+			i = (i + 1) % 3;
+		}
+		if (inc)
+			temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+						temp_rb_rptr, size);
+
+		i = (i + 1) % 3;
+		if (2 == i)
+			check = true;
+	} while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+	/* temp_rb_rptr points to the command stream after global eop,
+	 * move backward till the start of command sequence */
+	if (!status) {
+		status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+		if (!status) {
+			*rb_rptr = temp_rb_rptr;
+			KGSL_DRV_ERR(rb->device,
+			"Offset of cmd sequence after eop timestamp: 0x%x\n",
+			temp_rb_rptr / sizeof(unsigned int));
+		}
+	}
+	if (status)
+		KGSL_DRV_ERR(rb->device,
+		"Failed to find the command sequence after eop timestamp\n");
+	return status;
+}
+
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+				unsigned int *rb_rptr,
+				unsigned int ib1)
+{
+	int status = -EINVAL;
+	unsigned int temp_rb_rptr = *rb_rptr;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int val[2];
+	int i = 0;
+	bool check = false;
+	bool ctx_switch = false;
+
+	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+
+		if (check && val[i] == ib1) {
+			/* decrement i, i.e i = (i - 1 + 2) % 2 */
+			i = (i + 1) % 2;
+			if (adreno_cmd_is_ib(val[i])) {
+				/* go till start of command sequence */
+				status = _find_start_of_cmd_seq(rb,
+						&temp_rb_rptr, false);
+				KGSL_DRV_ERR(rb->device,
+				"Found the hanging IB at offset 0x%x\n",
+				temp_rb_rptr / sizeof(unsigned int));
+				break;
+			}
+			/* if no match then increment i since we decremented
+			 * before checking */
+			i = (i + 1) % 2;
+		}
+		/* Make sure you do not encounter a context switch twice, we can
+		 * encounter it once for the bad context as the start of search
+		 * can point to the context switch */
+		if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+			if (ctx_switch) {
+				KGSL_DRV_ERR(rb->device,
+				"Context switch encountered before bad "
+				"IB found\n");
+				break;
+			}
+			ctx_switch = true;
+		}
+		i = (i + 1) % 2;
+		if (1 == i)
+			check = true;
+		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+								size);
+	}
+	if  (!status)
+		*rb_rptr = temp_rb_rptr;
+	return status;
+}
+
+static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
+				unsigned int rb_rptr)
+{
+	unsigned int temp_rb_rptr = rb_rptr;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int val[2];
+	int i = 0;
+	bool check = false;
+	bool cmd_start = false;
+
+	/* Go till the start of the ib sequence and turn on preamble */
+	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+		if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
+			/* decrement i */
+			i = (i + 1) % 2;
+			if (val[i] == cp_nop_packet(4)) {
+				temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+						temp_rb_rptr, size);
+				kgsl_sharedmem_writel(&rb->buffer_desc,
+					temp_rb_rptr, cp_nop_packet(1));
+			}
+			KGSL_DRV_ERR(rb->device,
+			"Turned preamble on at offset 0x%x\n",
+			temp_rb_rptr / 4);
+			break;
+		}
+		/* If you reach beginning of next command sequence then exit
+		 * First command encountered is the current one so don't break
+		 * on that. */
+		if (KGSL_CMD_IDENTIFIER == val[i]) {
+			if (cmd_start)
+				break;
+			cmd_start = true;
 		}
 
-		if (rb_rptr == 0)
-			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
-		else
-			rb_rptr -= sizeof(unsigned int);
+		i = (i + 1) % 2;
+		if (1 == i)
+			check = true;
+		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+								size);
 	}
+}
 
-	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
-		KGSL_DRV_ERR(device,
-			"GPU recovery from hang not possible because last"
-			" successful timestamp is overwritten\n");
-		return -EINVAL;
-	}
-	/* rb_rptr is now pointing to the first dword of the command following
-	 * the last sucessfully executed command sequence. Assumption is that
-	 * GPU is hung in the command sequence pointed by rb_rptr */
-	/* make sure the GPU is not hung in a command submitted by kgsl
-	 * itself */
-	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
-	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
-				adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size));
-	if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
-		KGSL_DRV_ERR(device,
-			"GPU recovery from hang not possible because "
-			"of hang in kgsl command\n");
-		return -EINVAL;
-	}
+static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
+		unsigned int rb_rptr, unsigned int *temp_rb_buffer,
+		int *rb_size, unsigned int *bad_rb_buffer,
+		int *bad_rb_size,
+		int *last_valid_ctx_id)
+{
+	unsigned int good_rb_idx = 0, cmd_start_idx = 0;
+	unsigned int val1 = 0;
+	struct kgsl_context *k_ctxt;
+	struct adreno_context *a_ctxt;
+	unsigned int bad_rb_idx = 0;
+	int copy_rb_contents = 0;
+	unsigned int temp_rb_rptr;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int good_cmd_start_idx = 0;
 
+	/* Walk the rb from the context switch. Omit any commands
+	 * for an invalid context. */
 	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
-		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
-		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-						rb->buffer_desc.size);
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+
+		if (KGSL_CMD_IDENTIFIER == val1) {
+			/* Start is the NOP dword that comes before
+			 * KGSL_CMD_IDENTIFIER */
+			cmd_start_idx = bad_rb_idx - 1;
+			if (copy_rb_contents)
+				good_cmd_start_idx = good_rb_idx - 1;
+		}
+
 		/* check for context switch indicator */
-		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
-			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
-			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
-			BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
-			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
-			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
-			BUG_ON(val1 != (device->memstore.gpuaddr +
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-					current_context)));
-			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
-			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
-							rb->buffer_desc.size);
+		if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+			unsigned int temp_idx, val2;
+			/* increment by 3 to get to the context_id */
+			temp_rb_rptr = (rb_rptr +
+				(3 * sizeof(unsigned int))) % size;
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+						temp_rb_rptr);
 
-			/*
-			 * If other context switches were already lost and
-			 * and the current context is the one that is hanging,
-			 * then we cannot recover.  Print an error message
-			 * and leave.
-			 */
-
-			if ((copy_rb_contents == 0) && (value == context_id)) {
-				KGSL_DRV_ERR(device, "GPU recovery could not "
-					"find the previous context\n");
-				return -EINVAL;
-			}
-
-			/*
-			 * If we were copying the commands and got to this point
-			 * then we need to remove the 3 commands that appear
-			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
-			 */
-			if (temp_idx)
-				temp_idx -= 3;
 			/* if context switches to a context that did not cause
 			 * hang then start saving the rb contents as those
 			 * commands can be executed */
-			if (value != context_id) {
+			k_ctxt = idr_find(&rb->device->context_idr, val2);
+			if (k_ctxt) {
+				a_ctxt = k_ctxt->devctxt;
+
+			/* If we are changing to a good context and were not
+			 * copying commands then copy over commands to the good
+			 * context */
+			if (!copy_rb_contents && ((k_ctxt &&
+				!(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
+				!k_ctxt)) {
+				for (temp_idx = cmd_start_idx;
+					temp_idx < bad_rb_idx;
+					temp_idx++)
+					temp_rb_buffer[good_rb_idx++] =
+						bad_rb_buffer[temp_idx];
+				*last_valid_ctx_id = val2;
 				copy_rb_contents = 1;
-				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
-				temp_rb_buffer[temp_idx++] =
-						KGSL_CMD_IDENTIFIER;
-				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
-				temp_rb_buffer[temp_idx++] =
-						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
-				temp_rb_buffer[temp_idx++] =
-					cp_type3_packet(CP_MEM_WRITE, 2);
-				temp_rb_buffer[temp_idx++] = val1;
-				temp_rb_buffer[temp_idx++] = value;
-			} else {
+			} else if (copy_rb_contents && k_ctxt &&
+				(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
+				/* If we are changing to bad context then remove
+				 * the dwords we copied for this sequence from
+				 * the good buffer */
+				good_rb_idx = good_cmd_start_idx;
 				copy_rb_contents = 0;
 			}
-		} else if (copy_rb_contents)
-			temp_rb_buffer[temp_idx++] = value;
+			}
+		}
+
+		if (copy_rb_contents)
+			temp_rb_buffer[good_rb_idx++] = val1;
+		/* Copy both good and bad commands for replay to the bad
+		 * buffer */
+		bad_rb_buffer[bad_rb_idx++] = val1;
+
+		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
+	}
+	*rb_size = good_rb_idx;
+	*bad_rb_size = bad_rb_idx;
+}
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				struct adreno_recovery_data *rec_data)
+{
+	int status;
+	struct kgsl_device *device = rb->device;
+	unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
+	struct kgsl_context *context;
+	struct adreno_context *adreno_context;
+
+	context = idr_find(&device->context_idr, rec_data->context_id);
+
+	/* Look for the command stream that is right after the global eop */
+	status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+				rec_data->global_eop + 1, false);
+	if (status)
+		goto done;
+
+	if (context) {
+		adreno_context = context->devctxt;
+
+		if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+			if (rec_data->ib1) {
+				status = _find_hanging_ib_sequence(rb, &rb_rptr,
+								rec_data->ib1);
+				if (status)
+					goto copy_rb_contents;
+			}
+			_turn_preamble_on_for_ib_seq(rb, rb_rptr);
+		} else {
+			status = -EINVAL;
+		}
 	}
 
-	*rb_size = temp_idx;
-	return 0;
+copy_rb_contents:
+	_copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
+				&rec_data->rb_size,
+				rec_data->bad_rb_buffer,
+				&rec_data->bad_rb_size,
+				&rec_data->last_valid_ctx_id);
+	/* If we failed to find the hanging IB sequence, or preambles are not
+	 * supported, then we cannot execute commands from the bad context */
+	if (status) {
+		rec_data->bad_rb_size = 0;
+		status = 0;
+	}
+	/* If there is no context then there are no commands to replay for the
+	 * good case */
+	if (!context)
+		rec_data->rb_size = 0;
+done:
+	return status;
 }
 
 void
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 6429f46..4cc57c2 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,6 +27,7 @@
 
 struct kgsl_device;
 struct kgsl_device_private;
+struct adreno_recovery_data;
 
 #define GSL_RB_MEMPTRS_SCRATCH_COUNT	 8
 struct kgsl_rbmemptrs {
@@ -114,8 +115,7 @@
 void kgsl_cp_intrcallback(struct kgsl_device *device);
 
 int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
-				unsigned int *temp_rb_buffer,
-				int *rb_size);
+				struct adreno_recovery_data *rec_data);
 
 void
 adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
@@ -139,4 +139,11 @@
 	return (val + sizeof(unsigned int)) % size;
 }
 
+/* Decrement a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
+							unsigned int size)
+{
+	return (val + size - sizeof(unsigned int)) % size;
+}
+
 #endif  /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index 38954f8..396729e 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -3762,6 +3762,8 @@
 	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
 	struct isp_msg_stats msgStats;
 	msgStats.frameCounter = vfe32_ctrl->share_ctrl->vfeFrameId;
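+	/* Stats that arrive in the same payload as SOF belong to the
+	 * previous frame */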
+	if (vfe32_ctrl->simultaneous_sof_stat)
+		msgStats.frameCounter--;
 	msgStats.buffer = bufAddress;
 	switch (statsNum) {
 	case statsAeNum:{
@@ -3844,6 +3846,9 @@
 	uint32_t temp;
 
 	msgStats.frame_id = vfe32_ctrl->share_ctrl->vfeFrameId;
+	if (vfe32_ctrl->simultaneous_sof_stat)
+		msgStats.frame_id--;
+
 	msgStats.status_bits = status_bits;
 
 	msgStats.aec.buff = vfe32_ctrl->aecStatsControl.bufToRender;
@@ -4204,7 +4209,9 @@
 {
 	unsigned long flags;
 	struct axi_ctrl_t *axi_ctrl = (struct axi_ctrl_t *)data;
+	struct vfe32_ctrl_type *vfe32_ctrl = axi_ctrl->share_ctrl->vfe32_ctrl;
 	struct vfe32_isr_queue_cmd *qcmd = NULL;
+	int stat_interrupt;
 
 	CDBG("=== axi32_do_tasklet start ===\n");
 
@@ -4224,11 +4231,32 @@
 		spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
 			flags);
 
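+		/* Note whether a stats interrupt arrived in the same payload as
+		 * this SOF */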
+		if (axi_ctrl->share_ctrl->stats_comp) {
+			stat_interrupt = (qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK);
+		} else {
+			stat_interrupt =
+				(qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_AEC) |
+				(qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_AWB) |
+				(qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_AF) |
+				(qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_IHIST) |
+				(qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_RS) |
+				(qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_CS);
+		}
 		if (qcmd->vfeInterruptStatus0 &
-				VFE_IRQ_STATUS0_CAMIF_SOF_MASK)
+				VFE_IRQ_STATUS0_CAMIF_SOF_MASK) {
+			if (stat_interrupt)
+				vfe32_ctrl->simultaneous_sof_stat = 1;
 			v4l2_subdev_notify(&axi_ctrl->subdev,
 				NOTIFY_VFE_IRQ,
 				(void *)VFE_IRQ_STATUS0_CAMIF_SOF_MASK);
+		}
 
 		/* interrupt to be processed,  *qcmd has the payload.  */
 		if (qcmd->vfeInterruptStatus0 &
@@ -4335,6 +4363,7 @@
 					(void *)VFE_IRQ_STATUS0_SYNC_TIMER2);
 			}
 		}
+		vfe32_ctrl->simultaneous_sof_stat = 0;
 		kfree(qcmd);
 	}
 	CDBG("=== axi32_do_tasklet end ===\n");
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index 542bbf8..2c528da 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -1006,6 +1006,8 @@
 	uint32_t snapshot_frame_cnt;
 	struct msm_stats_bufq_ctrl stats_ctrl;
 	struct msm_stats_ops stats_ops;
+
+	uint32_t simultaneous_sof_stat;
 };
 
 #define statsAeNum      0
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 85e984d..dc38dbf 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -1272,18 +1272,6 @@
 			sizeof(struct hfi_h264_db_control);
 		break;
 	}
-	case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
-	{
-		struct hfi_temporal_spatial_tradeoff *hfi;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
-		hfi = (struct hfi_temporal_spatial_tradeoff *)
-			&pkt->rg_property_data[1];
-		hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
-					pdata)->ts_factor;
-		pkt->size += sizeof(u32)  * 2;
-		break;
-	}
 	case HAL_PARAM_VENC_SESSION_QP:
 	{
 		struct hfi_quantization *hfi;
@@ -1505,8 +1493,6 @@
 		break;
 	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
 		break;
-	case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
-		break;
 	case HAL_PARAM_VENC_SESSION_QP:
 		break;
 	case HAL_CONFIG_VENC_INTRA_PERIOD:
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index a36d7f3..6c7e5df 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -155,6 +155,7 @@
 #define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
 #define HFI_EXTRADATA_TIMESTAMP				0x00000005
 #define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
+#define  HFI_EXTRADATA_EOSNAL_DETECTED      0x00000007
 #define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
 #define HFI_EXTRADATA_NUM_CONCEALED_MB		0x7F100001
 #define HFI_EXTRADATA_INDEX					0x7F100002
@@ -164,6 +165,11 @@
 #define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM	0x07000010
 #define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
 
+struct HFI_INDEX_EXTRADATA_CONFIG_TYPE {
+	int enable;
+	u32 index_extra_data_id;
+};
+
 struct hfi_extradata_header {
 	u32 size;
 	u32 version;
@@ -196,7 +202,7 @@
 (HFI_PROPERTY_PARAM_OX_START + 0x004)
 #define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
 	(HFI_PROPERTY_PARAM_OX_START + 0x005)
-#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE		\
+#define  HFI_PROPERTY_PARAM_INDEX_EXTRADATA             \
 	(HFI_PROPERTY_PARAM_OX_START + 0x006)
 #define HFI_PROPERTY_PARAM_DIVX_FORMAT					\
 	(HFI_PROPERTY_PARAM_OX_START + 0x007)
@@ -244,6 +250,10 @@
 
 #define HFI_PROPERTY_PARAM_VENC_OX_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define  HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO       \
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x001)
+#define  HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x002)
 #define HFI_PROPERTY_CONFIG_VENC_OX_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
 
@@ -285,10 +295,6 @@
 	u8 rg_data[1];
 };
 
-struct hfi_seq_header_info {
-	u32 max_header_len;
-};
-
 struct hfi_enable_picture {
 	u32 picture_type;
 };
@@ -861,6 +867,14 @@
 	int dev_count;
 };
 
+struct hfi_index_extradata_aspect_ratio_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 saspect_width;
+	u32  saspect_height;
+};
+
 extern struct hal_device_data hal_ctxt;
 
 int vidc_hal_iface_msgq_read(struct hal_device *device, void *pkt);
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
index d4e2619..43995eb 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_helper.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -68,8 +68,8 @@
 
 #define HFI_VIDEO_DOMAIN_ENCODER	(HFI_COMMON_BASE + 0x1)
 #define HFI_VIDEO_DOMAIN_DECODER	(HFI_COMMON_BASE + 0x2)
-#define HFI_VIDEO_DOMAIN_VPE		(HFI_COMMON_BASE + 0x3)
-#define HFI_VIDEO_DOMAIN_MBI		(HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_VPE		(HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_MBI		(HFI_COMMON_BASE + 0x8)
 
 #define HFI_DOMAIN_BASE_COMMON		(HFI_COMMON_BASE + 0)
 #define HFI_DOMAIN_BASE_VDEC		(HFI_COMMON_BASE + 0x01000000)
@@ -131,6 +131,7 @@
 #define HFI_H264_PROFILE_STEREO_HIGH		0x00000008
 #define HFI_H264_PROFILE_MULTIVIEW_HIGH		0x00000010
 #define HFI_H264_PROFILE_CONSTRAINED_HIGH	0x00000020
+#define  HFI_H264_PROFILE_CONSTRAINED_BASE  0x00000040
 
 #define HFI_H264_LEVEL_1					0x00000001
 #define HFI_H264_LEVEL_1b					0x00000002
@@ -261,6 +262,10 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
 #define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT				\
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+#define  HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE        \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00D)
+#define  HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED            \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
 
 #define HFI_PROPERTY_CONFIG_COMMON_START				\
 	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
@@ -271,6 +276,8 @@
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
 #define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM				\
 	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+#define  HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR              \
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x002)
 
 #define HFI_PROPERTY_CONFIG_VDEC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
@@ -285,15 +292,13 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
 #define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF	\
+#define  HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE     \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED			\
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
 #define HFI_PROPERTY_PARAM_VENC_SESSION_QP				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
 #define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION			\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
-#define HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING		\
+#define  HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE           \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
 #define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION		\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
@@ -301,22 +306,26 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
 #define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION		\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
-#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO			\
+#define  HFI_PROPERTY_PARAM_VENC_OPEN_GOP                   \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
 #define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
 #define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL			\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
-#define HFI_PROPERTY_PARAM_VENC_VBVBUFFER_SIZE				\
+#define  HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE           \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define  HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED           \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
 #define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
 #define HFI_PROPERTY_PARAM_VENC_ADVANCED				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
 #define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
-#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL	\
+#define  HFI_PROPERTY_PARAM_VENC_H264_SPS_ID                \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+#define  HFI_PROPERTY_PARAM_VENC_H264_PPS_ID               \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
 
 #define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -328,7 +337,7 @@
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
 #define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME			\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE			\
+#define  HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE                \
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
 #define HFI_PROPERTY_CONFIG_VENC_FRAME_QP				\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
@@ -357,6 +366,8 @@
 #define HFI_CAPABILITY_SCALE_X				(HFI_COMMON_BASE + 0x6)
 #define HFI_CAPABILITY_SCALE_Y				(HFI_COMMON_BASE + 0x7)
 #define HFI_CAPABILITY_BITRATE				(HFI_COMMON_BASE + 0x8)
+#define  HFI_CAPABILITY_BFRAME				(HFI_COMMON_BASE + 0x9)
+#define  HFI_CAPABILITY_HIERARCHICAL_P_LAYERS	(HFI_COMMON_BASE + 0x10)
 
 struct hfi_capability_supported {
 	u32 capability_type;
@@ -433,10 +444,6 @@
 	u32 bframes;
 };
 
-struct hfi_timestamp_scale {
-	u32 time_stamp_scale;
-};
-
 struct hfi_mpeg4_header_extension {
 	u32 header_extension;
 };
@@ -492,6 +499,10 @@
 	struct hfi_profile_level rg_profile_level[1];
 };
 
+struct hfi_quality_vs_speed {
+	u32 quality_vs_speed;
+};
+
 struct hfi_quantization {
 	u32 qp_i;
 	u32 qp_p;
@@ -499,8 +510,10 @@
 	u32 layer_id;
 };
 
-struct hfi_temporal_spatial_tradeoff {
-	u32 ts_factor;
+struct hfi_quantization_range {
+	u32 min_qp;
+	u32 max_qp;
+	u32 layer_id;
 };
 
 struct hfi_frame_size {
@@ -605,6 +618,8 @@
 	u8 pipe2d;
 	u8 hw_mode;
 	u8 low_delay_enforce;
+	u8 worker_vppsg_delay;
+	int close_gop;
 	int h264_constrain_intra_pred;
 	int h264_transform_8x8_flag;
 	int mpeg4_qpel_enable;
@@ -613,6 +628,9 @@
 	u8 vpp_info_packet_mode;
 	u8 ref_tile_mode;
 	u8 bitstream_flush_mode;
+	u32 vppsg_vspap_fb_sync_delay;
+	u32 rc_initial_delay;
+	u32 peak_bitrate_constraint;
 	u32 ds_display_frame_width;
 	u32 ds_display_frame_height;
 	u32 perf_tune_param_ptr;
@@ -624,6 +642,19 @@
 	u32 h264_num_ref_frames;
 };
 
+struct hfi_vbv_hrd_bufsize {
+	u32 buffer_size;
+};
+
+struct hfi_codec_mask_supported {
+	u32 codecs;
+	u32 video_domains;
+};
+
+struct hfi_seq_header_info {
+	u32 max_header_len;
+};
+
 #define HFI_CMD_SYS_COMMON_START			\
 	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
 #define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 4f7c585..98e2fd9 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -1116,6 +1116,7 @@
 	unsigned long flags;
 	int rc;
 	unsigned long rate;
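+	/* clk_round_rate() returns a signed long, so keep the result in a
+	 * signed variable to preserve negative error codes */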
+	long rate_rc;
 
 	dprintk(3, "In Stream ON\n");
 	if (determine_mode(c_data) != c_data->op_mode) {
@@ -1142,12 +1143,13 @@
 		}
 
 		rate = c_data->vc_format.clk_freq;
-		rate = clk_round_rate(dev->vcap_clk, rate);
-		if (rate <= 0) {
+		rate_rc = clk_round_rate(dev->vcap_clk, rate);
+		if (rate_rc <= 0) {
 			pr_err("%s: Failed core rnd_rate\n", __func__);
 			rc = -EINVAL;
 			goto free_res;
 		}
+		rate = (unsigned long)rate_rc;
 		rc = clk_set_rate(dev->vcap_clk, rate);
 		if (rc < 0)
 			goto free_res;
@@ -1171,6 +1173,7 @@
 			goto free_res;
 
 		config_vc_format(c_data);
+		c_data->streaming = 1;
 		rc = vb2_streamon(&c_data->vc_vidq, i);
 		if (rc < 0)
 			goto free_res;
@@ -1187,12 +1190,13 @@
 		c_data->dev->vp_client = c_data;
 
 		rate = 160000000;
-		rate = clk_round_rate(dev->vcap_clk, rate);
-		if (rate <= 0) {
+		rate_rc = clk_round_rate(dev->vcap_clk, rate);
+		if (rate_rc <= 0) {
 			pr_err("%s: Failed core rnd_rate\n", __func__);
 			rc = -EINVAL;
 			goto free_res;
 		}
+		rate = (unsigned long)rate_rc;
 		rc = clk_set_rate(dev->vcap_clk, rate);
 		if (rc < 0)
 			goto free_res;
@@ -1255,12 +1259,13 @@
 		}
 
 		rate = c_data->vc_format.clk_freq;
-		rate = clk_round_rate(dev->vcap_clk, rate);
-		if (rate <= 0) {
+		rate_rc = clk_round_rate(dev->vcap_clk, rate);
+		if (rate_rc <= 0) {
 			pr_err("%s: Failed core rnd_rate\n", __func__);
 			rc = -EINVAL;
 			goto free_res;
 		}
+		rate = (unsigned long)rate_rc;
 		rc = clk_set_rate(dev->vcap_clk, rate);
 		if (rc < 0)
 			goto free_res;
@@ -1373,13 +1378,11 @@
 	return 0;
 }
 
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+int streamoff_work(struct vcap_client_data *c_data)
 {
-	struct vcap_client_data *c_data = to_client_data(file->private_data);
 	struct vcap_dev *dev = c_data->dev;
 	unsigned long flags;
 	int rc;
-
 	switch (c_data->op_mode) {
 	case VC_VCAP_OP:
 		if (c_data != dev->vc_client) {
@@ -1395,9 +1398,12 @@
 		}
 		dev->vc_resource = 0;
 		spin_unlock_irqrestore(&dev->dev_slock, flags);
-		rc = vb2_streamoff(&c_data->vc_vidq, i);
-		if (rc >= 0)
+		rc = vb2_streamoff(&c_data->vc_vidq,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		if (rc >= 0) {
+			c_data->streaming = 0;
 			atomic_set(&c_data->dev->vc_enabled, 0);
+		}
 		return rc;
 	case VP_VCAP_OP:
 		if (c_data != dev->vp_client) {
@@ -1490,7 +1496,12 @@
 		pr_err("VCAP Error: %s: Unknown Operation mode", __func__);
 		return -ENOTRECOVERABLE;
 	}
-	return 0;
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct vcap_client_data *c_data = to_client_data(file->private_data);
+	return streamoff_work(c_data);
 }
 
 static int vidioc_subscribe_event(struct v4l2_fh *fh,
@@ -1646,6 +1657,9 @@
 	if (c_data == NULL)
 		return 0;
 
+	if (c_data->streaming)
+		streamoff_work(c_data);
+
 	spin_lock_irqsave(&dev->dev_slock, flags);
 	atomic_dec(&dev->open_clients);
 	ret = atomic_read(&dev->open_clients);
diff --git a/drivers/media/video/vcap_vc.h b/drivers/media/video/vcap_vc.h
index 57d13cd..792fb14 100644
--- a/drivers/media/video/vcap_vc.h
+++ b/drivers/media/video/vcap_vc.h
@@ -69,11 +69,6 @@
 
 #define VC_BUFFER_WRITTEN (0x3 << 1)
 
-struct vc_reg_data {
-	unsigned data;
-	unsigned addr;
-};
-
 int vc_start_capture(struct vcap_client_data *c_data);
 int vc_hw_kick_off(struct vcap_client_data *c_data);
 void vc_stop_capture(struct vcap_client_data *c_data);
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index db38902..be1b4ff 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -180,12 +180,13 @@
 		return;
 
 	vp_act = &dev->vp_client->vid_vp_action;
-	irq = vp_work->irq;
 
 	rc = readl_relaxed(VCAP_OFFSET(0x048));
 	while (!(rc & 0x00000100))
 		rc = readl_relaxed(VCAP_OFFSET(0x048));
 
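+	/* Read the current interrupt status directly from the VP hardware */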
+	irq = readl_relaxed(VCAP_VP_INT_STATUS);
+
 	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
 	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
 
@@ -283,7 +284,7 @@
 	}
 
 	dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
-	if (!(irq & VP_PIC_DONE)) {
+	if (!(irq & (VP_PIC_DONE | VP_MODE_CHANGE))) {
 		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
 		pr_err("VP IRQ shows some error\n");
 		return IRQ_HANDLED;
@@ -307,7 +308,6 @@
 
 	INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
 	dev->vp_work.cd = c_data;
-	dev->vp_work.irq = irq;
 	rc = queue_work(dev->vcap_wq, &dev->vp_work.work);
 
 	disable_irq_nosync(dev->vpirq->start);
@@ -411,7 +411,7 @@
 	void *buf;
 
 	if (!c_data->vid_vp_action.bufMotion) {
-		dprintk(1, "Motion buffer has not been created");
+		pr_err("Motion buffer has not been created\n");
 		return;
 	}
 
@@ -556,7 +556,7 @@
 	if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
 		chroma_fmt = 1;
 
-	writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+	writel_relaxed((c_data->vp_out_fmt.width / 16) << 20 |
 			chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
 
 	/* Enable Interrupt */
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
index 47ad8d4..5415e54 100644
--- a/drivers/media/video/vcap_vp.h
+++ b/drivers/media/video/vcap_vp.h
@@ -89,6 +89,7 @@
 #define VCAP_VP_NR_T2_C_BASE_ADDR (VCAP_BASE + 0x4B8)
 
 #define VP_PIC_DONE (0x1 << 0)
+#define VP_MODE_CHANGE (0x1 << 8)
 
 irqreturn_t vp_handler(struct vcap_dev *dev);
 int config_vp_format(struct vcap_client_data *c_data);
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 0c3d4ad..d75cac4 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -60,4 +60,11 @@
           devices support Pulse Width Modulation output with user generated
           patterns. They share a lookup table with size of 64 entries.
 
+config QPNP_POWER_ON
+	tristate "QPNP PMIC POWER-ON Driver"
+	depends on OF_SPMI && SPMI && MSM_QPNP_INT
+	help
+	  This driver supports the power-on functionality on Qualcomm
+	  PNP PMIC. It currently supports reporting the change in status of
+	  the KPDPWR_N line (connected to the power-key).
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 6deb6ee..2b6b806 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_USB_BAM) += usb_bam.o
 obj-$(CONFIG_SPS) += sps/
 obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
+obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/qpnp-power-on.c b/drivers/platform/msm/qpnp-power-on.c
new file mode 100644
index 0000000..d8bb884
--- /dev/null
+++ b/drivers/platform/msm/qpnp-power-on.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+
+#define QPNP_PON_RT_STS(base)		(base + 0x10)
+#define QPNP_PON_PULL_CTL(base)		(base + 0x70)
+#define QPNP_PON_DBC_CTL(base)		(base + 0x71)
+
+#define QPNP_PON_CNTL_PULL_UP		BIT(1)
+#define QPNP_PON_CNTL_TRIG_DELAY_MASK	(0x7)
+#define QPNP_PON_KPDPWR_N_SET		BIT(0)
+
+struct qpnp_pon {
+	struct spmi_device *spmi;
+	struct input_dev *pon_input;
+	int key_status_irq;
+	u16 base;
+};
+
+static irqreturn_t qpnp_pon_key_irq(int irq, void *_pon)
+{
+	u8 pon_rt_sts;
+	int rc;
+	struct qpnp_pon *pon = _pon;
+
+	rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_RT_STS(pon->base), &pon_rt_sts, 1);
+	if (rc) {
+		dev_err(&pon->spmi->dev, "Unable to read PON RT status\n");
+		return IRQ_HANDLED;
+	}
+
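+	/* KPDPWR_N is active low: report the key as pressed when the status
+	 * bit is clear */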
+	input_report_key(pon->pon_input, KEY_POWER,
+				!(pon_rt_sts & QPNP_PON_KPDPWR_N_SET));
+	input_sync(pon->pon_input);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit qpnp_pon_key_init(struct qpnp_pon *pon)
+{
+	int rc = 0;
+	u32 pullup, delay;
+	u8 pon_cntl;
+
+	pon->key_status_irq = spmi_get_irq_byname(pon->spmi,
+						NULL, "power-key");
+	if (pon->key_status_irq < 0) {
+		dev_err(&pon->spmi->dev, "Unable to get pon key irq\n");
+		return -ENXIO;
+	}
+
+	rc = of_property_read_u32(pon->spmi->dev.of_node,
+					"qcom,pon-key-dbc-delay", &delay);
+	if (!rc) {
+		delay = (delay << 6) / USEC_PER_SEC;
+		delay = ilog2(delay);
+
+		rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_DBC_CTL(pon->base), &pon_cntl, 1);
+		if (rc) {
+			dev_err(&pon->spmi->dev, "spmi read addr=%x failed\n",
+						QPNP_PON_DBC_CTL(pon->base));
+			return rc;
+		}
+		pon_cntl &= ~QPNP_PON_CNTL_TRIG_DELAY_MASK;
+		pon_cntl |= (delay & QPNP_PON_CNTL_TRIG_DELAY_MASK);
+		rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_DBC_CTL(pon->base), &pon_cntl, 1);
+		if (rc) {
+			dev_err(&pon->spmi->dev, "spmi write addr=%x failed\n",
+						QPNP_PON_DBC_CTL(pon->base));
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32(pon->spmi->dev.of_node,
+				"qcom,pon-key-pull-up", &pullup);
+	if (!rc) {
+		rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_PULL_CTL(pon->base), &pon_cntl, 1);
+		if (rc) {
+			dev_err(&pon->spmi->dev, "spmi read addr=%x failed\n",
+						QPNP_PON_PULL_CTL(pon->base));
+			return rc;
+		}
+		if (pullup)
+			pon_cntl |= QPNP_PON_CNTL_PULL_UP;
+		else
+			pon_cntl &= ~QPNP_PON_CNTL_PULL_UP;
+
+		rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_PULL_CTL(pon->base), &pon_cntl, 1);
+		if (rc) {
+			dev_err(&pon->spmi->dev, "spmi write addr=%x failed\n",
+						QPNP_PON_PULL_CTL(pon->base));
+			return rc;
+		}
+	}
+
+	pon->pon_input = input_allocate_device();
+	if (!pon->pon_input) {
+		dev_err(&pon->spmi->dev, "Can't allocate pon button\n");
+		return -ENOMEM;
+	}
+
+	input_set_capability(pon->pon_input, EV_KEY, KEY_POWER);
+	pon->pon_input->name = "qpnp_pon_key";
+	pon->pon_input->phys = "qpnp_pon_key/input0";
+
+	rc = input_register_device(pon->pon_input);
+	if (rc) {
+		dev_err(&pon->spmi->dev, "Can't register pon key: %d\n", rc);
+		goto free_input_dev;
+	}
+
+	rc = request_any_context_irq(pon->key_status_irq, qpnp_pon_key_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						"qpnp_pon_key_status", pon);
+	if (rc < 0) {
+		dev_err(&pon->spmi->dev, "Can't request %d IRQ for pon: %d\n",
+						pon->key_status_irq, rc);
+		goto unreg_input_dev;
+	}
+
+	device_init_wakeup(&pon->spmi->dev, 1);
+	enable_irq_wake(pon->key_status_irq);
+
+	return rc;
+
+unreg_input_dev:
+	input_unregister_device(pon->pon_input);
+free_input_dev:
+	input_free_device(pon->pon_input);
+	return rc;
+}
+
+static int __devinit qpnp_pon_probe(struct spmi_device *spmi)
+{
+	struct qpnp_pon *pon;
+	struct resource *pon_resource;
+	u32 pon_key_enable = 0;
+	int rc = 0;
+
+	pon = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_pon),
+							GFP_KERNEL);
+	if (!pon) {
+		dev_err(&spmi->dev, "Can't allocate qpnp_pon\n");
+		return -ENOMEM;
+	}
+
+	pon->spmi = spmi;
+
+	pon_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+	if (!pon_resource) {
+		dev_err(&spmi->dev, "Unable to get PON base address\n");
+		return -ENXIO;
+	}
+	pon->base = pon_resource->start;
+
+	dev_set_drvdata(&spmi->dev, pon);
+
+	/* pon-key-enable property must be set to register pon key */
+	rc = of_property_read_u32(spmi->dev.of_node, "qcom,pon-key-enable",
+							&pon_key_enable);
+	if (rc && rc != -EINVAL) {
+		dev_err(&spmi->dev,
+			"Error reading 'pon-key-enable' property (%d)\n", rc);
+		return rc;
+	}
+
+	if (pon_key_enable) {
+		rc = qpnp_pon_key_init(pon);
+		if (rc < 0) {
+			dev_err(&spmi->dev, "Failed to register pon-key\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_pon_remove(struct spmi_device *spmi)
+{
+	struct qpnp_pon *pon = dev_get_drvdata(&spmi->dev);
+
+	if (pon->pon_input) {
+		free_irq(pon->key_status_irq, pon);
+		input_unregister_device(pon->pon_input);
+	}
+
+	return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+	{	.compatible = "qcom,qpnp-power-on",
+	},
+	{}
+};
+
+static struct spmi_driver qpnp_pon_driver = {
+	.driver		= {
+		.name	= "qcom,qpnp-power-on",
+		.of_match_table = spmi_match_table,
+	},
+	.probe		= qpnp_pon_probe,
+	.remove		= __devexit_p(qpnp_pon_remove),
+};
+
+static int __init qpnp_pon_init(void)
+{
+	return spmi_driver_register(&qpnp_pon_driver);
+}
+module_init(qpnp_pon_init);
+
+static void __exit qpnp_pon_exit(void)
+{
+	spmi_driver_unregister(&qpnp_pon_driver);
+}
+module_exit(qpnp_pon_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC POWER-ON driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index bf30c0b..68500a3 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -302,7 +302,7 @@
 
 		tty = tty_port_tty_get(&port->port);
 		if (!tty)
-			continue;
+			break;
 
 		list_del_init(&urb->urb_list);
 
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 1cfbb22..3204552 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2488,6 +2488,9 @@
 		mfd->cursor_update = mdp_hw_cursor_update;
 		mfd->dma_fnc = mdp4_dtv_overlay;
 		mfd->dma = &dma_e_data;
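+		/* Hook up histogram support for the external (DTV) interface */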
+		mfd->do_histogram = mdp_do_histogram;
+		mfd->start_histogram = mdp_histogram_start;
+		mfd->stop_histogram = mdp_histogram_stop;
 		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
 		break;
 #endif
diff --git a/drivers/video/msm/mhl/mhl_8334.c b/drivers/video/msm/mhl/mhl_8334.c
index d6e3f6f..646dd29 100644
--- a/drivers/video/msm/mhl/mhl_8334.c
+++ b/drivers/video/msm/mhl/mhl_8334.c
@@ -30,11 +30,11 @@
 #include <linux/regulator/consumer.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/mhl_8334.h>
 
 #include "msm_fb.h"
 #include "external_common.h"
 #include "hdmi_msm.h"
-#include "mhl_8334.h"
 #include "mhl_i2c_utils.h"
 
 
@@ -53,6 +53,8 @@
 static void release_usb_switch_open(void);
 static void switch_mode(enum mhl_st_type to_mode);
 static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+void (*notify_usb_online)(int online);
+static void mhl_drive_hpd(uint8_t to_state);
 
 static struct i2c_driver mhl_sii_i2c_driver = {
 	.driver = {
@@ -227,6 +229,58 @@
 	return true;
 }
 
+
+/*  USB_HANDSHAKING FUNCTIONS */
+
+int mhl_device_discovery(const char *name, int *result)
+{
+	int timeout;
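+
+	/* REG_DISC_CTRL1: enable MHL discovery before waiting for the RGND
+	 * interrupt */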
+	mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+	msleep(50);
+	if (mhl_msm_state->cur_state == POWER_STATE_D3) {
+		/* give MHL driver chance to handle RGND interrupt */
+		INIT_COMPLETION(mhl_msm_state->rgnd_done);
+		timeout = wait_for_completion_interruptible_timeout
+			(&mhl_msm_state->rgnd_done, HZ/2);
+		if (!timeout) {
+			/* Most likely nothing is plugged in, a USB host is
+			 * connected, or we are already in USB mode */
+			pr_debug("Timed out, returning from discovery mode\n");
+			*result = MHL_DISCOVERY_RESULT_USB;
+			return 0;
+		}
+		*result = mhl_msm_state->mhl_mode ?
+			MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+	} else
+		/* not in D3. already in MHL mode */
+		*result = MHL_DISCOVERY_RESULT_MHL;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhl_device_discovery);
+
+int mhl_register_callback(const char *name, void (*callback)(int online))
+{
+	pr_debug("%s\n", __func__);
+	if (!callback)
+		return -EINVAL;
+	if (!notify_usb_online)
+		notify_usb_online = callback;
+	return 0;
+}
+EXPORT_SYMBOL(mhl_register_callback);
+
+int mhl_unregister_callback(const char *name)
+{
+	pr_debug("%s\n", __func__);
+	if (notify_usb_online)
+		notify_usb_online = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(mhl_unregister_callback);
+
+
 static void cbus_reset(void)
 {
 	uint8_t i;
@@ -240,7 +294,7 @@
 	/*
 	 * REG_INTR1 and REG_INTR4
 	 */
-	mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6 | BIT5);
+	mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6);
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0022,
 		BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
 	/* REG5 */
@@ -340,7 +394,7 @@
 /*
  * Configure the initial reg settings
  */
-static void mhl_init_reg_settings(void)
+static void mhl_init_reg_settings(bool mhl_disc_en)
 {
 
 	/*
@@ -419,15 +473,19 @@
 	/* Pull-up resistance off for IDLE state */
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0013, 0x8C);
 	/* Enable CBUS Discovery */
-	mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+	if (mhl_disc_en)
+		/* Enable MHL Discovery */
+		mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+	else
+		/* Disable MHL Discovery */
+		mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x26);
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0016, 0x20);
 	/* MHL CBUS Discovery - immediate comm.  */
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0012, 0x86);
 	/* Do not force HPD to 0 during wake-up from D3 */
-	if (mhl_msm_state->cur_state != POWER_STATE_D3) {
-		mhl_i2c_reg_modify(TX_PAGE_3, 0x0020,
-			       BIT5 | BIT4, BIT4);
-	}
+	if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL)
+		mhl_drive_hpd(HPD_DOWN);
+
 	/* Enable Auto Soft RESET */
 	mhl_i2c_reg_write(TX_PAGE_3, 0x0000, 0x084);
 	/* HDMI Transcode mode enable */
@@ -452,7 +510,10 @@
 	/* MHL spec requires a 100 ms wait here.  */
 	msleep(100);
 
-	mhl_init_reg_settings();
+	/*
+	 * Initialize the chip registers with MHL discovery enabled
+	 */
+	mhl_init_reg_settings(true);
 
 	/*
 	 * Power down the chip to the
@@ -563,6 +624,7 @@
 
 	/* MHL SII 8334 chip specific init */
 	mhl_chip_init();
+	init_completion(&mhl_msm_state->rgnd_done);
 	return 0;
 
 init_exit:
@@ -583,10 +645,9 @@
 	case POWER_STATE_D0_NO_MHL:
 		break;
 	case POWER_STATE_D0_MHL:
-		mhl_init_reg_settings();
-
+		mhl_init_reg_settings(true);
 		/* REG_DISC_CTRL1 */
-		mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1, 0);
+		mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1 | BIT0, BIT0);
 
 		/*
 		 * TPI_DEVICE_POWER_STATE_CTRL_REG
@@ -597,16 +658,15 @@
 	case POWER_STATE_D3:
 		if (mhl_msm_state->cur_state != POWER_STATE_D3) {
 			/* Force HPD to 0 when not in MHL mode.  */
-			mhl_i2c_reg_modify(TX_PAGE_3, 0x0020,
-				BIT5 | BIT4, BIT4);
-
+			mhl_drive_hpd(HPD_DOWN);
 			/*
 			 * Change TMDS termination to high impedance
 			 * on disconnection.
 			 */
 			mhl_i2c_reg_write(TX_PAGE_3, 0x0030, 0xD0);
-			mhl_i2c_reg_modify(TX_PAGE_L1, 0x003D,
-				BIT1 | BIT0, BIT0);
+			msleep(50);
+			mhl_i2c_reg_modify(TX_PAGE_3, 0x0010,
+				BIT1 | BIT0, BIT1);
 			spin_lock_irqsave(&mhl_state_lock, flags);
 			mhl_msm_state->cur_state = POWER_STATE_D3;
 			spin_unlock_irqrestore(&mhl_state_lock, flags);
@@ -619,6 +679,11 @@
 
 static void mhl_drive_hpd(uint8_t to_state)
 {
+	if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) {
+		pr_err("MHL: invalid state to ctrl HPD\n");
+		return;
+	}
+
 	pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
 	if (to_state == HPD_UP) {
 		/*
@@ -644,6 +709,7 @@
 		 * Disable TMDS Output on REG_TMDS_CCTRL
 		 * Enable/Disable TMDS output (MHL TMDS output only)
 		 */
+		mhl_i2c_reg_modify(TX_PAGE_3, 0x20, BIT4 | BIT5, BIT4);
 		mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, 0x00);
 	}
 	return;
@@ -682,20 +748,11 @@
 
 static void mhl_msm_disconnection(void)
 {
-	uint8_t reg;
-	/* Clear interrupts - REG INTR4 */
-	reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
-	mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
 	/*
 	 * MHL TX CTL1
 	 * Disabling Tx termination
 	 */
 	mhl_i2c_reg_write(TX_PAGE_3, 0x30, 0xD0);
-	/*
-	 * MSC REQUESTOR ABORT REASON
-	 * Clear CBUS_HPD status
-	 */
-	mhl_i2c_reg_modify(TX_PAGE_CBUS, 0x000D, BIT6, 0x00);
 	/* Change HPD line to drive it low */
 	mhl_drive_hpd(HPD_DOWN);
 	/* switch power state to D3 */
@@ -704,11 +761,11 @@
 }
 
 /*
- * If hardware detected a change in impedence and raised an INTR
- * We check the range of this impedence to infer if the connected
+ * If the hardware detected a change in impedance and raised an INTR,
+ * we check the range of this impedance to infer whether the connected
  * device is MHL or USB and take appropriate actions.
  */
-static void mhl_msm_read_rgnd_int(void)
+static int mhl_msm_read_rgnd_int(void)
 {
 	uint8_t rgnd_imp;
 
@@ -720,20 +777,27 @@
 	 * 10  - 1 kOHM ***(MHL)**** It's range 800 - 1200 OHM from MHL spec
 	 * 11  - short (USB)
 	 */
-	rgnd_imp = mhl_i2c_reg_read(TX_PAGE_3, 0x001C);
+	rgnd_imp = (mhl_i2c_reg_read(TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
 	pr_debug("Imp Range read = %02X\n", (int)rgnd_imp);
 
-
 	if (0x02 == rgnd_imp) {
 		pr_debug("MHL: MHL DEVICE!!!\n");
+		mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
 		/*
 		 * Handling the MHL event in driver
 		 */
-		mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
+		mhl_msm_state->mhl_mode = TRUE;
+		if (notify_usb_online)
+			notify_usb_online(1);
 	} else {
 		pr_debug("MHL: NON-MHL DEVICE!!!\n");
+		mhl_msm_state->mhl_mode = FALSE;
 		mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT3, BIT3);
+		switch_mode(POWER_STATE_D3);
 	}
+	complete(&mhl_msm_state->rgnd_done);
+	return mhl_msm_state->mhl_mode ?
+		MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
 }
 
 static void force_usb_switch_open(void)
@@ -756,7 +820,7 @@
 
 static void int_4_isr(void)
 {
-	uint8_t status;
+	uint8_t status, reg;
 
 	/* INTR_STATUS4 */
 	status = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
@@ -767,7 +831,7 @@
 	 * do nothing.
 	 */
 	if ((0x00 == status) && (mhl_msm_state->cur_state == POWER_STATE_D3)) {
-		mhl_chip_init();
+		pr_debug("MHL: spurious interrupt\n");
 		return;
 	}
 	if (0xFF != status) {
@@ -816,8 +880,13 @@
 
 		if (status & BIT5) {
 			mhl_connect_api(false);
+			/* Clear interrupts - REG INTR4 */
+			reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
+			mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
 			mhl_msm_disconnection();
-			pr_debug("MHL Disconn Drv: INT4 Status = %02X\n",
+			if (notify_usb_online)
+				notify_usb_online(0);
+			pr_debug("MHL Disconnect Drv: INT4 Status = %02X\n",
 				(int)status);
 		}
 
@@ -971,6 +1040,122 @@
 	return;
 }
 
+static void clear_all_intrs(void)
+{
+	uint8_t regval = 0x00;
+	/*
+	* intr status debug
+	*/
+	pr_debug("********* EXITING ISR MASK CHECK ?? *************\n");
+	pr_debug("Drv: INT1 MASK = %02X\n",
+		(int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0071));
+	pr_debug("Drv: INT3 MASK = %02X\n",
+		(int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0077));
+	pr_debug("Drv: INT4 MASK = %02X\n",
+		(int) mhl_i2c_reg_read(TX_PAGE_3, 0x0021));
+	pr_debug("Drv: INT5 MASK = %02X\n",
+		(int) mhl_i2c_reg_read(TX_PAGE_3, 0x0023));
+	pr_debug("Drv: CBUS1 MASK = %02X\n",
+		(int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0009));
+	pr_debug("Drv: CBUS2 MASK = %02X\n",
+		(int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001F));
+	pr_debug("********* END OF ISR MASK CHECK *************\n");
+
+	pr_debug("********* EXITING IN ISR ?? *************\n");
+	regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0071);
+	pr_debug("Drv: INT1 Status = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_L0, 0x0071, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_L0, 0x0072);
+	pr_debug("Drv: INT2 Status = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_L0, 0x0072, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_L0, 0x0073);
+	pr_debug("Drv: INT3 Status = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_L0, 0x0073, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
+	pr_debug("Drv: INT4 Status = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_3, 0x0021, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_3, 0x0023);
+	pr_debug("Drv: INT5 Status = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_3, 0x0023, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0008);
+	pr_debug("Drv: cbusInt Status = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0008, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001E);
+	pr_debug("Drv: CBUS INTR_2: %d\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x001E, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A0);
+	pr_debug("Drv: A0 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A0, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A1);
+	pr_debug("Drv: A1 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A1, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A2);
+	pr_debug("Drv: A2 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A2, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A3);
+	pr_debug("Drv: A3 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A3, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B0);
+	pr_debug("Drv: B0 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B0, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B1);
+	pr_debug("Drv: B1 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B1, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B2);
+	pr_debug("Drv: B2 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B2, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B3);
+	pr_debug("Drv: B3 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B3, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E0);
+	pr_debug("Drv: E0 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E0, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E1);
+	pr_debug("Drv: E1 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E1, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E2);
+	pr_debug("Drv: E2 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E2, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E3);
+	pr_debug("Drv: E3 STATUS Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E3, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F0);
+	pr_debug("Drv: F0 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F0, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F1);
+	pr_debug("Drv: F1 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F1, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F2);
+	pr_debug("Drv: F2 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F2, regval);
+
+	regval =  mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F3);
+	pr_debug("Drv: F3 INT Set = %02X\n", (int)regval);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F3, regval);
+	pr_debug("********* END OF EXITING IN ISR *************\n");
+}
+
 static irqreturn_t mhl_tx_isr(int irq, void *dev_id)
 {
 	/*
@@ -997,6 +1182,7 @@
 		mhl_cbus_isr();
 		int_1_isr();
 	}
+	clear_all_intrs();
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.c b/drivers/video/msm/mhl/mhl_i2c_utils.c
index aab6e02..ee069bb 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.c
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.c
@@ -11,9 +11,9 @@
  *
  */
 #include <linux/i2c.h>
+#include <linux/mhl_8334.h>
 
 #include "mhl_i2c_utils.h"
-#include "mhl_8334.h"
 
 uint8_t slave_addrs[MAX_PAGES] = {
 	DEV_PAGE_TPI_0    ,
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.h b/drivers/video/msm/mhl/mhl_i2c_utils.h
index 76498d4..5a2d199 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.h
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.h
@@ -16,8 +16,7 @@
 
 #include <linux/i2c.h>
 #include <linux/types.h>
-
-#include "mhl_defs.h"
+#include <linux/mhl_defs.h>
 
 /*
  * I2C command to the adapter to append
diff --git a/drivers/video/msm/mhl/mhl_8334.h b/include/linux/mhl_8334.h
similarity index 66%
rename from drivers/video/msm/mhl/mhl_8334.h
rename to include/linux/mhl_8334.h
index eba544a..1b19103 100644
--- a/drivers/video/msm/mhl/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -17,9 +17,8 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <mach/board.h>
-
-#include "mhl_devcap.h"
-#include "mhl_defs.h"
+#include <linux/mhl_devcap.h>
+#include <linux/mhl_defs.h>
 
 #define MHL_DEVICE_NAME "sii8334"
 #define MHL_DRIVER_NAME "sii8334"
@@ -27,12 +26,48 @@
 #define HPD_UP               1
 #define HPD_DOWN             0
 
+enum discovery_result_enum {
+	MHL_DISCOVERY_RESULT_USB = 0,
+	MHL_DISCOVERY_RESULT_MHL,
+};
+
+/* USB driver interface  */
+
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+ /*  mhl_device_discovery */
+extern int mhl_device_discovery(const char *name, int *result);
+
+/* Register/unregister the MHL cable plug callback. */
+extern int mhl_register_callback
+	(const char *name, void (*callback)(int online));
+extern int mhl_unregister_callback(const char *name);
+#else
+static inline int mhl_device_discovery(const char *name, int *result)
+{
+	return -ENODEV;
+}
+
+static inline int
+	mhl_register_callback(const char *name, void (*callback)(int online))
+{
+	return -ENODEV;
+}
+
+static inline int mhl_unregister_callback(const char *name)
+{
+	return -ENODEV;
+}
+#endif
+
 struct mhl_msm_state_t {
 	struct i2c_client *i2c_client;
 	struct i2c_driver *i2c_driver;
 	uint8_t      cur_state;
 	uint8_t chip_rev_id;
 	struct msm_mhl_platform_data *mhl_data;
+	/* Device Discovery stuff */
+	int mhl_mode;
+	struct completion rgnd_done;
 };
 
 enum {
diff --git a/drivers/video/msm/mhl/mhl_defs.h b/include/linux/mhl_defs.h
similarity index 100%
rename from drivers/video/msm/mhl/mhl_defs.h
rename to include/linux/mhl_defs.h
diff --git a/drivers/video/msm/mhl/mhl_devcap.h b/include/linux/mhl_devcap.h
similarity index 100%
rename from drivers/video/msm/mhl/mhl_devcap.h
rename to include/linux/mhl_devcap.h
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index ed136ad..e3d59cd 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -119,6 +119,11 @@
 extern void early_init_devtree(void *);
 #else /* CONFIG_OF_FLATTREE */
 static inline void unflatten_device_tree(void) {}
+static inline void *of_get_flat_dt_prop(unsigned long node, const char *name,
+				 unsigned long *size) { return NULL; }
+
+static inline int of_flat_dt_is_compatible(unsigned long node,
+				const char *name) { return 0; }
 #endif /* CONFIG_OF_FLATTREE */
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index f5e1ffa..3e2f39b 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1751,6 +1751,31 @@
 #define V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF (V4L2_CID_MPEG_MSM_VIDC_BASE+18)
 #define V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+19)
 
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE (V4L2_CID_MPEG_MSM_VIDC_BASE+20)
+enum v4l2_mpeg_vidc_video_h263_profile {
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE = 0,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING	= 1,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE = 2,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2 = 3,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3 = 4,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION = 5,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET = 6,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE = 7,
+	V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY = 8,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE+21)
+enum v4l2_mpeg_vidc_video_h263_level {
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0 = 0,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0 = 1,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0 = 2,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0 = 3,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_5 = 4,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0 = 5,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0 = 6,
+	V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0 = 7,
+};
+
 /*  Camera class control IDs */
 #define V4L2_CID_CAMERA_CLASS_BASE 	(V4L2_CTRL_CLASS_CAMERA | 0x900)
 #define V4L2_CID_CAMERA_CLASS 		(V4L2_CTRL_CLASS_CAMERA | 1)
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index e1d69d5..f7d1e6b 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -126,7 +126,6 @@
 struct vp_work_t {
 	struct work_struct work;
 	struct vcap_client_data *cd;
-	uint32_t irq;
 };
 
 struct vcap_dev {
@@ -221,13 +220,6 @@
 extern struct vcap_hacked_vals hacked_buf[];
 
 #endif
-int free_ion_handle(struct vcap_dev *dev, struct vb2_queue *q,
-					 struct v4l2_buffer *b);
-
-int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
-				  struct v4l2_buffer *b);
-
 int vcvp_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
 int vcvp_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b);
-
 #endif
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 7e8e282..c0c679d 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -65,6 +65,10 @@
 static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
 			INT_RX_VOL_MAX_STEPS);
 
+static int msm_route_multimedia5_vol_control;
+static const DECLARE_TLV_DB_LINEAR(multimedia5_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS);
+
 static int msm_route_compressed_vol_control;
 static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
 			INT_RX_VOL_MAX_STEPS);
@@ -798,6 +802,25 @@
 	return 0;
 }
 
+static int msm_routing_get_multimedia5_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+
+	ucontrol->value.integer.value[0] = msm_route_multimedia5_vol_control;
+	return 0;
+}
+
+static int msm_routing_set_multimedia5_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+
+	if (!multi_ch_pcm_set_volume(ucontrol->value.integer.value[0]))
+		msm_route_multimedia5_vol_control =
+			ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
 static int msm_routing_get_compressed_vol_mixer(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -1737,6 +1760,12 @@
 	msm_routing_set_multimedia2_vol_mixer, multimedia2_rx_vol_gain),
 };
 
+static const struct snd_kcontrol_new multimedia5_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("HIFI3 RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_routing_get_multimedia5_vol_mixer,
+	msm_routing_set_multimedia5_vol_mixer, multimedia5_rx_vol_gain),
+};
+
 static const struct snd_kcontrol_new compressed_vol_mixer_controls[] = {
 	SOC_SINGLE_EXT_TLV("COMPRESSED RX Volume", SND_SOC_NOPM, 0,
 	INT_RX_VOL_GAIN, 0, msm_routing_get_compressed_vol_mixer,
@@ -2645,6 +2674,10 @@
 			ARRAY_SIZE(multimedia2_vol_mixer_controls));
 
 	snd_soc_add_platform_controls(platform,
+				multimedia5_vol_mixer_controls,
+			ARRAY_SIZE(multimedia5_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
 				compressed_vol_mixer_controls,
 			ARRAY_SIZE(compressed_vol_mixer_controls));