Merge "ASoc: msm: Add support for VoLTE"
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
index 23d78b5..0ba6ea2 100644
--- a/Documentation/ABI/testing/sysfs-class-devfreq
+++ b/Documentation/ABI/testing/sysfs-class-devfreq
@@ -11,7 +11,7 @@
 Date:		September 2011
 Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
 Description:
-		The /sys/class/devfreq/.../governor shows the name of the
+		The /sys/class/devfreq/.../governor shows or sets the name of the
 		governor used by the corresponding devfreq object.
 
 What:		/sys/class/devfreq/.../cur_freq
@@ -19,15 +19,16 @@
 Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
 Description:
 		The /sys/class/devfreq/.../cur_freq shows the current
-		frequency of the corresponding devfreq object.
+		frequency of the corresponding devfreq object. Same as
+		target_freq when get_cur_freq() is not implemented by the
+		devfreq driver.
 
-What:		/sys/class/devfreq/.../central_polling
-Date:		September 2011
-Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
+What:		/sys/class/devfreq/.../target_freq
+Date:		September 2012
+Contact:	Rajagopal Venkat <rajagopal.venkat@linaro.org>
 Description:
-		The /sys/class/devfreq/.../central_polling shows whether
-		the devfreq ojbect is using devfreq-provided central
-		polling mechanism or not.
+		The /sys/class/devfreq/.../target_freq shows the next frequency
+		predicted by the governor for the corresponding devfreq object.
 
 What:		/sys/class/devfreq/.../polling_interval
 Date:		September 2011
@@ -43,6 +44,17 @@
 		(/sys/class/devfreq/.../central_polling is 0), this value
 		may be useless.
 
+What:		/sys/class/devfreq/.../trans_stat
+Date:		October 2012
+Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+		This ABI shows the statistics of devfreq behavior on a
+		specific device. It shows the time spent in each state and
+		the number of transitions between states.
+		In order to activate this ABI, the devfreq target device
+		driver should provide the list of available frequencies
+		with its profile.
+
 What:		/sys/class/devfreq/.../userspace/set_freq
 Date:		September 2011
 Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -50,3 +62,19 @@
 		The /sys/class/devfreq/.../userspace/set_freq shows and
 		sets the requested frequency for the devfreq object if
 		userspace governor is in effect.
+
+What:		/sys/class/devfreq/.../available_frequencies
+Date:		October 2012
+Contact:	Nishanth Menon <nm@ti.com>
+Description:
+		The /sys/class/devfreq/.../available_frequencies shows
+		the available frequencies of the corresponding devfreq object.
+		This is a snapshot of the available frequencies and is not
+		limited by the min/max frequency restrictions.
+
+What:		/sys/class/devfreq/.../available_governors
+Date:		October 2012
+Contact:	Nishanth Menon <nm@ti.com>
+Description:
+		The /sys/class/devfreq/.../available_governors shows
+		the currently available governors in the system.
diff --git a/Documentation/devicetree/bindings/arm/msm/smp2p.txt b/Documentation/devicetree/bindings/arm/msm/smp2p.txt
index 7a5f506..a7af9e7 100644
--- a/Documentation/devicetree/bindings/arm/msm/smp2p.txt
+++ b/Documentation/devicetree/bindings/arm/msm/smp2p.txt
@@ -2,9 +2,7 @@
 
 Required properties:
 -compatible : should be "qcom,smp2p"
--reg : the location and offset of the irq register base memory
--reg-names : "irq-reg-base", "irq-reg-offset" - string to identify the irq
-             register region and offset values
+-reg : the location of the irq register base memory
 -qcom,remote-pid : the SMP2P remote processor ID (see smp2p_private_api.h)
 -qcom,irq-bitmask : the sending irq bitmask
 -interrupts : the receiving interrupt line
@@ -13,8 +11,7 @@
 
 	qcom,smp2p-modem {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <1>;
 		qcom,irq-bitmask = <0x4000>;
 		interrupts = <0 27 1>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
new file mode 100644
index 0000000..95f0fa4
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
@@ -0,0 +1,37 @@
+Qualcomm mdss-qpic-panel
+
+mdss-qpic-panel is a panel device which can be driven by QPIC.
+
+Required properties:
+- compatible:				Must be "qcom,mdss-qpic-panel"
+- qcom,mdss-pan-res:		An array of two values that specifies the panel
+							resolution (width and height in pixels).
+- qcom,mdss-pan-bpp:		Specifies the panel bits per pixel.
+- qcom,refresh_rate:		Panel refresh rate in Hz.
+- vdd-supply:				Phandle for vdd regulator device node.
+- avdd-supply:				Phandle for avdd regulator device node.
+- qcom,cs-gpio:				Phandle for cs gpio device node.
+- qcom,te-gpio:				Phandle for te gpio device node.
+- qcom,rst-gpio:			Phandle for rst gpio device node.
+- qcom,ad8-gpio:			Phandle for ad8 gpio device node.
+
+Optional properties:
+- label:					A string used as a descriptive name of the panel.
+
+
+Example:
+/ {
+	qcom,mdss_lcdc_ili9341_qvga {
+		compatible = "qcom,mdss-qpic-panel";
+		label = "ili qvga lcdc panel";
+		vdd-supply = <&pm8019_l11>;
+		avdd-supply = <&pm8019_l14>;
+		qcom,cs-gpio = <&msmgpio 21 0>;
+		qcom,te-gpio = <&msmgpio 22 0>;
+		qcom,rst-gpio = <&msmgpio 23 0>;
+		qcom,ad8-gpio = <&msmgpio 20 0>;
+		qcom,mdss-pan-res = <240 320>;
+		qcom,mdss-pan-bpp = <18>;
+		qcom,refresh_rate = <60>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic.txt b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
new file mode 100644
index 0000000..0fa3a32
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
@@ -0,0 +1,18 @@
+Qualcomm mdss-qpic
+
+mdss-qpic is a QPIC controller device which supports DMA transmission to MIPI
+and LCDC panels.
+
+Required properties:
+- compatible:			must be "qcom,mdss_qpic"
+- reg:					offset and length of the register set for the device.
+- reg-names:			names to refer to register sets related to this device.
+- interrupts:			IRQ line
+
+Example:
+	qcom,msm_qpic@f9ac0000 {
+		compatible = "qcom,mdss_qpic";
+		reg = <0xf9ac0000 0x24000>;
+		reg-names = "qpic_base";
+		interrupts = <0 251 0>;
+	};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
index 2764657..70f8b55 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
@@ -10,7 +10,7 @@
 - reg:		      Pairs of physical base addresses and region sizes of
 		      memory mapped registers.
 - reg-names:	      Names of the bases for the above registers. "qdsp6_base"
-		      and "halt_base" are expected.
+		      "halt_base", and "restart_reg" are expected.
 - interrupts:         The lpass watchdog interrupt
 - vdd_cx-supply:      Reference to the regulator that supplies the vdd_cx domain.
 - qcom,firmware-name: Base name of the firmware image. Ex. "lpass"
@@ -23,8 +23,9 @@
 	qcom,lpass@fe200000 {
 	        compatible = "qcom,pil-q6v5-lpass";
 	        reg = <0xfe200000 0x00100>,
-	              <0xfd485100 0x00010>;
-		reg-names = "qdsp6_base", "halt_base";
+	              <0xfd485100 0x00010>,
+	              <0xfc4016c0 0x00004>;
+		reg-names = "qdsp6_base", "halt_base", "restart_reg";
 		interrupts = <0 194 1>;
 		vdd_cx-supply = <&pm8841_s2>;
 	        qcom,firmware-name = "lpass";
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
new file mode 100644
index 0000000..74499e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/opp.txt
@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuple items, where each item consists
+  of a frequency and voltage pair in the form <freq-kHz vol-uV>.
+	freq: clock frequency in kHz
+	vol: voltage in microvolt
+
+Examples:
+
+cpu@0 {
+	compatible = "arm,cortex-a9";
+	reg = <0>;
+	next-level-cache = <&L2>;
+	operating-points = <
+		/* kHz    uV */
+		792000  1100000
+		396000  950000
+		198000  850000
+	>;
+};
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index c488ab1..ec42cfc 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -95,5 +95,225 @@
 		reg = <0x1>;
 		#address-cells = <1>;
 		#size-cells = <1>;
+
+		regulator@1400 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_s1";
+			spmi-dev-container;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x1400 0x300>;
+			status = "disabled";
+
+			qcom,ctl@1400 {
+				reg = <0x1400 0x100>;
+			};
+			qcom,ps@1500 {
+				reg = <0x1500 0x100>;
+			};
+			qcom,freq@1600 {
+				reg = <0x1600 0x100>;
+			};
+		};
+
+		regulator@1700 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_s2";
+			spmi-dev-container;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x1700 0x300>;
+			status = "disabled";
+
+			qcom,ctl@1700 {
+				reg = <0x1700 0x100>;
+			};
+			qcom,ps@1800 {
+				reg = <0x1800 0x100>;
+			};
+			qcom,freq@1900 {
+				reg = <0x1900 0x100>;
+			};
+		};
+
+		regulator@1a00 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_s3";
+			spmi-dev-container;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x1a00 0x300>;
+			status = "disabled";
+
+			qcom,ctl@1a00 {
+				reg = <0x1a00 0x100>;
+			};
+			qcom,ps@1b00 {
+				reg = <0x1b00 0x100>;
+			};
+			qcom,freq@1c00 {
+				reg = <0x1c00 0x100>;
+			};
+		};
+
+		regulator@1d00 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_s4";
+			spmi-dev-container;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x1d00 0x300>;
+			status = "disabled";
+
+			qcom,ctl@1d00 {
+				reg = <0x1d00 0x100>;
+			};
+			qcom,ps@1e00 {
+				reg = <0x1e00 0x100>;
+			};
+			qcom,freq@1f00 {
+				reg = <0x1f00 0x100>;
+			};
+		};
+
+		regulator@4000 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l1";
+			reg = <0x4000 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4100 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l2";
+			reg = <0x4100 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4200 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l3";
+			reg = <0x4200 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4300 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l4";
+			reg = <0x4300 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4400 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l5";
+			reg = <0x4400 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4500 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l6";
+			reg = <0x4500 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4600 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l7";
+			reg = <0x4600 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4700 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l8";
+			reg = <0x4700 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4800 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l9";
+			reg = <0x4800 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4900 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l10";
+			reg = <0x4900 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4b00 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l12";
+			reg = <0x4b00 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4d00 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l14";
+			reg = <0x4d00 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4e00 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l15";
+			reg = <0x4e00 0x100>;
+			status = "disabled";
+		};
+
+		regulator@4f00 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l16";
+			reg = <0x4f00 0x100>;
+			status = "disabled";
+		};
+
+		regulator@5000 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l17";
+			reg = <0x5000 0x100>;
+			status = "disabled";
+		};
+
+		regulator@5100 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l18";
+			reg = <0x5100 0x100>;
+			status = "disabled";
+		};
+
+		regulator@5200 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l19";
+			reg = <0x5200 0x100>;
+			status = "disabled";
+		};
+
+		regulator@5300 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l20";
+			reg = <0x5300 0x100>;
+			status = "disabled";
+		};
+
+		regulator@5400 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l21";
+			reg = <0x5400 0x100>;
+			status = "disabled";
+		};
+
+		regulator@5500 {
+			compatible = "qcom,qpnp-regulator";
+			regulator-name = "8110_l22";
+			reg = <0x5500 0x100>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index fa77c35..800cd8f 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -90,6 +90,7 @@
 
 	sound {
 		qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
+		qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
 	};
 };
 
@@ -193,6 +194,13 @@
 	};
 
 	gpio@c100 { /* GPIO 2 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <2>;
+		qcom,out-strength = <3>;
+		qcom,src-sel = <2>;
+		qcom,master-en = <1>;
 	};
 
 	gpio@c200 { /* GPIO 3 */
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index e747cb5..3d1fe19 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -90,6 +90,7 @@
 
 	sound {
 		qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
+		qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
 	};
 };
 
@@ -190,6 +191,13 @@
 	};
 
 	gpio@c100 { /* GPIO 2 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <2>;
+		qcom,out-strength = <3>;
+		qcom,src-sel = <2>;
+		qcom,master-en = <1>;
 	};
 
 	gpio@c200 { /* GPIO 3 */
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index acc4597..4fa37d6 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -90,6 +90,7 @@
 
 	sound {
 		qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
+		qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
 	};
 };
 
@@ -193,6 +194,13 @@
 	};
 
 	gpio@c100 { /* GPIO 2 */
+		qcom,mode = <1>;
+		qcom,output-type = <0>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <2>;
+		qcom,out-strength = <3>;
+		qcom,src-sel = <2>;
+		qcom,master-en = <1>;
 	};
 
 	gpio@c200 { /* GPIO 3 */
diff --git a/arch/arm/boot/dts/msm8226-smp2p.dtsi b/arch/arm/boot/dts/msm8226-smp2p.dtsi
index 60f63a8..1b08246 100644
--- a/arch/arm/boot/dts/msm8226-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8226-smp2p.dtsi
@@ -12,8 +12,7 @@
 / {
 	qcom,smp2p-modem {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <1>;
 		qcom,irq-bitmask = <0x4000>;
 		interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
 
 	qcom,smp2p-adsp {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <2>;
 		qcom,irq-bitmask = <0x400>;
 		interrupts = <0 158 1>;
@@ -30,8 +28,7 @@
 
 	qcom,smp2p-wcnss {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <4>;
 		qcom,irq-bitmask = <0x40000>;
 		interrupts = <0 143 1>;
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 0bc8efd..f975a08 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -231,6 +231,8 @@
 		qcom,audio-routing =
 			"RX_BIAS", "MCLK",
 			"LDO_H", "MCLK",
+			"SPK_OUT", "MCLK",
+			"SPK_OUT", "EXT_VDD_SPKR",
 			"AMIC1", "MIC BIAS1 Internal1",
 			"MIC BIAS1 Internal1", "Handset Mic",
 			"AMIC2", "MIC BIAS2 External",
@@ -380,6 +382,21 @@
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <240>;
 		};
+
+		qcom,msm-dai-q6-incall-record-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32771>;
+		};
+
+		qcom,msm-dai-q6-incall-record-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32772>;
+		};
+
+		qcom,msm-dai-q6-incall-music-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32773>;
+		};
 	};
 
 	qcom,msm-pcm-hostless {
@@ -594,8 +611,9 @@
 	qcom,lpass@fe200000 {
 		compatible = "qcom,pil-q6v5-lpass";
 		reg = <0xfe200000 0x00100>,
-		      <0xfd485100 0x00010>;
-		reg-names = "qdsp6_base", "halt_base";
+		      <0xfd485100 0x00010>,
+		      <0xfc4016c0 0x00004>;
+		reg-names = "qdsp6_base", "halt_base", "restart_reg";
 		vdd_cx-supply = <&pm8226_s1_corner>;
 		interrupts = <0 162 1>;
 
@@ -723,6 +741,12 @@
 		      <0xfc336000 0x1000>;
 		reg-names = "etm-base","debug-base";
 	};
+
+	qcom,ipc-spinlock@fd484000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0xfd484000 0x400>;
+		qcom,num-locks = <8>;
+	};
 };
 
 &gdsc_venus {
diff --git a/arch/arm/boot/dts/msm8610-pm.dtsi b/arch/arm/boot/dts/msm8610-pm.dtsi
index d6e143c..ff16b8d 100644
--- a/arch/arm/boot/dts/msm8610-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-pm.dtsi
@@ -306,10 +306,12 @@
 		qcom,gic-map = <47 172>, /* usb2_hsic_async_wakeup_irq */
 			<53 104>, /* mdss_irq */
 			<62 222>, /* ee0_krait_hlos_spmi_periph_irq */
+			<0xff 56>,  /* q6_wdog_expired_irq */
 			<0xff 57>,  /* mss_to_apps_irq(0) */
 			<0xff 58>,  /* mss_to_apps_irq(1) */
 			<0xff 59>,  /* mss_to_apps_irq(2) */
 			<0xff 60>,  /* mss_to_apps_irq(3) */
+			<0xff 61>,  /* mss_a2_bam_irq */
 			<0xff 173>, /* o_wcss_apss_smd_hi */
 			<0xff 174>, /* o_wcss_apss_smd_med */
 			<0xff 175>, /* o_wcss_apss_smd_low */
@@ -317,17 +319,17 @@
 			<0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
 			<0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
 			<0xff 179>, /* o_wcss_apss_asic_intr
-
+			<0xff 181>, /* o_wcss_apss_wdog_bite_and_reset_rdy */
+			<0xff 161>, /* lpass_irq_out_spare[4] */
+			<0xff 162>, /* lpass_irq_out_spare[5] */
+			<0xff 234>, /* lpass_irq_out_spare[6] */
+			<0xff 235>, /* lpass_irq_out_spare[7] */
 			<0xff 188>, /* lpass_irq_out_apcs(0) */
 			<0xff 189>, /* lpass_irq_out_apcs(1) */
 			<0xff 190>, /* lpass_irq_out_apcs(2) */
 			<0xff 191>, /* lpass_irq_out_apcs(3) */
 			<0xff 192>, /* lpass_irq_out_apcs(4) */
-			<0xff 193>, /* lpass_irq_out_apcs(5) */
 			<0xff 194>, /* lpass_irq_out_apcs(6) */
-			<0xff 195>, /* lpass_irq_out_apcs(7) */
-			<0xff 196>, /* lpass_irq_out_apcs(8) */
-			<0xff 197>, /* lpass_irq_out_apcs(9) */
 			<0xff 200>, /* rpm_ipc(4) */
 			<0xff 201>, /* rpm_ipc(5) */
 			<0xff 202>, /* rpm_ipc(6) */
@@ -336,47 +338,54 @@
 			<0xff 205>, /* rpm_ipc(25) */
 			<0xff 206>, /* rpm_ipc(26) */
 			<0xff 207>, /* rpm_ipc(27) */
+			<0xff 258>, /* rpm_ipc(28) */
+			<0xff 259>, /* rpm_ipc(29) */
+			<0xff 275>, /* rpm_ipc(30) */
+			<0xff 276>, /* rpm_ipc(31) */
+			<0xff 269>, /* rpm_wdog_expired_irq */
 			<0xff 240>; /* summary_irq_kpss */
 
 		qcom,gpio-parent = <&msmgpio>;
-		qcom,gpio-map = <3  102>,
-			<4  1 >,
+		qcom,gpio-map = <3  1>,
+			<4  4 >,
 			<5  5 >,
 			<6  9 >,
-			<7  18>,
-			<8  20>,
-			<9  24>,
+			<7  13>,
+			<8  17>,
+			<9  21>,
 			<10  27>,
-			<11  28>,
-			<12  34>,
-			<13  35>,
-			<14  37>,
-			<15  42>,
-			<16  44>,
-			<17  46>,
-			<18  50>,
-			<19  54>,
-			<20  59>,
-			<21  61>,
-			<22  62>,
-			<23  64>,
-			<24  65>,
-			<25  66>,
-			<26  67>,
-			<27  68>,
-			<28  71>,
-			<29  72>,
-			<30  73>,
-			<31  74>,
-			<32  75>,
-			<33  77>,
-			<34  79>,
-			<35  80>,
-			<36  82>,
-			<37  86>,
-			<38  92>,
-			<39  93>,
-			<40  95>;
+			<11  29>,
+			<12  31>,
+			<13  33>,
+			<14  35>,
+			<15  37>,
+			<16  38>,
+			<17  39>,
+			<18  41>,
+			<19  46>,
+			<20  48>,
+			<21  49>,
+			<22  50>,
+			<23  51>,
+			<24  52>,
+			<25  54>,
+			<26  62>,
+			<27  63>,
+			<28  64>,
+			<29  65>,
+			<30  66>,
+			<31  67>,
+			<32  68>,
+			<33  69>,
+			<34  71>,
+			<35  72>,
+			<36  106>,
+			<37  107>,
+			<38  108>,
+			<39  109>,
+			<40  110>,
+			<54  111>,
+			<55  113>;
 	};
 
 	qcom,pm-8x60@fe805664 {
diff --git a/arch/arm/boot/dts/msm8610-regulator.dtsi b/arch/arm/boot/dts/msm8610-regulator.dtsi
index 362d126..f11f04b 100644
--- a/arch/arm/boot/dts/msm8610-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8610-regulator.dtsi
@@ -12,216 +12,215 @@
 
  /* Stub Regulators */
 
- / {
-	pm8110_s1: regulator-s1 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_s1";
-		qcom,hpm-min-load = <100000>;
-		regulator-min-microvolt = <1150000>;
-		regulator-max-microvolt = <1150000>;
-	};
-
+/ {
 	pm8110_s1_corner: regulator-s1-corner {
 		compatible = "qcom,stub-regulator";
 		regulator-name = "8110_s1_corner";
+		qcom,hpm-min-load = <100000>;
 		regulator-min-microvolt = <1>;
 		regulator-max-microvolt = <7>;
 		qcom,consumer-supplies = "vdd_dig", "";
 	};
+};
 
-	pm8110_s2: regulator-s2 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_s2";
-		qcom,hpm-min-load = <100000>;
-		regulator-min-microvolt = <1050000>;
-		regulator-max-microvolt = <1050000>;
-	};
+/* QPNP controlled regulators: */
 
-	pm8110_s3: regulator-s3 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_s3";
-		qcom,hpm-min-load = <100000>;
-		regulator-min-microvolt = <1350000>;
-		regulator-max-microvolt = <1350000>;
-	};
+&spmi_bus {
 
-	pm8110_s4: regulator-s4 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_s4";
-		qcom,hpm-min-load = <100000>;
-		regulator-min-microvolt = <2150000>;
-		regulator-max-microvolt = <2150000>;
-	};
+	qcom,pm8110@1 {
 
-	pm8110_l1: regulator-l1 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l1";
-		parent-supply = <&pm8110_s3>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1225000>;
-		regulator-max-microvolt = <1225000>;
-	};
+		pm8110_s1: regulator@1400 {
+			status = "okay";
+			regulator-min-microvolt = <1150000>;
+			regulator-max-microvolt = <1150000>;
+			qcom,enable-time = <500>;
+			qcom,system-load = <100000>;
+			regulator-always-on;
+		};
 
-	pm8110_l2: regulator-l2 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l2";
-		parent-supply = <&pm8110_s3>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1200000>;
-		regulator-max-microvolt = <1200000>;
-	};
+		pm8110_s2: regulator@1700 {
+			status = "okay";
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1150000>;
+			qcom,enable-time = <500>;
+			qcom,system-load = <100000>;
+			regulator-always-on;
+		};
 
-	pm8110_l3: regulator-l3 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l3";
-		parent-supply = <&pm8110_s3>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1150000>;
-		regulator-max-microvolt = <1150000>;
-	};
+		pm8110_s3: regulator@1a00 {
+			status = "okay";
+			regulator-min-microvolt = <1350000>;
+			regulator-max-microvolt = <1350000>;
+			qcom,enable-time = <500>;
+			qcom,system-load = <100000>;
+			regulator-always-on;
+		};
 
-	pm8110_l4: regulator-l4 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l4";
-		parent-supply = <&pm8110_s3>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1200000>;
-		regulator-max-microvolt = <1200000>;
-	};
+		pm8110_s4: regulator@1d00 {
+			status = "okay";
+			regulator-min-microvolt = <2150000>;
+			regulator-max-microvolt = <2150000>;
+			qcom,enable-time = <500>;
+			qcom,system-load = <100000>;
+			regulator-always-on;
+		};
 
-	pm8110_l5: regulator-l5 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l5";
-		parent-supply = <&pm8110_s3>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1300000>;
-		regulator-max-microvolt = <1300000>;
-	};
+		pm8110_l1: regulator@4000 {
+			status = "okay";
+			parent-supply = <&pm8110_s3>;
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1225000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l6: regulator-l6 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l6";
-		parent-supply = <&pm8110_s4>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-	};
+		pm8110_l2: regulator@4100 {
+			status = "okay";
+			parent-supply = <&pm8110_s3>;
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,enable-time = <200>;
+			qcom,system-load = <10000>;
+			regulator-always-on;
+		};
 
-	pm8110_l7: regulator-l7 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l7";
-		parent-supply = <&pm8110_s4>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <2050000>;
-		regulator-max-microvolt = <2050000>;
-	};
+		pm8110_l3: regulator@4200 {
+			status = "okay";
+			parent-supply = <&pm8110_s3>;
+			regulator-min-microvolt = <1150000>;
+			regulator-max-microvolt = <1150000>;
+			qcom,enable-time = <200>;
+			qcom,system-load = <10000>;
+			regulator-always-on;
+		};
 
-	pm8110_l8: regulator-l8 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l8";
-		parent-supply = <&pm8110_s4>;
-		qcom,hpm-min-load = <5000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-	};
+		pm8110_l4: regulator@4300 {
+			status = "okay";
+			parent-supply = <&pm8110_s3>;
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l9: regulator-l9 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l9";
-		parent-supply = <&pm8110_s4>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <2050000>;
-		regulator-max-microvolt = <2050000>;
-	};
+		pm8110_l5: regulator@4400 {
+			status = "okay";
+			parent-supply = <&pm8110_s3>;
+			regulator-min-microvolt = <1300000>;
+			regulator-max-microvolt = <1300000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l10: regulator-l10 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l10";
-		parent-supply = <&pm8110_s4>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		qcom,consumer-supplies = "vdd_sr2_pll", "";
-	};
+		pm8110_l6: regulator@4500 {
+			status = "okay";
+			parent-supply = <&pm8110_s4>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,enable-time = <200>;
+			qcom,system-load = <10000>;
+			regulator-always-on;
+		};
 
-	pm8110_l12: regulator-l12 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l12";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-	};
+		pm8110_l7: regulator@4600 {
+			status = "okay";
+			parent-supply = <&pm8110_s4>;
+			regulator-min-microvolt = <2050000>;
+			regulator-max-microvolt = <2050000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l14: regulator-l14 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l14";
-		parent-supply = <&pm8110_s4>;
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-	};
+		pm8110_l8: regulator@4700 {
+			status = "okay";
+			parent-supply = <&pm8110_s4>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l15: regulator-l15 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l15";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-	};
+		pm8110_l9: regulator@4800 {
+			status = "okay";
+			parent-supply = <&pm8110_s4>;
+			regulator-min-microvolt = <2050000>;
+			regulator-max-microvolt = <2050000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l16: regulator-l16 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l16";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <3000000>;
-		regulator-max-microvolt = <3000000>;
-	};
+		pm8110_l10: regulator@4900 {
+			status = "okay";
+			parent-supply = <&pm8110_s4>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,enable-time = <200>;
+			qcom,consumer-supplies = "vdd_sr2_pll", "";
+		};
 
-	pm8110_l17: regulator-l17 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l17";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <2900000>;
-		regulator-max-microvolt = <2900000>;
-	};
+		pm8110_l12: regulator@4b00 {
+			status = "okay";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l18: regulator-l18 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l18";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <2950000>;
-	};
+		pm8110_l14: regulator@4d00 {
+			status = "okay";
+			parent-supply = <&pm8110_s4>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l19: regulator-l19 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l19";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <2850000>;
-		regulator-max-microvolt = <2850000>;
-	};
+		pm8110_l15: regulator@4e00 {
+			status = "okay";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l20: regulator-l20 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l20";
-		qcom,hpm-min-load = <5000>;
-		regulator-min-microvolt = <3075000>;
-		regulator-max-microvolt = <3075000>;
-	};
+		pm8110_l16: regulator@4f00 {
+			status = "okay";
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3000000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l21: regulator-l21 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l21";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <2950000>;
-	};
+		pm8110_l17: regulator@5000 {
+			status = "okay";
+			regulator-min-microvolt = <2900000>;
+			regulator-max-microvolt = <2900000>;
+			qcom,enable-time = <200>;
+		};
 
-	pm8110_l22: regulator-l22 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "8110_l22";
-		qcom,hpm-min-load = <10000>;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
+		pm8110_l18: regulator@5100 {
+			status = "okay";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2950000>;
+			qcom,enable-time = <200>;
+		};
+
+		pm8110_l19: regulator@5200 {
+			status = "okay";
+			regulator-min-microvolt = <2850000>;
+			regulator-max-microvolt = <2850000>;
+			qcom,enable-time = <200>;
+		};
+
+		pm8110_l20: regulator@5300 {
+			status = "okay";
+			regulator-min-microvolt = <3075000>;
+			regulator-max-microvolt = <3075000>;
+			qcom,enable-time = <200>;
+		};
+
+		pm8110_l21: regulator@5400 {
+			status = "okay";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2950000>;
+			qcom,enable-time = <200>;
+		};
+
+		pm8110_l22: regulator@5500 {
+			status = "okay";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,enable-time = <200>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index b78c3af..18f3df9 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -431,8 +431,9 @@
 	qcom,lpass@fe200000 {
 		compatible = "qcom,pil-q6v5-lpass";
 		reg = <0xfe200000 0x00100>,
-		      <0xfd485100 0x00010>;
-		reg-names = "qdsp6_base", "halt_base";
+		      <0xfd485100 0x00010>,
+		      <0xfc4016c0 0x00004>;
+		reg-names = "qdsp6_base", "halt_base", "restart_reg";
 		interrupts = <0 162 1>;
 		vdd_cx-supply = <&pm8110_s1_corner>;
 		qcom,firmware-name = "adsp";
@@ -487,8 +488,8 @@
 
 /include/ "msm8610-iommu-domains.dtsi"
 
-/include/ "msm8610-regulator.dtsi"
 /include/ "msm-pm8110.dtsi"
+/include/ "msm8610-regulator.dtsi"
 
 &pm8110_vadc {
 	chan@0 {
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi
index 24438f0..15a549c 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi
@@ -61,6 +61,44 @@
 		status = "ok";
 	};
 
+	qcom,camera@20 {
+		compatible = "qcom,imx135";
+		reg = <0x20>;
+		qcom,slave-id = <0x20 0x0 0x1210>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		qcom,sensor-name = "imx135";
+		cam_vdig-supply = <&pm8941_l3>;
+		cam_vana-supply = <&pm8941_l17>;
+		cam_vio-supply = <&pm8941_lvs3>;
+		cam_vaf-supply = <&pm8941_l23>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+				     "cam_vaf";
+		qcom,cam-vreg-type = <0 1 0 0>;
+		qcom,cam-vreg-min-voltage = <1225000 0 2850000 3000000>;
+		qcom,cam-vreg-max-voltage = <1225000 0 2850000 3000000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+		qcom,gpio-no-mux = <0>;
+		gpios = <&msmgpio 15 0>,
+			<&msmgpio 90 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-req-tbl-num = <0 1>;
+		qcom,gpio-req-tbl-flags = <1 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK", "CAM_RESET1";
+		qcom,gpio-set-tbl-num = <1 1>;
+		qcom,gpio-set-tbl-flags = <0 2>;
+		qcom,gpio-set-tbl-delay = <1000 30000>;
+		qcom,csi-lane-assign = <0x4320>;
+		qcom,csi-lane-mask = <0x1F>;
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		qcom,sensor-type = <0>;
+		qcom,cci-master = <0>;
+		status = "ok";
+	};
+
+
 	qcom,camera@6c {
 		compatible = "qcom,ov2720";
 		reg = <0x6c>;
diff --git a/arch/arm/boot/dts/msm8974-smp2p.dtsi b/arch/arm/boot/dts/msm8974-smp2p.dtsi
index 511f91f..964eecb 100644
--- a/arch/arm/boot/dts/msm8974-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8974-smp2p.dtsi
@@ -12,8 +12,7 @@
 / {
 	qcom,smp2p-modem {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <1>;
 		qcom,irq-bitmask = <0x4000>;
 		interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
 
 	qcom,smp2p-adsp {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <2>;
 		qcom,irq-bitmask = <0x400>;
 		interrupts = <0 158 1>;
@@ -30,8 +28,7 @@
 
 	qcom,smp2p-wcnss {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <4>;
 		qcom,irq-bitmask = <0x40000>;
 		interrupts = <0 143 1>;
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index aa3742f..00518a0 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -741,8 +741,9 @@
 	qcom,lpass@fe200000 {
 		compatible = "qcom,pil-q6v5-lpass";
 		reg = <0xfe200000 0x00100>,
-		      <0xfd485100 0x00010>;
-		reg-names = "qdsp6_base", "halt_base";
+		      <0xfd485100 0x00010>,
+		      <0xfc4016c0 0x00004>;
+		reg-names = "qdsp6_base", "halt_base", "restart_reg";
 		vdd_cx-supply = <&pm8841_s2_corner>;
 		interrupts = <0 162 1>;
 
diff --git a/arch/arm/boot/dts/msm9625-display.dtsi b/arch/arm/boot/dts/msm9625-display.dtsi
new file mode 100644
index 0000000..a160bae
--- /dev/null
+++ b/arch/arm/boot/dts/msm9625-display.dtsi
@@ -0,0 +1,20 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,msm_qpic@f9ac0000 {
+		compatible = "qcom,mdss_qpic";
+		reg = <0xf9ac0000 0x24000>;
+		reg-names = "qpic_base";
+		interrupts = <0 251 0>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm9625-smp2p.dtsi b/arch/arm/boot/dts/msm9625-smp2p.dtsi
index 425bf00..02c95e4 100644
--- a/arch/arm/boot/dts/msm9625-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm9625-smp2p.dtsi
@@ -12,8 +12,7 @@
 / {
 	qcom,smp2p-modem {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <1>;
 		qcom,irq-bitmask = <0x4000>;
 		interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
 
 	qcom,smp2p-adsp {
 		compatible = "qcom,smp2p";
-		reg = <0xfa006000 0x1000>, <0x8 0x0>;
-		reg-names = "irq-reg-base", "irq-reg-offset";
+		reg = <0xf9011008 0x4>;
 		qcom,remote-pid = <2>;
 		qcom,irq-bitmask = <0x400>;
 		interrupts = <0 158 1>;
diff --git a/arch/arm/boot/dts/msm9625-v2-cdp.dts b/arch/arm/boot/dts/msm9625-v2-cdp.dts
index 09a89ab..919c6d5 100644
--- a/arch/arm/boot/dts/msm9625-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-v2-cdp.dts
@@ -13,6 +13,8 @@
 /dts-v1/;
 
 /include/ "msm9625-v2.dtsi"
+/include/ "msm9625-display.dtsi"
+/include/ "qpic-panel-ili-qvga.dtsi"
 
 / {
 	model = "Qualcomm MSM 9625V2 CDP";
diff --git a/arch/arm/boot/dts/msmzinc-ion.dtsi b/arch/arm/boot/dts/msmzinc-ion.dtsi
new file mode 100644
index 0000000..4bf078a
--- /dev/null
+++ b/arch/arm/boot/dts/msmzinc-ion.dtsi
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ion-heap@30 { /* SYSTEM HEAP */
+			reg = <30>;
+		};
+
+		qcom,ion-heap@25 { /* IOMMU HEAP */
+			reg = <25>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msmzinc.dtsi b/arch/arm/boot/dts/msmzinc.dtsi
index d981909..642597d 100644
--- a/arch/arm/boot/dts/msmzinc.dtsi
+++ b/arch/arm/boot/dts/msmzinc.dtsi
@@ -11,6 +11,7 @@
  */
 
 /include/ "skeleton.dtsi"
+/include/ "msmzinc-ion.dtsi"
 
 / {
 	model = "Qualcomm MSM ZINC";
@@ -74,15 +75,12 @@
         qcom,msm-imem@fe805000 {
                 compatible = "qcom,msm-imem";
                 reg = <0xfe805000 0x1000>; /* Address and size of IMEM */
-        };
-
-	qcom,ion {
-		compatible = "qcom,msm-ion";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		qcom,ion-heap@30 { /* SYSTEM HEAP */
-			reg = <30>;
-		};
 	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,memory-reservation-type = "EBI1";
+		qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+	};
+
 };
diff --git a/arch/arm/boot/dts/qpic-panel-ili-qvga.dtsi b/arch/arm/boot/dts/qpic-panel-ili-qvga.dtsi
new file mode 100644
index 0000000..a0c906e
--- /dev/null
+++ b/arch/arm/boot/dts/qpic-panel-ili-qvga.dtsi
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,mdss_lcdc_ili9341_qvga {
+		compatible = "qcom,mdss-qpic-panel";
+		label = "ili qvga lcdc panel";
+		vdd-supply = <&pm8019_l11>;
+		avdd-supply = <&pm8019_l14>;
+		qcom,cs-gpio = <&msmgpio 21 0>;
+		qcom,te-gpio = <&msmgpio 22 0>;
+		qcom,rst-gpio = <&msmgpio 23 0>;
+		qcom,ad8-gpio = <&msmgpio 20 0>;
+		qcom,mdss-pan-res = <240 320>;
+		qcom,mdss-pan-bpp = <18>;
+		qcom,refresh_rate = <60>;
+	};
+};
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index cfdff6f..097e830 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -183,6 +183,17 @@
 CONFIG_SYNC=y
 CONFIG_SW_SYNC=y
 CONFIG_CMA=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCISMD=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_MD=y
@@ -233,6 +244,8 @@
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_STUB=y
 CONFIG_REGULATOR_QPNP=y
+CONFIG_RADIO_IRIS=y
+CONFIG_RADIO_IRIS_TRANSPORT=m
 CONFIG_ION=y
 CONFIG_ION_MSM=y
 CONFIG_MSM_KGSL=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 224df83..663e937 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -329,6 +329,7 @@
 CONFIG_MSM_CSIPHY=y
 CONFIG_MSM_CSID=y
 CONFIG_MSM_ISPIF=y
+CONFIG_IMX135=y
 CONFIG_S5K3L1YX=y
 CONFIG_MSMB_CAMERA=y
 CONFIG_MSMB_JPEG=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index e42aa77..2de81d4d1 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -336,6 +336,7 @@
 CONFIG_MSM_CSID=y
 CONFIG_MSM_ISPIF=y
 CONFIG_S5K3L1YX=y
+CONFIG_IMX135=y
 CONFIG_MSMB_CAMERA=y
 CONFIG_MSMB_JPEG=y
 CONFIG_MSM_VIDC_V4L2=y
diff --git a/arch/arm/configs/msmzinc_defconfig b/arch/arm/configs/msmzinc_defconfig
index b74b204..678b086 100644
--- a/arch/arm/configs/msmzinc_defconfig
+++ b/arch/arm/configs/msmzinc_defconfig
@@ -211,6 +211,7 @@
 CONFIG_GENLOCK_MISCDEVICE=y
 CONFIG_SYNC=y
 CONFIG_SW_SYNC=y
+CONFIG_CMA=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_HAPTIC_ISA1200=y
diff --git a/arch/arm/mach-msm/acpuclock-8226.c b/arch/arm/mach-msm/acpuclock-8226.c
index 8ba1b39..fcbd74d 100644
--- a/arch/arm/mach-msm/acpuclock-8226.c
+++ b/arch/arm/mach-msm/acpuclock-8226.c
@@ -98,13 +98,13 @@
 
 	drv_data.apcs_rcg_config = drv_data.apcs_rcg_cmd + 4;
 
-	drv_data.vdd_cpu = regulator_get(&pdev->dev, "a7_cpu");
+	drv_data.vdd_cpu = devm_regulator_get(&pdev->dev, "a7_cpu");
 	if (IS_ERR(drv_data.vdd_cpu)) {
 		dev_err(&pdev->dev, "regulator for %s get failed\n", "a7_cpu");
 		return PTR_ERR(drv_data.vdd_cpu);
 	}
 
-	drv_data.vdd_mem = regulator_get(&pdev->dev, "a7_mem");
+	drv_data.vdd_mem = devm_regulator_get(&pdev->dev, "a7_mem");
 	if (IS_ERR(drv_data.vdd_mem)) {
 		dev_err(&pdev->dev, "regulator for %s get failed\n", "a7_mem");
 		return PTR_ERR(drv_data.vdd_mem);
diff --git a/arch/arm/mach-msm/acpuclock-9625.c b/arch/arm/mach-msm/acpuclock-9625.c
index b439088..34952fb 100644
--- a/arch/arm/mach-msm/acpuclock-9625.c
+++ b/arch/arm/mach-msm/acpuclock-9625.c
@@ -105,13 +105,13 @@
 	if (!drv_data.apcs_cpu_pwr_ctl)
 		return -ENOMEM;
 
-	drv_data.vdd_cpu = regulator_get(&pdev->dev, "a5_cpu");
+	drv_data.vdd_cpu = devm_regulator_get(&pdev->dev, "a5_cpu");
 	if (IS_ERR(drv_data.vdd_cpu)) {
 		dev_err(&pdev->dev, "regulator for %s get failed\n", "a5_cpu");
 		return PTR_ERR(drv_data.vdd_cpu);
 	}
 
-	drv_data.vdd_mem = regulator_get(&pdev->dev, "a5_mem");
+	drv_data.vdd_mem = devm_regulator_get(&pdev->dev, "a5_mem");
 	if (IS_ERR(drv_data.vdd_mem)) {
 		dev_err(&pdev->dev, "regulator for %s get failed\n", "a5_mem");
 		return PTR_ERR(drv_data.vdd_mem);
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 9104f98..88bf919 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -40,7 +40,7 @@
 #define POLL_INTERVAL_US		1
 #define APCS_RCG_UPDATE_TIMEOUT_US	20
 
-static struct acpuclk_drv_data *acpuclk_init_data;
+static struct acpuclk_drv_data *priv;
 static uint32_t bus_perf_client;
 
 /* Update the bus bandwidth request. */
@@ -48,7 +48,7 @@
 {
 	int ret;
 
-	if (bw >= acpuclk_init_data->bus_scale->num_usecases) {
+	if (bw >= priv->bus_scale->num_usecases) {
 		pr_err("invalid bandwidth request (%d)\n", bw);
 		return;
 	}
@@ -67,15 +67,13 @@
 	int rc = 0;
 
 	/* Increase vdd_mem before vdd_cpu. vdd_mem should be >= vdd_cpu. */
-	rc = regulator_set_voltage(acpuclk_init_data->vdd_mem, vdd_mem,
-		acpuclk_init_data->vdd_max_mem);
+	rc = regulator_set_voltage(priv->vdd_mem, vdd_mem, priv->vdd_max_mem);
 	if (rc) {
 		pr_err("vdd_mem increase failed (%d)\n", rc);
 		return rc;
 	}
 
-	rc = regulator_set_voltage(acpuclk_init_data->vdd_cpu, vdd_cpu,
-		acpuclk_init_data->vdd_max_cpu);
+	rc = regulator_set_voltage(priv->vdd_cpu, vdd_cpu, priv->vdd_max_cpu);
 	if (rc)
 		pr_err("vdd_cpu increase failed (%d)\n", rc);
 
@@ -88,16 +86,14 @@
 	int ret;
 
 	/* Update CPU voltage. */
-	ret = regulator_set_voltage(acpuclk_init_data->vdd_cpu, vdd_cpu,
-		acpuclk_init_data->vdd_max_cpu);
+	ret = regulator_set_voltage(priv->vdd_cpu, vdd_cpu, priv->vdd_max_cpu);
 	if (ret) {
 		pr_err("vdd_cpu decrease failed (%d)\n", ret);
 		return;
 	}
 
 	/* Decrease vdd_mem after vdd_cpu. vdd_mem should be >= vdd_cpu. */
-	ret = regulator_set_voltage(acpuclk_init_data->vdd_mem, vdd_mem,
-		acpuclk_init_data->vdd_max_mem);
+	ret = regulator_set_voltage(priv->vdd_mem, vdd_mem, priv->vdd_max_mem);
 	if (ret)
 		pr_err("vdd_mem decrease failed (%d)\n", ret);
 }
@@ -142,14 +138,14 @@
 {
 	int rc = 0;
 	unsigned int tgt_freq_hz = tgt_s->khz * 1000;
-	struct clkctl_acpu_speed *strt_s = acpuclk_init_data->current_speed;
-	struct clkctl_acpu_speed *cxo_s = &acpuclk_init_data->freq_tbl[0];
-	struct clk *strt = acpuclk_init_data->src_clocks[strt_s->src].clk;
-	struct clk *tgt = acpuclk_init_data->src_clocks[tgt_s->src].clk;
+	struct clkctl_acpu_speed *strt_s = priv->current_speed;
+	struct clkctl_acpu_speed *cxo_s = &priv->freq_tbl[0];
+	struct clk *strt = priv->src_clocks[strt_s->src].clk;
+	struct clk *tgt = priv->src_clocks[tgt_s->src].clk;
 
 	if (strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL) {
 		/* Switch to another always on src */
-		select_clk_source_div(acpuclk_init_data, cxo_s);
+		select_clk_source_div(priv, cxo_s);
 
 		/* Re-program acpu pll */
 		if (atomic)
@@ -167,7 +163,7 @@
 			BUG_ON(clk_prepare_enable(tgt));
 
 		/* Switch back to acpu pll */
-		select_clk_source_div(acpuclk_init_data, tgt_s);
+		select_clk_source_div(priv, tgt_s);
 
 	} else if (strt_s->src != ACPUPLL && tgt_s->src == ACPUPLL) {
 		rc = clk_set_rate(tgt, tgt_freq_hz);
@@ -177,16 +173,16 @@
 		}
 
 		if (atomic)
-			clk_enable(tgt);
+			rc = clk_enable(tgt);
 		else
-			clk_prepare_enable(tgt);
+			rc = clk_prepare_enable(tgt);
 
 		if (rc) {
 			pr_err("ACPU PLL enable failed\n");
 			return rc;
 		}
 
-		select_clk_source_div(acpuclk_init_data, tgt_s);
+		select_clk_source_div(priv, tgt_s);
 
 		if (atomic)
 			clk_disable(strt);
@@ -195,17 +191,17 @@
 
 	} else {
 		if (atomic)
-			clk_enable(tgt);
+			rc = clk_enable(tgt);
 		else
-			clk_prepare_enable(tgt);
+			rc = clk_prepare_enable(tgt);
 
 		if (rc) {
 			pr_err("%s enable failed\n",
-				acpuclk_init_data->src_clocks[tgt_s->src].name);
+				priv->src_clocks[tgt_s->src].name);
 			return rc;
 		}
 
-		select_clk_source_div(acpuclk_init_data, tgt_s);
+		select_clk_source_div(priv, tgt_s);
 
 		if (atomic)
 			clk_disable(strt);
@@ -224,16 +220,16 @@
 	int rc = 0;
 
 	if (reason == SETRATE_CPUFREQ)
-		mutex_lock(&acpuclk_init_data->lock);
+		mutex_lock(&priv->lock);
 
-	strt_s = acpuclk_init_data->current_speed;
+	strt_s = priv->current_speed;
 
 	/* Return early if rate didn't change */
 	if (rate == strt_s->khz)
 		goto out;
 
 	/* Find target frequency */
-	for (tgt_s = acpuclk_init_data->freq_tbl; tgt_s->khz != 0; tgt_s++)
+	for (tgt_s = priv->freq_tbl; tgt_s->khz != 0; tgt_s++)
 		if (tgt_s->khz == rate)
 			break;
 	if (tgt_s->khz == 0) {
@@ -261,7 +257,7 @@
 	if (rc)
 		goto out;
 
-	acpuclk_init_data->current_speed = tgt_s;
+	priv->current_speed = tgt_s;
 	pr_debug("CPU speed change complete\n");
 
 	/* Nothing else to do for SWFI or power-collapse. */
@@ -277,13 +273,13 @@
 
 out:
 	if (reason == SETRATE_CPUFREQ)
-		mutex_unlock(&acpuclk_init_data->lock);
+		mutex_unlock(&priv->lock);
 	return rc;
 }
 
 static unsigned long acpuclk_cortex_get_rate(int cpu)
 {
-	return acpuclk_init_data->current_speed->khz;
+	return priv->current_speed->khz;
 }
 
 #ifdef CONFIG_CPU_FREQ_MSM
@@ -293,18 +289,17 @@
 {
 	int i, freq_cnt = 0;
 
-	/* Construct the freq_table tables from acpuclk_init_data->freq_tbl. */
-	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0
+	/* Construct the freq_table tables from priv->freq_tbl. */
+	for (i = 0; priv->freq_tbl[i].khz != 0
 			&& freq_cnt < ARRAY_SIZE(freq_table); i++) {
-		if (!acpuclk_init_data->freq_tbl[i].use_for_scaling)
+		if (!priv->freq_tbl[i].use_for_scaling)
 			continue;
 		freq_table[freq_cnt].index = freq_cnt;
-		freq_table[freq_cnt].frequency =
-			acpuclk_init_data->freq_tbl[i].khz;
+		freq_table[freq_cnt].frequency = priv->freq_tbl[i].khz;
 		freq_cnt++;
 	}
 	/* freq_table not big enough to store all usable freqs. */
-	BUG_ON(acpuclk_init_data->freq_tbl[i].khz != 0);
+	BUG_ON(priv->freq_tbl[i].khz != 0);
 
 	freq_table[freq_cnt].index = freq_cnt;
 	freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;
@@ -332,43 +327,40 @@
 	unsigned long max_cpu_khz = 0;
 	int i, rc;
 
-	acpuclk_init_data = data;
-	mutex_init(&acpuclk_init_data->lock);
+	priv = data;
+	mutex_init(&priv->lock);
 
-	bus_perf_client = msm_bus_scale_register_client(
-		acpuclk_init_data->bus_scale);
+	bus_perf_client = msm_bus_scale_register_client(priv->bus_scale);
 	if (!bus_perf_client) {
 		pr_err("Unable to register bus client\n");
 		BUG();
 	}
 
 	for (i = 0; i < NUM_SRC; i++) {
-		if (!acpuclk_init_data->src_clocks[i].name)
+		if (!priv->src_clocks[i].name)
 			continue;
-		acpuclk_init_data->src_clocks[i].clk =
-			clk_get(&pdev->dev,
-				acpuclk_init_data->src_clocks[i].name);
-		BUG_ON(IS_ERR(acpuclk_init_data->src_clocks[i].clk));
+		priv->src_clocks[i].clk =
+			devm_clk_get(&pdev->dev, priv->src_clocks[i].name);
+		BUG_ON(IS_ERR(priv->src_clocks[i].clk));
 	}
 
 	/* Improve boot time by ramping up CPU immediately */
-	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0; i++)
-		if (acpuclk_init_data->freq_tbl[i].use_for_scaling)
-			max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;
+	for (i = 0; priv->freq_tbl[i].khz != 0; i++)
+		if (priv->freq_tbl[i].use_for_scaling)
+			max_cpu_khz = priv->freq_tbl[i].khz;
 
 	/* Initialize regulators */
-	rc = increase_vdd(acpuclk_init_data->vdd_max_cpu,
-		acpuclk_init_data->vdd_max_mem);
+	rc = increase_vdd(priv->vdd_max_cpu, priv->vdd_max_mem);
 	if (rc)
 		goto err_vdd;
 
-	rc = regulator_enable(acpuclk_init_data->vdd_mem);
+	rc = regulator_enable(priv->vdd_mem);
 	if (rc) {
 		dev_err(&pdev->dev, "regulator_enable for mem failed\n");
 		goto err_vdd;
 	}
 
-	rc = regulator_enable(acpuclk_init_data->vdd_cpu);
+	rc = regulator_enable(priv->vdd_cpu);
 	if (rc) {
 		dev_err(&pdev->dev, "regulator_enable for cpu failed\n");
 		goto err_vdd_cpu;
@@ -388,15 +380,7 @@
 	return 0;
 
 err_vdd_cpu:
-	regulator_disable(acpuclk_init_data->vdd_mem);
+	regulator_disable(priv->vdd_mem);
 err_vdd:
-	regulator_put(acpuclk_init_data->vdd_mem);
-	regulator_put(acpuclk_init_data->vdd_cpu);
-
-	for (i = 0; i < NUM_SRC; i++) {
-		if (!acpuclk_init_data->src_clocks[i].name)
-			continue;
-		clk_put(acpuclk_init_data->src_clocks[i].clk);
-	}
 	return rc;
 }
diff --git a/arch/arm/mach-msm/acpuclock-krait-debug.c b/arch/arm/mach-msm/acpuclock-krait-debug.c
index a29735e..f11b9fc 100644
--- a/arch/arm/mach-msm/acpuclock-krait-debug.c
+++ b/arch/arm/mach-msm/acpuclock-krait-debug.c
@@ -249,6 +249,56 @@
 }
 DEFINE_SIMPLE_ATTRIBUTE(boost_fops, boost_get, NULL, "%lld\n");
 
+static int acpu_table_show(struct seq_file *m, void *unused)
+{
+	const struct acpu_level *level;
+
+	seq_printf(m, "CPU_KHz  PLL_L_Val   L2_KHz  VDD_Dig  VDD_Mem  ");
+	seq_printf(m, "BW_Mbps  VDD_Core  UA_Core  AVS\n");
+
+	for (level = drv->acpu_freq_tbl; level->speed.khz != 0; level++) {
+
+		const struct l2_level *l2 =
+					&drv->l2_freq_tbl[level->l2_level];
+		u32 bw = drv->bus_scale->usecase[l2->bw_level].vectors[0].ib;
+
+		if (!level->use_for_scaling)
+			continue;
+
+		/* CPU speed information */
+		seq_printf(m, "%7lu  %9u  ",
+				level->speed.khz,
+				level->speed.pll_l_val);
+
+		/* L2 level information */
+		seq_printf(m, "%7lu  %7d  %7d  %7u  ",
+				l2->speed.khz,
+				l2->vdd_dig,
+				l2->vdd_mem,
+				bw / 1000000);
+
+		/* Core voltage information */
+		seq_printf(m, "%8d  %7d  %3d\n",
+				level->vdd_core,
+				level->ua_core,
+				level->avsdscr_setting);
+	}
+
+	return 0;
+}
+
+static int acpu_table_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, acpu_table_show, inode->i_private);
+}
+
+static const struct file_operations acpu_table_fops = {
+	.open		= acpu_table_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static void __cpuinit add_scalable_dir(int sc_id)
 {
 	char sc_name[8];
@@ -326,6 +376,8 @@
 							&speed_bin_fops);
 	debugfs_create_file("pvs_bin", S_IRUGO, base_dir, NULL, &pvs_bin_fops);
 	debugfs_create_file("boost_uv", S_IRUGO, base_dir, NULL, &boost_fops);
+	debugfs_create_file("acpu_table", S_IRUGO, base_dir, NULL,
+				&acpu_table_fops);
 
 	for_each_online_cpu(cpu)
 		add_scalable_dir(cpu);
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index e8e75df..2b70e7c 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -90,6 +90,18 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct gpiomux_setting wcnss_5wire_suspend_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5wire_active_cfg = {
+	.func = GPIOMUX_FUNC_1,
+	.drv  = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
 static struct gpiomux_setting gpio_i2c_config = {
 	.func = GPIOMUX_FUNC_3,
 	.drv = GPIOMUX_DRV_2MA,
@@ -239,6 +251,43 @@
 	},
 };
 
+static struct msm_gpiomux_config wcnss_5wire_interface[] = {
+	{
+		.gpio = 40,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5wire_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 41,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5wire_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 42,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5wire_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 43,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5wire_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 44,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5wire_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+		},
+	},
+};
 void __init msm8226_init_gpiomux(void)
 {
 	int rc;
@@ -256,6 +305,8 @@
 			ARRAY_SIZE(msm_keypad_configs));
 
 	msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs));
+	msm_gpiomux_install(wcnss_5wire_interface,
+				ARRAY_SIZE(wcnss_5wire_interface));
 
 	msm_gpiomux_install(&sd_card_det, 1);
 	msm_gpiomux_install(msm_synaptics_configs,
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index 2723e20..5f5366f 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -49,6 +49,7 @@
 #include "platsmp.h"
 #include "spm.h"
 #include "lpm_resources.h"
+#include "modem_notifier.h"
 
 static struct memtype_reserve msm8610_reserve_table[] __initdata = {
 	[MEMTYPE_SMI] = {
@@ -94,6 +95,8 @@
 
 void __init msm8610_add_drivers(void)
 {
+	msm_init_modem_notifier_list();
+	msm_smd_init();
 	msm_rpm_driver_init();
 	msm_lpmrs_module_init();
 	msm_spm_device_init();
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 1b76441..75aaaec 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -276,6 +276,57 @@
 	},
 };
 
+static struct gpiomux_setting qpic_lcdc_a_d = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_10MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting qpic_lcdc_cs = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_10MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting qpic_lcdc_rs = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_10MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting qpic_lcdc_te = {
+	.func = GPIOMUX_FUNC_7,
+	.drv = GPIOMUX_DRV_10MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config msm9625_qpic_lcdc_configs[] __initdata = {
+	{
+		.gpio      = 20,	/* a_d */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &qpic_lcdc_a_d,
+		},
+	},
+	{
+		.gpio      = 21,	/* cs */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &qpic_lcdc_cs,
+		},
+	},
+	{
+		.gpio      = 22,	/* te */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &qpic_lcdc_te,
+		},
+	},
+	{
+		.gpio      = 23,	/* rs */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &qpic_lcdc_rs,
+		},
+	},
+};
+
 void __init msm9625_init_gpiomux(void)
 {
 	int rc;
@@ -296,4 +347,7 @@
 			ARRAY_SIZE(mdm9625_cdc_reset_config));
 	msm_gpiomux_install(sdc2_card_det_config,
 		ARRAY_SIZE(sdc2_card_det_config));
+	msm_gpiomux_install(msm9625_qpic_lcdc_configs,
+			ARRAY_SIZE(msm9625_qpic_lcdc_configs));
+
 }
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index c823c39..8e2b7c9 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -1919,6 +1919,8 @@
 };
 
 static struct clk_freq_tbl ftbl_camss_mclk0_1_clk[] = {
+	F_MMSS(  19200000,         xo,   1,    0,    0),
+	F_MMSS(  24000000,      gpll0,   5,    1,    5),
 	F_MMSS(  66670000,      gpll0,   9,    0,    0),
 	F_END
 };
@@ -3062,7 +3064,8 @@
 	CLK_LOOKUP("a7sspll", a7sspll.c, "f9011050.qcom,acpuclk"),
 
 	/* WCNSS CLOCKS */
-	CLK_LOOKUP("xo", xo.c, "fb000000.qcom,wcnss-wlan"),
+	CLK_LOOKUP("xo", xo.c,         "fb000000.qcom,wcnss-wlan"),
+	CLK_LOOKUP("rf_clk", cxo_a2.c, "fb000000.qcom,wcnss-wlan"),
 
 	/* BUS DRIVER */
 	CLK_LOOKUP("bus_clk", cnoc_msmbus_clk.c, "msm_config_noc"),
@@ -3179,6 +3182,11 @@
 	CLK_LOOKUP("bus_clk",      gcc_ce1_axi_clk.c,     "qseecom"),
 	CLK_LOOKUP("core_clk_src", ce1_clk_src.c,         "qseecom"),
 
+	CLK_LOOKUP("core_clk",     gcc_ce1_clk.c,         "scm"),
+	CLK_LOOKUP("iface_clk",    gcc_ce1_ahb_clk.c,     "scm"),
+	CLK_LOOKUP("bus_clk",      gcc_ce1_axi_clk.c,     "scm"),
+	CLK_LOOKUP("core_clk_src", ce1_clk_src.c,         "scm"),
+
 	/* SDCC */
 	CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "f9824000.qcom,sdcc"),
 	CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "f9824000.qcom,sdcc"),
@@ -3432,6 +3440,12 @@
 
 static void __init msm8226_clock_post_init(void)
 {
+	/*
+	 * Hold an active set vote for CXO; this is because CXO is expected
+	 * to remain on whenever CPUs aren't power collapsed.
+	 */
+	clk_prepare_enable(&xo_a_clk.c);
+
 	/* Set rates for single-rate clocks. */
 	clk_set_rate(&usb_hs_system_clk_src.c,
 			usb_hs_system_clk_src.freq_tbl[0].freq_hz);
@@ -3517,11 +3531,8 @@
 	 */
 	clk_set_rate(&mmssnoc_ahb_a_clk.c, 40000000);
 
-	/*
-	 * Hold an active set vote for CXO; this is because CXO is expected
-	 * to remain on whenever CPUs aren't power collapsed.
-	 */
-	clk_prepare_enable(&xo_a_clk.c);
+	/* Set an initial rate (fmax at nominal) on the MMSSNOC AXI clock */
+	clk_set_rate(&axi_clk_src.c, 200000000);
 
 	enable_rpm_scaling();
 
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 0657c21..c5594e2 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4815,6 +4815,11 @@
 	CLK_LOOKUP("bus_clk",      gcc_ce1_axi_clk.c,     "qseecom"),
 	CLK_LOOKUP("core_clk_src", ce1_clk_src.c,         "qseecom"),
 
+	CLK_LOOKUP("core_clk",     gcc_ce1_clk.c,         "scm"),
+	CLK_LOOKUP("iface_clk",    gcc_ce1_ahb_clk.c,     "scm"),
+	CLK_LOOKUP("bus_clk",      gcc_ce1_axi_clk.c,     "scm"),
+	CLK_LOOKUP("core_clk_src", ce1_clk_src.c,         "scm"),
+
 	CLK_LOOKUP("core_clk", gcc_gp1_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index fff7fa4..56c4afd 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -93,9 +93,9 @@
 #endif
 
 #define MAX_HOLE_ADDRESS    (PHYS_OFFSET + 0x10000000)
-extern unsigned long memory_hole_offset;
-extern unsigned long memory_hole_start;
-extern unsigned long memory_hole_end;
+extern phys_addr_t memory_hole_offset;
+extern phys_addr_t memory_hole_start;
+extern phys_addr_t memory_hole_end;
 extern unsigned long memory_hole_align;
 extern unsigned long virtual_hole_start;
 extern unsigned long virtual_hole_end;
@@ -107,11 +107,13 @@
 				memory_hole_align)
 
 #define __phys_to_virt(phys)				\
+	(unsigned long)\
 	((MEM_HOLE_END_PHYS_OFFSET && ((phys) >= MEM_HOLE_END_PHYS_OFFSET)) ? \
 	(phys) - MEM_HOLE_END_PHYS_OFFSET + MEM_HOLE_PAGE_OFFSET :	\
 	(phys) - PHYS_OFFSET + PAGE_OFFSET)
 
 #define __virt_to_phys(virt)				\
+	(unsigned long)\
 	((MEM_HOLE_END_PHYS_OFFSET && ((virt) >= MEM_HOLE_PAGE_OFFSET)) ? \
 	(virt) - MEM_HOLE_PAGE_OFFSET + MEM_HOLE_END_PHYS_OFFSET :	\
 	(virt) - PAGE_OFFSET + PHYS_OFFSET)
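
For illustration only (not part of the patch): the hole-aware translation above can be exercised in user space with made-up layout values. Everything below — PHYS_OFFSET, PAGE_OFFSET and the hole bounds — is hypothetical; the real values come from the board's memory map.

/*
 * User-space sketch of the hole-aware translation above. All layout
 * values are made up for illustration: physical memory starts at 0,
 * the kernel linear map starts at 0xc0000000, and the hole spans
 * [0x10000000, 0x20000000) physically.
 */
#include <stdint.h>
#include <stdio.h>

#define PHYS_OFFSET              0x00000000UL
#define PAGE_OFFSET              0xc0000000UL
#define MEM_HOLE_END_PHYS_OFFSET 0x20000000UL	/* first phys addr after the hole */
#define MEM_HOLE_PAGE_OFFSET     0xd0000000UL	/* virt addr it should map to     */

static uintptr_t phys_to_virt(uintptr_t phys)
{
	/* Addresses past the hole are pulled back by the hole size. */
	if (MEM_HOLE_END_PHYS_OFFSET && phys >= MEM_HOLE_END_PHYS_OFFSET)
		return phys - MEM_HOLE_END_PHYS_OFFSET + MEM_HOLE_PAGE_OFFSET;
	return phys - PHYS_OFFSET + PAGE_OFFSET;
}

int main(void)
{
	/* Below the hole: plain linear offset, 0x08000000 -> 0xc8000000. */
	printf("phys 0x08000000 -> virt 0x%08lx\n",
	       (unsigned long)phys_to_virt(0x08000000UL));
	/* First byte after the hole takes the virt slot the hole would have used. */
	printf("phys 0x20000000 -> virt 0x%08lx\n",
	       (unsigned long)phys_to_virt(0x20000000UL));
	return 0;
}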
diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h
index 8a06fe3..0cc7bbf 100644
--- a/arch/arm/mach-msm/include/mach/scm.h
+++ b/arch/arm/mach-msm/include/mach/scm.h
@@ -21,7 +21,7 @@
 #define SCM_SVC_SSD			0x7
 #define SCM_SVC_FUSE			0x8
 #define SCM_SVC_PWR			0x9
-#define SCM_SVC_CP			0xC
+#define SCM_SVC_MP			0xC
 #define SCM_SVC_DCVS			0xD
 #define SCM_SVC_TZSCHEDULER		0xFC
 
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
index 4336945..4adfe4d 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
@@ -562,7 +562,7 @@
 	int pnode, src, curr, ctx;
 	uint64_t req_clk, req_bw, curr_clk, curr_bw;
 	struct msm_bus_client *client = (struct msm_bus_client *)cl;
-	if (IS_ERR(client)) {
+	if (IS_ERR_OR_NULL(client)) {
 		MSM_BUS_ERR("msm_bus_scale_client update req error %d\n",
 				(uint32_t)client);
 		return -ENXIO;
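
Why the switch matters, sketched outside the kernel: IS_ERR() only recognizes pointers that encode an errno value, so a NULL client handle slipped past the old check, while IS_ERR_OR_NULL() rejects both cases. The macros below are simplified user-space re-statements for the illustration, not the kernel definitions.

#include <stdio.h>

#define MAX_ERRNO 4095

static int is_err(const void *ptr)
{
	/* Only the last page of the address space encodes an errno. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int is_err_or_null(const void *ptr)
{
	return !ptr || is_err(ptr);
}

int main(void)
{
	void *client = NULL;	/* e.g. a registration that never happened */

	/* Old check: NULL is not an encoded errno, so it slips through. */
	printf("IS_ERR(NULL)         = %d\n", is_err(client));
	/* New check: NULL and ERR_PTR() values are both rejected. */
	printf("IS_ERR_OR_NULL(NULL) = %d\n", is_err_or_null(client));
	return 0;
}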
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index dfbda74..ef13c34 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -114,6 +114,8 @@
 	pil_q6v5_shutdown(pil);
 	pil_lpass_disable_clks(drv);
 
+	writel_relaxed(1, drv->restart_reg);
+
 	drv->is_booted = false;
 
 	return 0;
@@ -125,6 +127,11 @@
 	unsigned long start_addr = pil_get_entry_addr(pil);
 	int ret;
 
+	/* Deassert reset to subsystem and wait for propagation */
+	writel_relaxed(0, drv->restart_reg);
+	mb();
+	udelay(2);
+
 	ret = pil_lpass_enable_clks(drv);
 	if (ret)
 		return ret;
@@ -385,6 +392,7 @@
 	struct lpass_data *drv;
 	struct q6v5_data *q6;
 	struct pil_desc *desc;
+	struct resource *res;
 	int ret;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
@@ -405,6 +413,11 @@
 	desc->owner = THIS_MODULE;
 	desc->proxy_timeout = PROXY_TIMEOUT_MS;
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
 	q6->core_clk = devm_clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(q6->core_clk))
 		return PTR_ERR(q6->core_clk);
diff --git a/arch/arm/mach-msm/scm-pas.c b/arch/arm/mach-msm/scm-pas.c
index f73055e..b7271bb 100644
--- a/arch/arm/mach-msm/scm-pas.c
+++ b/arch/arm/mach-msm/scm-pas.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,23 @@
 #define PAS_SHUTDOWN_CMD	6
 #define PAS_IS_SUPPORTED_CMD	7
 
+enum scm_clock_ids {
+	BUS_CLK = 0,
+	CORE_CLK,
+	IFACE_CLK,
+	CORE_CLK_SRC,
+	NUM_CLKS
+};
+
+static const char * const scm_clock_names[NUM_CLKS] = {
+	[BUS_CLK]      = "bus_clk",
+	[CORE_CLK]     = "core_clk",
+	[IFACE_CLK]    = "iface_clk",
+	[CORE_CLK_SRC] = "core_clk_src",
+};
+
+static struct clk *scm_clocks[NUM_CLKS];
+
 int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
 {
 	int ret;
@@ -108,14 +125,13 @@
 };
 
 static uint32_t scm_perf_client;
-static struct clk *scm_bus_clk;
 
 static DEFINE_MUTEX(scm_pas_bw_mutex);
 static int scm_pas_bw_count;
 
 static int scm_pas_enable_bw(void)
 {
-	int ret = 0;
+	int ret = 0, i;
 
 	if (!scm_perf_client)
 		return -EINVAL;
@@ -123,30 +139,40 @@
 	mutex_lock(&scm_pas_bw_mutex);
 	if (!scm_pas_bw_count) {
 		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
-		if (ret) {
-			pr_err("bandwidth request failed (%d)\n", ret);
-		} else if (scm_bus_clk) {
-			ret = clk_prepare_enable(scm_bus_clk);
-			if (ret)
-				pr_err("clock enable failed\n");
-		}
-	}
-	if (ret)
-		msm_bus_scale_client_update_request(scm_perf_client, 0);
-	else
+		if (ret)
+			goto err_bus;
 		scm_pas_bw_count++;
+	}
+	for (i = 0; i < NUM_CLKS; i++)
+		if (clk_prepare_enable(scm_clocks[i]))
+			goto err_clk;
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+
+err_clk:
+	pr_err("clk prepare_enable failed (%s)\n", scm_clock_names[i]);
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(scm_clocks[i]);
+
+err_bus:
+	pr_err("bandwidth request failed (%d)\n", ret);
+	msm_bus_scale_client_update_request(scm_perf_client, 0);
+
 	mutex_unlock(&scm_pas_bw_mutex);
 	return ret;
 }
 
 static void scm_pas_disable_bw(void)
 {
+	int i;
 	mutex_lock(&scm_pas_bw_mutex);
 	if (scm_pas_bw_count-- == 1) {
 		msm_bus_scale_client_update_request(scm_perf_client, 0);
-		if (scm_bus_clk)
-			clk_disable_unprepare(scm_bus_clk);
 	}
+	for (i = NUM_CLKS - 1; i >= 0; i--)
+		clk_disable_unprepare(scm_clocks[i]);
+
 	mutex_unlock(&scm_pas_bw_mutex);
 }
 
@@ -214,17 +240,25 @@
 
 static int __init scm_pas_init(void)
 {
-	if (cpu_is_msm8974()) {
+	int i, rate;
+	for (i = 0; i < NUM_CLKS; i++) {
+		scm_clocks[i] = clk_get_sys("scm", scm_clock_names[i]);
+		if (IS_ERR(scm_clocks[i]))
+			scm_clocks[i] = NULL;
+	}
+
+	/* Fail silently if this clock is not supported */
+	rate = clk_round_rate(scm_clocks[CORE_CLK_SRC], 1);
+	clk_set_rate(scm_clocks[CORE_CLK_SRC], rate);
+
+	if (cpu_is_msm8974() || cpu_is_msm8226()) {
 		scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
 		scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
 	} else {
-		scm_bus_clk = clk_get_sys("scm", "bus_clk");
-		if (!IS_ERR(scm_bus_clk)) {
-			clk_set_rate(scm_bus_clk, 64000000);
-		} else {
-			scm_bus_clk = NULL;
+		if (!IS_ERR(scm_clocks[BUS_CLK]))
+			clk_set_rate(scm_clocks[BUS_CLK], 64000000);
+		else
 			pr_warn("unable to get bus clock\n");
-		}
 	}
 
 	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
diff --git a/arch/arm/mach-msm/smp2p.c b/arch/arm/mach-msm/smp2p.c
index 8066005..7bdcce9 100644
--- a/arch/arm/mach-msm/smp2p.c
+++ b/arch/arm/mach-msm/smp2p.c
@@ -1599,8 +1599,8 @@
  */
 static int __devinit msm_smp2p_probe(struct platform_device *pdev)
 {
-	struct resource *irq_out_base;
-	struct resource *irq_offset;
+	struct resource *r;
+	void *irq_out_ptr;
 	char *key;
 	uint32_t edge;
 	int ret;
@@ -1617,15 +1617,18 @@
 		goto fail;
 	}
 
-	key = "irq-reg-base";
-	irq_out_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
-	if (!irq_out_base)
-		goto missing_key;
-
-	key = "irq-reg-offset";
-	irq_offset = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
-	if (!irq_offset)
-		goto missing_key;
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		SMP2P_ERR("%s: failed gathering irq-reg resource for edge %d\n"
+				, __func__, edge);
+		goto fail;
+	}
+	irq_out_ptr = ioremap_nocache(r->start, resource_size(r));
+	if (!irq_out_ptr) {
+		SMP2P_ERR("%s: failed remap from phys to virt for edge %d\n",
+				__func__, edge);
+		return -ENOMEM;
+	}
 
 	key = "qcom,irq-bitmask";
 	ret = of_property_read_u32(node, key, &irq_bitmask);
@@ -1656,9 +1659,7 @@
 	 */
 	smp2p_int_cfgs[edge].in_int_id = irq_line;
 	smp2p_int_cfgs[edge].out_int_mask = irq_bitmask;
-	smp2p_int_cfgs[edge].out_int_ptr =
-		(uint32_t *)((uint32_t)irq_out_base->start +
-				(uint32_t)irq_offset->start);
+	smp2p_int_cfgs[edge].out_int_ptr = irq_out_ptr;
 	smp2p_int_cfgs[edge].is_configured = true;
 	return 0;
 
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index efbd8c6..57c85e1 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -44,6 +44,7 @@
 	HW_PLATFORM_LIQUID  = 9,
 	/* Dragonboard platform id is assigned as 10 in CDT */
 	HW_PLATFORM_DRAGON	= 10,
+	HW_PLATFORM_QRD	= 11,
 	HW_PLATFORM_HRD	= 13,
 	HW_PLATFORM_DTV	= 14,
 	HW_PLATFORM_INVALID
@@ -59,6 +60,7 @@
 	[HW_PLATFORM_MTP] = "MTP",
 	[HW_PLATFORM_LIQUID] = "Liquid",
 	[HW_PLATFORM_DRAGON] = "Dragon",
+	[HW_PLATFORM_QRD] = "QRD",
 	[HW_PLATFORM_HRD] = "HRD",
 	[HW_PLATFORM_DTV] = "DTV",
 };
@@ -249,6 +251,7 @@
 	[105] = MSM_CPU_9615,
 	[106] = MSM_CPU_9615,
 	[107] = MSM_CPU_9615,
+	[171] = MSM_CPU_9615,
 
 	/* 8064 IDs */
 	[109] = MSM_CPU_8064,
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f16f700..66567bb 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -375,11 +375,11 @@
 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
-unsigned long memory_hole_offset;
+phys_addr_t memory_hole_offset;
 EXPORT_SYMBOL(memory_hole_offset);
-unsigned long memory_hole_start;
+phys_addr_t memory_hole_start;
 EXPORT_SYMBOL(memory_hole_start);
-unsigned long memory_hole_end;
+phys_addr_t memory_hole_end;
 EXPORT_SYMBOL(memory_hole_end);
 unsigned long memory_hole_align;
 EXPORT_SYMBOL(memory_hole_align);
@@ -390,8 +390,8 @@
 void find_memory_hole(void)
 {
 	int i;
-	unsigned long hole_start;
-	unsigned long hole_size;
+	phys_addr_t hole_start;
+	phys_addr_t hole_size;
 	unsigned long hole_end_virt;
 
 	/*
@@ -428,13 +428,13 @@
 
 	memory_hole_offset = memory_hole_start - PHYS_OFFSET;
 	if (!IS_ALIGNED(memory_hole_start, SECTION_SIZE)) {
-		pr_err("memory_hole_start %lx is not aligned to %lx\n",
-			memory_hole_start, SECTION_SIZE);
+		pr_err("memory_hole_start %pa is not aligned to %lx\n",
+			&memory_hole_start, SECTION_SIZE);
 		BUG();
 	}
 	if (!IS_ALIGNED(memory_hole_end, SECTION_SIZE)) {
-		pr_err("memory_hole_end %lx is not aligned to %lx\n",
-			memory_hole_end, SECTION_SIZE);
+		pr_err("memory_hole_end %pa is not aligned to %lx\n",
+			&memory_hole_end, SECTION_SIZE);
 		BUG();
 	}
 
@@ -444,8 +444,9 @@
 	     IS_ALIGNED(memory_hole_end, PMD_SIZE)) ||
 	     (IS_ALIGNED(hole_end_virt, PMD_SIZE) &&
 	      !IS_ALIGNED(memory_hole_end, PMD_SIZE))) {
-		memory_hole_align = max(hole_end_virt & ~PMD_MASK,
-					memory_hole_end & ~PMD_MASK);
+		memory_hole_align = !IS_ALIGNED(hole_end_virt, PMD_SIZE) ?
+					hole_end_virt & ~PMD_MASK :
+					memory_hole_end & ~PMD_MASK;
 		virtual_hole_start = hole_end_virt;
 		virtual_hole_end = hole_end_virt + memory_hole_align;
 		pr_info("Physical memory hole is not aligned. There will be a virtual memory hole from %lx to %lx\n",
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 0e31910..266be05 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -625,39 +625,60 @@
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
+{
+#ifndef CONFIG_ARM_LPAE
+	/*
+	 * In classic MMU format, puds and pmds are folded into
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), whereas PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
+	 */
+	if (addr & SECTION_SIZE)
+		pmd++;
+#endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
+
+	flush_pmd_entry(pmd);
+}
+
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 				      unsigned long end, phys_addr_t phys,
 				      const struct mem_type *type)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
 
-	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary.  Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
-	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
-#endif
-
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
-
-		flush_pmd_entry(p);
-	} else {
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over and map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+						__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -668,7 +689,7 @@
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
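
A quick illustration of the stepping the new alloc_init_pmd() loop depends on (not part of the patch): under the classic 2-level layout the range handed down never crosses a pmd, so the loop body runs once, whereas under LPAE a single call can cover several 2 MB pmds and pmd_addr_end() clamps every step to the next boundary. The user-space sketch below assumes the usual 2 MB PMD granularity and made-up addresses, with a simplified pmd_addr_end().

#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)
#define PMD_MASK (~(PMD_SIZE - 1))

/* Simplified stand-in for the kernel's pmd_addr_end(). */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0xc0100000UL;	/* not 2 MB aligned */
	unsigned long end  = 0xc0700000UL;	/* 6 MB further on  */
	unsigned long next;

	/* Each iteration handles at most one pmd's worth of the range. */
	do {
		next = pmd_addr_end(addr, end);
		printf("map [%#lx, %#lx)\n", addr, next);
		addr = next;
	} while (addr != end);

	return 0;
}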
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993ea..50b2831 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,8 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/opp.h>
+#include <linux/of.h>
+#include <linux/export.h>
 
 /*
  * Internal data structure organization with the OPP layer library is as
@@ -64,6 +66,7 @@
 	unsigned long u_volt;
 
 	struct device_opp *dev_opp;
+	struct rcu_head head;
 };
 
 /**
@@ -159,6 +162,7 @@
 
 	return v;
 }
+EXPORT_SYMBOL(opp_get_voltage);
 
 /**
  * opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -188,6 +192,7 @@
 
 	return f;
 }
+EXPORT_SYMBOL(opp_get_freq);
 
 /**
  * opp_get_opp_count() - Get number of opps available in the opp list
@@ -220,6 +225,7 @@
 
 	return count;
 }
+EXPORT_SYMBOL(opp_get_opp_count);
 
 /**
  * opp_find_freq_exact() - search for an exact frequency
@@ -229,7 +235,10 @@
  *
  * Searches for exact match in the opp list and returns pointer to the matching
  * opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR.
+ * using IS_ERR. Error return values can be:
+ * EINVAL:	for bad pointer
+ * ERANGE:	no match found for search
+ * ENODEV:	if device not found in list of registered devices
  *
  * Note: available is a modifier for the search. if available=true, then the
  * match is for exact matching frequency and is available in the stored OPP
@@ -248,7 +257,7 @@
 				bool available)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
@@ -267,6 +276,7 @@
 
 	return opp;
 }
+EXPORT_SYMBOL(opp_find_freq_exact);
 
 /**
  * opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -277,7 +287,11 @@
  * for a device.
  *
  * Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL:	for bad pointer
+ * ERANGE:	no match found for search
+ * ENODEV:	if device not found in list of registered devices
  *
  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  * protected pointer. The reason for the same is that the opp pointer which is
@@ -288,7 +302,7 @@
 struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -297,7 +311,7 @@
 
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp))
-		return opp;
+		return ERR_CAST(dev_opp);
 
 	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
 		if (temp_opp->available && temp_opp->rate >= *freq) {
@@ -309,6 +323,7 @@
 
 	return opp;
 }
+EXPORT_SYMBOL(opp_find_freq_ceil);
 
 /**
  * opp_find_freq_floor() - Search for a rounded floor freq
@@ -319,7 +334,11 @@
  * for a device.
  *
  * Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL:	for bad pointer
+ * ERANGE:	no match found for search
+ * ENODEV:	if device not found in list of registered devices
  *
  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  * protected pointer. The reason for the same is that the opp pointer which is
@@ -330,7 +349,7 @@
 struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -339,7 +358,7 @@
 
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp))
-		return opp;
+		return ERR_CAST(dev_opp);
 
 	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
 		if (temp_opp->available) {
@@ -355,6 +374,7 @@
 
 	return opp;
 }
+EXPORT_SYMBOL(opp_find_freq_floor);
 
 /**
  * opp_add()  - Add an OPP table from a table definitions
@@ -511,7 +531,7 @@
 
 	list_replace_rcu(&opp->node, &new_opp->node);
 	mutex_unlock(&dev_opp_list_lock);
-	synchronize_rcu();
+	kfree_rcu(opp, head);
 
 	/* Notify the change of the OPP availability */
 	if (availability_req)
@@ -521,13 +541,10 @@
 		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
 					 new_opp);
 
-	/* clean up old opp */
-	new_opp = opp;
-	goto out;
+	return 0;
 
 unlock:
 	mutex_unlock(&dev_opp_list_lock);
-out:
 	kfree(new_opp);
 	return r;
 }
@@ -551,6 +568,7 @@
 {
 	return opp_set_availability(dev, freq, true);
 }
+EXPORT_SYMBOL(opp_enable);
 
 /**
  * opp_disable() - Disable a specific OPP
@@ -572,6 +590,7 @@
 {
 	return opp_set_availability(dev, freq, false);
 }
+EXPORT_SYMBOL(opp_disable);
 
 #ifdef CONFIG_CPU_FREQ
 /**
@@ -674,3 +693,49 @@
 
 	return &dev_opp->head;
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev:	device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+	const struct property *prop;
+	const __be32 *val;
+	int nr;
+
+	prop = of_find_property(dev->of_node, "operating-points", NULL);
+	if (!prop)
+		return -ENODEV;
+	if (!prop->value)
+		return -ENODATA;
+
+	/*
+	 * Each OPP is a set of tuples consisting of frequency and
+	 * voltage like <freq-kHz vol-uV>.
+	 */
+	nr = prop->length / sizeof(u32);
+	if (nr % 2) {
+		dev_err(dev, "%s: Invalid OPP list\n", __func__);
+		return -EINVAL;
+	}
+
+	val = prop->value;
+	while (nr) {
+		unsigned long freq = be32_to_cpup(val++) * 1000;
+		unsigned long volt = be32_to_cpup(val++);
+
+		if (opp_add(dev, freq, volt)) {
+			dev_warn(dev, "%s: Failed to add OPP %ld\n",
+				 __func__, freq);
+			continue;
+		}
+		nr -= 2;
+	}
+
+	return 0;
+}
+#endif
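
A hypothetical consumer of the new helper, for illustration only (assumes CONFIG_OF): a driver path that registers its DT "operating-points" table and then picks the lowest OPP at or above a requested rate, honouring the rcu_read_lock() requirement documented above. The function name and the surrounding driver are invented for the sketch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int example_set_min_rate(struct device *dev, unsigned long *freq)
{
	struct opp *opp;
	unsigned long volt;
	int ret;

	/* Populate the OPP table from the DT "operating-points" property. */
	ret = of_init_opp_table(dev);
	if (ret)
		return ret;

	/* opp_find_freq_ceil() and opp_get_voltage() run under rcu_read_lock(). */
	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);	/* -ERANGE, -ENODEV or -EINVAL */
	}
	volt = opp_get_voltage(opp);
	rcu_read_unlock();

	dev_info(dev, "selected %lu Hz at %lu uV\n", *freq, volt);
	return 0;
}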
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 0f4d613..d78327f 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -138,6 +138,15 @@
 	struct hlist_head htbl[RPC_HASH_SZ];
 };
 
+struct fastrpc_mmap {
+	struct hlist_node hn;
+	struct ion_handle *handle;
+	void *virt;
+	uint32_t vaddrin;
+	uint32_t vaddrout;
+	int size;
+};
+
 struct fastrpc_buf {
 	struct ion_handle *handle;
 	void *virt;
@@ -146,6 +155,11 @@
 	int used;
 };
 
+struct file_data {
+	spinlock_t hlock;
+	struct hlist_head hlst;
+};
+
 struct fastrpc_device {
 	uint32_t tgid;
 	struct hlist_node hn;
@@ -168,18 +182,31 @@
 	}
 }
 
+static void free_map(struct fastrpc_mmap *map)
+{
+	struct fastrpc_apps *me = &gfa;
+	if (map->handle) {
+		if (map->virt) {
+			ion_unmap_kernel(me->iclient, map->handle);
+			map->virt = 0;
+		}
+		ion_free(me->iclient, map->handle);
+	}
+	map->handle = 0;
+}
+
 static int alloc_mem(struct fastrpc_buf *buf)
 {
 	struct ion_client *clnt = gfa.iclient;
 	struct sg_table *sg;
 	int err = 0;
-
+	buf->handle = 0;
+	buf->virt = 0;
 	buf->handle = ion_alloc(clnt, buf->size, SZ_4K,
 				ION_HEAP(ION_AUDIO_HEAP_ID), 0);
 	VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
 	if (err)
 		goto bail;
-	buf->virt = 0;
 	VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
 	if (err)
 		goto bail;
@@ -211,7 +238,6 @@
 static void context_list_dtor(struct smq_context_list *me)
 {
 	kfree(me->ls);
-	me->ls = 0;
 }
 
 static void context_list_alloc_ctx(struct smq_context_list *me,
@@ -277,7 +303,6 @@
 	if (rlen < 0) {
 		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
 		obuf->size += buf_page_size(rlen);
-		obuf->handle = 0;
 		VERIFY(err, 0 == alloc_mem(obuf));
 		if (err)
 			goto bail;
@@ -314,7 +339,6 @@
 			if (obuf->handle != ibuf->handle)
 				free_mem(obuf);
 			obuf->size += buf_page_size(sizeof(*pages));
-			obuf->handle = 0;
 			VERIFY(err, 0 == alloc_mem(obuf));
 			if (err)
 				goto bail;
@@ -430,10 +454,15 @@
 	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
 	for (i = inbufs; i < inbufs + outbufs; ++i) {
 		if (rpra[i].buf.pv != pra[i].buf.pv) {
-			VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
+			if (!kernel) {
+				VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
 					rpra[i].buf.pv, rpra[i].buf.len));
-			if (err)
-				goto bail;
+				if (err)
+					goto bail;
+			} else {
+				memmove(pra[i].buf.pv, rpra[i].buf.pv,
+							rpra[i].buf.len);
+			}
 		}
 	}
 	size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
@@ -471,15 +500,17 @@
 		dmac_inv_range(rpra, (char *)rpra + used);
 }
 
-static int fastrpc_invoke_send(struct fastrpc_apps *me, uint32_t handle,
+static int fastrpc_invoke_send(struct fastrpc_apps *me,
+				 uint32_t kernel, uint32_t handle,
 				 uint32_t sc, struct smq_invoke_ctx *ctx,
 				 struct fastrpc_buf *buf)
 {
 	struct smq_msg msg;
 	int err = 0, len;
-
 	msg.pid = current->tgid;
 	msg.tid = current->pid;
+	if (kernel)
+		msg.pid = 0;
 	msg.invoke.header.ctx = ctx;
 	msg.invoke.header.handle = handle;
 	msg.invoke.header.sc = sc;
@@ -595,12 +626,15 @@
 	VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
 	if (err)
 		goto bail;
+
+	INIT_HLIST_NODE(&fd->hn);
+
 	fd->buf.size = PAGE_SIZE;
 	VERIFY(err, 0 == alloc_mem(&fd->buf));
 	if (err)
 		goto bail;
 	fd->tgid = current->tgid;
-	INIT_HLIST_NODE(&fd->hn);
+
 	*dev = fd;
  bail:
 	if (err)
@@ -681,8 +715,8 @@
 	}
 
 	context_list_alloc_ctx(&me->clst, &ctx);
-	VERIFY(err, 0 == fastrpc_invoke_send(me, invoke->handle, sc, ctx,
-						&obuf));
+	VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle, sc,
+						ctx, &obuf));
 	if (err)
 		goto bail;
 	inv_args(sc, rpra, obuf.used);
@@ -730,7 +764,7 @@
 	ioctl.handle = 1;
 	ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
 	ioctl.pra = ra;
-	VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
 	return err;
 }
 
@@ -748,7 +782,155 @@
 	ioctl.handle = 1;
 	ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
 	ioctl.pra = ra;
-	VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
+	return err;
+}
+
+static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
+					 struct fastrpc_ioctl_mmap *mmap,
+					 struct smq_phy_page *pages,
+					 int num)
+{
+	struct fastrpc_ioctl_invoke ioctl;
+	remote_arg_t ra[3];
+	int err = 0;
+	struct {
+		int pid;
+		uint32_t flags;
+		uint32_t vaddrin;
+		int num;
+	} inargs;
+
+	struct {
+		uint32_t vaddrout;
+	} routargs;
+	inargs.pid = current->tgid;
+	inargs.vaddrin = mmap->vaddrin;
+	inargs.flags = mmap->flags;
+	inargs.num = num;
+	ra[0].buf.pv = &inargs;
+	ra[0].buf.len = sizeof(inargs);
+
+	ra[1].buf.pv = pages;
+	ra[1].buf.len = num * sizeof(*pages);
+
+	ra[2].buf.pv = &routargs;
+	ra[2].buf.len = sizeof(routargs);
+
+	ioctl.handle = 1;
+	ioctl.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
+	ioctl.pra = ra;
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
+	mmap->vaddrout = routargs.vaddrout;
+	if (err)
+		goto bail;
+bail:
+	return err;
+}
+
+static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
+				 struct fastrpc_ioctl_munmap *munmap)
+{
+	struct fastrpc_ioctl_invoke ioctl;
+	remote_arg_t ra[1];
+	int err = 0;
+	struct {
+		int pid;
+		uint32_t vaddrout;
+		int size;
+	} inargs;
+
+	inargs.pid = current->tgid;
+	inargs.size = munmap->size;
+	inargs.vaddrout = munmap->vaddrout;
+	ra[0].buf.pv = &inargs;
+	ra[0].buf.len = sizeof(inargs);
+
+	ioctl.handle = 1;
+	ioctl.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
+	ioctl.pra = ra;
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
+	return err;
+}
+
+static int fastrpc_internal_munmap(struct fastrpc_apps *me,
+				   struct file_data *fdata,
+				   struct fastrpc_ioctl_munmap *munmap)
+{
+	int err = 0;
+	struct fastrpc_mmap *map = 0, *mapfree = 0;
+	struct hlist_node *pos, *n;
+	VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap)));
+	if (err)
+		goto bail;
+	spin_lock(&fdata->hlock);
+	hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
+		if (map->vaddrout == munmap->vaddrout &&
+		    map->size == munmap->size) {
+			hlist_del(&map->hn);
+			mapfree = map;
+			map = 0;
+			break;
+		}
+	}
+	spin_unlock(&fdata->hlock);
+bail:
+	if (mapfree) {
+		free_map(mapfree);
+		kfree(mapfree);
+	}
+	return err;
+}
+
+
+static int fastrpc_internal_mmap(struct fastrpc_apps *me,
+				 struct file_data *fdata,
+				 struct fastrpc_ioctl_mmap *mmap)
+{
+	struct ion_client *clnt = gfa.iclient;
+	struct fastrpc_mmap *map = 0;
+	struct smq_phy_page *pages = 0;
+	void *buf;
+	int len;
+	int num;
+	int err = 0;
+
+	VERIFY(err, 0 != (map = kzalloc(sizeof(*map), GFP_KERNEL)));
+	if (err)
+		goto bail;
+	map->handle = ion_import_dma_buf(clnt, mmap->fd);
+	VERIFY(err, 0 == IS_ERR_OR_NULL(map->handle));
+	if (err)
+		goto bail;
+	VERIFY(err, 0 != (map->virt = ion_map_kernel(clnt, map->handle)));
+	if (err)
+		goto bail;
+	buf = (void *)mmap->vaddrin;
+	len =  mmap->size;
+	num = buf_num_pages(buf, len);
+	VERIFY(err, 0 != (pages = kzalloc(num * sizeof(*pages), GFP_KERNEL)));
+	if (err)
+		goto bail;
+	VERIFY(err, 0 < (num = buf_get_pages(buf, len, num, 1, pages, num)));
+	if (err)
+		goto bail;
+
+	VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, num));
+	if (err)
+		goto bail;
+	map->vaddrin = mmap->vaddrin;
+	map->vaddrout = mmap->vaddrout;
+	map->size = mmap->size;
+	INIT_HLIST_NODE(&map->hn);
+	spin_lock(&fdata->hlock);
+	hlist_add_head(&map->hn, &fdata->hlst);
+	spin_unlock(&fdata->hlock);
+ bail:
+	if (err && map) {
+		free_map(map);
+		kfree(map);
+	}
+	kfree(pages);
 	return err;
 }
 
@@ -781,22 +963,48 @@
 
 static int fastrpc_device_release(struct inode *inode, struct file *file)
 {
+	struct file_data *fdata = (struct file_data *)file->private_data;
 	(void)fastrpc_release_current_dsp_process();
 	cleanup_current_dev();
+	if (fdata) {
+		struct fastrpc_mmap *map;
+		struct hlist_node *n, *pos;
+		file->private_data = 0;
+		hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
+			hlist_del(&map->hn);
+			free_map(map);
+			kfree(map);
+		}
+		kfree(fdata);
+	}
 	return 0;
 }
 
 static int fastrpc_device_open(struct inode *inode, struct file *filp)
 {
 	int err = 0;
-
+	filp->private_data = 0;
 	if (0 != try_module_get(THIS_MODULE)) {
+		struct file_data *fdata = 0;
 		/* This call will cause a dev to be created
 		 * which will addref this module
 		 */
+		VERIFY(err, 0 != (fdata = kzalloc(sizeof(*fdata), GFP_KERNEL)));
+		if (err)
+			goto bail;
+
+		spin_lock_init(&fdata->hlock);
+		INIT_HLIST_HEAD(&fdata->hlst);
+
 		VERIFY(err, 0 == fastrpc_create_current_dsp_process());
 		if (err)
+			goto bail;
+		filp->private_data = fdata;
+bail:
+		if (err) {
 			cleanup_current_dev();
+			kfree(fdata);
+		}
 		module_put(THIS_MODULE);
 	}
 	return err;
@@ -808,8 +1016,11 @@
 {
 	struct fastrpc_apps *me = &gfa;
 	struct fastrpc_ioctl_invoke invoke;
+	struct fastrpc_ioctl_mmap mmap;
+	struct fastrpc_ioctl_munmap munmap;
 	remote_arg_t *pra = 0;
 	void *param = (char *)ioctl_param;
+	struct file_data *fdata = (struct file_data *)file->private_data;
 	int bufs, err = 0;
 
 	switch (ioctl_num) {
@@ -834,6 +1045,29 @@
 		if (err)
 			goto bail;
 		break;
+	case FASTRPC_IOCTL_MMAP:
+		VERIFY(err, 0 == copy_from_user(&mmap, param,
+						sizeof(mmap)));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata,
+							      &mmap)));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap)));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_MUNMAP:
+		VERIFY(err, 0 == copy_from_user(&munmap, param,
+						sizeof(munmap)));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_munmap(me, fdata,
+								&munmap)));
+		if (err)
+			goto bail;
+		break;
 	default:
 		err = -ENOTTY;
 		break;
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 8932d3c..f2804ad 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -16,7 +16,9 @@
 
 #include <linux/types.h>
 
-#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke)
+#define FASTRPC_IOCTL_INVOKE  _IOWR('R', 1, struct fastrpc_ioctl_invoke)
+#define FASTRPC_IOCTL_MMAP    _IOWR('R', 2, struct fastrpc_ioctl_mmap)
+#define FASTRPC_IOCTL_MUNMAP  _IOWR('R', 3, struct fastrpc_ioctl_munmap)
 #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
 #define DEVICE_NAME      "adsprpc-smd"
 
@@ -92,6 +94,20 @@
 	remote_arg_t *pra;	/* remote arguments list */
 };
 
+struct fastrpc_ioctl_munmap {
+	uint32_t vaddrout;	/* address to unmap */
+	int  size;		/* size */
+};
+
+
+struct fastrpc_ioctl_mmap {
+	int fd;			/* ion fd */
+	uint32_t flags;		/* flags for dsp to map with */
+	uint32_t vaddrin;	/* optional virtual address */
+	int  size;		/* size */
+	uint32_t vaddrout;	/* dsp's virtual address */
+};
+
 struct smq_null_invoke {
 	struct smq_invoke_ctx *ctx; /* invoke caller context */
 	uint32_t handle;	    /* handle to invoke */
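
For illustration only: a user-space sketch of driving the new FASTRPC_IOCTL_MMAP, assuming the definitions above are visible to the caller and that the driver's DEVICE_NAME shows up as /dev/adsprpc-smd. get_ion_buffer_fd() is a placeholder for the ion allocation step, not a real API, and flags == 0 is an assumed "default mapping" value.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "adsprpc_shared.h"	/* FASTRPC_IOCTL_MMAP, struct fastrpc_ioctl_mmap */

extern int get_ion_buffer_fd(int size);	/* placeholder, see note above */

int map_buffer_on_dsp(int size, uint32_t *dsp_vaddr)
{
	struct fastrpc_ioctl_mmap req;
	int dev, ret;

	dev = open("/dev/" DEVICE_NAME, O_RDWR);
	if (dev < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.fd = get_ion_buffer_fd(size);	/* ion buffer to share with the dsp */
	req.flags = 0;				/* assumed default mapping flags    */
	req.vaddrin = 0;			/* no preferred dsp-side address    */
	req.size = size;

	ret = ioctl(dev, FASTRPC_IOCTL_MMAP, &req);
	if (!ret)
		*dsp_vaddr = req.vaddrout;	/* dsp-side address of the mapping */

	close(dev);
	return ret;
}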
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 0c93101..071dd69 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -734,8 +734,8 @@
 					(driver->log_on_demand_support)) {
 			driver->apps_rsp_buf[0] = 0x78;
 			/* Copy log code received */
-			*(uint16_t *)(driver->apps_rsp_buf+1) =
-							 *(uint16_t *)buf;
+			*(uint16_t *)(driver->apps_rsp_buf + 1) =
+							*(uint16_t *)(buf + 1);
 			driver->apps_rsp_buf[3] = 0x1;/* Unknown */
 			encode_rsp_and_send(3);
 		}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index a4eea54..9d9df2a 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -162,6 +162,8 @@
 	smd_channel_t *ch;
 	smd_channel_t *ch_save;
 
+	struct mutex smd_ch_mutex;
+
 	int in_busy_1;
 	int in_busy_2;
 
@@ -249,6 +251,7 @@
 	unsigned char *buf_event_mask_update;
 	unsigned char *buf_feature_mask_update;
 	int read_len_legacy;
+	struct mutex diag_hdlc_mutex;
 	unsigned char *hdlc_buf;
 	unsigned hdlc_count;
 	unsigned hdlc_escape;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 6a14143..79a73f3 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -611,8 +611,12 @@
 						diag_check_mode_reset(buf)) {
 						return;
 					}
+					mutex_lock(&driver->smd_data[index].
+								smd_ch_mutex);
 					smd_write(driver->smd_data[index].ch,
 							buf, len);
+					mutex_unlock(&driver->smd_data[index].
+								smd_ch_mutex);
 				} else {
 					pr_err("diag: In %s, smd channel %d not open\n",
 						__func__, index);
@@ -1003,6 +1007,9 @@
 {
 	struct diag_hdlc_decode_type hdlc;
 	int ret, type = 0;
+
+	mutex_lock(&driver->diag_hdlc_mutex);
+
 	pr_debug("diag: HDLC decode fn, len of data  %d\n", len);
 	hdlc.dest_ptr = driver->hdlc_buf;
 	hdlc.dest_size = USB_MAX_OUT_BUF;
@@ -1023,14 +1030,17 @@
 	if (hdlc.dest_idx < 4) {
 		pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
 			__func__, len, hdlc.dest_idx);
+		mutex_unlock(&driver->diag_hdlc_mutex);
 		return;
 	}
 
 	if (ret) {
 		type = diag_process_apps_pkt(driver->hdlc_buf,
 							  hdlc.dest_idx - 3);
-		if (type < 0)
+		if (type < 0) {
+			mutex_unlock(&driver->diag_hdlc_mutex);
 			return;
+		}
 	} else if (driver->debug_flag) {
 		printk(KERN_ERR "Packet dropped due to bad HDLC coding/CRC"
 				" errors or partial packet received, packet"
@@ -1049,10 +1059,15 @@
 		if (chk_apps_only()) {
 			diag_send_error_rsp(hdlc.dest_idx);
 		} else { /* APQ 8060, Let Q6 respond */
-			if (driver->smd_data[LPASS_DATA].ch)
+			if (driver->smd_data[LPASS_DATA].ch) {
+				mutex_lock(&driver->smd_data[LPASS_DATA].
+								smd_ch_mutex);
 				smd_write(driver->smd_data[LPASS_DATA].ch,
 						driver->hdlc_buf,
 						hdlc.dest_idx - 3);
+				mutex_unlock(&driver->smd_data[LPASS_DATA].
+								smd_ch_mutex);
+			}
 		}
 		type = 0;
 	}
@@ -1067,8 +1082,10 @@
 	if ((driver->smd_data[MODEM_DATA].ch) && (ret) && (type) &&
 						(hdlc.dest_idx > 3)) {
 		APPEND_DEBUG('g');
+		mutex_lock(&driver->smd_data[MODEM_DATA].smd_ch_mutex);
 		smd_write(driver->smd_data[MODEM_DATA].ch,
 					driver->hdlc_buf, hdlc.dest_idx - 3);
+		mutex_unlock(&driver->smd_data[MODEM_DATA].smd_ch_mutex);
 		APPEND_DEBUG('h');
 #ifdef DIAG_DEBUG
 		printk(KERN_INFO "writing data to SMD, pkt length %d\n", len);
@@ -1076,6 +1093,7 @@
 			       1, DUMP_PREFIX_ADDRESS, data, len, 1);
 #endif /* DIAG DEBUG */
 	}
+	mutex_unlock(&driver->diag_hdlc_mutex);
 }
 
 #ifdef CONFIG_DIAG_OVER_USB
@@ -1411,6 +1429,7 @@
 {
 	smd_info->peripheral = peripheral;
 	smd_info->type = type;
+	mutex_init(&smd_info->smd_ch_mutex);
 
 	switch (peripheral) {
 	case MODEM_DATA:
@@ -1525,6 +1544,7 @@
 	diag_debug_buf_idx = 0;
 	driver->read_len_legacy = 0;
 	driver->use_device_tree = has_device_tree();
+	mutex_init(&driver->diag_hdlc_mutex);
 	mutex_init(&driver->diag_cntl_mutex);
 
 	success = diag_smd_constructor(&driver->smd_data[MODEM_DATA],
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 5c1cc5a..24cf30a 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -2366,6 +2366,7 @@
 	ce_support->aes_ccm  = false;
 	ce_support->ota = pce_dev->ota;
 	ce_support->aligned_only = false;
+	ce_support->is_shared = false;
 	ce_support->bam = false;
 	return 0;
 }
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 8a31003..3ff84cf 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -113,6 +113,7 @@
 	bool ota;
 	bool aligned_only;
 	bool bam;
+	bool is_shared;
 };
 
 /* Sha operation parameters */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index 84d41da..7b0964d 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2634,6 +2634,7 @@
 	ce_support->aes_ccm = true;
 	ce_support->ota = false;
 	ce_support->aligned_only = false;
+	ce_support->is_shared = false;
 	ce_support->bam = false;
 	return 0;
 }
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 9d8e825..b484c8a 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -61,6 +61,7 @@
 	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
 	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
 	int memsize;				/* Memory allocated */
+	int is_shared;				/* CE HW is shared */
 
 	void __iomem *iobase;	    /* Virtual io base of CE HW  */
 	unsigned int phy_iobase;    /* Physical io base of CE HW    */
@@ -2619,6 +2620,9 @@
 	struct resource *resource;
 	int rc = 0;
 
+	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-shared");
+
 	if (of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,bam-pipe-pair",
 				&pce_dev->ce_sps.pipe_pair_index)) {
@@ -2902,6 +2906,7 @@
 	ce_support->aes_xts = true;
 	ce_support->ota = false;
 	ce_support->bam = true;
+	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
 	ce_support->aes_ccm = true;
 	if (pce_dev->ce_sps.minor_version)
 		ce_support->aligned_only = false;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 464fa21..1c63b70 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -30,7 +30,7 @@
 comment "DEVFREQ Governors"
 
 config DEVFREQ_GOV_SIMPLE_ONDEMAND
-	bool "Simple Ondemand"
+	tristate "Simple Ondemand"
 	help
 	  Chooses frequency based on the recent load on the device. Works
 	  similar as ONDEMAND governor of CPUFREQ does. A device with
@@ -39,7 +39,7 @@
 	  values to the governor with data field at devfreq_add_device().
 
 config DEVFREQ_GOV_PERFORMANCE
-	bool "Performance"
+	tristate "Performance"
 	help
 	  Sets the frequency at the maximum available frequency.
 	  This governor always returns UINT_MAX as frequency so that
@@ -47,7 +47,7 @@
 	  at any time.
 
 config DEVFREQ_GOV_POWERSAVE
-	bool "Powersave"
+	tristate "Powersave"
 	help
 	  Sets the frequency at the minimum available frequency.
 	  This governor always returns 0 as frequency so that
@@ -55,7 +55,7 @@
 	  at any time.
 
 config DEVFREQ_GOV_USERSPACE
-	bool "Userspace"
+	tristate "Userspace"
 	help
 	  Sets the frequency at the user specified one.
 	  This governor returns the user configured frequency if there
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 70c31d4..89d779c 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,21 +27,17 @@
 #include <linux/hrtimer.h>
 #include "governor.h"
 
-struct class *devfreq_class;
+static struct class *devfreq_class;
 
 /*
- * devfreq_work periodically monitors every registered device.
- * The minimum polling interval is one jiffy. The polling interval is
- * determined by the minimum polling period among all polling devfreq
- * devices. The resolution of polling interval is one jiffy.
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
  */
-static bool polling;
 static struct workqueue_struct *devfreq_wq;
-static struct delayed_work devfreq_work;
 
-/* wait removing if this is to be removed */
-static struct devfreq *wait_remove_device;
-
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
 /* The list of all device-devfreq */
 static LIST_HEAD(devfreq_list);
 static DEFINE_MUTEX(devfreq_list_lock);
@@ -73,6 +69,79 @@
 }
 
 /**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq:	the devfreq instance
+ * @freq:	the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq:	the devfreq instance
+ * @freq:	the update target frequency
+ */
+static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev, prev_lev;
+	unsigned long cur_time;
+
+	lev = devfreq_get_freq_level(devfreq, freq);
+	if (lev < 0)
+		return lev;
+
+	cur_time = jiffies;
+	devfreq->time_in_state[lev] +=
+			 cur_time - devfreq->last_stat_updated;
+	if (freq != devfreq->previous_freq) {
+		prev_lev = devfreq_get_freq_level(devfreq,
+						devfreq->previous_freq);
+		devfreq->trans_table[(prev_lev *
+				devfreq->profile->max_state) + lev]++;
+		devfreq->total_trans++;
+	}
+	devfreq->last_stat_updated = cur_time;
+
+	return 0;
+}
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name:	name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+	struct devfreq_governor *tmp_governor;
+
+	if (unlikely(IS_ERR_OR_NULL(name))) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+			return tmp_governor;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
  * update_devfreq() - Reevaluate the device and configure frequency.
  * @devfreq:	the devfreq instance.
  *
@@ -90,6 +159,9 @@
 		return -EINVAL;
 	}
 
+	if (!devfreq->governor)
+		return -EINVAL;
+
 	/* Reevaluate the proper frequency */
 	err = devfreq->governor->get_target_freq(devfreq, &freq);
 	if (err)
@@ -116,16 +188,173 @@
 	if (err)
 		return err;
 
+	if (devfreq->profile->freq_table)
+		if (devfreq_update_status(devfreq, freq))
+			dev_err(&devfreq->dev,
+				"Couldn't update frequency transition information.\n");
+
 	devfreq->previous_freq = freq;
 	return err;
 }
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work:	the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+	int err;
+	struct devfreq *devfreq = container_of(work,
+					struct devfreq, work.work);
+
+	mutex_lock(&devfreq->lock);
+	err = update_devfreq(devfreq);
+	if (err)
+		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+	queue_delayed_work(devfreq_wq, &devfreq->work,
+				msecs_to_jiffies(devfreq->profile->polling_ms));
+	mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoring. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+	INIT_DELAYED_WORK_DEFERRABLE(&devfreq->work, devfreq_monitor);
+	if (devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is the same as devfreq_monitor_stop(),
+ * intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+	mutex_lock(&devfreq->lock);
+	if (devfreq->stop_polling) {
+		mutex_unlock(&devfreq->lock);
+		return;
+	}
+
+	devfreq->stop_polling = true;
+	mutex_unlock(&devfreq->lock);
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq:    the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+	mutex_lock(&devfreq->lock);
+	if (!devfreq->stop_polling)
+		goto out;
+
+	if (!delayed_work_pending(&devfreq->work) &&
+			devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+	devfreq->stop_polling = false;
+
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq:    the devfreq instance.
+ * @delay:      new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+	unsigned int cur_delay = devfreq->profile->polling_ms;
+	unsigned int new_delay = *delay;
+
+	mutex_lock(&devfreq->lock);
+	devfreq->profile->polling_ms = new_delay;
+
+	if (devfreq->stop_polling)
+		goto out;
+
+	/* if new delay is zero, stop polling */
+	if (!new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		return;
+	}
+
+	/* if current delay is zero, start polling with new delay */
+	if (!cur_delay) {
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+		goto out;
+	}
+
+	/* if current delay is greater than new delay, restart polling */
+	if (cur_delay > new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		mutex_lock(&devfreq->lock);
+		if (!devfreq->stop_polling)
+			queue_delayed_work(devfreq_wq, &devfreq->work,
+			      msecs_to_jiffies(devfreq->profile->polling_ms));
+	}
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
 
 /**
  * devfreq_notifier_call() - Notify that the device frequency requirements
  *			   has been changed out of devfreq framework.
- * @nb		the notifier_block (supposed to be devfreq->nb)
- * @type	not used
- * @devp	not used
+ * @nb:		the notifier_block (supposed to be devfreq->nb)
+ * @type:	not used
+ * @devp:	not used
  *
  * Called by a notifier that uses devfreq->nb.
  */
@@ -143,59 +372,34 @@
 }
 
 /**
- * _remove_devfreq() - Remove devfreq from the device.
+ * _remove_devfreq() - Remove devfreq from the list and release its resources.
  * @devfreq:	the devfreq struct
  * @skip:	skip calling device_unregister().
- *
- * Note that the caller should lock devfreq->lock before calling
- * this. _remove_devfreq() will unlock it and free devfreq
- * internally. devfreq_list_lock should be locked by the caller
- * as well (not relased at return)
- *
- * Lock usage:
- * devfreq->lock: locked before call.
- *		  unlocked at return (and freed)
- * devfreq_list_lock: locked before call.
- *		      kept locked at return.
- *		      if devfreq is centrally polled.
- *
- * Freed memory:
- * devfreq
  */
 static void _remove_devfreq(struct devfreq *devfreq, bool skip)
 {
-	if (!mutex_is_locked(&devfreq->lock)) {
-		WARN(true, "devfreq->lock must be locked by the caller.\n");
+	mutex_lock(&devfreq_list_lock);
+	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
+		mutex_unlock(&devfreq_list_lock);
+		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
 		return;
 	}
-	if (!devfreq->governor->no_central_polling &&
-	    !mutex_is_locked(&devfreq_list_lock)) {
-		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
-		return;
-	}
+	list_del(&devfreq->node);
+	mutex_unlock(&devfreq_list_lock);
 
-	if (devfreq->being_removed)
-		return;
-
-	devfreq->being_removed = true;
+	if (devfreq->governor)
+		devfreq->governor->event_handler(devfreq,
+						 DEVFREQ_GOV_STOP, NULL);
 
 	if (devfreq->profile->exit)
 		devfreq->profile->exit(devfreq->dev.parent);
 
-	if (devfreq->governor->exit)
-		devfreq->governor->exit(devfreq);
-
 	if (!skip && get_device(&devfreq->dev)) {
 		device_unregister(&devfreq->dev);
 		put_device(&devfreq->dev);
 	}
 
-	if (!devfreq->governor->no_central_polling)
-		list_del(&devfreq->node);
-
-	mutex_unlock(&devfreq->lock);
 	mutex_destroy(&devfreq->lock);
-
 	kfree(devfreq);
 }
 
@@ -210,163 +414,39 @@
 static void devfreq_dev_release(struct device *dev)
 {
 	struct devfreq *devfreq = to_devfreq(dev);
-	bool central_polling = !devfreq->governor->no_central_polling;
 
-	/*
-	 * If devfreq_dev_release() was called by device_unregister() of
-	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
-	 * being_removed is already set. This also partially checks the case
-	 * where devfreq_dev_release() is called from a thread other than
-	 * the one called _remove_devfreq(); however, this case is
-	 * dealt completely with another following being_removed check.
-	 *
-	 * Because being_removed is never being
-	 * unset, we do not need to worry about race conditions on
-	 * being_removed.
-	 */
-	if (devfreq->being_removed)
-		return;
-
-	if (central_polling)
-		mutex_lock(&devfreq_list_lock);
-
-	mutex_lock(&devfreq->lock);
-
-	/*
-	 * Check being_removed flag again for the case where
-	 * devfreq_dev_release() was called in a thread other than the one
-	 * possibly called _remove_devfreq().
-	 */
-	if (devfreq->being_removed) {
-		mutex_unlock(&devfreq->lock);
-		goto out;
-	}
-
-	/* devfreq->lock is unlocked and removed in _removed_devfreq() */
 	_remove_devfreq(devfreq, true);
-
-out:
-	if (central_polling)
-		mutex_unlock(&devfreq_list_lock);
-}
-
-/**
- * devfreq_monitor() - Periodically poll devfreq objects.
- * @work: the work struct used to run devfreq_monitor periodically.
- *
- */
-static void devfreq_monitor(struct work_struct *work)
-{
-	static unsigned long last_polled_at;
-	struct devfreq *devfreq, *tmp;
-	int error;
-	unsigned long jiffies_passed;
-	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
-	struct device *dev;
-
-	/* Initially last_polled_at = 0, polling every device at bootup */
-	jiffies_passed = now - last_polled_at;
-	last_polled_at = now;
-	if (jiffies_passed == 0)
-		jiffies_passed = 1;
-
-	mutex_lock(&devfreq_list_lock);
-	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
-		mutex_lock(&devfreq->lock);
-		dev = devfreq->dev.parent;
-
-		/* Do not remove tmp for a while */
-		wait_remove_device = tmp;
-
-		if (devfreq->governor->no_central_polling ||
-		    devfreq->next_polling == 0) {
-			mutex_unlock(&devfreq->lock);
-			continue;
-		}
-		mutex_unlock(&devfreq_list_lock);
-
-		/*
-		 * Reduce more next_polling if devfreq_wq took an extra
-		 * delay. (i.e., CPU has been idled.)
-		 */
-		if (devfreq->next_polling <= jiffies_passed) {
-			error = update_devfreq(devfreq);
-
-			/* Remove a devfreq with an error. */
-			if (error && error != -EAGAIN) {
-
-				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
-					error, devfreq->governor->name);
-
-				/*
-				 * Unlock devfreq before locking the list
-				 * in order to avoid deadlock with
-				 * find_device_devfreq or others
-				 */
-				mutex_unlock(&devfreq->lock);
-				mutex_lock(&devfreq_list_lock);
-				/* Check if devfreq is already removed */
-				if (IS_ERR(find_device_devfreq(dev)))
-					continue;
-				mutex_lock(&devfreq->lock);
-				/* This unlocks devfreq->lock and free it */
-				_remove_devfreq(devfreq, false);
-				continue;
-			}
-			devfreq->next_polling = devfreq->polling_jiffies;
-		} else {
-			devfreq->next_polling -= jiffies_passed;
-		}
-
-		if (devfreq->next_polling)
-			next_jiffies = (next_jiffies > devfreq->next_polling) ?
-					devfreq->next_polling : next_jiffies;
-
-		mutex_unlock(&devfreq->lock);
-		mutex_lock(&devfreq_list_lock);
-	}
-	wait_remove_device = NULL;
-	mutex_unlock(&devfreq_list_lock);
-
-	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
-		polling = true;
-		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
-	} else {
-		polling = false;
-	}
 }
 
 /**
  * devfreq_add_device() - Add devfreq feature to the device
  * @dev:	the device to add devfreq feature.
  * @profile:	device-specific profile to run devfreq.
- * @governor:	the policy to choose frequency.
+ * @governor_name:	name of the policy to choose frequency.
  * @data:	private data for the governor. The devfreq framework does not
  *		touch this value.
  */
 struct devfreq *devfreq_add_device(struct device *dev,
 				   struct devfreq_dev_profile *profile,
-				   const struct devfreq_governor *governor,
+				   const char *governor_name,
 				   void *data)
 {
 	struct devfreq *devfreq;
+	struct devfreq_governor *governor;
 	int err = 0;
 
-	if (!dev || !profile || !governor) {
+	if (!dev || !profile || !governor_name) {
 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
 
-
-	if (!governor->no_central_polling) {
-		mutex_lock(&devfreq_list_lock);
-		devfreq = find_device_devfreq(dev);
-		mutex_unlock(&devfreq_list_lock);
-		if (!IS_ERR(devfreq)) {
-			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
-			err = -EINVAL;
-			goto err_out;
-		}
+	mutex_lock(&devfreq_list_lock);
+	devfreq = find_device_devfreq(dev);
+	mutex_unlock(&devfreq_list_lock);
+	if (!IS_ERR(devfreq)) {
+		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+		err = -EINVAL;
+		goto err_out;
 	}
 
 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
@@ -383,92 +463,316 @@
 	devfreq->dev.class = devfreq_class;
 	devfreq->dev.release = devfreq_dev_release;
 	devfreq->profile = profile;
-	devfreq->governor = governor;
+	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 	devfreq->previous_freq = profile->initial_freq;
 	devfreq->data = data;
-	devfreq->next_polling = devfreq->polling_jiffies
-			      = msecs_to_jiffies(devfreq->profile->polling_ms);
 	devfreq->nb.notifier_call = devfreq_notifier_call;
 
+	devfreq->trans_table =	devm_kzalloc(dev, sizeof(unsigned int) *
+						devfreq->profile->max_state *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	devfreq->last_stat_updated = jiffies;
+
 	dev_set_name(&devfreq->dev, dev_name(dev));
 	err = device_register(&devfreq->dev);
 	if (err) {
 		put_device(&devfreq->dev);
+		mutex_unlock(&devfreq->lock);
 		goto err_dev;
 	}
 
-	if (governor->init)
-		err = governor->init(devfreq);
-	if (err)
-		goto err_init;
-
 	mutex_unlock(&devfreq->lock);
 
-	if (governor->no_central_polling)
-		goto out;
-
 	mutex_lock(&devfreq_list_lock);
-
 	list_add(&devfreq->node, &devfreq_list);
 
-	if (devfreq_wq && devfreq->next_polling && !polling) {
-		polling = true;
-		queue_delayed_work(devfreq_wq, &devfreq_work,
-				   devfreq->next_polling);
-	}
+	governor = find_devfreq_governor(devfreq->governor_name);
+	if (!IS_ERR(governor))
+		devfreq->governor = governor;
+	if (devfreq->governor)
+		err = devfreq->governor->event_handler(devfreq,
+					DEVFREQ_GOV_START, NULL);
 	mutex_unlock(&devfreq_list_lock);
-out:
+	if (err) {
+		dev_err(dev, "%s: Unable to start governor for the device\n",
+			__func__);
+		goto err_init;
+	}
+
 	return devfreq;
 
 err_init:
+	list_del(&devfreq->node);
 	device_unregister(&devfreq->dev);
 err_dev:
-	mutex_unlock(&devfreq->lock);
 	kfree(devfreq);
 err_out:
 	return ERR_PTR(err);
 }
+EXPORT_SYMBOL(devfreq_add_device);
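For illustration, a minimal caller sketch of the string-based API above (not part of the patch; my_target, my_profile and the 200000 kHz value are hypothetical, and the .target/.polling_ms field names assume the mainline devfreq_dev_profile of this period). The "simple_ondemand" name mirrors the exynos4_bus conversion later in this diff:

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical target callback: program the hardware to *freq. */
static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq = 200000,
	.polling_ms   = 100,
	.target       = my_target,
};

static int my_probe(struct platform_device *pdev)
{
	struct devfreq *df;

	/* The governor is now selected by name; if that governor has not
	 * registered yet, ->governor stays NULL until devfreq_add_governor()
	 * attaches and starts it. */
	df = devfreq_add_device(&pdev->dev, &my_profile, "simple_ondemand", NULL);
	return IS_ERR(df) ? PTR_ERR(df) : 0;
}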
 
 /**
  * devfreq_remove_device() - Remove devfreq feature from a device.
- * @devfreq	the devfreq instance to be removed
+ * @devfreq:	the devfreq instance to be removed
  */
 int devfreq_remove_device(struct devfreq *devfreq)
 {
-	bool central_polling;
-
 	if (!devfreq)
 		return -EINVAL;
 
-	central_polling = !devfreq->governor->no_central_polling;
-
-	if (central_polling) {
-		mutex_lock(&devfreq_list_lock);
-		while (wait_remove_device == devfreq) {
-			mutex_unlock(&devfreq_list_lock);
-			schedule();
-			mutex_lock(&devfreq_list_lock);
-		}
-	}
-
-	mutex_lock(&devfreq->lock);
-	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
-
-	if (central_polling)
-		mutex_unlock(&devfreq_list_lock);
+	_remove_devfreq(devfreq, false);
 
 	return 0;
 }
+EXPORT_SYMBOL(devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	if (!devfreq->governor)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+				DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	if (!devfreq->governor)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+				DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
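A short sketch of how a driver might hook these two helpers into its system sleep callbacks (illustrative only; my_dev_suspend, my_dev_resume and the file-scope df pointer are assumptions, not part of the patch):

#include <linux/devfreq.h>
#include <linux/pm.h>

/* In a real driver df would live in drvdata; a file-scope pointer keeps
 * the sketch short. */
static struct devfreq *df;

static int my_dev_suspend(struct device *dev)
{
	/* Sends DEVFREQ_GOV_SUSPEND so the governor quiesces across suspend. */
	return devfreq_suspend_device(df);
}

static int my_dev_resume(struct device *dev)
{
	/* Sends DEVFREQ_GOV_RESUME; returns 0 when no governor is bound. */
	return devfreq_resume_device(df);
}

/* Point the driver's .driver.pm at my_dev_pm_ops to activate the hooks. */
static SIMPLE_DEV_PM_OPS(my_dev_pm_ops, my_dev_suspend, my_dev_resume);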
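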
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor:	the devfreq governor to be added
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (!IS_ERR(g)) {
+		pr_err("%s: governor %s already registered\n", __func__,
+		       g->name);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	list_add(&governor->node, &devfreq_governor_list);
+
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret = 0;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* The following should never occur */
+			if (devfreq->governor) {
+				dev_warn(dev,
+					 "%s: Governor %s already present\n",
+					 __func__, devfreq->governor->name);
+				ret = devfreq->governor->event_handler(devfreq,
+							DEVFREQ_GOV_STOP, NULL);
+				if (ret) {
+					dev_warn(dev,
+						 "%s: Governor %s stop = %d\n",
+						 __func__,
+						 devfreq->governor->name, ret);
+				}
+				/* Fall through */
+			}
+			devfreq->governor = governor;
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_START, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s start=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+		}
+	}
+
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
+
+/**
+ * devfreq_remove_governor() - Remove devfreq governor
+ * @governor:	the devfreq governor to be removed
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (IS_ERR(g)) {
+		pr_err("%s: governor %s not registered\n", __func__,
+		       governor->name);
+		err = PTR_ERR(g);
+		goto err_out;
+	}
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* we should have a devfreq governor! */
+			if (!devfreq->governor) {
+				dev_warn(dev, "%s: Governor %s NOT present\n",
+					 __func__, governor->name);
+				continue;
+				/* Fall through */
+			}
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_STOP, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s stop=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+			devfreq->governor = NULL;
+		}
+	}
+
+	list_del(&governor->node);
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(devfreq_remove_governor);
 
 static ssize_t show_governor(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
+	if (!to_devfreq(dev)->governor)
+		return -EINVAL;
+
 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
 }
 
+static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	int ret;
+	char str_governor[DEVFREQ_NAME_LEN + 1];
+	struct devfreq_governor *governor;
+
+	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&devfreq_list_lock);
+	governor = find_devfreq_governor(str_governor);
+	if (IS_ERR(governor)) {
+		ret = PTR_ERR(governor);
+		goto out;
+	}
+	if (df->governor == governor)
+		goto out;
+
+	if (df->governor) {
+		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+		if (ret) {
+			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+				 __func__, df->governor->name, ret);
+			goto out;
+		}
+	}
+	df->governor = governor;
+	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+	if (ret)
+		dev_warn(dev, "%s: Governor %s not started(%d)\n",
+			 __func__, df->governor->name, ret);
+out:
+	mutex_unlock(&devfreq_list_lock);
+
+	if (!ret)
+		ret = count;
+	return ret;
+}
+static ssize_t show_available_governors(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct devfreq_governor *tmp_governor;
+	ssize_t count = 0;
+
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
+		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+				   "%s ", tmp_governor->name);
+	mutex_unlock(&devfreq_list_lock);
+
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+
 static ssize_t show_freq(struct device *dev,
 			 struct device_attribute *attr, char *buf)
 {
+	unsigned long freq;
+	struct devfreq *devfreq = to_devfreq(dev);
+
+	if (devfreq->profile->get_cur_freq &&
+		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+			return sprintf(buf, "%lu\n", freq);
+
+	return sprintf(buf, "%lu\n", devfreq->previous_freq);
+}
+
+static ssize_t show_target_freq(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
 }
 
@@ -486,39 +790,19 @@
 	unsigned int value;
 	int ret;
 
+	if (!df->governor)
+		return -EINVAL;
+
 	ret = sscanf(buf, "%u", &value);
 	if (ret != 1)
-		goto out;
+		return -EINVAL;
 
-	mutex_lock(&df->lock);
-	df->profile->polling_ms = value;
-	df->next_polling = df->polling_jiffies
-			 = msecs_to_jiffies(value);
-	mutex_unlock(&df->lock);
-
+	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
 	ret = count;
 
-	if (df->governor->no_central_polling)
-		goto out;
-
-	mutex_lock(&devfreq_list_lock);
-	if (df->next_polling > 0 && !polling) {
-		polling = true;
-		queue_delayed_work(devfreq_wq, &devfreq_work,
-				   df->next_polling);
-	}
-	mutex_unlock(&devfreq_list_lock);
-out:
 	return ret;
 }
 
-static ssize_t show_central_polling(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%d\n",
-		       !to_devfreq(dev)->governor->no_central_polling);
-}
-
 static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
 			      const char *buf, size_t count)
 {
@@ -529,7 +813,7 @@
 
 	ret = sscanf(buf, "%lu", &value);
 	if (ret != 1)
-		goto out;
+		return -EINVAL;
 
 	mutex_lock(&df->lock);
 	max = df->max_freq;
@@ -543,7 +827,6 @@
 	ret = count;
 unlock:
 	mutex_unlock(&df->lock);
-out:
 	return ret;
 }
 
@@ -563,7 +846,7 @@
 
 	ret = sscanf(buf, "%lu", &value);
 	if (ret != 1)
-		goto out;
+		return -EINVAL;
 
 	mutex_lock(&df->lock);
 	min = df->min_freq;
@@ -577,7 +860,6 @@
 	ret = count;
 unlock:
 	mutex_unlock(&df->lock);
-out:
 	return ret;
 }
 
@@ -587,34 +869,92 @@
 	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
 }
 
+static ssize_t show_available_freqs(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct devfreq *df = to_devfreq(d);
+	struct device *dev = df->dev.parent;
+	struct opp *opp;
+	ssize_t count = 0;
+	unsigned long freq = 0;
+
+	rcu_read_lock();
+	do {
+		opp = opp_find_freq_ceil(dev, &freq);
+		if (IS_ERR(opp))
+			break;
+
+		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+				   "%lu ", freq);
+		freq++;
+	} while (1);
+	rcu_read_unlock();
+
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+
+static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	ssize_t len;
+	int i, j, err;
+	unsigned int max_state = devfreq->profile->max_state;
+
+	err = devfreq_update_status(devfreq, devfreq->previous_freq);
+	if (err)
+		return 0;
+
+	len = sprintf(buf, "   From  :   To\n");
+	len += sprintf(buf + len, "         :");
+	for (i = 0; i < max_state; i++)
+		len += sprintf(buf + len, "%8u",
+				devfreq->profile->freq_table[i]);
+
+	len += sprintf(buf + len, "   time(ms)\n");
+
+	for (i = 0; i < max_state; i++) {
+		if (devfreq->profile->freq_table[i]
+					== devfreq->previous_freq) {
+			len += sprintf(buf + len, "*");
+		} else {
+			len += sprintf(buf + len, " ");
+		}
+		len += sprintf(buf + len, "%8u:",
+				devfreq->profile->freq_table[i]);
+		for (j = 0; j < max_state; j++)
+			len += sprintf(buf + len, "%8u",
+				devfreq->trans_table[(i * max_state) + j]);
+		len += sprintf(buf + len, "%10u\n",
+			jiffies_to_msecs(devfreq->time_in_state[i]));
+	}
+
+	len += sprintf(buf + len, "Total transition : %u\n",
+					devfreq->total_trans);
+	return len;
+}
+
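The statistics above are sized from profile->max_state and indexed row-major, trans_table[from * max_state + to], matching the From/To layout printed by trans_stat, so they are meaningful only when the driver publishes its frequency list through the profile. A sketch of the driver-side fields involved (illustrative values; my_freq_table and the helper are hypothetical):

#include <linux/devfreq.h>
#include <linux/kernel.h>

/* Hypothetical frequency list, in the same unit the driver's target()
 * callback and freq_table consumers use. */
static unsigned int my_freq_table[] = { 100000, 200000, 400000 };

static void my_profile_enable_stats(struct devfreq_dev_profile *p)
{
	p->freq_table = my_freq_table;
	p->max_state  = ARRAY_SIZE(my_freq_table);
	/* trans_table[from * p->max_state + to] then counts switches from
	 * my_freq_table[from] to my_freq_table[to]. */
}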
 static struct device_attribute devfreq_attrs[] = {
-	__ATTR(governor, S_IRUGO, show_governor, NULL),
+	__ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
+	__ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
 	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
-	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+	__ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
+	__ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
 	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
 	       store_polling_interval),
 	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
 	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
+	__ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
 	{ },
 };
 
-/**
- * devfreq_start_polling() - Initialize data structure for devfreq framework and
- *			   start polling registered devfreq devices.
- */
-static int __init devfreq_start_polling(void)
-{
-	mutex_lock(&devfreq_list_lock);
-	polling = false;
-	devfreq_wq = create_freezable_workqueue("devfreq_wq");
-	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
-	mutex_unlock(&devfreq_list_lock);
-
-	devfreq_monitor(&devfreq_work.work);
-	return 0;
-}
-late_initcall(devfreq_start_polling);
-
 static int __init devfreq_init(void)
 {
 	devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -622,7 +962,15 @@
 		pr_err("%s: couldn't create class\n", __FILE__);
 		return PTR_ERR(devfreq_class);
 	}
+
+	devfreq_wq = create_freezable_workqueue("devfreq_wq");
+	if (IS_ERR(devfreq_wq)) {
+		class_destroy(devfreq_class);
+		pr_err("%s: couldn't create workqueue\n", __FILE__);
+		return PTR_ERR(devfreq_wq);
+	}
 	devfreq_class->dev_attrs = devfreq_attrs;
+
 	return 0;
 }
 subsys_initcall(devfreq_init);
@@ -630,6 +978,7 @@
 static void __exit devfreq_exit(void)
 {
 	class_destroy(devfreq_class);
+	destroy_workqueue(devfreq_wq);
 }
 module_exit(devfreq_exit);
 
@@ -641,10 +990,15 @@
 /**
  * devfreq_recommended_opp() - Helper function to get proper OPP for the
  *			     freq value given to target callback.
- * @dev		The devfreq user device. (parent of devfreq)
- * @freq	The frequency given to target function
- * @flags	Flags handed from devfreq framework.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @freq:	The frequency given to target function
+ * @flags:	Flags handed from devfreq framework.
  *
+ * Locking: This function must be called under rcu_read_lock(). The opp
+ * pointer returned is RCU protected and remains valid for use with
+ * opp_get_{voltage, freq} only while the RCU read-side lock is held. Use
+ * the returned pointer before calling rcu_read_unlock() to maintain its
+ * integrity.
  */
 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
 				    u32 flags)
@@ -656,14 +1010,14 @@
 		opp = opp_find_freq_floor(dev, freq);
 
 		/* If not available, use the closest opp */
-		if (opp == ERR_PTR(-ENODEV))
+		if (opp == ERR_PTR(-ERANGE))
 			opp = opp_find_freq_ceil(dev, freq);
 	} else {
 		/* The freq is an lower bound. opp should be higher */
 		opp = opp_find_freq_ceil(dev, freq);
 
 		/* If not available, use the closest opp */
-		if (opp == ERR_PTR(-ENODEV))
+		if (opp == ERR_PTR(-ERANGE))
 			opp = opp_find_freq_floor(dev, freq);
 	}
 
@@ -674,35 +1028,49 @@
  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
  *				   for any changes in the OPP availability
  *				   changes
- * @dev		The devfreq user device. (parent of devfreq)
- * @devfreq	The devfreq object.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
  */
 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
 {
-	struct srcu_notifier_head *nh = opp_get_notifier(dev);
+	struct srcu_notifier_head *nh;
+	int ret = 0;
 
+	rcu_read_lock();
+	nh = opp_get_notifier(dev);
 	if (IS_ERR(nh))
-		return PTR_ERR(nh);
-	return srcu_notifier_chain_register(nh, &devfreq->nb);
+		ret = PTR_ERR(nh);
+	rcu_read_unlock();
+	if (!ret)
+		ret = srcu_notifier_chain_register(nh, &devfreq->nb);
+
+	return ret;
 }
 
 /**
  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
  *				     notified for any changes in the OPP
  *				     availability changes anymore.
- * @dev		The devfreq user device. (parent of devfreq)
- * @devfreq	The devfreq object.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
  *
  * At exit() callback of devfreq_dev_profile, this must be included if
  * devfreq_recommended_opp is used.
  */
 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
 {
-	struct srcu_notifier_head *nh = opp_get_notifier(dev);
+	struct srcu_notifier_head *nh;
+	int ret = 0;
 
+	rcu_read_lock();
+	nh = opp_get_notifier(dev);
 	if (IS_ERR(nh))
-		return PTR_ERR(nh);
-	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+		ret = PTR_ERR(nh);
+	rcu_read_unlock();
+	if (!ret)
+		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
+
+	return ret;
 }
 
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
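Given the RCU rules documented above for devfreq_recommended_opp() and opp_get_notifier(), a target() callback has to copy what it needs out of the OPP before dropping the read-side lock. A condensed sketch of that pattern, mirroring the exynos4_bus conversion in the next file (my_target is a hypothetical callback, not part of the patch):

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct opp *opp;
	unsigned long rate, volt;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	/* Snapshot everything needed while the opp pointer is still valid. */
	rate = opp_get_freq(opp);
	volt = opp_get_voltage(opp);
	rcu_read_unlock();

	dev_dbg(dev, "targeting %lu kHz at %lu uV\n", rate, volt);
	/* ... program clocks/regulators from rate and volt ... */
	return 0;
}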
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 88ddc77..f6c54bd 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -73,6 +73,16 @@
 #define EX4210_LV_NUM	(LV_2 + 1)
 #define EX4x12_LV_NUM	(LV_4 + 1)
 
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate:	Frequency in hertz
+ * @volt:	Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+	unsigned long rate;
+	unsigned long volt;
+};
+
 struct busfreq_data {
 	enum exynos4_busf_type type;
 	struct device *dev;
@@ -80,7 +90,7 @@
 	bool disabled;
 	struct regulator *vdd_int;
 	struct regulator *vdd_mif; /* Exynos4412/4212 only */
-	struct opp *curr_opp;
+	struct busfreq_opp_info curr_oppinfo;
 	struct exynos4_ppmu dmc[2];
 
 	struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@
 };
 
 
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+				 struct busfreq_opp_info *oppi)
 {
 	unsigned int index;
 	unsigned int tmp;
 
 	for (index = LV_0; index < EX4210_LV_NUM; index++)
-		if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+		if (oppi->rate == exynos4210_busclk_table[index].clk)
 			break;
 
 	if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@
 	return 0;
 }
 
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+				 struct busfreq_opp_info *oppi)
 {
 	unsigned int index;
 	unsigned int tmp;
 
 	for (index = LV_0; index < EX4x12_LV_NUM; index++)
-		if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+		if (oppi->rate == exynos4x12_mifclk_table[index].clk)
 			break;
 
 	if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@
 	return -EINVAL;
 }
 
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
-			       struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+			       struct busfreq_opp_info *oppi,
+			       struct busfreq_opp_info *oldoppi)
 {
 	int err = 0, tmp;
-	unsigned long volt = opp_get_voltage(opp);
+	unsigned long volt = oppi->volt;
 
 	switch (data->type) {
 	case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@
 		if (err)
 			break;
 
-		tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+		tmp = exynos4x12_get_intspec(oppi->rate);
 		if (tmp < 0) {
 			err = tmp;
 			regulator_set_voltage(data->vdd_mif,
-					      opp_get_voltage(oldopp),
+					      oldoppi->volt,
 					      MAX_SAFEVOLT);
 			break;
 		}
@@ -609,7 +622,7 @@
 		/*  Try to recover */
 		if (err)
 			regulator_set_voltage(data->vdd_mif,
-					      opp_get_voltage(oldopp),
+					      oldoppi->volt,
 					      MAX_SAFEVOLT);
 		break;
 	default:
@@ -626,17 +639,26 @@
 	struct platform_device *pdev = container_of(dev, struct platform_device,
 						    dev);
 	struct busfreq_data *data = platform_get_drvdata(pdev);
-	struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
-	unsigned long freq = opp_get_freq(opp);
-	unsigned long old_freq = opp_get_freq(data->curr_opp);
+	struct opp *opp;
+	unsigned long freq;
+	unsigned long old_freq = data->curr_oppinfo.rate;
+	struct busfreq_opp_info	new_oppinfo;
 
-	if (IS_ERR(opp))
+	rcu_read_lock();
+	opp = devfreq_recommended_opp(dev, _freq, flags);
+	if (IS_ERR(opp)) {
+		rcu_read_unlock();
 		return PTR_ERR(opp);
+	}
+	new_oppinfo.rate = opp_get_freq(opp);
+	new_oppinfo.volt = opp_get_voltage(opp);
+	rcu_read_unlock();
+	freq = new_oppinfo.rate;
 
 	if (old_freq == freq)
 		return 0;
 
-	dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
 
 	mutex_lock(&data->lock);
 
@@ -644,17 +666,18 @@
 		goto out;
 
 	if (old_freq < freq)
-		err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+		err = exynos4_bus_setvolt(data, &new_oppinfo,
+					  &data->curr_oppinfo);
 	if (err)
 		goto out;
 
 	if (old_freq != freq) {
 		switch (data->type) {
 		case TYPE_BUSF_EXYNOS4210:
-			err = exynos4210_set_busclk(data, opp);
+			err = exynos4210_set_busclk(data, &new_oppinfo);
 			break;
 		case TYPE_BUSF_EXYNOS4x12:
-			err = exynos4x12_set_busclk(data, opp);
+			err = exynos4x12_set_busclk(data, &new_oppinfo);
 			break;
 		default:
 			err = -EINVAL;
@@ -664,11 +687,12 @@
 		goto out;
 
 	if (old_freq > freq)
-		err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+		err = exynos4_bus_setvolt(data, &new_oppinfo,
+					  &data->curr_oppinfo);
 	if (err)
 		goto out;
 
-	data->curr_opp = opp;
+	data->curr_oppinfo = new_oppinfo;
 out:
 	mutex_unlock(&data->lock);
 	return err;
@@ -702,7 +726,7 @@
 
 	exynos4_read_ppmu(data);
 	busier_dmc = exynos4_get_busier_dmc(data);
-	stat->current_frequency = opp_get_freq(data->curr_opp);
+	stat->current_frequency = data->curr_oppinfo.rate;
 
 	if (busier_dmc)
 		addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@
 	struct busfreq_data *data = container_of(this, struct busfreq_data,
 						 pm_notifier);
 	struct opp *opp;
+	struct busfreq_opp_info	new_oppinfo;
 	unsigned long maxfreq = ULONG_MAX;
 	int err = 0;
 
@@ -943,18 +968,29 @@
 
 		data->disabled = true;
 
+		rcu_read_lock();
 		opp = opp_find_freq_floor(data->dev, &maxfreq);
+		if (IS_ERR(opp)) {
+			rcu_read_unlock();
+			dev_err(data->dev, "%s: unable to find a min freq\n",
+				__func__);
+			return PTR_ERR(opp);
+		}
+		new_oppinfo.rate = opp_get_freq(opp);
+		new_oppinfo.volt = opp_get_voltage(opp);
+		rcu_read_unlock();
 
-		err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+		err = exynos4_bus_setvolt(data, &new_oppinfo,
+					  &data->curr_oppinfo);
 		if (err)
 			goto unlock;
 
 		switch (data->type) {
 		case TYPE_BUSF_EXYNOS4210:
-			err = exynos4210_set_busclk(data, opp);
+			err = exynos4210_set_busclk(data, &new_oppinfo);
 			break;
 		case TYPE_BUSF_EXYNOS4x12:
-			err = exynos4x12_set_busclk(data, opp);
+			err = exynos4x12_set_busclk(data, &new_oppinfo);
 			break;
 		default:
 			err = -EINVAL;
@@ -962,7 +998,7 @@
 		if (err)
 			goto unlock;
 
-		data->curr_opp = opp;
+		data->curr_oppinfo = new_oppinfo;
 unlock:
 		mutex_unlock(&data->lock);
 		if (err)
@@ -987,7 +1023,7 @@
 	struct device *dev = &pdev->dev;
 	int err = 0;
 
-	data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+	data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
 	if (data == NULL) {
 		dev_err(dev, "Cannot allocate memory.\n");
 		return -ENOMEM;
@@ -1012,63 +1048,52 @@
 		err = -EINVAL;
 	}
 	if (err)
-		goto err_regulator;
+		return err;
 
-	data->vdd_int = regulator_get(dev, "vdd_int");
+	data->vdd_int = devm_regulator_get(dev, "vdd_int");
 	if (IS_ERR(data->vdd_int)) {
 		dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
-		err = PTR_ERR(data->vdd_int);
-		goto err_regulator;
+		return PTR_ERR(data->vdd_int);
 	}
 	if (data->type == TYPE_BUSF_EXYNOS4x12) {
-		data->vdd_mif = regulator_get(dev, "vdd_mif");
+		data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
 		if (IS_ERR(data->vdd_mif)) {
 			dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
-			err = PTR_ERR(data->vdd_mif);
-			regulator_put(data->vdd_int);
-			goto err_regulator;
-
+			return PTR_ERR(data->vdd_mif);
 		}
 	}
 
+	rcu_read_lock();
 	opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
 	if (IS_ERR(opp)) {
+		rcu_read_unlock();
 		dev_err(dev, "Invalid initial frequency %lu kHz.\n",
-		       exynos4_devfreq_profile.initial_freq);
-		err = PTR_ERR(opp);
-		goto err_opp_add;
+			exynos4_devfreq_profile.initial_freq);
+		return PTR_ERR(opp);
 	}
-	data->curr_opp = opp;
+	data->curr_oppinfo.rate = opp_get_freq(opp);
+	data->curr_oppinfo.volt = opp_get_voltage(opp);
+	rcu_read_unlock();
 
 	platform_set_drvdata(pdev, data);
 
 	busfreq_mon_reset(data);
 
 	data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
-					   &devfreq_simple_ondemand, NULL);
-	if (IS_ERR(data->devfreq)) {
-		err = PTR_ERR(data->devfreq);
-		goto err_opp_add;
-	}
+					   "simple_ondemand", NULL);
+	if (IS_ERR(data->devfreq))
+		return PTR_ERR(data->devfreq);
 
 	devfreq_register_opp_notifier(dev, data->devfreq);
 
 	err = register_pm_notifier(&data->pm_notifier);
 	if (err) {
 		dev_err(dev, "Failed to setup pm notifier\n");
-		goto err_devfreq_add;
+		devfreq_remove_device(data->devfreq);
+		return err;
 	}
 
 	return 0;
-err_devfreq_add:
-	devfreq_remove_device(data->devfreq);
-err_opp_add:
-	if (data->vdd_mif)
-		regulator_put(data->vdd_mif);
-	regulator_put(data->vdd_int);
-err_regulator:
-	kfree(data);
-	return err;
 }
 
 static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
@@ -1077,10 +1102,6 @@
 
 	unregister_pm_notifier(&data->pm_notifier);
 	devfreq_remove_device(data->devfreq);
-	regulator_put(data->vdd_int);
-	if (data->vdd_mif)
-		regulator_put(data->vdd_mif);
-	kfree(data);
 
 	return 0;
 }
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index ea7f13c..fad7d63 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,24 @@
 
 #define to_devfreq(DEV)	container_of((DEV), struct devfreq, dev)
 
+/* Devfreq events */
+#define DEVFREQ_GOV_START			0x1
+#define DEVFREQ_GOV_STOP			0x2
+#define DEVFREQ_GOV_INTERVAL			0x3
+#define DEVFREQ_GOV_SUSPEND			0x4
+#define DEVFREQ_GOV_RESUME			0x5
+
 /* Caution: devfreq->lock must be locked before calling update_devfreq */
 extern int update_devfreq(struct devfreq *devfreq);
 
+extern void devfreq_monitor_start(struct devfreq *devfreq);
+extern void devfreq_monitor_stop(struct devfreq *devfreq);
+extern void devfreq_monitor_suspend(struct devfreq *devfreq);
+extern void devfreq_monitor_resume(struct devfreq *devfreq);
+extern void devfreq_interval_update(struct devfreq *devfreq,
+					unsigned int *delay);
+
+extern int devfreq_add_governor(struct devfreq_governor *governor);
+extern int devfreq_remove_governor(struct devfreq_governor *governor);
+
 #endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index af75ddd..c72f942 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/devfreq.h>
+#include <linux/module.h>
 #include "governor.h"
 
 static int devfreq_performance_func(struct devfreq *df,
@@ -26,14 +27,41 @@
 	return 0;
 }
 
-static int performance_init(struct devfreq *devfreq)
+static int devfreq_performance_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
 {
-	return update_devfreq(devfreq);
+	int ret = 0;
+
+	if (event == DEVFREQ_GOV_START) {
+		mutex_lock(&devfreq->lock);
+		ret = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+	}
+
+	return ret;
 }
 
-const struct devfreq_governor devfreq_performance = {
+static struct devfreq_governor devfreq_performance = {
 	.name = "performance",
-	.init = performance_init,
 	.get_target_freq = devfreq_performance_func,
-	.no_central_polling = true,
+	.event_handler = devfreq_performance_handler,
 };
+
+static int __init devfreq_performance_init(void)
+{
+	return devfreq_add_governor(&devfreq_performance);
+}
+subsys_initcall(devfreq_performance_init);
+
+static void __exit devfreq_performance_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_performance);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_performance_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index fec0cdb..0c6bed5 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/devfreq.h>
+#include <linux/module.h>
 #include "governor.h"
 
 static int devfreq_powersave_func(struct devfreq *df,
@@ -23,14 +24,41 @@
 	return 0;
 }
 
-static int powersave_init(struct devfreq *devfreq)
+static int devfreq_powersave_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
 {
-	return update_devfreq(devfreq);
+	int ret = 0;
+
+	if (event == DEVFREQ_GOV_START) {
+		mutex_lock(&devfreq->lock);
+		ret = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+	}
+
+	return ret;
 }
 
-const struct devfreq_governor devfreq_powersave = {
+static struct devfreq_governor devfreq_powersave = {
 	.name = "powersave",
-	.init = powersave_init,
 	.get_target_freq = devfreq_powersave_func,
-	.no_central_polling = true,
+	.event_handler = devfreq_powersave_handler,
 };
+
+static int __init devfreq_powersave_init(void)
+{
+	return devfreq_add_governor(&devfreq_powersave);
+}
+subsys_initcall(devfreq_powersave_init);
+
+static void __exit devfreq_powersave_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_powersave);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_powersave_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index a2e3eae..0720ba8 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -10,8 +10,10 @@
  */
 
 #include <linux/errno.h>
+#include <linux/module.h>
 #include <linux/devfreq.h>
 #include <linux/math64.h>
+#include "governor.h"
 
 /* Default constants for DevFreq-Simple-Ondemand (DFSO) */
 #define DFSO_UPTHRESHOLD	(90)
@@ -88,7 +90,58 @@
 	return 0;
 }
 
-const struct devfreq_governor devfreq_simple_ondemand = {
+static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		devfreq_monitor_start(devfreq);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		devfreq_monitor_stop(devfreq);
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		devfreq_interval_update(devfreq, (unsigned int *)data);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_simple_ondemand = {
 	.name = "simple_ondemand",
 	.get_target_freq = devfreq_simple_ondemand_func,
+	.event_handler = devfreq_simple_ondemand_handler,
 };
+
+static int __init devfreq_simple_ondemand_init(void)
+{
+	return devfreq_add_governor(&devfreq_simple_ondemand);
+}
+subsys_initcall(devfreq_simple_ondemand_init);
+
+static void __exit devfreq_simple_ondemand_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_simple_ondemand);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_simple_ondemand_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 0681246..35de6e8 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -14,6 +14,7 @@
 #include <linux/devfreq.h>
 #include <linux/pm.h>
 #include <linux/mutex.h>
+#include <linux/module.h>
 #include "governor.h"
 
 struct userspace_data {
@@ -116,10 +117,46 @@
 	devfreq->data = NULL;
 }
 
-const struct devfreq_governor devfreq_userspace = {
+static int devfreq_userspace_handler(struct devfreq *devfreq,
+			unsigned int event, void *data)
+{
+	int ret = 0;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		ret = userspace_init(devfreq);
+		break;
+	case DEVFREQ_GOV_STOP:
+		userspace_exit(devfreq);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_userspace = {
 	.name = "userspace",
 	.get_target_freq = devfreq_userspace_func,
-	.init = userspace_init,
-	.exit = userspace_exit,
-	.no_central_polling = true,
+	.event_handler = devfreq_userspace_handler,
 };
+
+static int __init devfreq_userspace_init(void)
+{
+	return devfreq_add_governor(&devfreq_userspace);
+}
+subsys_initcall(devfreq_userspace_init);
+
+static void __exit devfreq_userspace_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_userspace);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_userspace_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index 8063138..a01b347 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -127,8 +127,8 @@
 	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
-	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
-		info->handle);
+	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+		&info->handle);
 
 	*addr = info->handle;
 	*len = buffer->size;
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index 496e5b4..3be3a00 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -164,8 +164,8 @@
 	struct device *dev = heap->priv;
 	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
 
-	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
-		info->handle);
+	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+		&info->handle);
 
 	*addr = info->handle;
 	*len = buffer->size;
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index 8c9b95d..48c2efb 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -55,7 +55,7 @@
 	cmd.permission_type = permission_type;
 	cmd.lock = SCM_CP_PROTECT;
 
-	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
+	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
 			&cmd, sizeof(cmd), NULL, 0);
 }
 
@@ -68,7 +68,7 @@
 	cmd.permission_type = permission_type;
 	cmd.lock = SCM_CP_UNPROTECT;
 
-	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
+	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
 			&cmd, sizeof(cmd), NULL, 0);
 }
 
@@ -154,7 +154,7 @@
 	request.chunks.chunk_list_size = nchunks;
 	request.chunks.chunk_size = chunk_size;
 
-	return scm_call(SCM_SVC_CP, MEM_PROTECT_LOCK_ID,
+	return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID,
 			&request, sizeof(request), &resp, sizeof(resp));
 
 }
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index bf45a63..9f4d791 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/delay.h>
 
 #include <mach/socinfo.h>
 #include <mach/msm_bus_board.h>
@@ -121,14 +122,21 @@
  * If the values of these registers are same after
  * KGSL_TIMEOUT_PART time, GPU hang is reported in
  * kernel log.
+ * *****ALERT******ALERT********ALERT*************
+ * Order of registers below is important, registers
+ * from LONG_IB_DETECT_REG_INDEX_START to
+ * LONG_IB_DETECT_REG_INDEX_END are used in long ib detection.
  */
-unsigned int hang_detect_regs[] = {
+#define LONG_IB_DETECT_REG_INDEX_START 1
+#define LONG_IB_DETECT_REG_INDEX_END 5
+
+unsigned int ft_detect_regs[] = {
 	A3XX_RBBM_STATUS,
-	REG_CP_RB_RPTR,
+	REG_CP_RB_RPTR,   /* LONG_IB_DETECT_REG_INDEX_START */
 	REG_CP_IB1_BASE,
 	REG_CP_IB1_BUFSZ,
 	REG_CP_IB2_BASE,
-	REG_CP_IB2_BUFSZ,
+	REG_CP_IB2_BUFSZ, /* LONG_IB_DETECT_REG_INDEX_END */
 	0,
 	0,
 	0,
@@ -137,7 +145,7 @@
 	0
 };
 
-const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs);
+const unsigned int ft_detect_regs_count = ARRAY_SIZE(ft_detect_regs);
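For orientation, the comment above this table says a hang is suspected when these register values stop changing across KGSL_TIMEOUT_PART, with the LONG_IB_DETECT_REG_INDEX_START..END slice used for long-IB detection. A hypothetical comparison helper in that spirit (not the driver's actual detection code, which lies outside this hunk):

/* Hypothetical sketch: prev[] holds values sampled one KGSL_TIMEOUT_PART
 * ago, curr[] is sampled now; restricting i to the LONG_IB_DETECT range
 * would give the long-IB variant. */
static bool ft_regs_unchanged(struct kgsl_device *device,
			      unsigned int *prev, unsigned int *curr)
{
	unsigned int i;

	for (i = 0; i < ft_detect_regs_count; i++) {
		if (!ft_detect_regs[i])
			continue;	/* unused slot */
		adreno_regread(device, ft_detect_regs[i], &curr[i]);
		if (curr[i] != prev[i])
			return false;	/* GPU is still making progress */
	}
	return true;	/* nothing changed: possible hang or long-running IB */
}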
 
 /*
  * This is the master list of all GPU cores that are supported by this
@@ -1222,7 +1230,7 @@
 	int status = -EINVAL;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
-	if (KGSL_STATE_DUMP_AND_RECOVER != device->state)
+	if (KGSL_STATE_DUMP_AND_FT != device->state)
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
 
 	/* Power up the device */
@@ -1277,16 +1285,16 @@
 
 	/* Assign correct RBBM status register to hang detect regs
 	 */
-	hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+	ft_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
 
 	/* Add A3XX specific registers for hang detection */
 	if (adreno_is_a3xx(adreno_dev)) {
-		hang_detect_regs[6] = A3XX_RBBM_PERFCTR_SP_7_LO;
-		hang_detect_regs[7] = A3XX_RBBM_PERFCTR_SP_7_HI;
-		hang_detect_regs[8] = A3XX_RBBM_PERFCTR_SP_6_LO;
-		hang_detect_regs[9] = A3XX_RBBM_PERFCTR_SP_6_HI;
-		hang_detect_regs[10] = A3XX_RBBM_PERFCTR_SP_5_LO;
-		hang_detect_regs[11] = A3XX_RBBM_PERFCTR_SP_5_HI;
+		ft_detect_regs[6] = A3XX_RBBM_PERFCTR_SP_7_LO;
+		ft_detect_regs[7] = A3XX_RBBM_PERFCTR_SP_7_HI;
+		ft_detect_regs[8] = A3XX_RBBM_PERFCTR_SP_6_LO;
+		ft_detect_regs[9] = A3XX_RBBM_PERFCTR_SP_6_HI;
+		ft_detect_regs[10] = A3XX_RBBM_PERFCTR_SP_5_LO;
+		ft_detect_regs[11] = A3XX_RBBM_PERFCTR_SP_5_HI;
 	}
 
 	status = kgsl_mmu_start(device);
@@ -1309,12 +1317,9 @@
 	if (status)
 		goto error_irq_off;
 
-	/*
-	 * While recovery is on we do not want timer to
-	 * fire and attempt to change any device state
-	 */
-
-	if (KGSL_STATE_DUMP_AND_RECOVER != device->state)
+	/* While fault tolerance is on we do not want timer to
+	 * fire and attempt to change any device state */
+	if (KGSL_STATE_DUMP_AND_FT != device->state)
 		mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
 
 	device->reset_counter++;
@@ -1328,7 +1333,8 @@
 	kgsl_mmu_stop(&device->mmu);
 
 error_clk_off:
-	kgsl_pwrctrl_disable(device);
+	if (KGSL_STATE_DUMP_AND_FT != device->state)
+		kgsl_pwrctrl_disable(device);
 
 	return status;
 }
@@ -1356,26 +1362,26 @@
 }
 
 static void adreno_mark_context_status(struct kgsl_device *device,
-					int recovery_status)
+					int ft_status)
 {
 	struct kgsl_context *context;
 	int next = 0;
 	/*
 	 * Set the reset status of all contexts to
 	 * INNOCENT_CONTEXT_RESET_EXT except for the bad context
-	 * since thats the guilty party, if recovery failed then
+	 * since that's the guilty party, if fault tolerance failed then
 	 * mark all as guilty
 	 */
 	while ((context = idr_get_next(&device->context_idr, &next))) {
 		struct adreno_context *adreno_context = context->devctxt;
-		if (recovery_status) {
+		if (ft_status) {
 			context->reset_status =
 					KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
 			adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
 		} else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
 			context->reset_status) {
 			if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
-				CTXT_FLAGS_GPU_HANG_RECOVERED))
+				CTXT_FLAGS_GPU_HANG_FT))
 				context->reset_status =
 				KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
 			else
@@ -1410,103 +1416,305 @@
 	}
 }
 
-static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data)
+static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
 {
-	vfree(rec_data->rb_buffer);
-	vfree(rec_data->bad_rb_buffer);
+	vfree(ft_data->rb_buffer);
+	vfree(ft_data->bad_rb_buffer);
+	vfree(ft_data->good_rb_buffer);
 }
 
-static int adreno_setup_recovery_data(struct kgsl_device *device,
-					struct adreno_recovery_data *rec_data)
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+					unsigned int *ptr,
+					bool inc)
+{
+	int status = -EINVAL;
+	unsigned int val1;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int start_ptr = *ptr;
+
+	while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+		if (inc)
+			start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+									size);
+		else
+			start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+									size);
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+		if (KGSL_CMD_IDENTIFIER == val1) {
+			if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+				start_ptr = adreno_ringbuffer_dec_wrapped(
+							start_ptr, size);
+			*ptr = start_ptr;
+			status = 0;
+			break;
+		}
+	}
+	return status;
+}
+
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+					unsigned int *rb_rptr,
+					unsigned int global_eop,
+					bool inc)
+{
+	int status = -EINVAL;
+	unsigned int temp_rb_rptr = *rb_rptr;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int val[3];
+	int i = 0;
+	bool check = false;
+
+	if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+		return status;
+
+	do {
+		/*
+		 * when decrementing we need to decrement first and
+		 * then read, to make sure we cover all the data
+		 */
+		if (!inc)
+			temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+					temp_rb_rptr, size);
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+					temp_rb_rptr);
+
+		if (check && ((inc && val[i] == global_eop) ||
+	if (!devfreq_wq) {
+		class_destroy(devfreq_class);
+		pr_err("%s: couldn't create workqueue\n", __FILE__);
+		return -ENOMEM;
+			 * we are going forward, else increment i */
+			i = (i + 2) % 3;
+			if (val[i] == rb->device->memstore.gpuaddr +
+				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+						eoptimestamp)) {
+				int j = ((i + 2) % 3);
+				if ((inc && (val[j] == CACHE_FLUSH_TS ||
+						val[j] == cp_type3_packet(
+							CP_MEM_WRITE, 2))) ||
+					(!inc && val[j] == global_eop)) {
+						/* Found the global eop */
+						status = 0;
+						break;
+				}
+			}
+			/* if no match found then increment i again
+			 * since we decremented before matching */
+			i = (i + 1) % 3;
+		}
+		if (inc)
+			temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+						temp_rb_rptr, size);
+
+		i = (i + 1) % 3;
+		if (2 == i)
+			check = true;
+	} while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+	/* temp_rb_rptr points to the command stream after global eop,
+	 * move backward till the start of command sequence */
+	if (!status) {
+		status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+		if (!status) {
+			*rb_rptr = temp_rb_rptr;
+			KGSL_FT_INFO(rb->device,
+			"Offset of cmd sequence after eop timestamp: 0x%x\n",
+			temp_rb_rptr / sizeof(unsigned int));
+		}
+	}
+	if (status)
+		KGSL_FT_ERR(rb->device,
+		"Failed to find the command sequence after eop timestamp\n");
+	return status;
+}
+
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+				unsigned int *rb_rptr,
+				unsigned int ib1)
+{
+	int status = -EINVAL;
+	unsigned int temp_rb_rptr = *rb_rptr;
+	unsigned int size = rb->buffer_desc.size;
+	unsigned int val[2];
+	int i = 0;
+	bool check = false;
+	bool ctx_switch = false;
+
+	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+
+		if (check && val[i] == ib1) {
+			/* decrement i, i.e i = (i - 1 + 2) % 2 */
+			i = (i + 1) % 2;
+			if (adreno_cmd_is_ib(val[i])) {
+				/* go till start of command sequence */
+				status = _find_start_of_cmd_seq(rb,
+						&temp_rb_rptr, false);
+
+				KGSL_FT_INFO(rb->device,
+				"Found the hanging IB at offset 0x%x\n",
+				temp_rb_rptr / sizeof(unsigned int));
+				break;
+			}
+			/* if no match then increment i since we decremented
+			 * before checking */
+			i = (i + 1) % 2;
+		}
+		/* Make sure you do not encounter a context switch twice, we can
+		 * encounter it once for the bad context as the start of search
+		 * can point to the context switch */
+		if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+			if (ctx_switch) {
+				KGSL_FT_ERR(rb->device,
+				"Context switch encountered before bad "
+				"IB found\n");
+				break;
+			}
+			ctx_switch = true;
+		}
+		i = (i + 1) % 2;
+		if (1 == i)
+			check = true;
+		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+								size);
+	}
+	if (!status)
+		*rb_rptr = temp_rb_rptr;
+	return status;
+}
+
+static void adreno_setup_ft_data(struct kgsl_device *device,
+					struct adreno_ft_data *ft_data)
 {
 	int ret = 0;
-	unsigned int ib1_sz, ib2_sz;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	struct kgsl_context *context;
+	struct adreno_context *adreno_context;
+	unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
 
-	memset(rec_data, 0, sizeof(*rec_data));
+	memset(ft_data, 0, sizeof(*ft_data));
+	ft_data->start_of_replay_cmds = 0xFFFFFFFF;
+	ft_data->replay_for_snapshot = 0xFFFFFFFF;
 
-	adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz);
-	adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz);
-	if (ib1_sz || ib2_sz)
-		adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1);
+	adreno_regread(device, REG_CP_IB1_BASE, &ft_data->ib1);
 
-	kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id,
+	kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
 			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
 			current_context));
 
 	kgsl_sharedmem_readl(&device->memstore,
-				&rec_data->global_eop,
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				eoptimestamp));
+			&ft_data->global_eop,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+			eoptimestamp));
 
-	rec_data->rb_buffer = vmalloc(rb->buffer_desc.size);
-	if (!rec_data->rb_buffer) {
+	ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!ft_data->rb_buffer) {
 		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
 				rb->buffer_desc.size);
-		return -ENOMEM;
+		return;
 	}
 
-	rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
-	if (!rec_data->bad_rb_buffer) {
+	ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!ft_data->bad_rb_buffer) {
 		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
 				rb->buffer_desc.size);
-		ret = -ENOMEM;
-		goto done;
+		return;
 	}
-	rec_data->fault = device->mmu.fault;
 
-done:
+	ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!ft_data->good_rb_buffer) {
+		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+				rb->buffer_desc.size);
+		return;
+	}
+	ft_data->status = 0;
+
+	/* find the start of bad command sequence in rb */
+	context = idr_find(&device->context_idr, ft_data->context_id);
+	/* Look for the command stream that is right after the global eop */
+
+	if (!context) {
+		/*
+		 * If there is no context then fault tolerance does not need to
+		 * replay anything, just reset GPU and thats it
+		 */
+		return;
+	}
+
+	ft_data->ft_policy = adreno_dev->ft_policy;
+
+	if (!ft_data->ft_policy)
+		ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
+
+	ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+					ft_data->global_eop + 1, false);
 	if (ret) {
-		vfree(rec_data->rb_buffer);
-		vfree(rec_data->bad_rb_buffer);
+		ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
+		return;
+	} else
+		ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
+
+	ft_data->start_of_replay_cmds = rb_rptr;
+
+	adreno_context = context->devctxt;
+	if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+		if (ft_data->ib1) {
+			ret = _find_hanging_ib_sequence(rb,
+					&rb_rptr, ft_data->ib1);
+			if (ret) {
+				KGSL_FT_ERR(device,
+				"Start not found for replay IB sequence\n");
+				ret = 0;
+				return;
+			}
+			ft_data->start_of_replay_cmds = rb_rptr;
+			ft_data->replay_for_snapshot = rb_rptr;
+		}
 	}
-	return ret;
 }
 
 static int
-_adreno_recover_hang(struct kgsl_device *device,
-			struct adreno_recovery_data *rec_data,
-			bool try_bad_commands)
+_adreno_check_long_ib(struct kgsl_device *device)
 {
-	int ret;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-	struct kgsl_context *context;
-	struct adreno_context *adreno_context = NULL;
-	struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+	unsigned int curr_global_ts = 0;
 
-	context = idr_find(&device->context_idr, rec_data->context_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
-			rec_data->context_id);
+	/* check if the global ts is still the same */
+	kgsl_sharedmem_readl(&device->memstore,
+			&curr_global_ts,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+			eoptimestamp));
+
+	/* Mark long ib as handled */
+	adreno_dev->long_ib = 0;
+
+	if (curr_global_ts == adreno_dev->long_ib_ts) {
+		KGSL_FT_ERR(device,
+			"IB ran too long, invalidate ctxt\n");
+		return 1;
 	} else {
-		adreno_context = context->devctxt;
-		adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
-		/*
-		 * set the invalid ts flag to 0 for this context since we have
-		 * detected a hang for it
-		 */
-		context->wait_on_invalid_ts = false;
+		/* Do nothing GPU has gone ahead */
+		KGSL_FT_INFO(device, "false long ib detection return\n");
+		return 0;
 	}
+}
 
-	/* Extract valid contents from rb which can still be executed after
-	 * hang */
-	ret = adreno_ringbuffer_extract(rb, rec_data);
-	if (ret)
-		goto done;
+static int
+_adreno_ft_restart_device(struct kgsl_device *device,
+			   struct kgsl_context *context)
+{
+
+	struct adreno_context *adreno_context = context->devctxt;
 
 	/* restart device */
-	ret = adreno_stop(device);
-	if (ret) {
-		KGSL_DRV_ERR(device, "Device stop failed in recovery\n");
-		goto done;
+	if (adreno_stop(device)) {
+		KGSL_FT_ERR(device, "Device stop failed\n");
+		return 1;
 	}
 
-	ret = adreno_start(device, true);
-	if (ret) {
-		KGSL_DRV_ERR(device, "Device start failed in recovery\n");
-		goto done;
+	if (adreno_start(device, true)) {
+		KGSL_FT_ERR(device, "Device start failed\n");
+		return 1;
 	}
 
 	if (context)
@@ -1516,83 +1724,284 @@
 	/* If iommu is used then we need to make sure that the iommu clocks
 	 * are on since there could be commands in pipeline that touch iommu */
 	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
-		ret = kgsl_mmu_enable_clk(&device->mmu,
-			KGSL_IOMMU_CONTEXT_USER);
-		if (ret)
-			goto done;
+		if (kgsl_mmu_enable_clk(&device->mmu,
+				KGSL_IOMMU_CONTEXT_USER))
+			return 1;
 	}
 
-	/* Do not try the bad commands if recovery has failed bad commands
-	 * once already or if hang is due to a fault */
-	if (!try_bad_commands || rec_data->fault)
-		rec_data->bad_rb_size = 0;
+	return 0;
+}
 
-	if (rec_data->bad_rb_size) {
-		int idle_ret;
-		/* submit the bad and good context commands and wait for
-		 * them to pass */
-		adreno_ringbuffer_restore(rb, rec_data->bad_rb_buffer,
-					rec_data->bad_rb_size);
-		idle_ret = adreno_idle(device);
-		if (idle_ret) {
-			ret = adreno_stop(device);
-			if (ret) {
-				KGSL_DRV_ERR(device,
-				"Device stop failed in recovery\n");
-				goto done;
-			}
-			ret = adreno_start(device, true);
-			if (ret) {
-				KGSL_DRV_ERR(device,
-				"Device start failed in recovery\n");
-				goto done;
-			}
-			if (context)
-				kgsl_mmu_setstate(&device->mmu,
-						adreno_context->pagetable,
-						KGSL_MEMSTORE_GLOBAL);
+static inline void
+_adreno_debug_ft_info(struct kgsl_device *device,
+			struct adreno_ft_data *ft_data)
+{
 
-			if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
-				ret = kgsl_mmu_enable_clk(&device->mmu,
-						KGSL_IOMMU_CONTEXT_USER);
-				if (ret)
-					goto done;
-			}
+	/*
+	 * Dumping rb is a very useful tool to debug FT.
+	 * It will tell us if we are extracting the rb correctly
+	 * NOP'ing the right IB, skipping the EOF correctly etc.
+	 */
+	if (device->ft_log >= 7)  {
 
-			ret = idle_ret;
-			KGSL_DRV_ERR(device,
-			"Bad context commands hung in recovery\n");
-		} else {
-			KGSL_DRV_ERR(device,
-			"Bad context commands succeeded in recovery\n");
-			if (adreno_context)
-				adreno_context->flags = (adreno_context->flags &
-					~CTXT_FLAGS_GPU_HANG) |
-					CTXT_FLAGS_GPU_HANG_RECOVERED;
-			adreno_dev->drawctxt_active = last_active_ctx;
-		}
+		/* Print fault tolerance data here */
+		KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
+			ft_data->rb_size);
+		adreno_dump_rb(device, ft_data->rb_buffer,
+			ft_data->rb_size<<2, 0, ft_data->rb_size);
+
+		KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
+			ft_data->bad_rb_size);
+		adreno_dump_rb(device, ft_data->bad_rb_buffer,
+			ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
+
+		KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
+			ft_data->good_rb_size);
+		adreno_dump_rb(device, ft_data->good_rb_buffer,
+			ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
+
 	}
-	/* If either the bad command sequence failed or we did not play it */
-	if (ret || !rec_data->bad_rb_size) {
-		adreno_ringbuffer_restore(rb, rec_data->rb_buffer,
-				rec_data->rb_size);
+}
+
+static int
+_adreno_ft_resubmit_rb(struct kgsl_device *device,
+			struct adreno_ringbuffer *rb,
+			struct kgsl_context *context,
+			struct adreno_ft_data *ft_data,
+			unsigned int *buff, unsigned int size)
+{
+	unsigned int ret = 0;
+	unsigned int retry_num = 0;
+
+	_adreno_debug_ft_info(device, ft_data);
+
+	do {
+		ret = _adreno_ft_restart_device(device, context);
+		if (ret == 0)
+			break;
+		/*
+		 * If device restart fails sleep for 20ms before
+		 * attempting restart. This allows GPU HW to settle
+		 * and improve the chances of next restart to be
+		 * successful.
+		 */
+		msleep(20);
+		KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
+		retry_num++;
+	} while (retry_num < 4);
+
+	if (ret) {
+		KGSL_FT_ERR(device, "Device restart failed\n");
+		BUG_ON(1);
+		goto done;
+	}
+
+	if (size) {
+
+		/* submit commands and wait for them to pass */
+		adreno_ringbuffer_restore(rb, buff, size);
+
 		ret = adreno_idle(device);
-		if (ret) {
-			/* If we fail here we can try to invalidate another
-			 * context and try recovering again */
-			ret = -EAGAIN;
-			goto done;
+	}
+
+done:
+	return ret;
+}
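
The loop above retries a failed device restart up to four times, sleeping 20 ms between attempts so the GPU hardware can settle. A minimal stand-alone sketch of that bounded-retry pattern is below; restart_device() is a hypothetical stand-in for _adreno_ft_restart_device() and is not part of the driver.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for _adreno_ft_restart_device(). */
static int restart_device(void *dev)
{
	(void)dev;
	return -1;		/* pretend the restart keeps failing */
}

static int restart_with_retries(void *dev, unsigned int max_tries)
{
	unsigned int attempt;
	int ret = -1;

	for (attempt = 0; attempt < max_tries; attempt++) {
		ret = restart_device(dev);
		if (ret == 0)
			break;
		/* Give the hardware time to settle before retrying. */
		usleep(20 * 1000);
		fprintf(stderr, "retry device restart %u\n", attempt);
	}
	return ret;
}

int main(void)
{
	return restart_with_retries(NULL, 4) ? 1 : 0;
}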
+
+static int
+_adreno_ft(struct kgsl_device *device,
+			struct adreno_ft_data *ft_data)
+{
+	int ret = 0, i;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	struct kgsl_context *context;
+	struct adreno_context *adreno_context = NULL;
+	struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+	unsigned int long_ib = 0;
+
+	context = idr_find(&device->context_idr, ft_data->context_id);
+	if (context == NULL) {
+		KGSL_FT_ERR(device, "Last context unknown id:%d\n",
+			ft_data->context_id);
+		goto play_good_cmds;
+	} else {
+		adreno_context = context->devctxt;
+		adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+		/*
+		 * set the invalid ts flag to 0 for this context since we have
+		 * detected a hang for it
+		 */
+		context->wait_on_invalid_ts = false;
+
+		if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+			KGSL_FT_ERR(device, "Fault tolerance not supported\n");
+			goto play_good_cmds;
 		}
-		/* ringbuffer now has data from the last valid context id,
-		 * so restore the active_ctx to the last valid context */
-		if (rec_data->last_valid_ctx_id) {
-			struct kgsl_context *last_ctx =
-					idr_find(&device->context_idr,
-					rec_data->last_valid_ctx_id);
-			if (last_ctx)
-				adreno_dev->drawctxt_active = last_ctx->devctxt;
+
+		/*
+		 *  This flag will be set by userspace for contexts
+		 *  that do not want to be fault tolerant (ex: OPENCL)
+		 */
+		if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
+			ft_data->status = 1;
+			KGSL_FT_ERR(device,
+			"No FT set for this context play good cmds\n");
+			goto play_good_cmds;
+		}
+
+	}
+
+	/* Check if we detected a long-running IB; if not, return */
+	if (adreno_dev->long_ib) {
+		long_ib = _adreno_check_long_ib(device);
+		if (!long_ib) {
+			adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
+			return 0;
 		}
 	}
+
+	/*
+	 * Extract valid contents from rb which can still be executed after
+	 * hang
+	 */
+	adreno_ringbuffer_extract(rb, ft_data);
+
+	/* If long IB detected do not attempt replay of bad cmds */
+	if (long_ib) {
+		_adreno_debug_ft_info(device, ft_data);
+		goto play_good_cmds;
+	}
+
+	if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
+		(ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
+		KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
+		ft_data->status = 1;
+		goto play_good_cmds;
+	}
+
+	/* Do not try the replay if the hang is due to a pagefault */
+	if (adreno_context->pagefault) {
+		if ((ft_data->context_id == adreno_context->id) &&
+			(ft_data->global_eop == adreno_context->pagefault_ts)) {
+			ft_data->ft_policy &= ~KGSL_FT_REPLAY;
+			KGSL_FT_ERR(device, "MMU fault skipping replay\n");
+		}
+
+		adreno_context->pagefault = 0;
+	}
+
+	if (ft_data->ft_policy & KGSL_FT_REPLAY) {
+		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+		if (ret) {
+			KGSL_FT_ERR(device, "Replay status: 1\n");
+			ft_data->status = 1;
+		} else
+			goto play_good_cmds;
+	}
+
+	if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
+		for (i = 0; i < ft_data->bad_rb_size; i++) {
+			if ((ft_data->bad_rb_buffer[i] ==
+					CP_HDR_INDIRECT_BUFFER_PFD) &&
+				(ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
+
+				ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
+				ft_data->bad_rb_buffer[i+1] =
+							KGSL_NOP_IB_IDENTIFIER;
+				ft_data->bad_rb_buffer[i+2] =
+							KGSL_NOP_IB_IDENTIFIER;
+				break;
+			}
+		}
+
+		if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
+			KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
+			ft_data->status = 1;
+			goto play_good_cmds;
+		}
+
+		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+		if (ret) {
+			KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
+			ft_data->status = 1;
+		} else {
+			ft_data->status = 0;
+			goto play_good_cmds;
+		}
+	}
+
+	if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
+		for (i = 0; i < ft_data->bad_rb_size; i++) {
+			if (ft_data->bad_rb_buffer[i] ==
+					KGSL_END_OF_FRAME_IDENTIFIER) {
+				ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
+				break;
+			}
+		}
+
+		/* EOF not found in RB, discard till EOF in
+		   next IB submission */
+		if (i == ft_data->bad_rb_size) {
+			adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
+			KGSL_FT_INFO(device,
+			"EOF not found in RB, skip next issueib till EOF\n");
+			ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
+		}
+
+		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+		if (ret) {
+			KGSL_FT_ERR(device, "Skip EOF status: 1\n");
+			ft_data->status = 1;
+		} else {
+			ft_data->status = 0;
+			goto play_good_cmds;
+		}
+	}
+
+play_good_cmds:
+
+	if (ft_data->status)
+		KGSL_FT_ERR(device, "Bad context commands failed\n");
+	else {
+		KGSL_FT_INFO(device, "Bad context commands success\n");
+
+		if (adreno_context) {
+			adreno_context->flags = (adreno_context->flags &
+				~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
+		}
+		adreno_dev->drawctxt_active = last_active_ctx;
+	}
+
+	ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+			ft_data->good_rb_buffer, ft_data->good_rb_size);
+
+	if (ret) {
+		/* If we fail here we can try to invalidate another
+		 * context and try fault tolerance again */
+		ret = -EAGAIN;
+		KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
+		goto done;
+	} else
+		KGSL_FT_INFO(device, "Playing good commands successful\n");
+
+	/* ringbuffer now has data from the last valid context id,
+	 * so restore the active_ctx to the last valid context */
+	if (ft_data->last_valid_ctx_id) {
+		struct kgsl_context *last_ctx =
+				idr_find(&device->context_idr,
+				ft_data->last_valid_ctx_id);
+		if (last_ctx)
+			adreno_dev->drawctxt_active = last_ctx->devctxt;
+	}
+
 done:
 	/* Turn off iommu clocks */
 	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
@@ -1601,40 +2010,38 @@
 }
 
 static int
-adreno_recover_hang(struct kgsl_device *device,
-			struct adreno_recovery_data *rec_data)
+adreno_ft(struct kgsl_device *device,
+			struct adreno_ft_data *ft_data)
 {
 	int ret = 0;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 	unsigned int timestamp;
 
-	KGSL_DRV_ERR(device,
-	"Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, "
+	KGSL_FT_INFO(device,
+	"Start Parameters: IB1: 0x%X, "
 	"Bad context_id: %u, global_eop: 0x%x\n",
-	rec_data->ib1, rec_data->context_id, rec_data->global_eop);
+	ft_data->ib1, ft_data->context_id, ft_data->global_eop);
 
 	timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
-	KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
+	KGSL_FT_INFO(device, "Last issued global timestamp: %x\n", timestamp);
 
 	/* We may need to replay commands multiple times based on whether
 	 * multiple contexts hang the GPU */
 	while (true) {
-		if (!ret)
-			ret = _adreno_recover_hang(device, rec_data, true);
-		else
-			ret = _adreno_recover_hang(device, rec_data, false);
+
+		ret = _adreno_ft(device, ft_data);
 
 		if (-EAGAIN == ret) {
-			/* setup new recovery parameters and retry, this
+			/* setup new fault tolerance parameters and retry, this
 			 * means more than 1 contexts are causing hang */
-			adreno_destroy_recovery_data(rec_data);
-			adreno_setup_recovery_data(device, rec_data);
-			KGSL_DRV_ERR(device,
-			"Retry recovery from 3D GPU hang. Recovery parameters: "
+			adreno_destroy_ft_data(ft_data);
+			adreno_setup_ft_data(device, ft_data);
+			KGSL_FT_INFO(device,
+			"Retry. Parameters: "
 			"IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
-			rec_data->ib1, rec_data->context_id,
-			rec_data->global_eop);
+			ft_data->ib1, ft_data->context_id,
+			ft_data->global_eop);
 		} else {
 			break;
 		}
@@ -1643,7 +2050,7 @@
 	if (ret)
 		goto done;
 
-	/* Restore correct states after recovery */
+	/* Restore correct states after fault tolerance */
 	if (adreno_dev->drawctxt_active)
 		device->mmu.hwpagetable =
 			adreno_dev->drawctxt_active->pagetable;
@@ -1662,34 +2069,39 @@
 done:
 	adreno_set_max_ts_for_bad_ctxs(device);
 	adreno_mark_context_status(device, ret);
-	if (!ret)
-		KGSL_DRV_ERR(device, "Recovery succeeded\n");
-	else
-		KGSL_DRV_ERR(device, "Recovery failed\n");
+	KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
+			ft_data->ft_policy, ret);
 	return ret;
 }
 
 int
-adreno_dump_and_recover(struct kgsl_device *device)
+adreno_dump_and_exec_ft(struct kgsl_device *device)
 {
 	int result = -ETIMEDOUT;
-	struct adreno_recovery_data rec_data;
+	struct adreno_ft_data ft_data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	unsigned int curr_pwrlevel;
 
 	if (device->state == KGSL_STATE_HUNG)
 		goto done;
-	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+	if (device->state == KGSL_STATE_DUMP_AND_FT) {
 		mutex_unlock(&device->mutex);
-		wait_for_completion(&device->recovery_gate);
+		wait_for_completion(&device->ft_gate);
 		mutex_lock(&device->mutex);
 		if (device->state != KGSL_STATE_HUNG)
 			result = 0;
 	} else {
-		kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_RECOVER);
-		INIT_COMPLETION(device->recovery_gate);
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
+		INIT_COMPLETION(device->ft_gate);
 		/* Detected a hang */
 
-		/* Get the recovery data as soon as hang is detected */
-		result = adreno_setup_recovery_data(device, &rec_data);
+		/* Run fault tolerance at max power level */
+		curr_pwrlevel = pwr->active_pwrlevel;
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
+
+		/* Get the fault tolerance data as soon as hang is detected */
+		adreno_setup_ft_data(device, &ft_data);
 		/*
 		 * Trigger an automatic dump of the state to
 		 * the console
@@ -1697,25 +2109,43 @@
 		kgsl_postmortem_dump(device, 0);
 
 		/*
-		 * Make a GPU snapshot.  For now, do it after the PM dump so we
-		 * can at least be sure the PM dump will work as it always has
+		 * If a long ib is detected, do not attempt postmortem or
+		 * snapshot; if the GPU is still executing commands
+		 * we will get errors
 		 */
-		kgsl_device_snapshot(device, 1);
+		if (!adreno_dev->long_ib) {
+			/*
+			 * Trigger an automatic dump of the state to
+			 * the console
+			 */
+			kgsl_postmortem_dump(device, 0);
 
-		result = adreno_recover_hang(device, &rec_data);
-		adreno_destroy_recovery_data(&rec_data);
+			/*
+			* Make a GPU snapshot.  For now, do it after the
+			* PM dump so we can at least be sure the PM dump
+			* will work as it always has
+			*/
+			kgsl_device_snapshot(device, 1);
+		}
+
+		result = adreno_ft(device, &ft_data);
+		adreno_destroy_ft_data(&ft_data);
+
+		/* restore power level */
+		kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
+
 		if (result) {
 			kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
 		} else {
 			kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 			mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
 		}
-		complete_all(&device->recovery_gate);
+		complete_all(&device->ft_gate);
 	}
 done:
 	return result;
 }
-EXPORT_SYMBOL(adreno_dump_and_recover);
+EXPORT_SYMBOL(adreno_dump_and_exec_ft);
 
 static int adreno_getproperty(struct kgsl_device *device,
 				enum kgsl_property_type type,
@@ -1882,7 +2312,7 @@
 	do {
 		if (time_after(jiffies, wait)) {
 			/* Check to see if the core is hung */
-			if (adreno_hang_detect(device, regs))
+			if (adreno_ft_detect(device, regs))
 				return -ETIMEDOUT;
 
 			wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
@@ -1906,7 +2336,7 @@
 	unsigned int rbbm_status;
 	unsigned long wait_time;
 	unsigned long wait_time_part;
-	unsigned int prev_reg_val[hang_detect_regs_count];
+	unsigned int prev_reg_val[ft_detect_regs_count];
 
 	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
@@ -1939,7 +2369,7 @@
 		if (time_after(jiffies, wait_time_part)) {
 				wait_time_part = jiffies +
 					msecs_to_jiffies(KGSL_TIMEOUT_PART);
-				if ((adreno_hang_detect(device, prev_reg_val)))
+				if ((adreno_ft_detect(device, prev_reg_val)))
 					goto err;
 		}
 
@@ -1947,9 +2377,9 @@
 
 err:
 	KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
-	if (KGSL_STATE_DUMP_AND_RECOVER != device->state &&
-		!adreno_dump_and_recover(device)) {
-		wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+	if (KGSL_STATE_DUMP_AND_FT != device->state &&
+		!adreno_dump_and_exec_ft(device)) {
+		wait_time = jiffies + ADRENO_IDLE_TIMEOUT;
 		goto retry;
 	}
 	return -ETIMEDOUT;
@@ -2297,16 +2727,28 @@
 
 
 
-unsigned int adreno_hang_detect(struct kgsl_device *device,
+unsigned int adreno_ft_detect(struct kgsl_device *device,
 						unsigned int *prev_reg_val)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int curr_reg_val[hang_detect_regs_count];
-	unsigned int hang_detected = 1;
+	unsigned int curr_reg_val[ft_detect_regs_count];
+	unsigned int fast_hang_detected = 1;
+	unsigned int long_ib_detected = 1;
 	unsigned int i;
 	static unsigned long next_hang_detect_time;
+	static unsigned int prev_global_ts;
+	unsigned int curr_global_ts = 0;
+	unsigned int curr_context_id = 0;
+	static struct adreno_context *curr_context;
+	static struct kgsl_context *context;
 
 	if (!adreno_dev->fast_hang_detect)
+		fast_hang_detected = 0;
+
+	if (!adreno_dev->long_ib_detect)
+		long_ib_detected = 0;
+
+	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
 		return 0;
 
 	if (is_adreno_rbbm_status_idle(device)) {
@@ -2340,20 +2782,125 @@
 		next_hang_detect_time = (jiffies +
 			msecs_to_jiffies(KGSL_TIMEOUT_PART-1));
 
-	for (i = 0; i < hang_detect_regs_count; i++) {
-
-		if (hang_detect_regs[i] == 0)
+	/* Read the current Hang detect reg values here */
+	for (i = 0; i < ft_detect_regs_count; i++) {
+		if (ft_detect_regs[i] == 0)
 			continue;
-
-		adreno_regread(device, hang_detect_regs[i],
-					   &curr_reg_val[i]);
-		if (curr_reg_val[i] != prev_reg_val[i]) {
-			prev_reg_val[i] = curr_reg_val[i];
-			hang_detected = 0;
-		}
+		adreno_regread(device, ft_detect_regs[i],
+			&curr_reg_val[i]);
 	}
 
-	return hang_detected;
+	/* Read the current global timestamp here */
+	kgsl_sharedmem_readl(&device->memstore,
+			&curr_global_ts,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+			eoptimestamp));
+	/* Make sure the memstore read has posted */
+	mb();
+
+	if (curr_global_ts == prev_global_ts) {
+
+		/* Get the current context here */
+		if (context == NULL) {
+			kgsl_sharedmem_readl(&device->memstore,
+				&curr_context_id,
+				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+				current_context));
+			/* Make sure the memstore read has posted */
+			mb();
+			context = idr_find(&device->context_idr,
+				curr_context_id);
+			if (context != NULL) {
+				curr_context = context->devctxt;
+				curr_context->ib_gpu_time_used = 0;
+			} else {
+				KGSL_DRV_ERR(device,
+					"Fault tolerance no context found\n");
+			}
+		}
+
+		if (curr_context != NULL) {
+
+			curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
+			KGSL_FT_INFO(device,
+			"Proc %s used GPU Time %d ms on timestamp 0x%X\n",
+			curr_context->pid_name, curr_context->ib_gpu_time_used,
+			curr_global_ts+1);
+
+			for (i = 0; i < ft_detect_regs_count; i++) {
+				if (curr_reg_val[i] != prev_reg_val[i]) {
+					fast_hang_detected = 0;
+
+					/* Check for long IB here */
+					if ((i >=
+						LONG_IB_DETECT_REG_INDEX_START)
+						&&
+						(i <=
+						LONG_IB_DETECT_REG_INDEX_END))
+						long_ib_detected = 0;
+				}
+			}
+
+			if (fast_hang_detected) {
+				KGSL_FT_ERR(device,
+					"Proc %s, ctxt_id %d ts %d triggered fault tolerance"
+					" on global ts %d\n",
+					curr_context->pid_name, curr_context->id
+					, (kgsl_readtimestamp(device, context,
+					KGSL_TIMESTAMP_RETIRED)+1),
+					curr_global_ts+1);
+				return 1;
+			}
+
+			if ((long_ib_detected) &&
+				(!(curr_context->flags &
+				 CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
+				curr_context->ib_gpu_time_used +=
+					KGSL_TIMEOUT_PART;
+				if (curr_context->ib_gpu_time_used >
+					KGSL_TIMEOUT_LONG_IB_DETECTION) {
+					if (adreno_dev->long_ib_ts !=
+						curr_global_ts) {
+						KGSL_FT_ERR(device,
+						"Proc %s, ctxt_id %d ts %d"
+						"used GPU for %d ms long ib "
+						"detected on global ts %d\n",
+						curr_context->pid_name,
+						curr_context->id,
+						(kgsl_readtimestamp(device,
+						context,
+						KGSL_TIMESTAMP_RETIRED)+1),
+						curr_context->ib_gpu_time_used,
+						curr_global_ts+1);
+						adreno_dev->long_ib = 1;
+						adreno_dev->long_ib_ts =
+								curr_global_ts;
+						curr_context->ib_gpu_time_used =
+								0;
+						return 1;
+					}
+				}
+			}
+		} else {
+			KGSL_FT_ERR(device,
+				"Last context unknown id:%d\n",
+				curr_context_id);
+		}
+	} else {
+		/* GPU is moving forward */
+		prev_global_ts = curr_global_ts;
+		context = NULL;
+		curr_context = NULL;
+		adreno_dev->long_ib = 0;
+		adreno_dev->long_ib_ts = 0;
+	}
+
+
+	/* If no hang is detected, copy the current reg values
+	 * to the previous values and return no hang */
+	for (i = 0; i < ft_detect_regs_count; i++)
+			prev_reg_val[i] = curr_reg_val[i];
+	return 0;
 }
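
To put numbers on the long-IB check above: ib_gpu_time_used grows by KGSL_TIMEOUT_PART on each pass in which the global timestamp has not moved, and a long IB is flagged once it exceeds KGSL_TIMEOUT_LONG_IB_DETECTION. With the 50 ms / 2000 ms values defined later in this patch in kgsl_device.h, that is on the order of 40 consecutive stuck passes, roughly two seconds (fewer in practice, since this path also adds the part timeout a second time). A tiny stand-alone sketch of the accumulation:

#include <stdio.h>

#define KGSL_TIMEOUT_PART		50	/* msec between detection passes */
#define KGSL_TIMEOUT_LONG_IB_DETECTION	2000	/* msec */

int main(void)
{
	unsigned int ib_gpu_time_used = 0;
	unsigned int passes = 0;

	/* Each pass with an unchanged global timestamp accumulates one part. */
	do {
		ib_gpu_time_used += KGSL_TIMEOUT_PART;
		passes++;
	} while (ib_gpu_time_used <= KGSL_TIMEOUT_LONG_IB_DETECTION);

	printf("long IB flagged after %u stuck passes (%u ms)\n",
	       passes, ib_gpu_time_used);
	return 0;
}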
 
 /**
@@ -2362,7 +2909,8 @@
  * @context - pointer to the active KGSL context
  * @timestamp - the timestamp that the process was waiting for
  *
- * Process a possible GPU hang and try to recover from it cleanly
+ * Process a possible GPU hang and try fault tolerance from it
+ * cleanly
  */
 static int adreno_handle_hang(struct kgsl_device *device,
 	struct kgsl_context *context, unsigned int timestamp)
@@ -2370,6 +2918,7 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int context_id = _get_context_id(context);
 	unsigned int ts_issued;
+	unsigned int rptr;
 
 	/* Do one last check to see if we somehow made it through */
 	if (kgsl_check_timestamp(device, context, timestamp))
@@ -2377,15 +2926,22 @@
 
 	ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
 
-	KGSL_DRV_ERR(device,
+	adreno_regread(device, REG_CP_RB_RPTR, &rptr);
+
+	/* Make sure timestamp check finished before triggering a hang */
+	mb();
+
+	KGSL_DRV_WARN(device,
 		     "Device hang detected while waiting for timestamp: "
 		     "<%d:0x%x>, last submitted timestamp: <%d:0x%x>, "
-		     "wptr: 0x%x\n",
-		      context_id, timestamp, context_id, ts_issued,
-		      adreno_dev->ringbuffer.wptr);
+		     "retired timestamp: <%d:0x%x>, wptr: 0x%x, rptr: 0x%x\n",
+		      context_id, timestamp, context_id, ts_issued, context_id,
+			kgsl_readtimestamp(device, context,
+			KGSL_TIMESTAMP_RETIRED),
+		      adreno_dev->ringbuffer.wptr, rptr);
 
-	/* Return 0 after a successful recovery */
-	if (!adreno_dump_and_recover(device))
+	/* Return 0 after a successful fault tolerance */
+	if (!adreno_dump_and_exec_ft(device))
 		return 0;
 
 	return -ETIMEDOUT;
@@ -2439,7 +2995,7 @@
 	struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	unsigned int context_id = _get_context_id(context);
-	unsigned int prev_reg_val[hang_detect_regs_count];
+	unsigned int prev_reg_val[ft_detect_regs_count];
 	unsigned int time_elapsed = 0;
 	unsigned int wait;
 	int ts_compare = 1;
@@ -2498,7 +3054,7 @@
 		}
 
 		/* Check to see if the GPU is hung */
-		if (adreno_hang_detect(device, prev_reg_val)) {
+		if (adreno_ft_detect(device, prev_reg_val)) {
 			ret = adreno_handle_hang(device, context, timestamp);
 			break;
 		}
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c1f2423..949ac97 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -34,6 +34,7 @@
 #define KGSL_CMD_FLAGS_NONE             0x00000000
 #define KGSL_CMD_FLAGS_PMODE		0x00000001
 #define KGSL_CMD_FLAGS_INTERNAL_ISSUE	0x00000002
+#define KGSL_CMD_FLAGS_EOF	        0x00000100
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
@@ -41,6 +42,8 @@
 #define KGSL_CMD_INTERNAL_IDENTIFIER	0x2EEDD00D
 #define KGSL_START_OF_IB_IDENTIFIER	0x2EADEABE
 #define KGSL_END_OF_IB_IDENTIFIER	0x2ABEDEAD
+#define KGSL_END_OF_FRAME_IDENTIFIER	0x2E0F2E0F
+#define KGSL_NOP_IB_IDENTIFIER	        0x20F20F20
 
 #ifdef CONFIG_MSM_SCM
 #define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_tz)
@@ -101,6 +104,11 @@
 	unsigned int instruction_size;
 	unsigned int ib_check_level;
 	unsigned int fast_hang_detect;
+	unsigned int ft_policy;
+	unsigned int long_ib_detect;
+	unsigned int long_ib;
+	unsigned int long_ib_ts;
+	unsigned int ft_pf_policy;
 	unsigned int gpulist_index;
 	struct ocmem_buf *ocmem_hdl;
 	unsigned int ocmem_base;
@@ -133,8 +141,8 @@
 };
 
 /*
- * struct adreno_recovery_data - Structure that contains all information to
- * perform gpu recovery from hangs
+ * struct adreno_ft_data - Structure that contains all information to
+ * perform gpu fault tolerance
  * @ib1 - IB1 that the GPU was executing when hang happened
  * @context_id - Context which caused the hang
  * @global_eop - eoptimestamp at time of hang
@@ -142,11 +150,19 @@
  * @rb_size - Number of valid dwords in rb_buffer
  * @bad_rb_buffer - Buffer that holds commands from the hanging context
  * bad_rb_size - Number of valid dwords in bad_rb_buffer
+ * @good_rb_buffer - Buffer that holds commands from good contexts
+ * @good_rb_size - Number of valid dwords in good_rb_buffer
  * @last_valid_ctx_id - The last context from which commands were placed in
  * ringbuffer before the GPU hung
+ * @status - Result of handling the bad context commands (non-zero on failure)
+ * @ft_policy - Fault tolerance policy being applied
+ * @err_code - Fault tolerance error code
  * @fault - Indicates whether the hang was caused due to a pagefault
+ * @start_of_replay_cmds - Offset in ringbuffer from where commands can be
+ * replayed during fault tolerance
+ * @replay_for_snapshot - Offset in ringbuffer where IB's can be saved for
+ * replaying with snapshot
  */
-struct adreno_recovery_data {
+struct adreno_ft_data {
 	unsigned int ib1;
 	unsigned int context_id;
 	unsigned int global_eop;
@@ -154,10 +170,32 @@
 	unsigned int rb_size;
 	unsigned int *bad_rb_buffer;
 	unsigned int bad_rb_size;
+	unsigned int *good_rb_buffer;
+	unsigned int good_rb_size;
 	unsigned int last_valid_ctx_id;
-	int fault;
+	unsigned int status;
+	unsigned int ft_policy;
+	unsigned int err_code;
+	unsigned int start_of_replay_cmds;
+	unsigned int replay_for_snapshot;
 };
 
+/* Fault Tolerance policy flags */
+#define  KGSL_FT_DISABLE                  BIT(0)
+#define  KGSL_FT_REPLAY                   BIT(1)
+#define  KGSL_FT_SKIPIB                   BIT(2)
+#define  KGSL_FT_SKIPFRAME                BIT(3)
+#define  KGSL_FT_TEMP_DISABLE             BIT(4)
+#define  KGSL_FT_DEFAULT_POLICY           (KGSL_FT_REPLAY + KGSL_FT_SKIPIB)
+
+/* Pagefault policy flags */
+#define KGSL_FT_PAGEFAULT_INT_ENABLE         0x00000001
+#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE     0x00000002
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE   0x00000004
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT    0x00000008
+#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY     (KGSL_FT_PAGEFAULT_INT_ENABLE + \
+					KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+
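
The FT policy is a plain bitmask; the default combines replay and skip-IB (the header sums the BIT() values, which is equivalent to OR-ing them since the bits are disjoint). Below is a user-space sketch of composing such a policy word and walking it in the order _adreno_ft() tries the steps; the defines are copied locally so the snippet stands alone.

#include <stdio.h>

#define BIT(n)			(1U << (n))

#define KGSL_FT_DISABLE		BIT(0)
#define KGSL_FT_REPLAY		BIT(1)
#define KGSL_FT_SKIPIB		BIT(2)
#define KGSL_FT_SKIPFRAME	BIT(3)
#define KGSL_FT_TEMP_DISABLE	BIT(4)
#define KGSL_FT_DEFAULT_POLICY	(KGSL_FT_REPLAY | KGSL_FT_SKIPIB)

int main(void)
{
	unsigned int policy = KGSL_FT_DEFAULT_POLICY;	/* 0x6 */

	if (policy & (KGSL_FT_DISABLE | KGSL_FT_TEMP_DISABLE)) {
		printf("FT off: play only the good context commands\n");
		return 0;
	}
	if (policy & KGSL_FT_REPLAY)
		printf("step 1: replay the bad context commands as-is\n");
	if (policy & KGSL_FT_SKIPIB)
		printf("step 2: NOP the faulting IB and replay again\n");
	if (policy & KGSL_FT_SKIPFRAME)
		printf("step 3: skip commands up to the end-of-frame marker\n");
	printf("finally: resubmit the good context commands\n");
	return 0;
}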
 extern struct adreno_gpudev adreno_a2xx_gpudev;
 extern struct adreno_gpudev adreno_a3xx_gpudev;
 
@@ -179,8 +217,8 @@
 extern const unsigned int a330_registers[];
 extern const unsigned int a330_registers_count;
 
-extern unsigned int hang_detect_regs[];
-extern const unsigned int hang_detect_regs_count;
+extern unsigned int ft_detect_regs[];
+extern const unsigned int ft_detect_regs_count;
 
 
 int adreno_idle(struct kgsl_device *device);
@@ -211,9 +249,12 @@
 void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
 		int hang);
 
-int adreno_dump_and_recover(struct kgsl_device *device);
+int adreno_dump_and_exec_ft(struct kgsl_device *device);
 
-unsigned int adreno_hang_detect(struct kgsl_device *device,
+void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+			 size_t len, int start, int size);
+
+unsigned int adreno_ft_detect(struct kgsl_device *device,
 						unsigned int *prev_reg_val);
 
 static inline int adreno_is_a200(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 08c800e..9adfe69 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -52,8 +52,8 @@
 	0x2240, 0x227e,
 	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
 	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
-	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
-	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+	0x22ff, 0x22ff, 0x2340, 0x2343,
+	0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
 	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
 	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
 	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
@@ -61,8 +61,8 @@
 	0x25f0, 0x25f0,
 	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
 	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
-	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
-	0x2750, 0x2756, 0x2760, 0x2760, 0x300C, 0x300E, 0x301C, 0x301D,
+	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+	0x300C, 0x300E, 0x301C, 0x301D,
 	0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036,
 	0x303C, 0x303C, 0x305E, 0x305F,
 };
@@ -70,7 +70,7 @@
 const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2;
 
 /* Removed the following HLSQ register ranges from being read during
- * recovery since reading the registers may cause the device to hang:
+ * fault tolerance since reading the registers may cause the device to hang:
  */
 const unsigned int a3xx_hlsq_registers[] = {
 	0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23,
@@ -2824,10 +2824,6 @@
 	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
 	/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
 	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
-	/* Disable VBIF clock gating. This is to enable AXI running
-	 * higher frequency than GPU.
-	 */
-	{ A3XX_VBIF_CLKON, 1 },
 	{0, 0},
 };
 
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 1989ff5..ef599e9 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -64,5 +64,33 @@
 	adreno_dev->fast_hang_detect = 1;
 	debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
 			   &adreno_dev->fast_hang_detect);
+	/*
+	 * FT policy can be set to any of the options below.
+	 * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
+	 * KGSL_FT_REPLAY  -> BIT(1) Set to enable replay
+	 * KGSL_FT_SKIPIB  -> BIT(2) Set to skip IB
+	 * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
+	 * By default, the FT policy is set to KGSL_FT_DEFAULT_POLICY.
+	 */
+	adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
+	debugfs_create_u32("ft_policy", 0644, device->d_debugfs,
+			   &adreno_dev->ft_policy);
+	/* By default enable long IB detection */
+	adreno_dev->long_ib_detect = 1;
+	debugfs_create_u32("long_ib_detect", 0644, device->d_debugfs,
+			   &adreno_dev->long_ib_detect);
 
+	/*
+	 * FT pagefault policy can be set to any of the options below.
+	 * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
+	 * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE  -> BIT(1) Set to enable GPU HALT on
+	 * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
+	 * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE  -> BIT(2) Set to log only one
+	 * pagefault per page.
+	 * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
+	 * pagefault per INT.
+	 */
+	adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
+	debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
+			&adreno_dev->ft_pf_policy);
 }
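
These tunables end up as u32 debugfs files next to the existing fast_hang_detect entry. A small user-space helper for poking them is sketched below; the exact debugfs path depends on the target (something like /sys/kernel/debug/kgsl/kgsl-3d0/ is typical, but treat the path as an assumption), and the value is converted and written in decimal so it works regardless of how the attribute parses its input.

/* Sketch: write a numeric value to a kgsl debugfs tunable.
 * Usage (path is illustrative, not guaranteed):
 *   ./kgsl_tune /sys/kernel/debug/kgsl/kgsl-3d0/ft_policy 0x6
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long val;
	FILE *f;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <debugfs-file> <value>\n", argv[0]);
		return 1;
	}
	val = strtoul(argv[2], NULL, 0);	/* accepts 0x... or decimal */
	f = fopen(argv[1], "w");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	fprintf(f, "%lu\n", val);
	fclose(f);
	return 0;
}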
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index b109e14..023b057 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -155,6 +155,8 @@
 	if (drawctxt == NULL)
 		return -ENOMEM;
 
+	drawctxt->pid = task_pid_nr(current);
+	strlcpy(drawctxt->pid_name, current->comm, TASK_COMM_LEN);
 	drawctxt->pagetable = pagetable;
 	drawctxt->bin_base_offset = 0;
 	drawctxt->id = context->id;
@@ -177,6 +179,9 @@
 		drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
 	}
 
+	if (flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+		drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
+
 	ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
 	if (ret)
 		goto err;
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 65dbd4c..aba29ae 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
 #ifndef __ADRENO_DRAWCTXT_H
 #define __ADRENO_DRAWCTXT_H
 
+#include <linux/sched.h>
+
 #include "adreno_pm4types.h"
 #include "a2xx_reg.h"
 
@@ -44,12 +46,16 @@
 #define CTXT_FLAGS_TRASHSTATE		BIT(10)
 /* per context timestamps enabled */
 #define CTXT_FLAGS_PER_CONTEXT_TS	BIT(11)
-/* Context has caused a GPU hang and recovered properly */
-#define CTXT_FLAGS_GPU_HANG_RECOVERED	BIT(12)
+/* Context has caused a GPU hang and fault tolerance successful */
+#define CTXT_FLAGS_GPU_HANG_FT	BIT(12)
 /* Context is being destroyed so dont save it */
 #define CTXT_FLAGS_BEING_DESTROYED	BIT(13)
 /* User mode generated timestamps enabled */
 #define CTXT_FLAGS_USER_GENERATED_TS    BIT(14)
+/* Context skip till EOF */
+#define CTXT_FLAGS_SKIP_EOF             BIT(15)
+/* Context no fault tolerance */
+#define CTXT_FLAGS_NO_FAULT_TOLERANCE  BIT(16)
 
 struct kgsl_device;
 struct adreno_device;
@@ -82,8 +88,13 @@
 };
 
 struct adreno_context {
+	pid_t pid;
+	char pid_name[TASK_COMM_LEN];
 	unsigned int id;
+	unsigned int ib_gpu_time_used;
 	uint32_t flags;
+	uint32_t pagefault;
+	unsigned long pagefault_ts;
 	struct kgsl_pagetable *pagetable;
 	struct kgsl_memdesc gpustate;
 	unsigned int reg_restore[3];
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 2b9c286..29d689b 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -305,7 +305,7 @@
 #endif
 }
 
-static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+void adreno_dump_rb(struct kgsl_device *device, const void *buf,
 			 size_t len, int start, int size)
 {
 	const uint32_t *ptr = buf;
@@ -729,6 +729,7 @@
 	unsigned int ts_processed = 0xdeaddead;
 	struct kgsl_context *context;
 	unsigned int context_id;
+	unsigned int rbbm_status;
 
 	static struct ib_list ib_list;
 
@@ -738,12 +739,16 @@
 
 	mb();
 
-	msm_clk_dump_debug_info();
+	if (device->pm_dump_enable) {
+		msm_clk_dump_debug_info();
 
-	if (adreno_is_a2xx(adreno_dev))
-		adreno_dump_a2xx(device);
-	else if (adreno_is_a3xx(adreno_dev))
-		adreno_dump_a3xx(device);
+		if (adreno_is_a2xx(adreno_dev))
+			adreno_dump_a2xx(device);
+		else if (adreno_is_a3xx(adreno_dev))
+			adreno_dump_a3xx(device);
+	}
+
+	kgsl_regread(device, adreno_dev->gpudev->reg_rbbm_status, &rbbm_status);
 
 	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
 	cur_pt_base = pt_base;
@@ -758,6 +763,18 @@
 	kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
 	kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
 
+	/* If postmortem dump is not enabled, dump minimal set and return */
+	if (!device->pm_dump_enable) {
+
+		KGSL_LOG_DUMP(device,
+			"STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
+			" | RPTR: %04X | WPTR: %04X\n",
+			rbbm_status,  cp_ib1_base, cp_ib1_bufsz, cp_ib2_base,
+			cp_ib2_bufsz, cp_rb_rptr, cp_rb_wptr);
+
+		return 0;
+	}
+
 	kgsl_sharedmem_readl(&device->memstore,
 			(unsigned int *) &context_id,
 			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
@@ -766,7 +783,7 @@
 	if (context) {
 		ts_processed = kgsl_readtimestamp(device, context,
 						  KGSL_TIMESTAMP_RETIRED);
-		KGSL_LOG_DUMP(device, "CTXT: %d  TIMESTM RTRD: %08X\n",
+		KGSL_LOG_DUMP(device, "FT CTXT: %d  TIMESTM RTRD: %08X\n",
 				context->id, ts_processed);
 	} else
 		KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 69b34fa..4333f2f 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -64,7 +64,7 @@
 	unsigned long wait_time;
 	unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
 	unsigned long wait_time_part;
-	unsigned int prev_reg_val[hang_detect_regs_count];
+	unsigned int prev_reg_val[ft_detect_regs_count];
 
 	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
@@ -109,7 +109,7 @@
 		if (time_after(jiffies, wait_time_part)) {
 			wait_time_part = jiffies +
 				msecs_to_jiffies(KGSL_TIMEOUT_PART);
-			if ((adreno_hang_detect(rb->device,
+			if ((adreno_ft_detect(rb->device,
 						prev_reg_val))){
 				KGSL_DRV_ERR(rb->device,
 				"Hang detected while waiting for freespace in"
@@ -129,7 +129,7 @@
 		continue;
 
 err:
-		if (!adreno_dump_and_recover(rb->device)) {
+		if (!adreno_dump_and_exec_ft(rb->device)) {
 			if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
 				KGSL_CTXT_WARN(rb->device,
 				"Context %p caused a gpu hang. Will not accept commands for context %d\n",
@@ -138,7 +138,7 @@
 			}
 			wait_time = jiffies + wait_timeout;
 		} else {
-			/* GPU is hung and we cannot recover */
+			/* GPU is hung and fault tolerance failed */
 			BUG();
 		}
 	}
@@ -581,7 +581,7 @@
 	if (adreno_is_a2xx(adreno_dev))
 		total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
 
-	total_sizedwords += 2; /* scratchpad ts for recovery */
+	total_sizedwords += 2; /* scratchpad ts for fault tolerance */
 	total_sizedwords += 3; /* sop timestamp */
 	total_sizedwords += 4; /* eop timestamp */
 
@@ -629,7 +629,7 @@
 	}
 	timestamp = rb->timestamp[context_id];
 
-	/* scratchpad ts for recovery */
+	/* scratchpad ts for fault tolerance */
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 
@@ -756,6 +756,11 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
 	}
 
+	if (flags & KGSL_CMD_FLAGS_EOF) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_END_OF_FRAME_IDENTIFIER);
+	}
+
 	adreno_ringbuffer_submit(rb);
 
 	return timestamp;
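
The end-of-frame NOP written above is the marker that the KGSL_FT_SKIPFRAME path scans for in _adreno_ft(): it finds the first EOF identifier in the extracted bad buffer and turns everything before it into a single NOP packet. A toy illustration of that scan-and-skip idea follows; toy_nop_packet() is only a stand-in for the driver's cp_nop_packet() and does not use the real PM4 encoding.

#include <stdio.h>

#define EOF_IDENTIFIER	0x2E0F2E0F	/* mirrors KGSL_END_OF_FRAME_IDENTIFIER */
#define CMDS		10

/* Stand-in for cp_nop_packet(): encode "skip the next n dwords" as a header. */
static unsigned int toy_nop_packet(unsigned int n)
{
	return 0x80000000u | n;
}

int main(void)
{
	unsigned int rb[CMDS] = {
		0x11, 0x12, 0x13, EOF_IDENTIFIER, 0x21,
		0x22, 0x23, 0x24, EOF_IDENTIFIER, 0x31,
	};
	unsigned int i;

	/* Scan for the first end-of-frame marker, as the SKIPFRAME path does. */
	for (i = 0; i < CMDS; i++) {
		if (rb[i] == EOF_IDENTIFIER) {
			/* One NOP header at the start makes the CP skip i dwords. */
			rb[0] = toy_nop_packet(i);
			break;
		}
	}

	if (i == CMDS)
		printf("no EOF in buffer, skip until the next submission\n");
	else
		printf("frame skipped: rb[0] now NOPs %u dwords\n", i);
	return 0;
}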
@@ -999,20 +1004,12 @@
 	drawctxt = context->devctxt;
 
 	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
-		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
+		KGSL_CTXT_ERR(device, "proc %s failed fault tolerance"
 			" will not accept commands for context %d\n",
-			drawctxt, drawctxt->id);
+			drawctxt->pid_name, drawctxt->id);
 		return -EDEADLK;
 	}
 
-	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
-				GFP_KERNEL);
-	if (!link) {
-		KGSL_CORE_ERR("kzalloc(%d) failed\n",
-			sizeof(unsigned int) * (numibs * 3 + 4));
-		return -ENOMEM;
-	}
-
 	/*When preamble is enabled, the preamble buffer with state restoration
 	commands are stored in the first node of the IB chain. We can skip that
 	if a context switch hasn't occured */
@@ -1021,6 +1018,27 @@
 		adreno_dev->drawctxt_active == drawctxt)
 		start_index = 1;
 
+	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+		KGSL_CTXT_ERR(device,
+			"proc %s triggered fault tolerance"
+			" skipping commands for context till EOF %d\n",
+			drawctxt->pid_name, drawctxt->id);
+		if (flags & KGSL_CMD_FLAGS_EOF)
+			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+		if (start_index)
+			numibs = 1;
+		else
+			numibs = 0;
+	}
+
+	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
+				GFP_KERNEL);
+	if (!link) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			sizeof(unsigned int) * (numibs * 3 + 4));
+		return -ENOMEM;
+	}
+
 	if (!start_index) {
 		*cmds++ = cp_nop_packet(1);
 		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
@@ -1060,7 +1078,7 @@
 
 	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
 					drawctxt,
-					0,
+					(flags & KGSL_CMD_FLAGS_EOF),
 					&link[0], (cmds - link), *timestamp);
 
 #ifdef CONFIG_MSM_KGSL_CFF_DUMP
@@ -1072,173 +1090,21 @@
 	adreno_idle(device);
 #endif
 
-	/* If context hung and recovered then return error so that the
-	 * application may handle it */
-
-	ret = (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED) ?
-		-EDEADLK : 0;
+	/*
+	 * If the context hung and fault tolerance succeeded, return an
+	 * error so that the application may handle it
+	 */
+	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
+		drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
+		ret = -EPROTO;
+	} else
+		ret = 0;
 
 done:
 	kfree(link);
 	return ret;
 }
 
-static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
-					unsigned int *ptr,
-					bool inc)
-{
-	int status = -EINVAL;
-	unsigned int val1;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int start_ptr = *ptr;
-
-	while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
-		if (inc)
-			start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
-									size);
-		else
-			start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
-									size);
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
-		if (KGSL_CMD_IDENTIFIER == val1) {
-			if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
-				start_ptr = adreno_ringbuffer_dec_wrapped(
-							start_ptr, size);
-				*ptr = start_ptr;
-				status = 0;
-				break;
-		}
-	}
-	return status;
-}
-
-static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
-					unsigned int *rb_rptr,
-					unsigned int global_eop,
-					bool inc)
-{
-	int status = -EINVAL;
-	unsigned int temp_rb_rptr = *rb_rptr;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int val[3];
-	int i = 0;
-	bool check = false;
-
-	if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
-		return status;
-
-	do {
-		/* when decrementing we need to decrement first and
-		 * then read make sure we cover all the data */
-		if (!inc)
-			temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
-					temp_rb_rptr, size);
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
-					temp_rb_rptr);
-
-		if (check && ((inc && val[i] == global_eop) ||
-			(!inc && (val[i] ==
-			cp_type3_packet(CP_MEM_WRITE, 2) ||
-			val[i] == CACHE_FLUSH_TS)))) {
-			/* decrement i, i.e i = (i - 1 + 3) % 3 if
-			 * we are going forward, else increment i */
-			i = (i + 2) % 3;
-			if (val[i] == rb->device->memstore.gpuaddr +
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-						eoptimestamp)) {
-				int j = ((i + 2) % 3);
-				if ((inc && (val[j] == CACHE_FLUSH_TS ||
-						val[j] == cp_type3_packet(
-							CP_MEM_WRITE, 2))) ||
-					(!inc && val[j] == global_eop)) {
-						/* Found the global eop */
-						status = 0;
-						break;
-				}
-			}
-			/* if no match found then increment i again
-			 * since we decremented before matching */
-			i = (i + 1) % 3;
-		}
-		if (inc)
-			temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
-						temp_rb_rptr, size);
-
-		i = (i + 1) % 3;
-		if (2 == i)
-			check = true;
-	} while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
-	/* temp_rb_rptr points to the command stream after global eop,
-	 * move backward till the start of command sequence */
-	if (!status) {
-		status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
-		if (!status) {
-			*rb_rptr = temp_rb_rptr;
-			KGSL_DRV_ERR(rb->device,
-			"Offset of cmd sequence after eop timestamp: 0x%x\n",
-			temp_rb_rptr / sizeof(unsigned int));
-		}
-	}
-	if (status)
-		KGSL_DRV_ERR(rb->device,
-		"Failed to find the command sequence after eop timestamp\n");
-	return status;
-}
-
-static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
-				unsigned int *rb_rptr,
-				unsigned int ib1)
-{
-	int status = -EINVAL;
-	unsigned int temp_rb_rptr = *rb_rptr;
-	unsigned int size = rb->buffer_desc.size;
-	unsigned int val[2];
-	int i = 0;
-	bool check = false;
-	bool ctx_switch = false;
-
-	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
-		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
-
-		if (check && val[i] == ib1) {
-			/* decrement i, i.e i = (i - 1 + 2) % 2 */
-			i = (i + 1) % 2;
-			if (adreno_cmd_is_ib(val[i])) {
-				/* go till start of command sequence */
-				status = _find_start_of_cmd_seq(rb,
-						&temp_rb_rptr, false);
-				KGSL_DRV_ERR(rb->device,
-				"Found the hanging IB at offset 0x%x\n",
-				temp_rb_rptr / sizeof(unsigned int));
-				break;
-			}
-			/* if no match the increment i since we decremented
-			 * before checking */
-			i = (i + 1) % 2;
-		}
-		/* Make sure you do not encounter a context switch twice, we can
-		 * encounter it once for the bad context as the start of search
-		 * can point to the context switch */
-		if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
-			if (ctx_switch) {
-				KGSL_DRV_ERR(rb->device,
-				"Context switch encountered before bad "
-				"IB found\n");
-				break;
-			}
-			ctx_switch = true;
-		}
-		i = (i + 1) % 2;
-		if (1 == i)
-			check = true;
-		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
-								size);
-	}
-	if  (!status)
-		*rb_rptr = temp_rb_rptr;
-	return status;
-}
-
 static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
 				unsigned int rb_rptr)
 {
@@ -1261,7 +1127,7 @@
 				kgsl_sharedmem_writel(&rb->buffer_desc,
 					temp_rb_rptr, cp_nop_packet(1));
 			}
-			KGSL_DRV_ERR(rb->device,
+			KGSL_FT_INFO(rb->device,
 			"Turned preamble on at offset 0x%x\n",
 			temp_rb_rptr / 4);
 			break;
@@ -1283,21 +1149,41 @@
 	}
 }
 
-static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
-		unsigned int rb_rptr, unsigned int *temp_rb_buffer,
-		int *rb_size, unsigned int *bad_rb_buffer,
-		int *bad_rb_size,
-		int *last_valid_ctx_id)
+void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				struct adreno_ft_data *ft_data)
 {
-	unsigned int good_rb_idx = 0, cmd_start_idx = 0;
+	struct kgsl_device *device = rb->device;
+	unsigned int rb_rptr = ft_data->start_of_replay_cmds;
+	unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
+	unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
+	unsigned int cmd_start_idx = 0;
 	unsigned int val1 = 0;
-	struct kgsl_context *k_ctxt;
-	struct adreno_context *a_ctxt;
-	unsigned int bad_rb_idx = 0;
 	int copy_rb_contents = 0;
 	unsigned int temp_rb_rptr;
+	struct kgsl_context *k_ctxt;
+	struct adreno_context *a_ctxt;
 	unsigned int size = rb->buffer_desc.size;
-	unsigned int good_cmd_start_idx = 0;
+	unsigned int *temp_rb_buffer = ft_data->rb_buffer;
+	int *rb_size = &ft_data->rb_size;
+	unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
+	int *bad_rb_size = &ft_data->bad_rb_size;
+	unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
+	int *good_rb_size = &ft_data->good_rb_size;
+
+	/*
+	 * If the start index from where commands need to be copied is invalid
+	 * then no need to save off any commands
+	 */
+	if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
+		return;
+
+	k_ctxt = idr_find(&device->context_idr, ft_data->context_id);
+	if (k_ctxt) {
+		a_ctxt = k_ctxt->devctxt;
+		if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
+			_turn_preamble_on_for_ib_seq(rb, rb_rptr);
+	}
+	k_ctxt = NULL;
 
 	/* Walk the rb from the context switch. Omit any commands
 	 * for an invalid context. */
@@ -1307,9 +1193,11 @@
 		if (KGSL_CMD_IDENTIFIER == val1) {
 			/* Start is the NOP dword that comes before
 			 * KGSL_CMD_IDENTIFIER */
-			cmd_start_idx = bad_rb_idx - 1;
-			if (copy_rb_contents)
-				good_cmd_start_idx = good_rb_idx - 1;
+			cmd_start_idx = temp_rb_idx - 1;
+			if ((copy_rb_contents) && (good_rb_idx))
+				last_good_cmd_end_idx = good_rb_idx - 1;
+			if ((!copy_rb_contents) && (bad_rb_idx))
+				last_bad_cmd_end_idx = bad_rb_idx - 1;
 		}
 
 		/* check for context switch indicator */
@@ -1335,86 +1223,48 @@
 				!(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
 				!k_ctxt)) {
 				for (temp_idx = cmd_start_idx;
-					temp_idx < bad_rb_idx;
+					temp_idx < temp_rb_idx;
 					temp_idx++)
-					temp_rb_buffer[good_rb_idx++] =
-						bad_rb_buffer[temp_idx];
-				*last_valid_ctx_id = val2;
+					good_rb_buffer[good_rb_idx++] =
+						temp_rb_buffer[temp_idx];
+				ft_data->last_valid_ctx_id = val2;
 				copy_rb_contents = 1;
+				/* remove the good commands from bad buffer */
+				bad_rb_idx = last_bad_cmd_end_idx;
 			} else if (copy_rb_contents && k_ctxt &&
 				(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
-				/* If we are changing to bad context then remove
-				 * the dwords we copied for this sequence from
-				 * the good buffer */
-				good_rb_idx = good_cmd_start_idx;
+
+				/* If we are switching back to a bad context
+				 * from a good one, the dwords of this sequence
+				 * were not copied to the bad buffer, so copy
+				 * them over now */
+				for (temp_idx = cmd_start_idx;
+					temp_idx < temp_rb_idx;
+					temp_idx++)
+					bad_rb_buffer[bad_rb_idx++] =
+						temp_rb_buffer[temp_idx];
+				/* If we are changing to bad context then
+				 * remove the dwords we copied for this
+				 * sequence from the good buffer */
+				good_rb_idx = last_good_cmd_end_idx;
 				copy_rb_contents = 0;
 			}
 			}
 		}
 
 		if (copy_rb_contents)
-			temp_rb_buffer[good_rb_idx++] = val1;
-		/* Copy both good and bad commands for replay to the bad
-		 * buffer */
-		bad_rb_buffer[bad_rb_idx++] = val1;
+			good_rb_buffer[good_rb_idx++] = val1;
+		else
+			bad_rb_buffer[bad_rb_idx++] = val1;
+
+		/* Copy both good and bad commands to temp buffer */
+		temp_rb_buffer[temp_rb_idx++] = val1;
 
 		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
 	}
-	*rb_size = good_rb_idx;
+	*good_rb_size = good_rb_idx;
 	*bad_rb_size = bad_rb_idx;
-}
-
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
-				struct adreno_recovery_data *rec_data)
-{
-	int status;
-	struct kgsl_device *device = rb->device;
-	unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
-	struct kgsl_context *context;
-	struct adreno_context *adreno_context;
-
-	context = idr_find(&device->context_idr, rec_data->context_id);
-
-	/* Look for the command stream that is right after the global eop */
-	status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
-				rec_data->global_eop + 1, false);
-	if (status)
-		goto done;
-
-	if (context) {
-		adreno_context = context->devctxt;
-
-		if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
-			if (rec_data->ib1) {
-				status = _find_hanging_ib_sequence(rb, &rb_rptr,
-								rec_data->ib1);
-				if (status)
-					goto copy_rb_contents;
-			}
-			_turn_preamble_on_for_ib_seq(rb, rb_rptr);
-		} else {
-			status = -EINVAL;
-		}
-	}
-
-copy_rb_contents:
-	_copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
-				&rec_data->rb_size,
-				rec_data->bad_rb_buffer,
-				&rec_data->bad_rb_size,
-				&rec_data->last_valid_ctx_id);
-	/* If we failed to get the hanging IB sequence then we cannot execute
-	 * commands from the bad context or preambles not supported */
-	if (status) {
-		rec_data->bad_rb_size = 0;
-		status = 0;
-	}
-	/* If there is no context then that means there are no commands for
-	 * good case */
-	if (!context)
-		rec_data->rb_size = 0;
-done:
-	return status;
+	*rb_size = temp_rb_idx;
 }
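
The rewritten extract routine performs a three-way split of the ringbuffer: every dword goes to the temp buffer, dwords belonging to the hung context accumulate in the bad buffer (the replay candidates), and everything else goes to the good buffer that is always resubmitted. The toy below tags each dword with a context id up front purely to illustrate that split; the real code instead tracks the current context via KGSL_CONTEXT_TO_MEM_IDENTIFIER switches in the stream and shuffles partial command sequences between the buffers.

#include <stdio.h>

#define RB_DWORDS 8

struct rb_entry {
	unsigned int ctx_id;
	unsigned int dword;
};

int main(void)
{
	/* Toy ringbuffer: context 7 is the hung context. */
	const unsigned int hung_ctx = 7;
	const struct rb_entry rb[RB_DWORDS] = {
		{3, 0xA0}, {3, 0xA1}, {7, 0xB0}, {7, 0xB1},
		{7, 0xB2}, {3, 0xA2}, {9, 0xC0}, {9, 0xC1},
	};
	unsigned int good[RB_DWORDS], bad[RB_DWORDS], all[RB_DWORDS];
	unsigned int good_n = 0, bad_n = 0, all_n = 0, i;

	for (i = 0; i < RB_DWORDS; i++) {
		all[all_n++] = rb[i].dword;		/* temp buffer: everything */
		if (rb[i].ctx_id == hung_ctx)
			bad[bad_n++] = rb[i].dword;	/* replay candidates */
		else
			good[good_n++] = rb[i].dword;	/* always resubmitted */
	}
	printf("all=%u good=%u bad=%u dwords\n", all_n, good_n, bad_n);
	return 0;
}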
 
 void
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index e87b506..fa03c05 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,7 +27,7 @@
 
 struct kgsl_device;
 struct kgsl_device_private;
-struct adreno_recovery_data;
+struct adreno_ft_data;
 
 #define GSL_RB_MEMPTRS_SCRATCH_COUNT	 8
 struct kgsl_rbmemptrs {
@@ -114,8 +114,8 @@
 
 void kgsl_cp_intrcallback(struct kgsl_device *device);
 
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
-				struct adreno_recovery_data *rec_data);
+void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				struct adreno_ft_data *ft_data);
 
 void
 adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index e2869d4..2b0d5d9 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3047,21 +3047,21 @@
 			kgsl_idle(device);
 
 	}
-	KGSL_LOG_DUMP(device, "|%s| Dump Started\n", device->name);
-	KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+
+	if (device->pm_dump_enable) {
+
+		KGSL_LOG_DUMP(device,
+			"POWER: NAP ALLOWED = %d | START_STOP_SLEEP_WAKE = %d\n"
+			, pwr->nap_allowed, pwr->strtstp_sleepwake);
+
+		KGSL_LOG_DUMP(device,
+			"POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
 			pwr->power_flags, pwr->active_pwrlevel);
 
-	KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
-		pwr->interval_timeout);
+		KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
+			pwr->interval_timeout);
 
-	KGSL_LOG_DUMP(device, "POWER: NAP ALLOWED = %d | START_STOP_SLEEP_WAKE = %d\n",
-		pwr->nap_allowed, pwr->strtstp_sleepwake);
-
-	KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
-				  kgsl_get_clkrate(pwr->grp_clks[0]));
-
-	KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
-		kgsl_get_clkrate(pwr->ebi1_clk));
+	}
 
 	/* Disable the idle timer so we don't get interrupted */
 	del_timer_sync(&device->idle_timer);
@@ -3089,7 +3089,7 @@
 	/* On a manual trigger, turn on the interrupts and put
 	   the clocks to sleep.  They will recover themselves
 	   on the next event.  For a hang, leave things as they
-	   are until recovery kicks in. */
+	   are until fault tolerance kicks in. */
 
 	if (manual) {
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
@@ -3099,8 +3099,6 @@
 		kgsl_pwrctrl_sleep(device);
 	}
 
-	KGSL_LOG_DUMP(device, "|%s| Dump Finished\n", device->name);
-
 	return 0;
 }
 EXPORT_SYMBOL(kgsl_postmortem_dump);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 76998db..9dfda32 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -70,6 +70,20 @@
 	return 0;
 }
 
+static int pm_enabled_set(void *data, u64 val)
+{
+	struct kgsl_device *device = data;
+	device->pm_dump_enable = val;
+	return 0;
+}
+
+static int pm_enabled_get(void *data, u64 *val)
+{
+	struct kgsl_device *device = data;
+	*val = device->pm_dump_enable;
+	return 0;
+}
+
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
 			pm_regs_enabled_get,
@@ -79,6 +93,10 @@
 			pm_ib_enabled_get,
 			pm_ib_enabled_set, "%llu\n");
 
+DEFINE_SIMPLE_ATTRIBUTE(pm_enabled_fops,
+			pm_enabled_get,
+			pm_enabled_set, "%llu\n");
+
 static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
 {
 	*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
@@ -105,6 +123,7 @@
 KGSL_DEBUGFS_LOG(ctxt_log);
 KGSL_DEBUGFS_LOG(mem_log);
 KGSL_DEBUGFS_LOG(pwr_log);
+KGSL_DEBUGFS_LOG(ft_log);
 
 static int memfree_hist_print(struct seq_file *s, void *unused)
 {
@@ -166,6 +185,7 @@
 	device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
 	device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
 	device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->ft_log = KGSL_LOG_LEVEL_DEFAULT;
 
 	debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
 			    &cmd_log_fops);
@@ -179,6 +199,8 @@
 				&pwr_log_fops);
 	debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
 				&memfree_hist_fops);
+	debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
+				&ft_log_fops);
 
 	/* Create postmortem dump control files */
 
@@ -193,6 +215,9 @@
 			    &pm_regs_enabled_fops);
 	debugfs_create_file("ib_enabled", 0644, pm_d_debugfs, device,
 				    &pm_ib_enabled_fops);
+	device->pm_dump_enable = 0;
+	debugfs_create_file("enable", 0644, pm_d_debugfs, device,
+				    &pm_enabled_fops);
 
 }
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 66390fc..aca5660 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -24,9 +24,10 @@
 #include "kgsl_pwrscale.h"
 #include <linux/sync.h>
 
-#define KGSL_TIMEOUT_NONE       0
-#define KGSL_TIMEOUT_DEFAULT    0xFFFFFFFF
-#define KGSL_TIMEOUT_PART       50 /* 50 msec */
+#define KGSL_TIMEOUT_NONE           0
+#define KGSL_TIMEOUT_DEFAULT        0xFFFFFFFF
+#define KGSL_TIMEOUT_PART           50 /* 50 msec */
+#define KGSL_TIMEOUT_LONG_IB_DETECTION  2000 /* 2 sec*/
 
 #define FIRST_TIMEOUT (HZ / 2)
 
@@ -46,7 +47,7 @@
 #define KGSL_STATE_SLEEP	0x00000008
 #define KGSL_STATE_SUSPEND	0x00000010
 #define KGSL_STATE_HUNG		0x00000020
-#define KGSL_STATE_DUMP_AND_RECOVER	0x00000040
+#define KGSL_STATE_DUMP_AND_FT	0x00000040
 #define KGSL_STATE_SLUMBER	0x00000080
 
 #define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK  0x1000000
@@ -185,7 +186,7 @@
 	wait_queue_head_t wait_queue;
 	struct workqueue_struct *work_queue;
 	struct device *parentdev;
-	struct completion recovery_gate;
+	struct completion ft_gate;
 	struct dentry *d_debugfs;
 	struct idr context_idr;
 	struct early_suspend display_off;
@@ -211,6 +212,8 @@
 	int drv_log;
 	int mem_log;
 	int pwr_log;
+	int ft_log;
+	int pm_dump_enable;
 	struct kgsl_pwrscale pwrscale;
 	struct kobject pwrscale_kobj;
 	struct work_struct ts_expired_ws;
@@ -230,7 +233,7 @@
 #define KGSL_DEVICE_COMMON_INIT(_dev) \
 	.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
 	.suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
-	.recovery_gate = COMPLETION_INITIALIZER((_dev).recovery_gate),\
+	.ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
 	.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
 			kgsl_idle_check),\
 	.ts_expired_ws  = __WORK_INITIALIZER((_dev).ts_expired_ws,\
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 9a1a431..cb206ac 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -22,6 +22,7 @@
 #include "kgsl_device.h"
 #include "kgsl_sharedmem.h"
 #include "kgsl_trace.h"
+#include "adreno.h"
 
 #define KGSL_PAGETABLE_SIZE \
 	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
@@ -403,11 +404,22 @@
 {
 	unsigned int reg;
 	unsigned int ptbase;
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+	unsigned int no_page_fault_log = 0;
 
-	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
-	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+	device = mmu->device;
+	adreno_dev = ADRENO_DEVICE(device);
 
-	KGSL_MEM_CRIT(mmu->device,
+	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
+	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
+
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
+		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, reg);
+
+	if (!no_page_fault_log)
+		KGSL_MEM_CRIT(mmu->device,
 			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
 			reg & ~(PAGE_SIZE - 1),
 			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index cc01d37..e3cea88 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -283,10 +283,16 @@
 	struct kgsl_iommu_device *iommu_dev;
 	unsigned int ptbase, fsr;
 	unsigned int pid;
-
 	struct _mem_entry prev, next;
 	unsigned int fsynr0, fsynr1;
 	int write;
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+	unsigned int no_page_fault_log = 0;
+	unsigned int curr_context_id = 0;
+	unsigned int curr_global_ts = 0;
+	static struct adreno_context *curr_context;
+	static struct kgsl_context *context;
 
 	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
 	if (ret)
@@ -298,6 +304,8 @@
 		goto done;
 	}
 	iommu = mmu->priv;
+	device = mmu->device;
+	adreno_dev = ADRENO_DEVICE(device);
 
 	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
 					iommu_dev->ctx_id, TTBR0);
@@ -324,27 +332,55 @@
 		iommu_dev->ctx_id, fsr, fsynr0, fsynr1,
 		write ? "write" : "read");
 
-	_check_if_freed(iommu_dev, addr, pid);
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
+		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
 
-	KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
+	if (!no_page_fault_log) {
+		KGSL_MEM_CRIT(iommu_dev->kgsldev,
+			"GPU PAGE FAULT: addr = %lX pid = %d\n",
+			addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
+		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
+			iommu_dev->ctx_id, fsr);
 
-	_find_mem_entries(mmu, addr, ptbase, &prev, &next);
+		_check_if_freed(iommu_dev, addr, pid);
 
-	if (prev.gpuaddr)
-		_print_entry(iommu_dev->kgsldev, &prev);
-	else
-		KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+		KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
 
-	KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
+		_find_mem_entries(mmu, addr, ptbase, &prev, &next);
 
-	if (next.gpuaddr != 0xFFFFFFFF)
-		_print_entry(iommu_dev->kgsldev, &next);
-	else
-		KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+		if (prev.gpuaddr)
+			_print_entry(iommu_dev->kgsldev, &prev);
+		else
+			KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
+		KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
+
+		if (next.gpuaddr != 0xFFFFFFFF)
+			_print_entry(iommu_dev->kgsldev, &next);
+		else
+			KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
+	}
 
 	mmu->fault = 1;
 	iommu_dev->fault = 1;
 
+	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
+		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
+	context = idr_find(&device->context_idr, curr_context_id);
+	if (context != NULL)
+			curr_context = context->devctxt;
+
+	kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
+		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp));
+
+	/*
+	 * Store pagefault's timestamp in adreno context,
+	 * this information will be used in GFT
+	 */
+	curr_context->pagefault = 1;
+	curr_context->pagefault_ts = curr_global_ts;
+
 	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
 			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
 			write ? "write" : "read");
@@ -355,7 +391,8 @@
 	 * the GPU and trigger a snapshot. To stall the transaction return
 	 * EBUSY error.
 	 */
-	ret = -EBUSY;
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+		ret = -EBUSY;
 done:
 	return ret;
 }
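
The kgsl_iommu page-fault path above now records the fault in the faulting adreno context (a pagefault flag plus the end-of-pipe timestamp) and stalls the transaction with -EBUSY only when the fault-tolerance policy requests a GPU halt. A minimal userspace sketch of that decision flow; the flag name mirrors the patch, but its bit value and the context struct here are placeholders rather than the driver's real definitions:

    #include <stdio.h>

    /* Placeholder definitions, illustrative only. */
    #define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE (1 << 0)
    #define EBUSY 16

    struct fake_context { int pagefault; unsigned int pagefault_ts; };

    /* Record the fault for later fault-tolerance handling, then stall the
     * transaction (-EBUSY) only when the policy asks for a GPU halt. */
    static int page_fault(unsigned int ft_pf_policy, struct fake_context *ctx,
                          unsigned int global_ts)
    {
        ctx->pagefault = 1;
        ctx->pagefault_ts = global_ts;

        return (ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) ? -EBUSY : 0;
    }

    int main(void)
    {
        struct fake_context ctx = { 0 };
        printf("no halt: ret=%d\n", page_fault(0, &ctx, 100));
        printf("halt:    ret=%d ts=%u\n",
               page_fault(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &ctx, 101),
               ctx.pagefault_ts);
        return 0;
    }
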
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index 81a35e0..a7832e4 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2011,2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -103,6 +103,15 @@
 #define KGSL_PWR_CRIT(_dev, fmt, args...) \
 KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
 
+#define KGSL_FT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ft_log, fmt, ##args)
+
 /* Core error messages - these are for core KGSL functions that have
    no device associated with them (such as memory) */
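
The KGSL_FT_* macros added above reuse the driver's existing pattern: one log macro family per message class, each gated by a per-device verbosity field (here ft_log). A rough self-contained model of that pattern, using printf() instead of the kernel log helpers; the level values and names below are illustrative only:

    #include <stdio.h>

    /* Illustrative only: a per-device log level gates each message category. */
    struct fake_device { int ft_log; };

    #define LOG_ERR  3
    #define LOG_INFO 6

    /* Print only if the device's category level is at least the message level. */
    #define FAKE_LOG(dev, lvl, tag, fmt, ...) \
        do { if ((dev)->ft_log >= (lvl)) \
            printf("%s: " fmt, tag, ##__VA_ARGS__); } while (0)

    #define FT_INFO(dev, fmt, ...) FAKE_LOG(dev, LOG_INFO, "ft info", fmt, ##__VA_ARGS__)
    #define FT_ERR(dev, fmt, ...)  FAKE_LOG(dev, LOG_ERR,  "ft err",  fmt, ##__VA_ARGS__)

    int main(void)
    {
        struct fake_device dev = { .ft_log = LOG_ERR };
        FT_ERR(&dev, "fault tolerance event %d\n", 1);   /* printed */
        FT_INFO(&dev, "verbose detail\n");               /* suppressed at this level */
        return 0;
    }
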
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 01b255c..ccaceb3 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -333,6 +333,35 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
 
+unsigned int
+kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, unsigned int pt_base,
+					unsigned int addr)
+{
+	struct kgsl_pagetable *pt;
+	unsigned int ret = 0;
+
+	if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
+		return 0;
+	spin_lock(&kgsl_driver.ptlock);
+	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+			if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
+				ret = 1;
+				break;
+			} else {
+				pt->fault_addr = (addr & ~(PAGE_SIZE-1));
+				ret = 0;
+				break;
+			}
+
+		}
+	}
+	spin_unlock(&kgsl_driver.ptlock);
+
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);
+
 int kgsl_mmu_init(struct kgsl_device *device)
 {
 	int status = 0;
@@ -438,6 +467,7 @@
 
 	pagetable->name = name;
 	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
+	pagetable->fault_addr = 0xFFFFFFFF;
 
 	/*
 	 * create a separate kgsl pool for IOMMU, global mappings can be mapped
@@ -693,6 +723,8 @@
 {
 	struct gen_pool *pool;
 	int size;
+	unsigned int start_addr = 0;
+	unsigned int end_addr = 0;
 
 	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
 		return 0;
@@ -704,10 +736,19 @@
 
 	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
 
+	start_addr = memdesc->gpuaddr;
+	end_addr = (memdesc->gpuaddr + size);
+
 	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
 	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
 					&pagetable->tlb_flags);
+
+	/* If the fault address falls inside the unmapped buffer, clear it */
+	if ((pagetable->fault_addr >= start_addr) &&
+		(pagetable->fault_addr < end_addr))
+		pagetable->fault_addr = 0;
+
 	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
 	/* Remove the statistics */
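
The new kgsl_mmu_log_fault_addr() implements the log-once-per-page policy: it remembers the page of the last reported fault for the matching pagetable and tells the caller to skip duplicates, while kgsl_mmu_unmap() clears that record when the containing buffer goes away. A compact userspace model of the same bookkeeping (single pagetable, no locking), assuming a 4 KB page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Model of one pagetable's "last logged fault page" bookkeeping. */
    struct fake_pagetable { unsigned long fault_addr; };

    /* Returns 1 when the fault is on the same page as the last logged one,
     * i.e. the caller should skip the log; otherwise records the new page. */
    static int log_fault_addr(struct fake_pagetable *pt, unsigned long addr)
    {
        unsigned long page = addr & ~(PAGE_SIZE - 1);

        if (page == pt->fault_addr)
            return 1;
        pt->fault_addr = page;
        return 0;
    }

    /* When a buffer is unmapped, forget a fault address that fell inside it so
     * a later fault on reused addresses is logged again. */
    static void on_unmap(struct fake_pagetable *pt, unsigned long start,
                         unsigned long size)
    {
        if (pt->fault_addr >= start && pt->fault_addr < start + size)
            pt->fault_addr = 0;
    }

    int main(void)
    {
        struct fake_pagetable pt = { .fault_addr = 0xFFFFFFFF };
        printf("%d %d\n", log_fault_addr(&pt, 0x5000), log_fault_addr(&pt, 0x5010));
        on_unmap(&pt, 0x5000, PAGE_SIZE);
        printf("%d\n", log_fault_addr(&pt, 0x5020));   /* logged again after unmap */
        return 0;
    }
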
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 0458a13..2d48e86 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -114,6 +114,7 @@
 	} stats;
 	const struct kgsl_mmu_pt_ops *pt_ops;
 	unsigned int tlb_flags;
+	unsigned int fault_addr;
 	void *priv;
 };
 
@@ -211,6 +212,8 @@
 			uint32_t flags);
 int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
 					unsigned int pt_base);
+unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
+			unsigned int pt_base, unsigned int addr);
 int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
 			enum kgsl_deviceid id);
 void kgsl_mmu_ptpool_destroy(void *ptpool);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d9dbad8..c716731 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1081,7 +1081,7 @@
 			}
 		}
 	} else if (device->state & (KGSL_STATE_HUNG |
-					KGSL_STATE_DUMP_AND_RECOVER)) {
+					KGSL_STATE_DUMP_AND_FT)) {
 		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
 	}
 
@@ -1120,7 +1120,7 @@
 		break;
 	case KGSL_STATE_INIT:
 	case KGSL_STATE_HUNG:
-	case KGSL_STATE_DUMP_AND_RECOVER:
+	case KGSL_STATE_DUMP_AND_FT:
 		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
 					 &device->pwrctrl.power_flags))
 			break;
@@ -1144,9 +1144,9 @@
 		mutex_unlock(&device->mutex);
 		wait_for_completion(&device->hwaccess_gate);
 		mutex_lock(&device->mutex);
-	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+	} else if (device->state == KGSL_STATE_DUMP_AND_FT) {
 		mutex_unlock(&device->mutex);
-		wait_for_completion(&device->recovery_gate);
+		wait_for_completion(&device->ft_gate);
 		mutex_lock(&device->mutex);
 	} else if (device->state == KGSL_STATE_SLUMBER)
 		kgsl_pwrctrl_wake(device);
@@ -1385,7 +1385,7 @@
 		return "SUSPEND";
 	case KGSL_STATE_HUNG:
 		return "HUNG";
-	case KGSL_STATE_DUMP_AND_RECOVER:
+	case KGSL_STATE_DUMP_AND_FT:
 		return "DNR";
 	case KGSL_STATE_SLUMBER:
 		return "SLUMBER";
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index c4647a1..42e79a0 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -572,10 +572,9 @@
 	/* Freeze the snapshot on a hang until it gets read */
 	device->snapshot_frozen = (hang) ? 1 : 0;
 
-	/* log buffer info to aid in ramdump recovery */
-	KGSL_DRV_ERR(device, "snapshot created at va %p pa %lx size %d\n",
-			device->snapshot, __pa(device->snapshot),
-			device->snapshot_size);
+	/* log buffer info to aid in ramdump fault tolerance */
+	KGSL_DRV_ERR(device, "snapshot created at pa %lx size %d\n",
+			__pa(device->snapshot),	device->snapshot_size);
 	if (hang)
 		sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
 	return 0;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9ed3d53..41f79be 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -693,8 +693,8 @@
 
 	spin_lock_irq(&client->buffer_lock);
 	client->use_wake_lock = false;
-	wake_lock_destroy(&client->wake_lock);
 	spin_unlock_irq(&client->buffer_lock);
+	wake_lock_destroy(&client->wake_lock);
 
 	return 0;
 }
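
The evdev change above moves wake_lock_destroy() out of the buffer_lock critical section, so the only work done under the spinlock is flipping the use_wake_lock flag. A userspace analogy with POSIX spinlocks (not the kernel API; the names here are invented for illustration):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct client {
        pthread_spinlock_t buffer_lock;
        bool use_wake_lock;
        char *wake_lock;          /* stands in for the wake_lock object */
    };

    static void disable_wake_lock(struct client *c)
    {
        pthread_spin_lock(&c->buffer_lock);
        c->use_wake_lock = false;         /* publish the state change... */
        pthread_spin_unlock(&c->buffer_lock);
        free(c->wake_lock);               /* ...then destroy outside the lock */
        c->wake_lock = NULL;
    }

    int main(void)
    {
        struct client c = { .use_wake_lock = true };
        pthread_spin_init(&c.buffer_lock, PTHREAD_PROCESS_PRIVATE);
        c.wake_lock = malloc(16);
        disable_wake_lock(&c);
        printf("wake lock destroyed outside the spinlock\n");
        pthread_spin_destroy(&c.buffer_lock);
        return 0;
    }
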
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 29cf0c1..44146a4 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -88,7 +88,7 @@
 		return 0;
 
 	of_node_put(np);
-	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_SIZE, &spare,
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
 			sizeof(spare), psize, sizeof(psize));
 	if (ret) {
 		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
@@ -111,7 +111,7 @@
 	pinit.paddr = virt_to_phys(buf);
 	pinit.size = psize[0];
 
-	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_INIT, &pinit,
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
 			sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
 	if (ret) {
 		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
@@ -142,7 +142,7 @@
 
 	cfg.id = sec_id;
 
-	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
 			&scm_ret, sizeof(scm_ret));
 	if (ret || scm_ret) {
 		pr_err("scm call IOMMU_SECURE_CFG failed\n");
@@ -167,7 +167,7 @@
 	map.info.va = va;
 	map.info.size = len;
 
-	if (scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map), &ret,
+	if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP, &map, sizeof(map), &ret,
 								sizeof(ret)))
 		return -EINVAL;
 	if (ret)
@@ -242,7 +242,7 @@
 		map.plist.size = SZ_1M;
 	}
 
-	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map),
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP, &map, sizeof(map),
 			&scm_ret, sizeof(scm_ret));
 	kfree(pa_list);
 	return ret;
@@ -260,7 +260,7 @@
 	mi.va = va;
 	mi.size = len;
 
-	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_UNMAP, &mi, sizeof(mi),
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP, &mi, sizeof(mi),
 			&scm_ret, sizeof(scm_ret));
 	return ret;
 }
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 52b7994..dce37e5 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -121,6 +121,7 @@
 	events->notified_index = 0;
 	events->bytes_read_no_event = 0;
 	events->current_event_data_size = 0;
+	events->wakeup_events_counter = 0;
 }
 
 static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
@@ -222,6 +223,10 @@
 	int new_write_index;
 	int data_event;
 
+	/* Check if the event is disabled */
+	if (events->event_mask.disable_mask & event->type)
+		return 0;
+
 	/* Check if we are adding an event that user already read its data */
 	if (events->bytes_read_no_event) {
 		data_event = 1;
@@ -241,7 +246,7 @@
 		if (data_event) {
 			if (res) {
 				/*
-				 * Data relevent to this event was fully
+				 * Data relevant to this event was fully
 				 * consumed already, discard event.
 				 */
 				events->bytes_read_no_event -= res;
@@ -266,6 +271,9 @@
 	events->queue[events->write_index] = *event;
 	events->write_index = new_write_index;
 
+	if (!(events->event_mask.no_wakeup_mask & event->type))
+		events->wakeup_events_counter++;
+
 	return 0;
 }
 
@@ -280,6 +288,9 @@
 	events->notified_index =
 		dvb_dmxdev_advance_event_idx(events->notified_index);
 
+	if (!(events->event_mask.no_wakeup_mask & event->type))
+		events->wakeup_events_counter--;
+
 	return 0;
 }
 
@@ -291,6 +302,13 @@
 	int data_event;
 
 	/*
+	 * If data events are not enabled on this filter,
+	 * there's nothing to update.
+	 */
+	if (events->data_read_event_masked)
+		return 0;
+
+	/*
 	 * Go through all events that were notified and
 	 * remove them from the events queue if their respective
 	 * data was read.
@@ -364,7 +382,7 @@
 		if (data_event) {
 			if (res) {
 				/*
-				 * Data relevent to this event was
+				 * Data relevant to this event was
 				 * fully consumed, remove it from the queue.
 				 */
 				bytes_read -= res;
@@ -616,6 +634,9 @@
 		}
 		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
 		dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+		dmxdev->dvr_output_events.event_mask.disable_mask = 0;
+		dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
+		dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
 		dmxdev->dvr_feeds_count = 0;
 		dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
 		dmxdev->dvr_priv_buff_handle = NULL;
@@ -1429,24 +1450,58 @@
 static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
 						int cookie)
 {
-	if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
-		(dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
-		struct dmxdev_feed *feed;
-		int ret = -ENODEV;
+	struct dmxdev_feed *feed;
 
-		/* Only one feed should be in the list in case of decoder */
-		feed = list_first_entry(&dmxdevfilter->feed.ts,
-					struct dmxdev_feed, next);
+	if ((dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+		(dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
+		(dmxdevfilter->events.event_mask.disable_mask &
+			DMX_EVENT_NEW_ES_DATA))
+		return -EPERM;
 
-		if (feed->ts->reuse_decoder_buffer)
-			ret = feed->ts->reuse_decoder_buffer(
-							feed->ts,
-							cookie);
+	/* Only one feed should be in the list in case of decoder */
+	feed = list_first_entry(&dmxdevfilter->feed.ts,
+				struct dmxdev_feed, next);
 
-		return ret;
-	}
+	if (feed->ts->reuse_decoder_buffer)
+		return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
 
-	return -EPERM;
+	return -ENODEV;
+}
+
+static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
+				struct dmx_events_mask *event_mask)
+{
+	if (!event_mask ||
+		(event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
+		return -EINVAL;
+
+	if (dmxdevfilter->state == DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	/*
+	 * Overflow event is not allowed to be masked.
+	 * This is because if an overflow occurs, the demux stops outputting
+	 * data until the user is notified. If the user relies on events to
+	 * read the data, the overflow event must always be enabled; otherwise
+	 * we would never recover from the overflow state.
+	 */
+	event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+	event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+
+	dmxdevfilter->events.event_mask = *event_mask;
+
+	return 0;
+}
+
+static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
+				struct dmx_events_mask *event_mask)
+{
+	if (!event_mask)
+		return -EINVAL;
+
+	*event_mask = dmxdevfilter->events.event_mask;
+
+	return 0;
 }
 
 static int dvb_dmxdev_ts_fullness_callback(
@@ -1708,11 +1763,13 @@
 	}
 
 	/*
-	 * Decoder filters have no data in the data buffer and their
-	 * events can be removed now from the queue.
+	 * If only no-data events are enabled on this filter,
+	 * the events can be removed from the queue as soon as
+	 * the user retrieves them.
+	 * For filters with data events enabled, the event is removed
+	 * from the queue only when the respective data is read.
 	 */
-	if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
-		(dmxdevfilter->params.pes.output == DMX_OUT_DECODER))
+	if (dmxdevfilter->events.data_read_event_masked)
 		dmxdevfilter->events.read_index =
 			dvb_dmxdev_advance_event_idx(
 				dmxdevfilter->events.read_index);
@@ -2538,6 +2595,9 @@
 		(*secfilter)->filter_mask[2] = 0;
 
 		filter->todo = 0;
+		filter->events.data_read_event_masked =
+			filter->events.event_mask.disable_mask &
+			DMX_EVENT_NEW_SECTION;
 
 		ret = filter->feed.sec.feed->start_filtering(
 				filter->feed.sec.feed);
@@ -2558,6 +2618,21 @@
 			filter->params.pes.rec_chunk_size =
 				filter->buffer.size >> 2;
 
+		if (filter->params.pes.output == DMX_OUT_TS_TAP)
+			dmxdev->dvr_output_events.data_read_event_masked =
+			 dmxdev->dvr_output_events.event_mask.disable_mask &
+			 DMX_EVENT_NEW_REC_CHUNK;
+		else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+			filter->events.data_read_event_masked =
+				filter->events.event_mask.disable_mask &
+				DMX_EVENT_NEW_REC_CHUNK;
+		else if (filter->params.pes.output == DMX_OUT_TAP)
+			filter->events.data_read_event_masked =
+				filter->events.event_mask.disable_mask &
+				DMX_EVENT_NEW_PES;
+		else
+			filter->events.data_read_event_masked = 1;
+
 		ret = 0;
 		list_for_each_entry(feed, &filter->feed.ts, next) {
 			ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
@@ -2627,6 +2702,9 @@
 	dmxdevfilter->priv_buff_handle = NULL;
 	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
 	dvb_dmxdev_flush_events(&dmxdevfilter->events);
+	dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
+	dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
+	dmxdevfilter->events.event_mask.wakeup_threshold = 1;
 
 	dmxdevfilter->type = DMXDEV_TYPE_NONE;
 	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
@@ -3223,6 +3301,24 @@
 		mutex_unlock(&dmxdevfilter->mutex);
 		break;
 
+	case DMX_SET_EVENTS_MASK:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_GET_EVENTS_MASK:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
 	default:
 		ret = -EINVAL;
 		break;
@@ -3258,10 +3354,9 @@
 	if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
 		mask |= (POLLIN | POLLRDNORM);
 
-	if (dmxdevfilter->events.notified_index !=
-		dmxdevfilter->events.write_index) {
+	if (dmxdevfilter->events.wakeup_events_counter >=
+		dmxdevfilter->events.event_mask.wakeup_threshold)
 		mask |= POLLPRI;
-	}
 
 	return mask;
 }
@@ -3430,8 +3525,8 @@
 		if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
 			mask |= (POLLIN | POLLRDNORM);
 
-		if (dmxdev->dvr_output_events.notified_index !=
-			dmxdev->dvr_output_events.write_index)
+		if (dmxdev->dvr_output_events.wakeup_events_counter >=
+			dmxdev->dvr_output_events.event_mask.wakeup_threshold)
 			mask |= POLLPRI;
 	} else {
 		poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
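
The dmxdev changes above replace the "any pending event" poll test with a counter of pending wake-up events compared against a configurable threshold, and events can now be dropped entirely (disable_mask) or queued without waking poll() (no_wakeup_mask). A small standalone model of that accounting; the event-type bits and the POLLPRI value are placeholders:

    #include <stdio.h>

    /* Placeholder event-type bits and poll flag; they only illustrate masking. */
    #define EV_NEW_PES     (1u << 0)
    #define EV_NEW_SECTION (1u << 1)
    #define POLLPRI        0x002

    struct events_queue {
        unsigned int disable_mask;     /* events that are dropped entirely      */
        unsigned int no_wakeup_mask;   /* queued, but do not wake up poll()     */
        unsigned int wakeup_threshold; /* pending wake-up events needed to wake */
        unsigned int wakeup_events_counter;
    };

    static int add_event(struct events_queue *q, unsigned int type)
    {
        if (q->disable_mask & type)
            return 0;                         /* filtered out, not queued */
        if (!(q->no_wakeup_mask & type))
            q->wakeup_events_counter++;       /* counts toward the wake-up */
        return 1;
    }

    static int poll_mask(const struct events_queue *q)
    {
        return (q->wakeup_events_counter >= q->wakeup_threshold) ? POLLPRI : 0;
    }

    int main(void)
    {
        struct events_queue q = { .no_wakeup_mask = EV_NEW_SECTION,
                                  .wakeup_threshold = 2 };
        add_event(&q, EV_NEW_SECTION);        /* queued, no wake-up credit */
        add_event(&q, EV_NEW_PES);
        printf("mask after 1 wake-up event: %#x\n", poll_mask(&q));
        add_event(&q, EV_NEW_PES);
        printf("mask after 2 wake-up events: %#x\n", poll_mask(&q));
        return 0;
    }
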
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index a55b4f0..1443de5 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -68,8 +68,8 @@
 	struct dmx_section_feed *feed;
 };
 
-struct dmxdev_events_queue {
 #define DMX_EVENT_QUEUE_SIZE	500 /* number of events */
+struct dmxdev_events_queue {
 	/*
 	 * indices used to manage events queue.
 	 * read_index advanced when relevent data is read
@@ -94,6 +94,22 @@
 	u32 current_event_data_size;
 	u32 current_event_start_offset;
 
+	/* current setting of the events masking */
+	struct dmx_events_mask event_mask;
+
+	/*
+	 * indicates whether the event used for data-reading from the demux
+	 * filter is enabled or not. These are the events the user may
+	 * wait for before calling read() on the demux filter.
+	 */
+	int data_read_event_masked;
+
+	/*
+	 * holds the current number of pending events in the
+	 * events queue that are considered as a wake-up source
+	 */
+	u32 wakeup_events_counter;
+
 	struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
 };
 
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 691edc3..ddf4c67 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -59,6 +59,79 @@
 		false : true;
 }
 
+static struct msm_cam_clk_info ispif_8960_clk_info[] = {
+	{"csi_pix_clk", 0},
+	{"csi_rdi_clk", 0},
+	{"csi_pix1_clk", 0},
+	{"csi_rdi1_clk", 0},
+	{"csi_rdi2_clk", 0},
+};
+
+static struct msm_cam_clk_info ispif_8974_clk_info_vfe0[] = {
+	{"camss_vfe_vfe_clk", -1},
+	{"camss_csi_vfe_clk", -1},
+};
+
+static struct msm_cam_clk_info ispif_8974_clk_info_vfe1[] = {
+	{"camss_vfe_vfe_clk1", -1},
+	{"camss_csi_vfe_clk1", -1},
+};
+
+static int msm_ispif_clk_enable(struct ispif_device *ispif,
+	enum msm_ispif_vfe_intf vfe_intf, int enable)
+{
+	int rc = 0;
+
+	if (enable)
+		pr_debug("enable clk for VFE%d\n", vfe_intf);
+	else
+		pr_debug("disable clk for VFE%d\n", vfe_intf);
+
+	if (ispif->csid_version < CSID_VERSION_V2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
+			ispif->ispif_clk[vfe_intf], 2, enable);
+		if (rc) {
+			pr_err("%s: cannot enable clock, error = %d\n",
+				__func__, rc);
+			goto end;
+		}
+	} else if (ispif->csid_version == CSID_VERSION_V2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
+			ispif->ispif_clk[vfe_intf],
+			ARRAY_SIZE(ispif_8960_clk_info),
+			enable);
+		if (rc) {
+			pr_err("%s: cannot enable clock, error = %d\n",
+				__func__, rc);
+			goto end;
+		}
+	} else if (ispif->csid_version >= CSID_VERSION_V3) {
+		if (vfe_intf == VFE0) {
+			rc = msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8974_clk_info_vfe0,
+				ispif->ispif_clk[vfe_intf],
+				ARRAY_SIZE(ispif_8974_clk_info_vfe0), enable);
+		} else {
+			rc = msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8974_clk_info_vfe1,
+				ispif->ispif_clk[vfe_intf],
+				ARRAY_SIZE(ispif_8974_clk_info_vfe1), enable);
+		}
+		if (rc) {
+			pr_err("%s: cannot enable clock, error = %d\n",
+				__func__, rc);
+			goto end;
+		}
+	} else {
+		pr_err("%s: unsupported version=%d\n", __func__,
+			ispif->csid_version);
+		rc = -EINVAL;
+	}
+
+end:
+	return rc;
+}
+
 static int msm_ispif_intf_reset(struct ispif_device *ispif,
 	struct msm_ispif_param_data *params)
 {
@@ -97,8 +170,8 @@
 		unsigned long flags;
 
 		spin_lock_irqsave(&ispif->auto_complete_lock, flags);
-		ispif->wait_timeout = 0;
-		init_completion(&ispif->reset_complete);
+		ispif->wait_timeout[params->vfe_intf] = 0;
+		init_completion(&ispif->reset_complete[params->vfe_intf]);
 		spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
 
 		if (params->vfe_intf == VFE0)
@@ -106,14 +179,15 @@
 		else
 			msm_camera_io_w(data, ispif->base +
 				ISPIF_RST_CMD_1_ADDR);
+
 		lrc = wait_for_completion_interruptible_timeout(
-			&ispif->reset_complete, jiffes);
+			&ispif->reset_complete[params->vfe_intf], jiffes);
 		if (lrc < 0 || !lrc) {
 			pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
 			rc = -EIO;
 
 			spin_lock_irqsave(&ispif->auto_complete_lock, flags);
-			ispif->wait_timeout = 1;
+			ispif->wait_timeout[params->vfe_intf] = 1;
 			spin_unlock_irqrestore(
 				&ispif->auto_complete_lock, flags);
 		}
@@ -129,8 +203,12 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&ispif->auto_complete_lock, flags);
-	ispif->wait_timeout = 0;
-	init_completion(&ispif->reset_complete);
+	ispif->wait_timeout[VFE0] = 0;
+	init_completion(&ispif->reset_complete[VFE0]);
+	if (ispif->csid_version >= CSID_VERSION_V3) {
+		ispif->wait_timeout[VFE1] = 0;
+		init_completion(&ispif->reset_complete[VFE1]);
+	}
 	spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
 
 	BUG_ON(!ispif);
@@ -139,22 +217,40 @@
 
 	msm_camera_io_w(ISPIF_RST_CMD_MASK, ispif->base + ISPIF_RST_CMD_ADDR);
 
-	if (ispif->csid_version >= CSID_VERSION_V3)
-		msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
-			ISPIF_RST_CMD_1_ADDR);
-
 	lrc = wait_for_completion_interruptible_timeout(
-		&ispif->reset_complete, jiffes);
+		&ispif->reset_complete[VFE0], jiffes);
 
 	if (lrc < 0 || !lrc) {
 		pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
 		rc = -EIO;
 
 		spin_lock_irqsave(&ispif->auto_complete_lock, flags);
-		ispif->wait_timeout = 1;
+		ispif->wait_timeout[VFE0] = 1;
 		spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
+
+		goto end;
 	}
 
+	if (ispif->csid_version >= CSID_VERSION_V3) {
+		msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
+			ISPIF_RST_CMD_1_ADDR);
+
+		lrc = wait_for_completion_interruptible_timeout(
+			&ispif->reset_complete[VFE1], jiffes);
+
+		if (lrc < 0 || !lrc) {
+			pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
+			rc = -EIO;
+
+			spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+			ispif->wait_timeout[VFE1] = 1;
+			spin_unlock_irqrestore(&ispif->auto_complete_lock,
+				flags);
+		}
+
+	}
+
+end:
 	return rc;
 }
 
@@ -181,20 +277,19 @@
 	}
 
 	if (ispif->csid_version <= CSID_VERSION_V2) {
-		if (ispif->ispif_clk[intftype] == NULL) {
+		if (ispif->ispif_clk[vfe_intf][intftype] == NULL) {
 			CDBG("%s: ispif NULL clk\n", __func__);
 			return;
 		}
 
-		rc = clk_set_rate(ispif->ispif_clk[intftype], csid);
+		rc = clk_set_rate(ispif->ispif_clk[vfe_intf][intftype], csid);
 		if (rc) {
 			pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
 			return;
 		}
 	}
 
-	data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR +
-		(0x200 * vfe_intf));
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
 	switch (intftype) {
 	case PIX0:
 		data &= ~(BIT(1) | BIT(0));
@@ -218,8 +313,8 @@
 		break;
 	}
 	if (data)
-		msm_camera_io_w_mb(data, ispif->base + ISPIF_INPUT_SEL_ADDR +
-			(0x200 * vfe_intf));
+		msm_camera_io_w_mb(data, ispif->base +
+			ISPIF_VFE_m_INPUT_SEL(vfe_intf));
 }
 
 static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
@@ -236,19 +331,19 @@
 
 	switch (intftype) {
 	case PIX0:
-		intf_addr = ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
 		break;
 	case RDI0:
-		intf_addr = ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
 		break;
 	case PIX1:
-		intf_addr = ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
 		break;
 	case RDI1:
-		intf_addr = ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
 		break;
 	case RDI2:
-		intf_addr = ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
 		break;
 	default:
 		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
@@ -280,23 +375,23 @@
 	switch (intftype) {
 	case PIX0:
 		data = msm_camera_io_r(ispif->base +
-			ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf));
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
 		break;
 	case RDI0:
 		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf));
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
 		break;
 	case PIX1:
 		data = msm_camera_io_r(ispif->base +
-			ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf));
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
 		break;
 	case RDI1:
 		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf));
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
 		break;
 	case RDI2:
 		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf));
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
 		break;
 	}
 	if ((data & 0xf) != 0xf)
@@ -330,14 +425,22 @@
 	BUG_ON(!params);
 
 	vfe_intf = params->vfe_intf;
+
+	rc = msm_ispif_clk_enable(ispif, vfe_intf, 1);
+	if (rc < 0) {
+		pr_err("%s: unable to enable clocks for VFE%d", __func__,
+			vfe_intf);
+		return rc;
+	}
+
 	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
 		pr_err("%s: invalid interface type\n", __func__);
 		return -EINVAL;
 	}
 
-	msm_camera_io_w(0x0, ispif->base + ISPIF_IRQ_MASK_ADDR);
-	msm_camera_io_w(0x0, ispif->base + ISPIF_IRQ_MASK_1_ADDR);
-	msm_camera_io_w_mb(0x0, ispif->base + ISPIF_IRQ_MASK_2_ADDR);
+	msm_camera_io_w(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+	msm_camera_io_w(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+	msm_camera_io_w_mb(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
 
 	for (i = 0; i < params->num; i++) {
 		intftype = params->entries[i].intftype;
@@ -371,26 +474,29 @@
 	}
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
-		ISPIF_IRQ_MASK_ADDR);
+		ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
-		ISPIF_IRQ_CLEAR_ADDR);
+		ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
-		ISPIF_IRQ_MASK_1_ADDR);
+		ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
-		ISPIF_IRQ_CLEAR_1_ADDR);
+		ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
-		ISPIF_IRQ_MASK_2_ADDR);
+		ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
-		ISPIF_IRQ_CLEAR_2_ADDR);
+		ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
 
 	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
 		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
+
+	rc = msm_ispif_clk_enable(ispif, vfe_intf, 0);
+
 	return rc;
 }
 
@@ -436,14 +542,12 @@
 	/* cmd for PIX0, PIX1, RDI0, RDI1 */
 	if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF) {
 		msm_camera_io_w_mb(ispif->applied_intf_cmd[vfe_intf].intf_cmd,
-			ispif->base + ISPIF_INTF_CMD_ADDR +
-			(0x200 * vfe_intf));
+			ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
 	}
 	/* cmd for RDI2 */
 	if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
 		msm_camera_io_w_mb(ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
-			ispif->base + ISPIF_INTF_CMD_1_ADDR +
-			(0x200 * vfe_intf));
+			ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
 }
 
 static int msm_ispif_stop_immediately(struct ispif_device *ispif,
@@ -464,6 +568,7 @@
 		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
 			cid_mask, params->vfe_intf, 0);
 	}
+
 	return rc;
 }
 
@@ -472,6 +577,13 @@
 {
 	int rc;
 
+	rc = msm_ispif_clk_enable(ispif, params->vfe_intf, 1);
+	if (rc < 0) {
+		pr_err("%s: unable to enable clocks for VFE%d", __func__,
+			params->vfe_intf);
+		return rc;
+	}
+
 	rc = msm_ispif_intf_reset(ispif, params);
 	if (rc) {
 		pr_err("%s: msm_ispif_intf_reset failed. rc=%d\n",
@@ -480,6 +592,9 @@
 	}
 
 	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+	msm_ispif_clk_enable(ispif, params->vfe_intf, 0);
+
 	return rc;
 }
 
@@ -489,11 +604,21 @@
 	int i, rc = 0;
 	uint16_t cid_mask = 0;
 	uint32_t intf_addr;
+	enum msm_ispif_vfe_intf vfe_intf;
 
 	BUG_ON(!ispif);
 	BUG_ON(!params);
 
-	if (!msm_ispif_is_intf_valid(ispif->csid_version, params->vfe_intf)) {
+	vfe_intf = params->vfe_intf;
+
+	rc = msm_ispif_clk_enable(ispif, params->vfe_intf, 1);
+	if (rc < 0) {
+		pr_err("%s: unable to enable clocks for VFE%d", __func__,
+			params->vfe_intf);
+		return rc;
+	}
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
 		pr_err("%s: invalid interface type\n", __func__);
 		return -EINVAL;
 	}
@@ -507,24 +632,19 @@
 
 		switch (params->entries[i].intftype) {
 		case PIX0:
-			intf_addr = ISPIF_PIX_0_STATUS_ADDR +
-				(0x200 * params->vfe_intf);
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
 			break;
 		case RDI0:
-			intf_addr = ISPIF_RDI_0_STATUS_ADDR +
-				(0x200 * params->vfe_intf);
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
 			break;
 		case PIX1:
-			intf_addr = ISPIF_PIX_1_STATUS_ADDR +
-				(0x200 * params->vfe_intf);
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
 			break;
 		case RDI1:
-			intf_addr = ISPIF_RDI_1_STATUS_ADDR +
-				(0x200 * params->vfe_intf);
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
 			break;
 		case RDI2:
-			intf_addr = ISPIF_RDI_2_STATUS_ADDR +
-				(0x200 * params->vfe_intf);
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
 			break;
 		default:
 			pr_err("%s: invalid intftype=%d\n", __func__,
@@ -539,8 +659,11 @@
 
 		/* disable CIDs in CID_MASK register */
 		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
-			cid_mask, params->vfe_intf, 0);
+			cid_mask, vfe_intf, 0);
 	}
+
+	msm_ispif_clk_enable(ispif, vfe_intf, 0);
+
 	return rc;
 }
 
@@ -577,26 +700,26 @@
 	BUG_ON(!out);
 
 	out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
-		ISPIF_IRQ_STATUS_ADDR);
+		ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
 	msm_camera_io_w(out[VFE0].ispifIrqStatus0,
-		ispif->base + ISPIF_IRQ_CLEAR_ADDR);
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
 
 	out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
-		ISPIF_IRQ_STATUS_1_ADDR);
+		ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
 	msm_camera_io_w(out[VFE0].ispifIrqStatus1,
-		ispif->base + ISPIF_IRQ_CLEAR_1_ADDR);
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
 
 	out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
-		ISPIF_IRQ_STATUS_2_ADDR);
+		ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
 	msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
-		ispif->base + ISPIF_IRQ_CLEAR_2_ADDR);
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
 
 	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
 		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
 			unsigned long flags;
 			spin_lock_irqsave(&ispif->auto_complete_lock, flags);
-			if (ispif->wait_timeout == 0)
-				complete(&ispif->reset_complete);
+			if (ispif->wait_timeout[VFE0] == 0)
+				complete(&ispif->reset_complete[VFE0]);
 			spin_unlock_irqrestore(
 				&ispif->auto_complete_lock, flags);
 		}
@@ -617,19 +740,29 @@
 	}
 	if (ispif->csid_version >= CSID_VERSION_V3) {
 		out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
-			ISPIF_IRQ_STATUS_ADDR + 0x200);
+			ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
 		msm_camera_io_w(out[VFE1].ispifIrqStatus0,
-			ispif->base + ISPIF_IRQ_CLEAR_ADDR + 0x200);
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
 
 		out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
-			ISPIF_IRQ_STATUS_1_ADDR + 0x200);
+			ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
 		msm_camera_io_w(out[VFE1].ispifIrqStatus1,
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
 
-				ispif->base + ISPIF_IRQ_CLEAR_1_ADDR + 0x200);
 		out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
-			ISPIF_IRQ_STATUS_2_ADDR + 0x200);
+			ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
 		msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
-			ispif->base + ISPIF_IRQ_CLEAR_2_ADDR + 0x200);
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+
+
+		if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ) {
+			unsigned long flags;
+			spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+			if (ispif->wait_timeout[VFE1] == 0)
+				complete(&ispif->reset_complete[VFE1]);
+			spin_unlock_irqrestore(
+				&ispif->auto_complete_lock, flags);
+		}
 
 		if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
 			pr_err("%s: VFE1 pix0 overflow.\n", __func__);
@@ -657,20 +790,6 @@
 	return IRQ_HANDLED;
 }
 
-static struct msm_cam_clk_info ispif_8960_clk_info[] = {
-	{"csi_pix_clk", 0},
-	{"csi_rdi_clk", 0},
-	{"csi_pix1_clk", 0},
-	{"csi_rdi1_clk", 0},
-	{"csi_rdi2_clk", 0},
-};
-static struct msm_cam_clk_info ispif_8974_clk_info[] = {
-	{"camss_vfe_vfe_clk", -1},
-	{"camss_csi_vfe_clk", -1},
-	{"camss_vfe_vfe_clk1", -1},
-	{"camss_csi_vfe_clk1", -1},
-};
-
 static int msm_ispif_init(struct ispif_device *ispif,
 	uint32_t csid_version)
 {
@@ -693,41 +812,27 @@
 	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
 
 	ispif->csid_version = csid_version;
-	if (ispif->csid_version < CSID_VERSION_V2) {
-		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
-			ispif->ispif_clk, 2, 1);
-		if (rc) {
-			pr_err("%s: cannot enable clock, error = %d\n",
-				__func__, rc);
-			goto end;
-		}
-	} else if (ispif->csid_version == CSID_VERSION_V2) {
-		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
-			ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 1);
-		if (rc) {
-			pr_err("%s: cannot enable clock, error = %d\n",
-				__func__, rc);
-			goto end;
-		}
-	} else if (ispif->csid_version >= CSID_VERSION_V3) {
-		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_clk_info,
-			ispif->ispif_clk, ARRAY_SIZE(ispif_8974_clk_info), 1);
-		if (rc) {
-			pr_err("%s: cannot enable clock, error = %d\n",
-				__func__, rc);
-			goto end;
-		}
-	} else {
-		pr_err("%s: unsupported version=%d\n", __func__,
-			ispif->csid_version);
-		goto end;
+	rc = msm_ispif_clk_enable(ispif, VFE0, 1);
+	if (rc < 0) {
+		pr_err("%s: unable to enable clocks for VFE0", __func__);
+		goto error_clk0;
 	}
+
+	if (ispif->csid_version >= CSID_VERSION_V3) {
+		rc = msm_ispif_clk_enable(ispif, VFE1, 1);
+		if (rc < 0) {
+			pr_err("%s: unable to enable clocks for VFE1",
+				__func__);
+			goto error_clk1;
+		}
+	}
+
 	ispif->base = ioremap(ispif->mem->start,
 		resource_size(ispif->mem));
 	if (!ispif->base) {
 		rc = -ENOMEM;
 		pr_err("%s: nomem\n", __func__);
-		goto error_clk;
+		goto end;
 	}
 	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
 		IRQF_TRIGGER_RISING, "ispif", ispif);
@@ -745,23 +850,21 @@
 	free_irq(ispif->irq->start, ispif);
 error_irq:
 	iounmap(ispif->base);
-error_clk:
-	if (ispif->csid_version < CSID_VERSION_V2) {
-		msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
-		ispif->ispif_clk, 2, 0);
-	} else if (ispif->csid_version == CSID_VERSION_V2) {
-		msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
-		ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 0);
-	} else if (ispif->csid_version >= CSID_VERSION_V3) {
-		msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_clk_info,
-			ispif->ispif_clk, ARRAY_SIZE(ispif_8974_clk_info), 0);
-	}
+
 end:
+	if (ispif->csid_version >= CSID_VERSION_V3)
+		msm_ispif_clk_enable(ispif, VFE1, 0);
+
+error_clk1:
+	msm_ispif_clk_enable(ispif, VFE0, 0);
+
+error_clk0:
 	return rc;
 }
 
 static void msm_ispif_release(struct ispif_device *ispif)
 {
+	int i;
 	BUG_ON(!ispif);
 
 	if (ispif->ispif_state != ISPIF_POWER_UP) {
@@ -770,6 +873,9 @@
 		return;
 	}
 
+	for (i = 0; i < VFE_MAX; i++)
+		msm_ispif_clk_enable(ispif, i, 1);
+
 	/* make sure no streaming going on */
 	msm_ispif_reset(ispif);
 
@@ -777,16 +883,9 @@
 
 	iounmap(ispif->base);
 
-	if (ispif->csid_version < CSID_VERSION_V2) {
-		msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
-		ispif->ispif_clk, 2, 0);
-	} else if (ispif->csid_version == CSID_VERSION_V2) {
-		msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
-			ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 0);
-	} else if (ispif->csid_version >= CSID_VERSION_V3) {
-		msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_clk_info,
-			ispif->ispif_clk, ARRAY_SIZE(ispif_8974_clk_info), 0);
-	}
+	for (i = 0; i < VFE_MAX; i++)
+		msm_ispif_clk_enable(ispif, i, 0);
+
 	ispif->ispif_state = ISPIF_POWER_DOWN;
 }
 
@@ -801,7 +900,6 @@
 	BUG_ON(!pcdata);
 
 	mutex_lock(&ispif->mutex);
-
 	switch (pcdata->cfg_type) {
 	case ISPIF_ENABLE_REG_DUMP:
 		ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
@@ -903,6 +1001,7 @@
 {
 	int rc;
 	struct ispif_device *ispif;
+	int i;
 
 	ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
 	if (!ispif) {
@@ -961,7 +1060,9 @@
 	ispif->ispif_state = ISPIF_POWER_DOWN;
 	ispif->open_cnt = 0;
 	spin_lock_init(&ispif->auto_complete_lock);
-	ispif->wait_timeout = 0;
+	for (i = 0; i < VFE_MAX; i++)
+		ispif->wait_timeout[i] = 0;
+
 	return 0;
 
 error:
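
msm_ispif_clk_enable() above centralises the clock handling: the clock table and clock count are chosen from the CSID version, and on CSID v3 hardware each VFE gets its own table. A toy model of just that selection logic; the version encodings and the "enable" step are stand-ins, not the real msm_cam_clk_enable() call:

    #include <stdio.h>

    enum vfe { VFE0, VFE1 };
    #define CSID_VERSION_V2 0x02
    #define CSID_VERSION_V3 0x30   /* placeholder encodings */

    static int clk_enable(unsigned int csid_version, enum vfe vfe_intf, int enable)
    {
        const char *table;
        int nclk;

        if (csid_version < CSID_VERSION_V2) {
            table = "ispif_8960_clk_info"; nclk = 2;       /* pix + rdi only */
        } else if (csid_version == CSID_VERSION_V2) {
            table = "ispif_8960_clk_info"; nclk = 5;       /* full 8960 table */
        } else if (csid_version >= CSID_VERSION_V3) {
            table = (vfe_intf == VFE0) ? "ispif_8974_clk_info_vfe0"
                                       : "ispif_8974_clk_info_vfe1";
            nclk = 2;
        } else {
            return -1;                                     /* unsupported version */
        }

        printf("%s %d clocks from %s for VFE%d\n",
               enable ? "enable" : "disable", nclk, table, vfe_intf);
        return 0;
    }

    int main(void)
    {
        clk_enable(CSID_VERSION_V3, VFE1, 1);
        clk_enable(CSID_VERSION_V2, VFE0, 0);
        return 0;
    }
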
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index f8c3cce..ef7a1bf 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -47,15 +47,15 @@
 	void __iomem *base;
 	struct mutex mutex;
 	uint8_t start_ack_pending;
-	struct completion reset_complete;
+	struct completion reset_complete[VFE_MAX];
 	spinlock_t auto_complete_lock;
-	uint8_t wait_timeout;
+	uint8_t wait_timeout[VFE_MAX];
 	uint32_t csid_version;
 	int enb_dump_reg;
 	uint32_t open_cnt;
 	struct ispif_sof_count sof_count[VFE_MAX];
 	struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
 	enum msm_ispif_state_t ispif_state;
-	struct clk *ispif_clk[INTF_MAX];
+	struct clk *ispif_clk[VFE_MAX][INTF_MAX];
 };
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
index cdbebea..afd91d1 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -14,32 +14,41 @@
 #define __MSM_ISPIF_HWREG_V1_H__
 
 /* common registers */
-#define ISPIF_RST_CMD_ADDR                  0x0000
-#define ISPIF_RST_CMD_1_ADDR                0x0000 /* undefined */
-#define ISPIF_INTF_CMD_ADDR                 0x0004
-#define ISPIF_INTF_CMD_1_ADDR               0x0030
-#define ISPIF_CTRL_ADDR                     0x0008
-#define ISPIF_INPUT_SEL_ADDR                0x000C
-#define ISPIF_PIX_0_INTF_CID_MASK_ADDR      0x0010
-#define ISPIF_RDI_0_INTF_CID_MASK_ADDR      0x0014
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR      0x0038
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR      0x003C
-#define ISPIF_RDI_2_INTF_CID_MASK_ADDR      0x0044
-#define ISPIF_PIX_0_STATUS_ADDR             0x0024
-#define ISPIF_RDI_0_STATUS_ADDR             0x0028
-#define ISPIF_PIX_1_STATUS_ADDR             0x0060
-#define ISPIF_RDI_1_STATUS_ADDR             0x0064
-#define ISPIF_RDI_2_STATUS_ADDR             0x006C
-#define ISPIF_IRQ_MASK_ADDR                 0x0100
-#define ISPIF_IRQ_CLEAR_ADDR                0x0104
-#define ISPIF_IRQ_STATUS_ADDR               0x0108
-#define ISPIF_IRQ_MASK_1_ADDR               0x010C
-#define ISPIF_IRQ_CLEAR_1_ADDR              0x0110
-#define ISPIF_IRQ_STATUS_1_ADDR             0x0114
-#define ISPIF_IRQ_MASK_2_ADDR               0x0118
-#define ISPIF_IRQ_CLEAR_2_ADDR              0x011C
-#define ISPIF_IRQ_STATUS_2_ADDR             0x0120
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR     0x0124
+#define ISPIF_RST_CMD_ADDR                       0x0000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR          0x0124
+
+#define ISPIF_VFE(m)                             (0x0)
+
+#define ISPIF_VFE_m_CTRL_0(m)                    (0x0008 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m)                (0x0100 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m)                (0x010C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m)                (0x0118 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m)              (0x0108 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m)              (0x0114 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m)              (0x0120 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m)               (0x0104 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m)               (0x0110 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m)               (0x011C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m)                 (0x000C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m)                (0x0004 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m)                (0x0030 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n)    (0x0010 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n)    (0x0014 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n)      (0x0290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n)    (0x0298 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n)    (0x029C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n)      (0x0024 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n)      (0x0028 + ISPIF_VFE(m) + 4*(n))
+
+/* Defines for compatibility with newer ISPIF versions */
+#define ISPIF_RST_CMD_1_ADDR                     (0x0000)
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n)        (0x0000 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m)              (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m)                (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m)            (0x0000 + ISPIF_VFE(m))
+
+
+
 
 /*ISPIF RESET BITS*/
 #define VFE_CLK_DOMAIN_RST                 BIT(31)
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
index 37b19f5..80b32d4 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
@@ -16,42 +16,34 @@
 /* common registers */
 #define ISPIF_RST_CMD_ADDR                       0x008
 #define ISPIF_RST_CMD_1_ADDR                     0x00C
-#define ISPIF_INTF_CMD_ADDR                      0x248
-#define ISPIF_INTF_CMD_1_ADDR                    0x24C
-#define ISPIF_CTRL_ADDR                          0x008
-#define ISPIF_INPUT_SEL_ADDR                     0x244
-#define ISPIF_PIX_0_INTF_CID_MASK_ADDR           0x254
-#define ISPIF_RDI_0_INTF_CID_MASK_ADDR           0x264
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR           0x258
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR           0x268
-#define ISPIF_RDI_2_INTF_CID_MASK_ADDR           0x26C
-#define ISPIF_PIX_0_STATUS_ADDR                  0x2C0
-#define ISPIF_RDI_0_STATUS_ADDR                  0x2D0
-#define ISPIF_PIX_1_STATUS_ADDR                  0x2C4
-#define ISPIF_RDI_1_STATUS_ADDR                  0x2D4
-#define ISPIF_RDI_2_STATUS_ADDR                  0x2D8
-#define ISPIF_IRQ_MASK_ADDR                      0x208
-#define ISPIF_IRQ_CLEAR_ADDR                     0x230
-#define ISPIF_IRQ_STATUS_ADDR                    0x21C
-#define ISPIF_IRQ_MASK_1_ADDR                    0x20C
-#define ISPIF_IRQ_CLEAR_1_ADDR                   0x234
-#define ISPIF_IRQ_STATUS_1_ADDR                  0x220
-#define ISPIF_IRQ_MASK_2_ADDR                    0x210
-#define ISPIF_IRQ_CLEAR_2_ADDR                   0x238
-#define ISPIF_IRQ_STATUS_2_ADDR                  0x224
 #define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR          0x01C
 
-/* new */
-#define ISPIF_VFE_m_CTRL_0_ADDR                  0x200
-#define ISPIF_VFE_m_IRQ_MASK_0                   0x208
-#define ISPIF_VFE_m_IRQ_MASK_1                   0x20C
-#define ISPIF_VFE_m_IRQ_MASK_2                   0x210
-#define ISPIF_VFE_m_IRQ_STATUS_0                 0x21C
-#define ISPIF_VFE_m_IRQ_STATUS_1                 0x220
-#define ISPIF_VFE_m_IRQ_STATUS_2                 0x224
-#define ISPIF_VFE_m_IRQ_CLEAR_0                  0x230
-#define ISPIF_VFE_m_IRQ_CLEAR_1                  0x234
-#define ISPIF_VFE_m_IRQ_CLEAR_2                  0x238
+#define ISPIF_VFE(m)                             ((m) * 0x200)
+
+#define ISPIF_VFE_m_CTRL_0(m)                    (0x200 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m)                (0x208 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m)                (0x20C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m)                (0x210 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m)              (0x21C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m)              (0x220 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m)              (0x224 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m)               (0x230 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m)               (0x234 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m)               (0x238 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m)                 (0x244 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m)                (0x248 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m)                (0x24C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n)    (0x254 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n)    (0x264 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n)        (0x278 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m)              (0x288 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m)                (0x28C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n)      (0x290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n)    (0x298 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n)    (0x29C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n)      (0x2C0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n)      (0x2D0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m)            (0x2E4 + ISPIF_VFE(m))
 
 /*ISPIF RESET BITS*/
 #define VFE_CLK_DOMAIN_RST                       BIT(31)
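
Both hwreg headers now express every per-VFE register as a base offset plus ISPIF_VFE(m): zero on v1 hardware, which has a single register bank, and a 0x200-byte stride per VFE on v2. A quick check of the arithmetic, using the v2 offsets from the diff in a standalone program:

    #include <stdio.h>

    /* v1 has a single register bank, so the per-VFE stride is zero; v2 places a
     * second identical bank 0x200 bytes above the first. */
    #define ISPIF_VFE_V1(m)                  (0x0)
    #define ISPIF_VFE_V2(m)                  ((m) * 0x200)

    #define V2_VFE_m_IRQ_STATUS_0(m)         (0x21C + ISPIF_VFE_V2(m))
    #define V2_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE_V2(m) + 4*(n))

    int main(void)
    {
        printf("v1 stride for VFE1   = 0x%03X\n", ISPIF_VFE_V1(1));          /* 0x000 */
        printf("v2 VFE0 IRQ_STATUS_0 = 0x%03X\n", V2_VFE_m_IRQ_STATUS_0(0)); /* 0x21C */
        printf("v2 VFE1 IRQ_STATUS_0 = 0x%03X\n", V2_VFE_m_IRQ_STATUS_0(1)); /* 0x41C */
        printf("v2 VFE1 RDI2 STATUS  = 0x%03X\n", V2_VFE_m_RDI_INTF_n_STATUS(1, 2));
        return 0;
    }
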
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 6418f21..fbc2b93 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -355,7 +355,7 @@
 	if (WARN_ON(!msm_subdev))
 		return -EINVAL;
 
-	if (WARN_ON(!msm_v4l2_dev) && WARN_ON(!msm_v4l2_dev->dev))
+	if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
 		return -EIO;
 
 	return __msm_sd_register_subdev(&msm_subdev->sd);
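
The msm.c one-liner fixes a short-circuit bug: with &&, a NULL msm_v4l2_dev made the first WARN_ON true and the second one then dereferenced NULL, while a NULL ->dev was never caught when the pointer itself was valid; with || the checks compose correctly. A standalone illustration with a stand-in warn_on() helper:

    #include <stdio.h>
    #include <stddef.h>

    struct v4l2_dev { void *dev; };

    /* Stand-in for WARN_ON(): reports and returns whether the condition is true. */
    static int warn_on(int cond, const char *what)
    {
        if (cond)
            printf("WARNING: %s\n", what);
        return cond;
    }

    static int register_subdev(struct v4l2_dev *vdev)
    {
        /*
         * With ||, the short-circuit skips the vdev->dev dereference when vdev
         * is NULL, and still checks vdev->dev when vdev is valid.
         */
        if (warn_on(!vdev, "no v4l2 dev") || warn_on(!vdev->dev, "no parent dev"))
            return -5;  /* -EIO */
        return 0;
    }

    int main(void)
    {
        struct v4l2_dev good = { .dev = &good }, broken = { .dev = NULL };
        printf("NULL vdev      -> %d\n", register_subdev(NULL));
        printf("NULL vdev->dev -> %d\n", register_subdev(&broken));
        printf("valid          -> %d\n", register_subdev(&good));
        return 0;
    }
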
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index ca5e646..8f63d33 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -563,6 +563,44 @@
 	return 0;
 }
 
+static int msm_cpp_flush_frames(struct cpp_device *cpp_dev)
+{
+	struct v4l2_event v4l2_evt;
+	struct msm_queue_cmd *frame_qcmd;
+	struct msm_cpp_frame_info_t *process_frame;
+	struct msm_device_queue *queue;
+	struct msm_queue_cmd *event_qcmd;
+
+	do {
+		if (cpp_dev->realtime_q.len != 0) {
+			queue = &cpp_dev->realtime_q;
+		} else if (cpp_dev->offline_q.len != 0) {
+			queue = &cpp_dev->offline_q;
+		} else {
+			pr_debug("All frames flushed\n");
+			break;
+		}
+		frame_qcmd = msm_dequeue(queue, list_frame);
+		process_frame = frame_qcmd->command;
+		kfree(frame_qcmd);
+		event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
+		if (!event_qcmd) {
+			pr_err("Insufficient memory\n");
+			return -ENOMEM;
+		}
+		atomic_set(&event_qcmd->on_heap, 1);
+		event_qcmd->command = process_frame;
+		CPP_DBG("fid %d\n", process_frame->frame_id);
+		msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
+
+		v4l2_evt.id = process_frame->inst_id;
+		v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
+		v4l2_event_queue(cpp_dev->msm_sd.sd.devnode, &v4l2_evt);
+	} while ((cpp_dev->realtime_q.len != 0) ||
+		(cpp_dev->offline_q.len != 0));
+	return 0;
+}
+
 static int msm_cpp_cfg(struct cpp_device *cpp_dev,
 	struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
 {
@@ -755,7 +793,7 @@
 		rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
 		break;
 	case VIDIOC_MSM_CPP_FLUSH_QUEUE:
-		rc = msm_cpp_send_frame_to_hardware(cpp_dev);
+		rc = msm_cpp_flush_frames(cpp_dev);
 		break;
 	case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
 		struct msm_device_queue *queue = &cpp_dev->eventData_q;
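
msm_cpp_flush_frames() above drains the realtime queue before the offline queue and turns every still-pending frame into a frame-done event so userspace is not left waiting. A toy model of that drain order; fixed-size queues and printf() notifications stand in for the driver's msm_queue and v4l2 event machinery:

    #include <stdio.h>

    /* Toy fixed-size queues standing in for the realtime/offline lists. */
    struct queue { int frames[8]; int len; };

    static int dequeue(struct queue *q)
    {
        int f = q->frames[0];
        for (int i = 1; i < q->len; i++)
            q->frames[i - 1] = q->frames[i];
        q->len--;
        return f;
    }

    /* Drain the realtime queue before the offline one, emitting a "frame done"
     * notification for every frame that never reached the hardware. */
    static void flush_frames(struct queue *rt, struct queue *off)
    {
        while (rt->len || off->len) {
            struct queue *q = rt->len ? rt : off;
            printf("notify frame-done for frame %d\n", dequeue(q));
        }
        printf("All frames flushed\n");
    }

    int main(void)
    {
        struct queue rt  = { .frames = {10, 11}, .len = 2 };
        struct queue off = { .frames = {20},     .len = 1 };
        flush_frames(&rt, &off);
        return 0;
    }
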
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 499b36c..9be4704 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -75,10 +75,6 @@
 module_param(video_nonsecure_ion_heap, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation");
 
-static int generate_es_events;
-module_param(generate_es_events, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(generate_es_events, "Generate new elementary stream data events");
-
 /* Value of TS packet scramble bits field for even key */
 static int mpq_sdmx_scramble_even = 0x2;
 module_param(mpq_sdmx_scramble_even, int, S_IRUGO | S_IWUSR);
@@ -1224,13 +1220,6 @@
 {
 	struct mpq_demux *mpq_demux = feed->demux->priv;
 
-	if (!generate_es_events) {
-		MPQ_DVB_ERR_PRINT(
-			"%s: Cannot release decoder buffer when not working with new elementary stream data events\n",
-			__func__);
-		return -EPERM;
-	}
-
 	if (cookie < 0) {
 		MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__);
 		return -EINVAL;
@@ -2834,13 +2823,11 @@
 					__func__);
 			}
 
-			if (generate_es_events) {
-				mpq_dmx_prepare_es_event_data(
-					&packet, &meta_data, feed_data,
-					stream_buffer, &data);
+			mpq_dmx_prepare_es_event_data(
+				&packet, &meta_data, feed_data,
+				stream_buffer, &data);
 
-				feed->data_ready_cb.ts(&feed->feed.ts, &data);
-			}
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
 
 			feed_data->pending_pattern_len = 0;
 			mpq_streambuffer_get_data_rw_offset(
@@ -2975,15 +2962,13 @@
 					NULL,
 					&feed_data->frame_offset);
 
-				if (generate_es_events) {
-					mpq_dmx_prepare_es_event_data(
-						&packet, &meta_data,
-						feed_data,
-						stream_buffer, &data);
+				mpq_dmx_prepare_es_event_data(
+					&packet, &meta_data,
+					feed_data,
+					stream_buffer, &data);
 
-					feed->data_ready_cb.ts(
-						&feed->feed.ts, &data);
-				}
+				feed->data_ready_cb.ts(
+					&feed->feed.ts, &data);
 			} else {
 				MPQ_DVB_ERR_PRINT(
 					"%s: received PUSI"
@@ -4122,6 +4107,8 @@
 	int ret;
 	int pes_cnt = 0;
 	struct dmx_data_ready data_event;
+	struct dmx_data_ready data;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
 
 	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
 		goto decoder_filter_check_overflow;
@@ -4249,15 +4236,11 @@
 		mpq_dmx_update_decoder_stat(mpq_demux);
 		mpq_streambuffer_pkt_write(sbuf, &packet, (u8 *)&meta_data);
 
-		if (generate_es_events) {
-			struct dmx_data_ready data;
-			struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
-			mpq_dmx_prepare_es_event_data(
-				&packet, &meta_data, &mpq_feed->video_info,
-				sbuf, &data);
-			MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
-			feed->data_ready_cb.ts(&feed->feed.ts, &data);
-		}
+		mpq_dmx_prepare_es_event_data(
+			&packet, &meta_data, &mpq_feed->video_info,
+			sbuf, &data);
+		MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
+		feed->data_ready_cb.ts(&feed->feed.ts, &data);
 
 		spin_unlock(&mpq_feed->video_info.video_buffer_lock);
 	}
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index f8460be..40e09b6 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1165,6 +1165,16 @@
 			sizeof(struct hfi_index_extradata_config);
 		break;
 	}
+	case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
+	{
+		struct hfi_enable *hfi;
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE;
+		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+		hfi->enable = ((struct hal_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		break;
+	}
 	case HAL_CONFIG_VPE_DEINTERLACE:
 		break;
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
@@ -1187,7 +1197,6 @@
 	case HAL_PARAM_VDEC_MB_QUANTIZATION:
 	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
 	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
-	case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
 	case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
 	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
 	case HAL_CONFIG_VDEC_MULTI_STREAM:
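
The new HAL_PARAM_VENC_SLICE_DELIVERY_MODE case follows the usual HFI packing pattern: append the property id word, then the payload struct, and grow pkt->size by both. A simplified standalone sketch of that pattern; the packet layout and property id below are made up, and unlike the real HFI packet, size here starts at zero rather than at the header size:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct enable_payload { uint32_t enable; };

    struct property_pkt {
        uint32_t size;           /* running size of the packed data in bytes */
        uint32_t num_properties;
        uint8_t  data[64];       /* property id + payload stream */
    };

    static void pack_enable_property(struct property_pkt *pkt, uint32_t prop_id,
                                     uint32_t enable)
    {
        uint8_t *dst = pkt->data + pkt->size;
        struct enable_payload payload = { .enable = enable };

        /* Property id word first, then the payload struct right behind it. */
        memcpy(dst, &prop_id, sizeof(prop_id));
        memcpy(dst + sizeof(prop_id), &payload, sizeof(payload));

        pkt->size += sizeof(prop_id) + sizeof(payload);
        pkt->num_properties++;
    }

    int main(void)
    {
        struct property_pkt pkt = { 0 };
        pack_enable_property(&pkt, 0x1234, 1);   /* hypothetical property id */
        printf("size=%u props=%u\n", (unsigned)pkt.size,
               (unsigned)pkt.num_properties);
        return 0;
    }
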
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index be9458d..102e1ec 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -305,11 +305,213 @@
 	callback(RELEASE_RESOURCE_DONE, &cmd_done);
 }
 
+static inline void copy_cap_prop(
+		struct hfi_capability_supported *in,
+		struct vidc_hal_session_init_done *sess_init_done)
+{
+	struct hal_capability_supported *out = NULL;
+	switch (in->capability_type) {
+	case HFI_CAPABILITY_FRAME_WIDTH:
+		out = &sess_init_done->width;
+		break;
+
+	case HFI_CAPABILITY_FRAME_HEIGHT:
+		out = &sess_init_done->height;
+		break;
+
+	case HFI_CAPABILITY_MBS_PER_FRAME:
+		out = &sess_init_done->mbs_per_frame;
+		break;
+
+	case HFI_CAPABILITY_MBS_PER_SECOND:
+		out = &sess_init_done->mbs_per_sec;
+		break;
+
+	case HFI_CAPABILITY_FRAMERATE:
+		out = &sess_init_done->frame_rate;
+		break;
+
+	case HFI_CAPABILITY_SCALE_X:
+		out = &sess_init_done->scale_x;
+		break;
+
+	case HFI_CAPABILITY_SCALE_Y:
+		out = &sess_init_done->scale_y;
+		break;
+
+	case HFI_CAPABILITY_BITRATE:
+		out = &sess_init_done->bitrate;
+		break;
+	}
+
+	if (in && out) {
+		out->capability_type =
+			(enum hal_capability)in->capability_type;
+		out->min = in->min;
+		out->max = in->max;
+		out->step_size = in->step_size;
+	}
+}
+
 enum vidc_status hfi_process_sess_init_done_prop_read(
 	struct hfi_msg_sys_session_init_done_packet *pkt,
-	struct msm_vidc_cb_cmd_done *cmddone)
+	struct vidc_hal_session_init_done *sess_init_done)
 {
-	return VIDC_ERR_NONE;
+	u32 rem_bytes, num_properties;
+	u8 *data_ptr;
+	u32 status = VIDC_ERR_NONE;
+	u32 prop_id, next_offset = 0;
+
+	rem_bytes = pkt->size - sizeof(struct
+			hfi_msg_sys_session_init_done_packet) + sizeof(u32);
+
+	if (rem_bytes == 0) {
+		dprintk(VIDC_ERR,
+			"hfi_msg_sys_session_init_done:missing_prop_info");
+		return VIDC_ERR_FAIL;
+	}
+
+	status = hfi_map_err_status((u32)pkt->error_type);
+
+	if (status)
+		return status;
+
+	data_ptr = (u8 *) &pkt->rg_property_data[0];
+	num_properties = pkt->num_properties;
+
+	while ((status == VIDC_ERR_NONE) && num_properties &&
+		   (rem_bytes >= sizeof(u32))) {
+		prop_id = *((u32 *)data_ptr);
+		next_offset = sizeof(u32);
+
+		switch (prop_id) {
+		case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+		{
+			struct hfi_capability_supported_info *prop =
+				(struct hfi_capability_supported_info *)
+				(data_ptr + next_offset);
+			u32 num_capabilities;
+			struct hfi_capability_supported *cap_ptr;
+
+			if ((rem_bytes - next_offset) < sizeof(*cap_ptr)) {
+				status = VIDC_ERR_BAD_PARAM;
+				break;
+			}
+
+			num_capabilities = prop->num_capabilities;
+			cap_ptr = &prop->rg_data[0];
+			next_offset += sizeof(u32);
+
+			while (num_capabilities &&
+				((rem_bytes - next_offset) >= sizeof(u32))) {
+				copy_cap_prop(cap_ptr, sess_init_done);
+				cap_ptr++;
+				next_offset += sizeof(*cap_ptr);
+				num_capabilities--;
+			}
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+		{
+			struct hfi_uncompressed_format_supported *prop =
+				(struct hfi_uncompressed_format_supported *)
+				(data_ptr + next_offset);
+
+			u32 num_format_entries;
+			char *fmt_ptr;
+			struct hfi_uncompressed_plane_info *plane_info;
+
+			if ((rem_bytes - next_offset) < sizeof(*prop)) {
+				status = VIDC_ERR_BAD_PARAM;
+				break;
+			}
+			num_format_entries = prop->format_entries;
+			next_offset = sizeof(*prop) - sizeof(u32);
+			fmt_ptr = (char *)&prop->rg_format_info[0];
+
+			while (num_format_entries) {
+				u32 bytes_to_skip;
+				plane_info =
+				(struct hfi_uncompressed_plane_info *) fmt_ptr;
+
+				if ((rem_bytes - next_offset) <
+						sizeof(*plane_info)) {
+					status = VIDC_ERR_BAD_PARAM;
+					break;
+				}
+				bytes_to_skip = sizeof(*plane_info) -
+					sizeof(struct
+					hfi_uncompressed_plane_constraints) +
+					plane_info->num_planes *
+					sizeof(struct
+					hfi_uncompressed_plane_constraints);
+
+				fmt_ptr +=  bytes_to_skip;
+				next_offset += bytes_to_skip;
+				num_format_entries--;
+			}
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
+		{
+			struct hfi_properties_supported *prop =
+				(struct hfi_properties_supported *)
+				(data_ptr + next_offset);
+
+			next_offset += sizeof(*prop) - sizeof(u32)
+				+ prop->num_properties * sizeof(u32);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+		{
+			struct hfi_profile_level_supported *prop =
+				(struct hfi_profile_level_supported *)
+				(data_ptr + next_offset);
+
+			next_offset += sizeof(*prop) -
+				sizeof(struct hfi_profile_level) +
+				prop->profile_count *
+				sizeof(struct hfi_profile_level);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+		{
+			next_offset +=
+				sizeof(struct hfi_nal_stream_format_supported);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT:
+		{
+			next_offset += sizeof(u32);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+		{
+			next_offset += sizeof(u32);
+			num_properties--;
+			break;
+		}
+		case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH:
+		{
+			next_offset +=
+				sizeof(struct hfi_intra_refresh);
+			num_properties--;
+			break;
+		}
+		default:
+			dprintk(VIDC_DBG,
+				"%s default case - 0x%x", __func__, prop_id);
+		}
+		rem_bytes -= next_offset;
+		data_ptr += next_offset;
+	}
+	return status;
 }
 
 static void hfi_process_sess_get_prop_buf_req(
@@ -493,7 +695,7 @@
 	cmd_done.data = &session_init_done;
 	if (!cmd_done.status) {
 		cmd_done.status = hfi_process_sess_init_done_prop_read(
-			pkt, &cmd_done);
+			pkt, &session_init_done);
 	}
 	cmd_done.size = sizeof(struct vidc_hal_session_init_done);
 	callback(SESSION_INIT_DONE, &cmd_done);
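
The rewritten hfi_process_sess_init_done_prop_read() above is essentially a bounded walk over a packed list of {property id, variable-length payload} records: it keeps rem_bytes and num_properties in step, computes next_offset per property type, and copies only the capability records it recognizes into sess_init_done. A compact, self-contained sketch of that walking pattern (fixed payload sizes and an invented record layout, not the real HFI structures):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* invented record layout: a 32-bit id followed by an id-dependent payload */
static uint32_t payload_size(uint32_t id)
{
	switch (id) {
	case 1: return 3 * sizeof(uint32_t);	/* e.g. a capability triple */
	case 2: return 1 * sizeof(uint32_t);	/* e.g. a single flag */
	default: return 0;			/* unknown: skip only the id */
	}
}

static int walk_properties(const uint8_t *data, uint32_t rem_bytes,
			   uint32_t num_properties)
{
	while (num_properties && rem_bytes >= sizeof(uint32_t)) {
		uint32_t id, next;

		memcpy(&id, data, sizeof(id));
		next = sizeof(uint32_t) + payload_size(id);
		if (next > rem_bytes)
			return -1;		/* malformed list */
		printf("property 0x%x with %u payload bytes\n",
		       (unsigned)id, (unsigned)(next - sizeof(uint32_t)));
		data += next;
		rem_bytes -= next;
		num_properties--;
	}
	return 0;
}

int main(void)
{
	uint32_t buf[] = { 1, 10, 20, 30, 2, 0xff };

	return walk_properties((const uint8_t *)buf, sizeof(buf), 2);
}
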
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 663cc40..d782227 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -682,6 +682,13 @@
 {
 	return 0;
 }
+
+static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
+				struct v4l2_frmsizeenum *fsize)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	return msm_vidc_enum_framesizes((void *)vidc_inst, fsize);
+}
 static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
 	.vidioc_querycap = msm_v4l2_querycap,
 	.vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt,
@@ -703,7 +710,8 @@
 	.vidioc_decoder_cmd = msm_v4l2_decoder_cmd,
 	.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
 	.vidioc_s_parm = msm_v4l2_s_parm,
-	.vidioc_g_parm = msm_v4l2_g_parm
+	.vidioc_g_parm = msm_v4l2_g_parm,
+	.vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
 };
 
 static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = {
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 5966d12..8f8e723 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -20,10 +20,6 @@
 #include "msm_vidc_debug.h"
 
 #define MSM_VDEC_DVC_NAME "msm_vdec_8974"
-#define DEFAULT_HEIGHT 720
-#define DEFAULT_WIDTH 1280
-#define MAX_SUPPORTED_WIDTH 3820
-#define MAX_SUPPORTED_HEIGHT 2160
 #define MIN_NUM_OUTPUT_BUFFERS 4
 #define MAX_NUM_OUTPUT_BUFFERS 6
 
@@ -248,7 +244,7 @@
 	return (MAX_SUPPORTED_WIDTH * MAX_SUPPORTED_HEIGHT * 3/2)/2;
 }
 
-static const struct msm_vidc_format vdec_formats[] = {
+struct msm_vidc_format vdec_formats[] = {
 	{
 		.name = "YCbCr Semiplanar 4:2:0",
 		.description = "Y/CbCr 4:2:0",
@@ -582,7 +578,6 @@
 	int stride, scanlines;
 	int extra_idx = 0;
 	int rc = 0;
-	int ret;
 	int i;
 	struct hal_buffer_requirements *buff_req_buffer;
 	if (!inst || !f || !inst->core || !inst->core->device) {
@@ -602,6 +597,12 @@
 		if (inst->in_reconfig == true) {
 			inst->prop.height = inst->reconfig_height;
 			inst->prop.width = inst->reconfig_width;
+			rc = msm_vidc_check_session_supported(inst);
+			if (rc) {
+				dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+				goto exit;
+			}
 		}
 		f->fmt.pix_mp.height = inst->prop.height;
 		f->fmt.pix_mp.width = inst->prop.width;
@@ -612,10 +613,20 @@
 		frame_sz.height = inst->prop.height;
 		dprintk(VIDC_DBG, "width = %d, height = %d\n",
 				frame_sz.width, frame_sz.height);
-		ret = msm_comm_try_set_prop(inst,
+		rc = msm_comm_try_set_prop(inst,
 			HAL_PARAM_FRAME_SIZE, &frame_sz);
-		ret = ret || msm_comm_try_get_bufreqs(inst);
-		if (ret || (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: Failed : Frame size setting\n", __func__);
+			goto exit;
+		}
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: Failed : Buffer requirements\n", __func__);
+			goto exit;
+		}
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 			for (i = 0; i < fmt->num_planes; ++i) {
 				f->fmt.pix_mp.plane_fmt[i].sizeimage =
 					fmt->get_frame_size(i,
@@ -680,6 +691,7 @@
 			f->type);
 		rc = -EINVAL;
 	}
+exit:
 	return rc;
 }
 int msm_vdec_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
@@ -717,7 +729,7 @@
 }
 int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 {
-	const struct msm_vidc_format *fmt = NULL;
+	struct msm_vidc_format *fmt = NULL;
 	struct hal_frame_size frame_sz;
 	int extra_idx = 0;
 	int rc = 0;
@@ -787,6 +799,12 @@
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		inst->prop.width = f->fmt.pix_mp.width;
 		inst->prop.height = f->fmt.pix_mp.height;
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+			goto err_invalid_fmt;
+		}
 		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
 				ARRAY_SIZE(vdec_formats),
 				f->fmt.pix_mp.pixelformat,
@@ -1161,6 +1179,10 @@
 	inst->fmts[CAPTURE_PORT] = &vdec_formats[0];
 	inst->prop.height = DEFAULT_HEIGHT;
 	inst->prop.width = DEFAULT_WIDTH;
+	inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
+	inst->capability.height.max = DEFAULT_HEIGHT;
+	inst->capability.width.min = MIN_SUPPORTED_WIDTH;
+	inst->capability.width.max = DEFAULT_WIDTH;
 	inst->prop.fps = 30;
 	inst->prop.prev_time_stamp = 0;
 	return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 072f4ab..9aa8175 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -19,10 +19,7 @@
 #include "msm_vidc_debug.h"
 
 #define MSM_VENC_DVC_NAME "msm_venc_8974"
-#define DEFAULT_HEIGHT 720
-#define DEFAULT_WIDTH 1280
 #define MIN_NUM_OUTPUT_BUFFERS 4
-#define MAX_NUM_OUTPUT_BUFFERS 8
 #define MIN_BIT_RATE 64000
 #define MAX_BIT_RATE 160000000
 #define DEFAULT_BIT_RATE 64000
@@ -88,6 +85,27 @@
 	"High Latency",
 };
 
+static const char *const mpeg_video_vidc_extradata[] = {
+	"Extradata none",
+	"Extradata MB Quantization",
+	"Extradata Interlace Video",
+	"Extradata VC1 Framedisp",
+	"Extradata VC1 Seqdisp",
+	"Extradata timestamp",
+	"Extradata S3D Frame Packing",
+	"Extradata Frame Rate",
+	"Extradata Panscan Window",
+	"Extradata Recovery point SEI",
+	"Extradata Closed Caption UD",
+	"Extradata AFD UD",
+	"Extradata Multislice info",
+	"Extradata number of concealed MB",
+	"Extradata metadata filler",
+	"Extradata input crop",
+	"Extradata digital zoom",
+	"Extradata aspect ratio",
+};
+
 enum msm_venc_ctrl_cluster {
 	MSM_VENC_CTRL_CLUSTER_QP = 1,
 	MSM_VENC_CTRL_CLUSTER_INTRA_PERIOD,
@@ -439,6 +457,18 @@
 		.cluster = MSM_VENC_CTRL_CLUSTER_SLICING,
 	},
 	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE,
+		.name = "Slice delivery mode",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.cluster = MSM_VENC_CTRL_CLUSTER_SLICING,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE,
 		.name = "Intra Refresh Mode",
 		.type = V4L2_CTRL_TYPE_MENU,
@@ -557,6 +587,36 @@
 		.qmenu = NULL,
 		.cluster = 0,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+		.name = "Extradata Type",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.maximum = V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP) |
+			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM) |
+			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO)
+			),
+		.qmenu = mpeg_video_vidc_extradata,
+		.step = 0,
+	},
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -578,7 +638,7 @@
 	return sz;
 }
 
-static const struct msm_vidc_format venc_formats[] = {
+static struct msm_vidc_format venc_formats[] = {
 	{
 		.name = "YCbCr Semiplanar 4:2:0",
 		.description = "Y/CbCr 4:2:0",
@@ -640,6 +700,9 @@
 	struct hal_buffer_count_actual new_buf_count;
 	enum hal_property property_id;
 	struct hfi_device *hdev;
+	struct hal_buffer_requirements *buff_req;
+	struct v4l2_ctrl *ctrl = NULL;
+	u32 extradata = 0;
 	if (!q || !q->drv_priv) {
 		dprintk(VIDC_ERR, "Invalid input, q = %p\n", q);
 		return -EINVAL;
@@ -655,13 +718,33 @@
 	switch (q->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 		*num_planes = 1;
-		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
-				*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
-			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+		buff_req = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+		*num_buffers = buff_req->buffer_count_actual =
+			max(*num_buffers, buff_req->buffer_count_actual);
+			if (*num_buffers > VIDEO_MAX_FRAME) {
+				dprintk(VIDC_ERR,
+					"Failed : No of buffers requested = %d"\
+					" Max supported buffers = %d",
+					*num_buffers, VIDEO_MAX_FRAME);
+				rc = -EINVAL;
+				break;
+			}
+		ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+				V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
+		if (ctrl)
+			extradata = v4l2_ctrl_g_ctrl(ctrl);
+		if (extradata)
+			*num_planes = *num_planes + 1;
+		inst->fmts[CAPTURE_PORT]->num_planes = *num_planes;
 		for (i = 0; i < *num_planes; i++) {
 			sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
 					i, inst->prop.height, inst->prop.width);
 		}
+		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
+		new_buf_count.buffer_type = HAL_BUFFER_OUTPUT;
+		new_buf_count.buffer_count_actual = *num_buffers;
+		rc = call_hfi_op(hdev, session_set_property, inst->session,
+			property_id, &new_buf_count);
 		break;
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
@@ -1342,6 +1425,25 @@
 		multi_slice_control.slice_size = ctrl->val;
 		pdata = &multi_slice_control;
 		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE: {
+		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
+		if ((temp_ctrl->val ==
+				V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) &&
+			(inst->fmts[CAPTURE_PORT]->fourcc ==
+				V4L2_PIX_FMT_H264 ||
+			inst->fmts[CAPTURE_PORT]->fourcc ==
+				V4L2_PIX_FMT_H264_NO_SC)) {
+			property_id = HAL_PARAM_VENC_SLICE_DELIVERY_MODE;
+			enable.enable = true;
+		} else {
+			dprintk(VIDC_WARN,
+				"Failed : slice delivery mode is valid "\
+				"only for H264 encoder and MB based slicing");
+			enable.enable = false;
+		}
+		pdata = &enable;
+		break;
+	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: {
 		struct v4l2_ctrl *air_mbs, *air_ref, *cir_mbs;
 		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
@@ -1452,6 +1554,15 @@
 		inst->mode = VIDC_SECURE;
 		dprintk(VIDC_INFO, "Setting secure mode to :%d\n", inst->mode);
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+	{
+		struct hal_extradata_enable extra;
+		property_id = HAL_PARAM_INDEX_EXTRADATA;
+		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
+		extra.enable = 1;
+		pdata = &extra;
+		break;
+	}
 	default:
 		rc = -ENOTSUPP;
 		break;
@@ -1676,7 +1787,7 @@
 }
 int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 {
-	const struct msm_vidc_format *fmt = NULL;
+	struct msm_vidc_format *fmt = NULL;
 	struct hal_frame_size frame_sz;
 	int rc = 0;
 	int i;
@@ -1707,6 +1818,12 @@
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		inst->prop.width = f->fmt.pix_mp.width;
 		inst->prop.height = f->fmt.pix_mp.height;
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+			goto exit;
+		}
 		frame_sz.buffer_type = HAL_BUFFER_INPUT;
 		frame_sz.width = inst->prop.width;
 		frame_sz.height = inst->prop.height;
@@ -1768,6 +1885,7 @@
 	const struct msm_vidc_format *fmt = NULL;
 	int rc = 0;
 	int i;
+	int extra_idx = 0;
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
 			"Invalid input, inst = %p, format = %p\n", inst, f);
@@ -1788,6 +1906,16 @@
 			fmt->get_frame_size(i, inst->prop.height,
 					inst->prop.width);
 		}
+		extra_idx = EXTRADATA_IDX(fmt->num_planes);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				inst->buff_req.buffer
+				[HAL_BUFFER_EXTRADATA_OUTPUT].buffer_size;
+		}
+		for (i = 0; i < fmt->num_planes; ++i) {
+			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+			f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
 	} else {
 		dprintk(VIDC_ERR,
 			"Buf type not recognized, type = %d\n",	f->type);
@@ -1827,6 +1955,7 @@
 	int i;
 	struct vidc_buffer_addr_info buffer_info;
 	struct hfi_device *hdev;
+	int extra_idx = 0;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s invalid parameters", __func__);
@@ -1839,24 +1968,41 @@
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		break;
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		for (i = 0; i < b->length; i++) {
-			dprintk(VIDC_DBG,
-				"device_addr = %ld, size = %d\n",
+		if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+			dprintk(VIDC_ERR,
+				"Planes mismatch: needed: %d, allocated: %d\n",
+				inst->fmts[CAPTURE_PORT]->num_planes,
+				b->length);
+			rc = -EINVAL;
+			break;
+		}
+
+		for (i = 0; (i < b->length) && (i < VIDEO_MAX_PLANES); i++) {
+			dprintk(VIDC_DBG, "device_addr = 0x%lx, size = %d\n",
 				b->m.planes[i].m.userptr,
 				b->m.planes[i].length);
-			buffer_info.buffer_size = b->m.planes[i].length;
-			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
-			buffer_info.num_buffers = 1;
-			buffer_info.align_device_addr =
-				b->m.planes[i].m.userptr;
-			buffer_info.extradata_size = 0;
-			buffer_info.extradata_addr = 0;
-			rc = call_hfi_op(hdev, session_set_buffers,
-				(void *)inst->session, &buffer_info);
-			if (rc)
-				dprintk(VIDC_ERR,
-					"vidc_hal_session_set_buffers failed");
 		}
+		buffer_info.buffer_size = b->m.planes[0].length;
+		buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr =
+			b->m.planes[0].m.userptr;
+
+		extra_idx = EXTRADATA_IDX(b->length);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			buffer_info.extradata_addr =
+				b->m.planes[extra_idx].m.userptr;
+			dprintk(VIDC_DBG, "extradata: 0x%lx\n",
+					b->m.planes[extra_idx].m.userptr);
+			buffer_info.extradata_size =
+				b->m.planes[extra_idx].length;
+		}
+
+		rc = call_hfi_op(hdev, session_set_buffers,
+				(void *)inst->session, &buffer_info);
+		if (rc)
+			dprintk(VIDC_ERR,
+					"vidc_hal_session_set_buffers failed");
 		break;
 	default:
 		dprintk(VIDC_ERR,
@@ -1869,8 +2015,7 @@
 int msm_venc_release_buf(struct msm_vidc_inst *inst,
 					struct v4l2_buffer *b)
 {
-	int rc = 0;
-	int i;
+	int i, rc = 0, extra_idx = 0;
 	struct vidc_buffer_addr_info buffer_info;
 	struct hfi_device *hdev;
 
@@ -1891,24 +2036,36 @@
 	switch (b->type) {
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
+		if (b->length !=
+			inst->fmts[CAPTURE_PORT]->num_planes) {
+			dprintk(VIDC_ERR,
+					"Planes mismatch: needed: %d, to release: %d\n",
+					inst->fmts[CAPTURE_PORT]->num_planes,
+					b->length);
+			rc = -EINVAL;
+			break;
+		}
 		for (i = 0; i < b->length; i++) {
 			dprintk(VIDC_DBG,
-				"Release device_addr = %ld, size = %d, %d\n",
+				"Release device_addr = 0x%lx, size = %d, %d\n",
 				b->m.planes[i].m.userptr,
 				b->m.planes[i].length, inst->state);
-			buffer_info.buffer_size = b->m.planes[i].length;
-			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
-			buffer_info.num_buffers = 1;
-			buffer_info.align_device_addr =
-				 b->m.planes[i].m.userptr;
-			buffer_info.extradata_size = 0;
-			buffer_info.extradata_addr = 0;
-			buffer_info.response_required = false;
-			rc = call_hfi_op(hdev, session_release_buffers,
+		}
+		buffer_info.buffer_size = b->m.planes[0].length;
+		buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr =
+			b->m.planes[0].m.userptr;
+		extra_idx = EXTRADATA_IDX(b->length);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES))
+			buffer_info.extradata_addr =
+			b->m.planes[extra_idx].m.userptr;
+		buffer_info.response_required = false;
+		rc = call_hfi_op(hdev, session_release_buffers,
 				(void *)inst->session, &buffer_info);
-			if (rc)
-				dprintk(VIDC_ERR,
+		if (rc)
+			dprintk(VIDC_ERR,
 					"vidc_hal_session_release_buffers failed\n");
 		}
 		break;
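
In the msm_venc.c changes above, a capture buffer can now carry more than one plane: plane 0 holds the bitstream and, when extradata is enabled, the last plane holds the extradata, which is why prepare_buf/release_buf first check b->length against num_planes and then pick out the extradata plane with EXTRADATA_IDX(). A small illustration of that layout, assuming EXTRADATA_IDX(n) resolves to n - 1 for n > 1, which is how the code above uses it:

#include <stdio.h>

#define VIDEO_MAX_PLANES 8
/* assumption for illustration: with n > 1 planes the extradata rides in the
 * last plane, so its index is n - 1; a single-plane buffer has none */
#define EXTRADATA_IDX(n) ((n) > 1 ? (n) - 1 : 0)

struct plane { unsigned long addr; unsigned int len; };

static void describe_capture_buffer(const struct plane *planes, int nplanes)
{
	int extra = EXTRADATA_IDX(nplanes);

	printf("bitstream: addr=0x%lx size=%u\n", planes[0].addr, planes[0].len);
	if (extra && extra < VIDEO_MAX_PLANES)
		printf("extradata: addr=0x%lx size=%u\n",
		       planes[extra].addr, planes[extra].len);
	else
		printf("no extradata plane\n");
}

int main(void)
{
	struct plane with_extra[2] = { { 0x1000, 1 << 20 }, { 0x2000, 4096 } };
	struct plane plain[1]      = { { 0x3000, 1 << 20 } };

	describe_capture_buffer(with_extra, 2);
	describe_capture_buffer(plain, 1);
	return 0;
}
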
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 218987e..0fbfd72 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -299,6 +299,30 @@
 	return -EINVAL;
 }
 
+
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct msm_vidc_core_capability *capability = NULL;
+
+	if (!inst || !fsize) {
+		dprintk(VIDC_ERR, "%s: invalid parameter: %p %p\n",
+				__func__, inst, fsize);
+		return -EINVAL;
+	}
+	if (!inst->core)
+		return -EINVAL;
+
+	capability = &inst->capability;
+	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+	fsize->stepwise.min_width = capability->width.min;
+	fsize->stepwise.max_width = capability->width.max;
+	fsize->stepwise.step_width = capability->width.step_size;
+	fsize->stepwise.min_height = capability->height.min;
+	fsize->stepwise.max_height = capability->height.max;
+	fsize->stepwise.step_height = capability->height.step_size;
+	return 0;
+}
 static void *vidc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				unsigned long size, int write)
 {
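
With msm_vidc_enum_framesizes() exposed through vidioc_enum_framesizes, userspace can read back the stepwise width/height range that the driver captured from SESSION_INIT_DONE. A minimal user-space caller; the device node name is illustrative:

#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmsizeenum fsz;
	int fd = open("/dev/video32", O_RDWR);	/* node name is illustrative */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&fsz, 0, sizeof(fsz));
	fsz.index = 0;
	fsz.pixel_format = V4L2_PIX_FMT_H264;
	if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsz) == 0 &&
	    fsz.type == V4L2_FRMSIZE_TYPE_STEPWISE)
		printf("width %u..%u step %u, height %u..%u step %u\n",
		       fsz.stepwise.min_width, fsz.stepwise.max_width,
		       fsz.stepwise.step_width, fsz.stepwise.min_height,
		       fsz.stepwise.max_height, fsz.stepwise.step_height);
	close(fd);
	return 0;
}
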
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ba3e393..4346a4e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -174,8 +174,8 @@
 	}
 	return &fmt[i];
 }
-const struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
-	const struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type)
+struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+	struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type)
 {
 	int i;
 	if (!fmt) {
@@ -350,7 +350,15 @@
 	struct msm_vidc_cb_cmd_done *response = data;
 	struct msm_vidc_inst *inst;
 	if (response) {
+		struct vidc_hal_session_init_done *session_init_done =
+			(struct vidc_hal_session_init_done *) response->data;
 		inst = (struct msm_vidc_inst *)response->session_id;
+
+		inst->capability.width = session_init_done->width;
+		inst->capability.height = session_init_done->height;
+		inst->capability.frame_rate =
+				session_init_done->frame_rate;
+		inst->capability.capability_set = true;
 		signal_session_msg_receipt(cmd, inst);
 	} else {
 		dprintk(VIDC_ERR,
@@ -394,8 +402,11 @@
 		inst->reconfig_height = event_notify->height;
 		inst->reconfig_width = event_notify->width;
 		inst->in_reconfig = true;
-		v4l2_event_queue_fh(&inst->event_handler, &dqevent);
-		wake_up(&inst->kernel_event_queue);
+		rc = msm_vidc_check_session_supported(inst);
+		if (!rc) {
+			v4l2_event_queue_fh(&inst->event_handler, &dqevent);
+			wake_up(&inst->kernel_event_queue);
+		}
 		return;
 	} else {
 		dprintk(VIDC_ERR,
@@ -1789,6 +1800,13 @@
 			mutex_unlock(&inst->sync_lock);
 	} else {
 		int64_t time_usec = timeval_to_ns(&vb->v4l2_buf.timestamp);
+
+		rc = msm_vidc_check_session_supported(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: session not supported\n", __func__);
+			goto err_no_mem;
+		}
 		do_div(time_usec, NSEC_PER_USEC);
 		memset(&frame_data, 0 , sizeof(struct vidc_frame_data));
 		frame_data.alloc_len = vb->v4l2_planes[0].length;
@@ -2359,3 +2377,54 @@
 				hdev->hfi_device_data, type);
 	return rc;
 }
+
+int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core_capability *capability;
+	int rc = 0;
+	struct v4l2_event dqevent;
+
+	if (!inst) {
+		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+	capability = &inst->capability;
+
+	if (inst->capability.capability_set) {
+		if (msm_vp8_low_tier &&
+			inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
+			capability->width.max = DEFAULT_WIDTH;
+			capability->height.max = DEFAULT_HEIGHT;
+		}
+		if (inst->prop.width < capability->width.min ||
+			inst->prop.width > capability->width.max ||
+			(inst->prop.width % capability->width.step_size != 0)) {
+			dprintk(VIDC_ERR,
+			"Unsupported width = %d range min(%u) - max(%u) step_size(%u)",
+			inst->prop.width, capability->width.min,
+			capability->width.max, capability->width.step_size);
+			rc = -ENOTSUPP;
+		}
+
+		if (inst->prop.height < capability->height.min ||
+			inst->prop.height > capability->height.max ||
+			(inst->prop.height %
+			capability->height.step_size != 0)) {
+			dprintk(VIDC_ERR,
+			"Unsupported height = %d range min(%u) - max(%u) step_size(%u)",
+			inst->prop.height, capability->height.min,
+			capability->height.max, capability->height.step_size);
+			rc = -ENOTSUPP;
+		}
+	}
+	if (rc) {
+		mutex_lock(&inst->sync_lock);
+		inst->state = MSM_VIDC_CORE_INVALID;
+		mutex_unlock(&inst->sync_lock);
+		dqevent.type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+		dqevent.id = 0;
+		v4l2_event_queue_fh(&inst->event_handler, &dqevent);
+		wake_up(&inst->kernel_event_queue);
+	}
+	return rc;
+}
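
msm_vidc_check_session_supported() above boils down to a range-plus-step check of the instance's width and height against the capabilities reported by the firmware; only on failure does it mark the core invalid and queue the SYS_ERROR event. The validation itself, in isolation and with made-up limits:

#include <stdio.h>

struct cap_range { unsigned int min, max, step; };

static int dim_supported(unsigned int v, const struct cap_range *c)
{
	return v >= c->min && v <= c->max &&
	       (c->step == 0 || v % c->step == 0);
}

int main(void)
{
	/* illustrative limits; the real values come from SESSION_INIT_DONE */
	struct cap_range width  = { 32, 1920, 16 };
	struct cap_range height = { 32, 1080, 16 };
	unsigned int w = 1280, h = 720;

	printf("%ux%u is %s\n", w, h,
	       dim_supported(w, &width) && dim_supported(h, &height) ?
	       "supported" : "not supported");
	return 0;
}
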
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 4f3deb6..862dfab 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -21,8 +21,8 @@
 struct msm_vidc_core *get_vidc_core(int core_id);
 const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
 	const struct msm_vidc_format fmt[], int size, int index, int fmt_type);
-const struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
-	const struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type);
+struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+	struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type);
 struct buf_queue *msm_comm_get_vb2q(
 		struct msm_vidc_inst *inst, enum v4l2_buf_type type);
 int msm_comm_try_state(struct msm_vidc_inst *inst, int state);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 62158b0..5948c7c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -18,7 +18,8 @@
 int msm_vidc_debug = 0x3;
 int msm_fw_debug = 0x18;
 int msm_fw_debug_mode = 0x1;
-int msm_fw_low_power_mode = 0x0;
+int msm_fw_low_power_mode = 0x1;
+int msm_vp8_low_tier = 0x1;
 
 struct debug_buffer {
 	char ptr[MAX_DBG_BUF_SIZE];
@@ -165,6 +166,11 @@
 		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
 		goto failed_create_dir;
 	}
+	if (!debugfs_create_u32("vp8_low_tier", S_IRUGO | S_IWUSR,
+			parent, &msm_vp8_low_tier)) {
+		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
 failed_create_dir:
 	return dir;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index fb06af6..ea6dd70 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -45,6 +45,7 @@
 extern int msm_fw_debug;
 extern int msm_fw_debug_mode;
 extern int msm_fw_low_power_mode;
+extern int msm_vp8_low_tier;
 
 #define dprintk(__level, __fmt, arg...)	\
 	do { \
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 8238d42..1bfbaa6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -37,6 +37,14 @@
 #define MSM_VIDC_VERSION KERNEL_VERSION(0, 0, 1);
 #define MAX_DEBUGFS_NAME 50
 #define DEFAULT_TIMEOUT 3
+#define DEFAULT_HEIGHT 1080
+#define DEFAULT_WIDTH 1920
+#define MIN_SUPPORTED_WIDTH 32
+#define MIN_SUPPORTED_HEIGHT 32
+#define MAX_SUPPORTED_WIDTH 3820
+#define MAX_SUPPORTED_HEIGHT 2160
+
+
 
 #define V4L2_EVENT_VIDC_BASE  10
 
@@ -166,6 +174,13 @@
 	VIDC_SECURE,
 };
 
+struct msm_vidc_core_capability {
+	struct hal_capability_supported width;
+	struct hal_capability_supported height;
+	struct hal_capability_supported frame_rate;
+	u32 capability_set;
+};
+
 struct msm_vidc_core {
 	struct list_head list;
 	struct mutex sync_lock, lock;
@@ -189,7 +204,7 @@
 	void *session;
 	struct session_prop prop;
 	int state;
-	const struct msm_vidc_format *fmts[MAX_PORT_NUM];
+	struct msm_vidc_format *fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
 	struct list_head pendingq;
 	struct list_head internalbufs;
@@ -212,6 +227,7 @@
 	struct msm_vidc_debug debug;
 	struct buf_count count;
 	enum msm_vidc_mode mode;
+	struct msm_vidc_core_capability capability;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
@@ -238,4 +254,5 @@
 void handle_cmd_response(enum command_response cmd, void *data);
 int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
 	enum hal_ssr_trigger_type type);
+int msm_vidc_check_session_supported(struct msm_vidc_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 232ad90..995c655 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -2604,7 +2604,7 @@
 		}
 	}
 
-	rc = scm_call(SCM_SVC_CP, TZBSP_MEM_PROTECT_VIDEO_VAR, &memprot,
+	rc = scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &memprot,
 			sizeof(memprot), &resp, sizeof(resp));
 	if (rc)
 		dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 9fb7c6d..3d11400 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -271,11 +271,15 @@
 		mmap_context.ion_client = wfd_dev->ion_client;
 		rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
 				ENC_MMAP, &mmap_context);
-		if (rc || !enc_mregion->paddr) {
+		if (rc) {
 			WFD_MSG_ERR("Failed to map input memory\n");
 			goto alloc_fail;
+		} else if (!enc_mregion->paddr) {
+			WFD_MSG_ERR("ENC_MMAP returned success" \
+				" but failed to map input memory\n");
+			rc = -EINVAL;
+			goto alloc_fail;
 		}
-
 		WFD_MSG_DBG("NOTE: enc paddr = [%p->%p], kvaddr = %p\n",
 				enc_mregion->paddr, (int8_t *)
 				enc_mregion->paddr + enc_mregion->size,
@@ -303,7 +307,7 @@
 		rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
 				MDP_MMAP, (void *)&mmap_context);
 
-		if (rc || !mdp_mregion->paddr) {
+		if (rc) {
 			WFD_MSG_ERR(
 				"Failed to map to mdp, rc = %d, paddr = 0x%p\n",
 				rc, mdp_mregion->paddr);
@@ -311,6 +315,14 @@
 			mdp_mregion->paddr = NULL;
 			mdp_mregion->ion_handle = NULL;
 			goto mdp_mmap_fail;
+		} else if (!mdp_mregion->paddr) {
+			WFD_MSG_ERR("MDP_MMAP returned success" \
+				" but failed to map to MDP\n");
+			rc = -EINVAL;
+			mdp_mregion->kvaddr = NULL;
+			mdp_mregion->paddr = NULL;
+			mdp_mregion->ion_handle = NULL;
+			goto mdp_mmap_fail;
 		}
 
 		mdp_buf.inst = inst->mdp_inst;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index cfa5487..e8096d1 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -523,7 +523,7 @@
 	 TI wl127x chips.
 
 config TSIF
-	depends on ARCH_MSM
+	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064
 	tristate "TSIF (Transport Stream InterFace) support"
 	default n
 	---help---
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index f7e8c9f..c37a4a4 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2264,19 +2264,22 @@
 	 */
 	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
 
+	rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
+	if (!rc)
+		goto init_sps_ep;
 	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
 	if (rc) {
-		pr_err("sps_register_bam_device() failed with %d\n", rc);
+		pr_err("%s: sps_register_bam_device() failed with %d\n",
+			__func__, rc);
 		goto out;
 	}
-	pr_info("BAM device registered: bam_handle 0x%x\n",
-			nand_info->sps.bam_handle);
-
+	pr_info("%s: BAM device registered: bam_handle 0x%x\n",
+			__func__, nand_info->sps.bam_handle);
+init_sps_ep:
 	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
 					SPS_DATA_PROD_PIPE_INDEX);
 	if (rc)
-		goto unregister_bam;
-
+		goto out;
 	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
 					SPS_DATA_CONS_PIPE_INDEX);
 	if (rc)
@@ -2291,22 +2294,20 @@
 	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
 deinit_data_prod:
 	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
-unregister_bam:
-	sps_deregister_bam_device(nand_info->sps.bam_handle);
 out:
 	return rc;
 }
 
 /*
- * This function de-registers BAM device, disconnects and frees its end points
- * for all the pipes.
+ * This function disconnects and frees the endpoints for all the pipes.
+ * Since the BAM is a shared resource, it is not deregistered here, as its
+ * handle might still be in use by LCDC.
  */
 static void msm_nand_bam_free(struct msm_nand_info *nand_info)
 {
 	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
 	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
 	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
-	sps_deregister_bam_device(nand_info->sps.bam_handle);
 }
 
 /* This function enables DMA support for the NANDc in BAM mode. */
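
The msm_qpic_nand.c change switches from always registering the BAM to first asking whether the BAM at that physical address already has a handle (sps_phy2h) and registering only on a miss; per the updated comment, the handle is then never deregistered because it may be shared with LCDC. The lookup-or-register shape in plain C, with a mock registry standing in for the SPS driver:

#include <stdio.h>

/* mock registry standing in for the SPS driver's table of registered BAMs */
static unsigned long registered_phys;
static unsigned int  registered_handle;

static int lookup_bam(unsigned long phys, unsigned int *handle) /* ~ sps_phy2h() */
{
	if (registered_phys && registered_phys == phys) {
		*handle = registered_handle;
		return 0;
	}
	return -1;
}

static int register_bam(unsigned long phys, unsigned int *handle)
{						/* ~ sps_register_bam_device() */
	registered_phys = phys;
	registered_handle = 0xbeef;
	*handle = registered_handle;
	return 0;
}

static int get_bam_handle(unsigned long phys, unsigned int *handle)
{
	if (!lookup_bam(phys, handle))
		return 0;	/* shared BAM already registered by someone else */
	return register_bam(phys, handle);
}

int main(void)
{
	unsigned int h1, h2;

	get_bam_handle(0xf9a00000UL, &h1);	/* first user registers the BAM */
	get_bam_handle(0xf9a00000UL, &h2);	/* second user reuses the handle */
	printf("handles: 0x%x 0x%x\n", h1, h2);
	return 0;
}
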
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 63acde1..21a936e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -200,7 +200,6 @@
 	struct work_struct clock_off_w; /* work for actual clock off */
 	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
 	struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
-	struct work_struct reset_bam_rx; /* work for reset bam rx endpoint */
 	struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
 	bool tty_flush_receive;
 	enum uart_core_type uart_type;
@@ -219,6 +218,7 @@
 	/* BLSP UART required BUS Scaling data */
 	struct msm_bus_scale_pdata *bus_scale_table;
 	bool rx_discard_flush_issued;
+	int rx_count_callback;
 };
 
 #define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
@@ -823,22 +823,6 @@
 }
 
 
-/* Reset BAM RX Endpoint Pipe Index from workqueue context*/
-
-static void hsuart_reset_bam_rx_work(struct work_struct *w)
-{
-	struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
-							reset_bam_rx);
-	struct uart_port *uport = &msm_uport->uport;
-	struct msm_hs_rx *rx = &msm_uport->rx;
-	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
-
-	sps_disconnect(sps_pipe_handle);
-	msm_hs_spsconnect_rx(uport);
-
-	msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
-}
-
 /*
  * termios :  new ktermios
  * oldtermios:  old ktermios previous setting
@@ -1073,13 +1057,6 @@
 	if (!is_blsp_uart(msm_uport) && msm_uport->rx.flush != FLUSH_SHUTDOWN)
 		msm_uport->rx.flush = FLUSH_STOP;
 
-	/* During uart port close, due to spurious rx stale interrupt,
-	 * the rx state machine is causing BUG_ON to be hit in
-	 * msm_hs_shutdown causing kernel panic.
-	 * Hence fixing the same by handling the rx state machine.
-	 */
-	if (is_blsp_uart(msm_uport) && msm_uport->rx.flush == FLUSH_DATA_READY)
-		msm_uport->rx.flush = FLUSH_SHUTDOWN;
 }
 
 /*  Transmit the next chunk of data */
@@ -1178,10 +1155,6 @@
 		printk(KERN_ERR "Error: rx started in buffer state = %x",
 		       buffer_pending);
 
-	if (is_blsp_uart(msm_uport)) {
-		/* Issue RX BAM Start IFC command */
-		msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
-	}
 	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
 	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
 	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
@@ -1204,6 +1177,19 @@
 	/* Calling next DMOV API. Hence mb() here. */
 	mb();
 
+	if (is_blsp_uart(msm_uport)) {
+		/*
+		 * The RX transfer will be automatically re-activated
+		 * after the last data of the previous transfer has been read.
+		 */
+		data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
+					RX_DMRX_CYCLIC_EN);
+		msm_hs_write(uport, UARTDM_RX_TRANS_CTRL_ADDR, data);
+		/* Issue RX BAM Start IFC command */
+		msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
+		mb();
+	}
+
 	msm_uport->rx.flush = FLUSH_NONE;
 
 	if (is_blsp_uart(msm_uport)) {
@@ -1276,7 +1262,6 @@
 {
 	int retval;
 	int rx_count;
-	static int remaining_rx_count, bytes_pending;
 	unsigned long status;
 	unsigned long flags;
 	unsigned int error_f = 0;
@@ -1284,17 +1269,24 @@
 	struct msm_hs_port *msm_uport;
 	unsigned int flush;
 	struct tty_struct *tty;
+	struct sps_event_notify *notify;
+	struct msm_hs_rx *rx;
+	struct sps_pipe *sps_pipe_handle;
+	u32 sps_flags = SPS_IOVEC_FLAG_EOT;
 
 	msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
 				 struct msm_hs_port, rx.tlet);
 	uport = &msm_uport->uport;
 	tty = uport->state->port.tty;
+	notify = &msm_uport->notify;
+	rx = &msm_uport->rx;
 
 	status = msm_hs_read(uport, UARTDM_SR_ADDR);
 
 	spin_lock_irqsave(&uport->lock, flags);
 
-	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
+	if (!is_blsp_uart(msm_uport))
+		msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
 
 	/* overflow is not connect to data in a FIFO */
 	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
@@ -1351,24 +1343,13 @@
 	if (flush >= FLUSH_DATA_INVALID)
 		goto out;
 
-	rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
-
 	if (is_blsp_uart(msm_uport)) {
-		if (rx_count > UARTDM_RX_BUF_SIZE) {
-			if (bytes_pending) {
-				rx_count = remaining_rx_count;
-				bytes_pending = 0;
-			} else {
-				remaining_rx_count = rx_count -
-						UARTDM_RX_BUF_SIZE;
-				if (remaining_rx_count)
-					bytes_pending = 1;
-				rx_count = UARTDM_RX_BUF_SIZE;
-			}
-		}
+		rx_count = msm_uport->rx_count_callback;
+	} else {
+		rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
+		/* order the read of rx.buffer */
+		rmb();
 	}
-	/* order the read of rx.buffer */
-	rmb();
 
 	if (0 != (uport->read_status_mask & CREAD)) {
 		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
@@ -1382,9 +1363,17 @@
 	/* order the read of rx.buffer and the start of next rx xfer */
 	wmb();
 
-	if (!msm_uport->rx.buffer_pending)
-		msm_hs_start_rx_locked(uport);
-
+	if (!msm_uport->rx.buffer_pending) {
+		if (is_blsp_uart(msm_uport)) {
+			msm_uport->rx.flush = FLUSH_NONE;
+			sps_pipe_handle = rx->prod.pipe_handle;
+			/* Queue transfer request to SPS */
+			sps_transfer_one(sps_pipe_handle, rx->rbuffer,
+				UARTDM_RX_BUF_SIZE, msm_uport, sps_flags);
+		} else {
+			msm_hs_start_rx_locked(uport);
+		}
+	}
 out:
 	if (msm_uport->rx.buffer_pending) {
 		if (hs_serial_debug_mask)
@@ -1502,7 +1491,10 @@
 	struct msm_hs_port *msm_uport =
 		(struct msm_hs_port *)
 		((struct sps_event_notify *)notify)->user;
+	struct uart_port *uport;
+	unsigned long flags;
 
+	uport = &(msm_uport->uport);
 	msm_uport->notify = *notify;
 	pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
 		__func__, notify->event_id,
@@ -1510,8 +1502,12 @@
 		notify->data.transfer.iovec.size,
 		notify->data.transfer.iovec.flags);
 
-	if (msm_uport->rx.flush == FLUSH_NONE)
+	if (msm_uport->rx.flush == FLUSH_NONE) {
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
+		spin_unlock_irqrestore(&uport->lock, flags);
 		tasklet_schedule(&msm_uport->rx.tlet);
+	}
 }
 
 /*
@@ -1691,6 +1687,8 @@
 	int ret;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
 
 	mutex_lock(&msm_uport->clk_mutex);
 	spin_lock_irqsave(&uport->lock, flags);
@@ -1717,26 +1715,26 @@
 	switch (msm_uport->clk_req_off_state) {
 	case CLK_REQ_OFF_START:
 		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
-		if (is_blsp_uart(msm_uport)) {
-			/* Stale interrupt when RX-FIFO is empty
-			 * will fire if STALE_IRQ_EMPTY bit is set
-			 * for UART Core v1.4
-			 */
-			msm_hs_write(uport, UARTDM_BCR_ADDR,
-					UARTDM_BCR_STALE_IRQ_EMPTY);
+
+		if (!is_blsp_uart(msm_uport)) {
+			msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
+			/*
+			 * Before returning, make sure the device writel has
+			 * completed. Hence an mb() is required here.
+			 */
+			mb();
 		}
-		msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
-		/*
-		 * Before returning make sure that device writel completed.
-		 * Hence mb() requires here.
-		 */
-		mb();
 		spin_unlock_irqrestore(&uport->lock, flags);
 		mutex_unlock(&msm_uport->clk_mutex);
 		return 0;  /* RXSTALE flush not complete - retry */
 	case CLK_REQ_OFF_RXSTALE_ISSUED:
 	case CLK_REQ_OFF_FLUSH_ISSUED:
 		spin_unlock_irqrestore(&uport->lock, flags);
+		if (is_blsp_uart(msm_uport)) {
+			msm_uport->clk_req_off_state =
+				CLK_REQ_OFF_RXSTALE_FLUSHED;
+			sps_disconnect(sps_pipe_handle);
+		}
 		mutex_unlock(&msm_uport->clk_mutex);
 		return 0;  /* RXSTALE flush not complete - retry */
 	case CLK_REQ_OFF_RXSTALE_FLUSHED:
@@ -1846,24 +1844,13 @@
 		mb();
 
 		if (msm_uport->clk_req_off_state ==
-					CLK_REQ_OFF_RXSTALE_ISSUED) {
+					CLK_REQ_OFF_RXSTALE_ISSUED)
 			msm_uport->clk_req_off_state =
-				CLK_REQ_OFF_FLUSH_ISSUED;
+					CLK_REQ_OFF_FLUSH_ISSUED;
 
-			if (is_blsp_uart(msm_uport)) {
-				/* Reset BCR Register for UARTDM Core v14*/
-				msm_hs_write(uport, UARTDM_BCR_ADDR, 0x0);
-			}
-		}
-
-		if (rx->flush == FLUSH_NONE) {
+		if (!is_blsp_uart(msm_uport) && (rx->flush == FLUSH_NONE)) {
 			rx->flush = FLUSH_DATA_READY;
-			if (is_blsp_uart(msm_uport)) {
-				queue_work(msm_uport->hsuart_wq,
-					&msm_uport->reset_bam_rx);
-			} else {
-				msm_dmov_flush(msm_uport->dma_rx_channel, 1);
-			}
+			msm_dmov_flush(msm_uport->dma_rx_channel, 1);
 		}
 	}
 	/* tx ready interrupt */
@@ -1993,8 +1980,14 @@
 			mb();
 		}
 		hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
-		if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
+		if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+			if (is_blsp_uart(msm_uport)) {
+				spin_unlock_irqrestore(&uport->lock, flags);
+				msm_hs_spsconnect_rx(uport);
+				spin_lock_irqsave(&uport->lock, flags);
+			}
 			msm_hs_start_rx_locked(uport);
+		}
 		if (msm_uport->rx.flush == FLUSH_STOP)
 			msm_uport->rx.flush = FLUSH_IGNORE;
 		msm_uport->clk_state = MSM_HS_CLK_ON;
@@ -2265,9 +2258,10 @@
 
 		tx->command_ptr->dst_row_addr =
 			msm_uport->uport.mapbase + UARTDM_TF_ADDR;
+
+		msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
 	}
 
-	msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
 	/* Enable reading the current CTS, no harm even if CTS is ignored */
 	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
 
@@ -2378,7 +2372,10 @@
 	}
 
 	/* Set up Uart Receive */
-	msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
+	if (is_blsp_uart(msm_uport))
+		msm_hs_write(uport, UARTDM_RFWR_ADDR, 32);
+	else
+		msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
 
 	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
 
@@ -2975,9 +2972,6 @@
 
 	INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
 
-	/* Init work for Reset Rx bam endpoints */
-	INIT_WORK(&msm_uport->reset_bam_rx, hsuart_reset_bam_rx_work);
-
 	/* Init work for sps_disconnect in stop_rx_locked */
 	INIT_WORK(&msm_uport->disconnect_rx_endpoint,
 				hsuart_disconnect_rx_endpoint_work);
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
index 9fa4f55..cdd0450 100644
--- a/drivers/tty/serial/msm_serial_hs_hwreg.h
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -68,6 +68,14 @@
  */
 #define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
 
+/* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
+#define UARTDM_RX_TRANS_CTRL_ADDR      0xcc
+
+/* TRANSFER_CONTROL Register bits */
+#define RX_STALE_AUTO_RE_EN		0x1
+#define RX_TRANS_AUTO_RE_ACTIVATE	0x2
+#define RX_DMRX_CYCLIC_EN		0x4
+
 /* write only register */
 #define UARTDM_CSR_115200 0xFF
 #define UARTDM_CSR_57600  0xEE
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index e5aca6f..b48785c 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -535,6 +535,7 @@
 static int dwc3_msm_dbm_ep_unconfig(u8 usb_ep)
 {
 	u8 dbm_ep;
+	u32 data;
 
 	dev_dbg(context->dev, "%s\n", __func__);
 
@@ -548,10 +549,18 @@
 
 	context->ep_num_mapping[dbm_ep] = 0;
 
-	dwc3_msm_write_reg(context->base, DBM_EP_CFG(dbm_ep), 0);
+	data = dwc3_msm_read_reg(context->base, DBM_EP_CFG(dbm_ep));
+	data &= (~0x1);
+	dwc3_msm_write_reg(context->base, DBM_EP_CFG(dbm_ep), data);
 
 	/* Reset the dbm endpoint */
 	dwc3_msm_dbm_ep_soft_reset(dbm_ep, true);
+	/*
+	 * 10 usec delay is required before deasserting DBM endpoint reset
+	 * according to hardware programming guide.
+	 */
+	udelay(10);
+	dwc3_msm_dbm_ep_soft_reset(dbm_ep, false);
 
 	return 0;
 }
@@ -888,6 +897,8 @@
 	}
 	(*new_ep_ops) = (*ep->ops);
 	new_ep_ops->queue = dwc3_msm_ep_queue;
+	new_ep_ops->disable = ep->ops->disable;
+
 	ep->ops = new_ep_ops;
 
 	/*
@@ -1328,24 +1339,27 @@
 	dwc3_msm_write_readback(msm->base, SS_PHY_PARAM_CTRL_1, 0x07, 0x5);
 }
 
-static void dwc3_msm_block_reset(void)
+static void dwc3_msm_block_reset(bool core_reset)
 {
+
 	struct dwc3_msm *mdwc = context;
 	int ret  = 0;
 
-	ret = dwc3_msm_link_clk_reset(1);
-	if (ret)
-		return;
+	if (core_reset) {
+		ret = dwc3_msm_link_clk_reset(1);
+		if (ret)
+			return;
 
-	usleep_range(1000, 1200);
-	ret = dwc3_msm_link_clk_reset(0);
-	if (ret)
-		return;
+		usleep_range(1000, 1200);
+		ret = dwc3_msm_link_clk_reset(0);
+		if (ret)
+			return;
 
-	usleep_range(10000, 12000);
+		usleep_range(10000, 12000);
 
-	/* Reinitialize QSCRATCH registers after block reset */
-	dwc3_msm_qscratch_reg_init(mdwc);
+		/* Reinitialize QSCRATCH registers after block reset */
+		dwc3_msm_qscratch_reg_init(mdwc);
+	}
 
 	/* Reset the DBM */
 	dwc3_msm_dbm_soft_reset(1);
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 01fad76..282f49e 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -206,6 +206,20 @@
 			return ret;
 		}
 		dwc3_otg_notify_host_mode(otg, on);
+
+		/*
+		 * Perform USB hardware RESET (both core reset and DBM reset)
+		 * when moving from host to peripheral. This is required for
+		 * peripheral mode to work.
+		 */
+		if (ext_xceiv && ext_xceiv->otg_capability &&
+						ext_xceiv->ext_block_reset)
+			ext_xceiv->ext_block_reset(true);
+
+		/* re-init core and OTG registers as block reset clears these */
+		dwc3_post_host_reset_core_init(dwc);
+		if (ext_xceiv && !ext_xceiv->otg_capability)
+			dwc3_otg_reset(dotg);
 	}
 
 	return 0;
@@ -253,7 +267,6 @@
 {
 	struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
 	struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
-	struct dwc3 *dwc = dotg->dwc;
 
 	if (!otg->gadget)
 		return -EINVAL;
@@ -262,20 +275,11 @@
 		dev_dbg(otg->phy->dev, "%s: turn on gadget %s\n",
 					__func__, otg->gadget->name);
 
-		/*
-		 * Hardware reset is required to support below scenarios:
-		 * 1. Host <-> peripheral switching
-		 * 2. Once an endpoint is configured in DBM (BAM) mode, it
-		 * can be unconfigured only after RESET
-		 */
+		/* Core reset is not required when starting the peripheral;
+		 * only the DBM reset is needed, so perform just that here. */
 		if (ext_xceiv && ext_xceiv->otg_capability &&
 						ext_xceiv->ext_block_reset)
-			ext_xceiv->ext_block_reset();
-
-		/* re-init core and OTG registers as block reset clears these */
-		dwc3_post_host_reset_core_init(dwc);
-		if (ext_xceiv && !ext_xceiv->otg_capability)
-			dwc3_otg_reset(dotg);
+			ext_xceiv->ext_block_reset(false);
 
 		dwc3_otg_set_peripheral_regs(dotg);
 		usb_gadget_vbus_connect(otg->gadget);
diff --git a/drivers/usb/dwc3/dwc3_otg.h b/drivers/usb/dwc3/dwc3_otg.h
index d3b1b4a..c2fab53 100644
--- a/drivers/usb/dwc3/dwc3_otg.h
+++ b/drivers/usb/dwc3/dwc3_otg.h
@@ -109,7 +109,7 @@
 	void	(*notify_ext_events)(struct usb_otg *otg,
 					enum dwc3_ext_events ext_event);
 	/* for block reset USB core */
-	void	(*ext_block_reset)(void);
+	void	(*ext_block_reset)(bool core_reset);
 };
 
 /* for external transceiver driver */
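
The dwc3_otg.h change above turns ext_block_reset into a parameterized callback: true requests the full link/core reset plus QSCRATCH re-init (used when leaving host mode), false requests only the DBM reset (used on a plain peripheral start). A tiny sketch of that callback-with-flag shape, with an invented implementation:

#include <stdio.h>
#include <stdbool.h>

/* shape of the updated callback: the caller says whether the full core reset
 * is needed (host -> peripheral switch) or only the DBM reset */
struct ext_xceiv_sketch {
	void (*ext_block_reset)(bool core_reset);
};

static void msm_block_reset_sketch(bool core_reset)
{
	if (core_reset)
		printf("link clock reset + QSCRATCH re-init\n");
	printf("DBM soft reset\n");
}

int main(void)
{
	struct ext_xceiv_sketch x = { .ext_block_reset = msm_block_reset_sketch };

	x.ext_block_reset(true);	/* leaving host mode */
	x.ext_block_reset(false);	/* starting the gadget */
	return 0;
}
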
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 5b97148..d4bdf99 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -77,8 +77,8 @@
 #include "f_rndis.c"
 #include "rndis.c"
 #include "f_qc_ecm.c"
-#include "u_bam_data.c"
 #include "f_mbim.c"
+#include "u_bam_data.c"
 #include "f_ecm.c"
 #include "f_qc_rndis.c"
 #include "u_ether.c"
@@ -865,10 +865,18 @@
 	fmbim_cleanup();
 }
 
+
+/* mbim transport string */
+static char mbim_transports[MAX_XPORT_STR_LEN];
+
 static int mbim_function_bind_config(struct android_usb_function *f,
 					  struct usb_configuration *c)
 {
-	return mbim_bind_config(c, 0);
+	char *trans;
+
+	pr_debug("%s: mbim transport is %s\n", __func__, mbim_transports);
+	trans = strim(mbim_transports);
+	return mbim_bind_config(c, 0, trans);
 }
 
 static int mbim_function_ctrlrequest(struct android_usb_function *f,
@@ -878,12 +886,34 @@
 	return mbim_ctrlrequest(cdev, c);
 }
 
+static ssize_t mbim_transports_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", mbim_transports);
+}
+
+static ssize_t mbim_transports_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	strlcpy(mbim_transports, buf, sizeof(mbim_transports));
+	return size;
+}
+
+static DEVICE_ATTR(mbim_transports, S_IRUGO | S_IWUSR, mbim_transports_show,
+				   mbim_transports_store);
+
+static struct device_attribute *mbim_function_attributes[] = {
+	&dev_attr_mbim_transports,
+	NULL
+};
+
 static struct android_usb_function mbim_function = {
 	.name		= "usb_mbim",
 	.cleanup	= mbim_function_cleanup,
 	.bind_config	= mbim_function_bind_config,
 	.init		= mbim_function_init,
 	.ctrlrequest	= mbim_function_ctrlrequest,
+	.attributes		= mbim_function_attributes,
 };
 
 #ifdef CONFIG_SND_PCM
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 893f315..52e3126 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -84,6 +84,7 @@
 	wait_queue_head_t read_wq;
 	wait_queue_head_t write_wq;
 
+	enum transport_type		xport;
 	u8				port_num;
 	struct data_port		bam_port;
 	struct mbim_notify_port		not_port;
@@ -143,6 +144,9 @@
 #define NTB_OUT_SIZE		(0x1000)
 #define NDP_IN_DIVISOR		(0x4)
 
+#define NTB_DEFAULT_IN_SIZE_IPA	(0x2000)
+#define NTB_OUT_SIZE_IPA		(0x2000)
+
 #define FORMATS_SUPPORTED	USB_CDC_NCM_NTB16_SUPPORTED
 
 static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
@@ -659,26 +663,49 @@
 	return 0;
 }
 
+int mbim_configure_params(void)
+{
+	struct teth_aggr_params aggr_params;
+	int ret = 0;
+
+	aggr_params.dl.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+	aggr_params.dl.max_datagrams = ntb_parameters.wNtbOutMaxDatagrams;
+	aggr_params.dl.max_transfer_size_byte = ntb_parameters.dwNtbInMaxSize;
+
+	aggr_params.ul.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+	aggr_params.ul.max_datagrams = ntb_parameters.wNtbOutMaxDatagrams;
+	aggr_params.ul.max_transfer_size_byte = ntb_parameters.dwNtbOutMaxSize;
+
+	ret = teth_bridge_set_aggr_params(&aggr_params);
+	if (ret)
+		pr_err("%s: teth_bridge_set_aggr_params failed\n", __func__);
+
+	return ret;
+}
+
 static int mbim_bam_connect(struct f_mbim *dev)
 {
 	int ret;
 	u8 src_connection_idx, dst_connection_idx;
 	struct usb_gadget *gadget = dev->cdev->gadget;
+	enum peer_bam bam_name = (dev->xport == USB_GADGET_XPORT_BAM2BAM_IPA) ?
+							IPA_P_BAM : A2_P_BAM;
 
 	pr_info("dev:%p portno:%d\n", dev, dev->port_num);
 
-	src_connection_idx = usb_bam_get_connection_idx(gadget->name, A2_P_BAM,
-		USB_TO_PEER_PERIPHERAL, dev->port_num);
-	dst_connection_idx = usb_bam_get_connection_idx(gadget->name, A2_P_BAM,
-		PEER_PERIPHERAL_TO_USB, dev->port_num);
+	src_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name,
+					USB_TO_PEER_PERIPHERAL, dev->port_num);
+	dst_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name,
+					PEER_PERIPHERAL_TO_USB, dev->port_num);
 	if (src_connection_idx < 0 || dst_connection_idx < 0) {
 		pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
 		return ret;
 	}
 
 	ret = bam_data_connect(&dev->bam_port, dev->port_num,
-		USB_GADGET_XPORT_BAM2BAM, src_connection_idx,
-		dst_connection_idx, USB_FUNC_MBIM);
+		dev->xport, src_connection_idx, dst_connection_idx,
+		USB_FUNC_MBIM);
+
 	if (ret) {
 		pr_err("bam_data_setup failed: err:%d\n",
 				ret);
@@ -1603,7 +1630,8 @@
  * Context: single threaded during gadget setup
  * Returns zero on success, else negative errno.
  */
-int mbim_bind_config(struct usb_configuration *c, unsigned portno)
+int mbim_bind_config(struct usb_configuration *c, unsigned portno,
+					 char *xport_name)
 {
 	struct f_mbim	*mbim = NULL;
 	int status = 0;
@@ -1662,6 +1690,19 @@
 	mbim->function.disable = mbim_disable;
 	mbim->function.suspend = mbim_suspend;
 	mbim->function.resume = mbim_resume;
+	mbim->xport = str_to_xport(xport_name);
+
+	if (mbim->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+		/* Use BAM2BAM by default if not IPA */
+		mbim->xport = USB_GADGET_XPORT_BAM2BAM;
+	} else  {
+	} else {
+		ntb_parameters.wNtbOutMaxDatagrams = 16;
+		/* For IPA this is proven to give maximum throughput */
+		ntb_parameters.dwNtbInMaxSize =
+		cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
+		ntb_parameters.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE_IPA);
+	}
 
 	INIT_LIST_HEAD(&mbim->cpkt_req_q);
 	INIT_LIST_HEAD(&mbim->cpkt_resp_q);
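
The f_mbim.c change lets the android gadget pass a transport name down to mbim_bind_config(), which maps it with str_to_xport() and falls back to plain BAM2BAM for anything other than the IPA transport, bumping the NTB limits only in the IPA case. A stand-in for that name-to-enum fallback (the strings here are illustrative, not necessarily the exact ones str_to_xport() accepts):

#include <stdio.h>
#include <string.h>

enum xport_sketch { XPORT_BAM2BAM, XPORT_BAM2BAM_IPA };

/* stand-in for str_to_xport(): unknown names fall back to plain BAM2BAM */
static enum xport_sketch pick_transport(const char *name)
{
	if (name && !strcmp(name, "BAM2BAM_IPA"))
		return XPORT_BAM2BAM_IPA;
	return XPORT_BAM2BAM;
}

int main(void)
{
	const char *names[] = { "BAM2BAM_IPA", "BAM2BAM", "" };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%-12s -> %s\n", names[i][0] ? names[i] : "(empty)",
		       pick_transport(names[i]) == XPORT_BAM2BAM_IPA ?
		       "IPA path, larger NTB" : "default BAM2BAM");
	return 0;
}
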
diff --git a/drivers/usb/gadget/f_qdss.c b/drivers/usb/gadget/f_qdss.c
index 6518095..cece500 100644
--- a/drivers/usb/gadget/f_qdss.c
+++ b/drivers/usb/gadget/f_qdss.c
@@ -410,7 +410,6 @@
 
 	clear_eps(f);
 	clear_desc(c->cdev->gadget, f);
-	msm_dwc3_restart_usb_session();
 }
 
 static void qdss_eps_disable(struct usb_function *f)
@@ -467,13 +466,13 @@
 	qdss->usb_connected = 0;
 	spin_unlock_irqrestore(&qdss->lock, flags);
 
+	/* cancel all active xfers */
+	qdss_eps_disable(f);
+
 	status = uninit_data(qdss->data);
 	if (status)
 		pr_err("%s: uninit_data error\n", __func__);
 
-	/*cancell all active xfers*/
-	qdss_eps_disable(f);
-
 	schedule_work(&qdss->disconnect_w);
 }
 
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 801d24d..7ac5b64 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -687,6 +687,12 @@
 	rndis_reset_cmplt_type *resp;
 	rndis_resp_t *r;
 	struct rndis_params *params = rndis_per_dev_params + configNr;
+	u32 length;
+	u8 *xbuf;
+
+	/* drain the response queue */
+	while ((xbuf = rndis_get_next_response(configNr, &length)))
+		rndis_free_response(configNr, xbuf);
 
 	r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
 	if (!r)
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 83f885a..eec9e37 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -183,6 +183,8 @@
 	int ret;
 
 	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		if (d->func_type == USB_FUNC_MBIM)
+			teth_bridge_disconnect();
 		if (d->func_type == USB_FUNC_ECM)
 			ecm_ipa_disconnect(d->ipa_params.priv);
 		ret = usb_bam_disconnect_ipa(&d->ipa_params);
@@ -195,13 +197,28 @@
 {
 	struct bam_data_port *port = container_of(w, struct bam_data_port,
 						  connect_w);
+	struct teth_bridge_connect_params connect_params;
 	struct bam_data_ch_info *d = &port->data_ch;
+	ipa_notify_cb usb_notify_cb;
+	void *priv;
 	u32 sps_params;
 	int ret;
 
 	pr_debug("%s: Connect workqueue started", __func__);
 
 	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		if (d->func_type == USB_FUNC_MBIM) {
+			ret = teth_bridge_init(&usb_notify_cb, &priv);
+			if (ret) {
+				pr_err("%s:teth_bridge_init() failed\n",
+				      __func__);
+				return;
+			}
+			d->ipa_params.notify = usb_notify_cb;
+			d->ipa_params.priv = priv;
+			d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+		}
+
 		d->ipa_params.client = IPA_CLIENT_USB_CONS;
 		d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
 		if (d->func_type == USB_FUNC_ECM) {
@@ -227,6 +244,23 @@
 				__func__, ret);
 			return;
 		}
+
+		if (d->func_type == USB_FUNC_MBIM) {
+			connect_params.ipa_usb_pipe_hdl =
+				d->ipa_params.prod_clnt_hdl;
+			connect_params.usb_ipa_pipe_hdl =
+				d->ipa_params.cons_clnt_hdl;
+			connect_params.tethering_mode =
+				TETH_TETHERING_MODE_MBIM;
+			ret = teth_bridge_connect(&connect_params);
+			if (ret) {
+				pr_err("%s:teth_bridge_connect() failed\n",
+				      __func__);
+				return;
+			}
+			mbim_configure_params();
+		}
+
 		if (d->func_type == USB_FUNC_ECM) {
 			ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
 				d->ipa_params.prod_clnt_hdl,
@@ -417,6 +451,7 @@
 	d->dst_connection_idx = dst_connection_idx;
 
 	d->trans = trans;
+	d->func_type = func;
 
 	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
 		d->ipa_params.src_pipe = &(d->src_pipe_idx);
@@ -425,8 +460,6 @@
 		d->ipa_params.dst_idx = dst_connection_idx;
 	}
 
-	d->func_type = func;
-
 	queue_work(bam_data_wq, &port->connect_w);
 
 	return 0;
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index ca4b01a..9879122 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -2106,7 +2106,9 @@
 	}
 
 	if (mehci->async_irq) {
-		disable_irq_wake(mehci->async_irq);
+		/* Async IRQ is used only in absence of dedicated wakeup irq */
+		if (!mehci->wakeup_irq)
+			disable_irq_wake(mehci->async_irq);
 		free_irq(mehci->async_irq, mehci);
 	}
 	/*
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 66363eb..521ace0 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -117,6 +117,8 @@
 		return  -ENOMEM;
 	}
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	hcd->irq = platform_get_irq(pdev, 0);
 	if (hcd->irq < 0) {
 		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index faa5625..6727996 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -1057,6 +1057,8 @@
 		return  -ENOMEM;
 	}
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	hcd->irq = platform_get_irq(pdev, 0);
 	if (hcd->irq < 0) {
 		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
diff --git a/drivers/usb/host/ehci-msm72k.c b/drivers/usb/host/ehci-msm72k.c
index 76cd977..bab330c 100644
--- a/drivers/usb/host/ehci-msm72k.c
+++ b/drivers/usb/host/ehci-msm72k.c
@@ -681,6 +681,8 @@
 	if (!hcd)
 		return  -ENOMEM;
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	hcd->irq = platform_get_irq(pdev, 0);
 	if (hcd->irq < 0) {
 		usb_put_hcd(hcd);
diff --git a/drivers/usb/host/hbm.c b/drivers/usb/host/hbm.c
index d48a631..1a0c0aa 100644
--- a/drivers/usb/host/hbm.c
+++ b/drivers/usb/host/hbm.c
@@ -39,6 +39,7 @@
 #define PIPE_PRODUCER	1
 #define MAX_PIPE_NUM	16
 #define HBM_QH_MAP_PIPE	0xffffffc0
+#define QTD_CERR_MASK	0xfffff3ff
 
 struct hbm_msm {
 	u32 *base;
@@ -257,6 +258,7 @@
 {
 	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
 	struct list_head qtd_list;
+	struct ehci_qtd *qtd;
 
 	INIT_LIST_HEAD(&qtd_list);
 
@@ -272,5 +274,11 @@
 
 	if (!qh_urb_transaction(ehci, urb, &qtd_list, mem_flags))
 		return -ENOMEM;
+
+	/* set err counter in qTD token to zero */
+	qtd = list_entry(qtd_list.next, struct ehci_qtd, qtd_list);
+	if (qtd != NULL)
+		qtd->hw_token &= QTD_CERR_MASK;
+
 	return hbm_submit_async(ehci, urb, &qtd_list, mem_flags);
 }
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 7760d28..5750e0d 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -3651,7 +3651,7 @@
 	cmd_buf.vmid_idx = MSM_OTG_VMID_IDX;
 	cmd_buf.mem_type = MSM_OTG_MEM_TYPE;
 
-	ret = scm_call(SCM_SVC_CP, MSM_OTG_CMD_ID, &cmd_buf,
+	ret = scm_call(SCM_SVC_MP, MSM_OTG_CMD_ID, &cmd_buf,
 				sizeof(cmd_buf), NULL, 0);
 
 	if (ret)
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 77eb9c2..63a842d 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1609,7 +1609,6 @@
 		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
 	} else if (term == MDP_OVERLAY0_TERM) {
 		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		mdp_lut_enable();
 		outpdw(MDP_BASE + 0x0004, 0);
 	} else if (term == MDP_OVERLAY1_TERM) {
 		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 8515782..e850321 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -71,7 +71,7 @@
 		}
 	}
 	if (flags & MDSS_MDP_PERF_UPDATE_BUS) {
-		bus_ab_quota = bus_ab_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
+		bus_ab_quota = bus_ib_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
 		bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
 		bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
 
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 57bf9f2..7d0d862 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1747,9 +1747,12 @@
 	mutex_lock(&mfd->sync_mutex);
 	if (mfd->is_committing) {
 		mutex_unlock(&mfd->sync_mutex);
-		ret = wait_for_completion_timeout(&mfd->commit_comp,
+		ret = wait_for_completion_interruptible_timeout(
+				&mfd->commit_comp,
 			msecs_to_jiffies(WAIT_FENCE_TIMEOUT));
 		if (ret <= 0)
+			ret = -ERESTARTSYS;
+		else if (!ret)
 			pr_err("%s wait for commit_comp timeout %d %d",
 				__func__, ret, mfd->is_committing);
 	} else {
@@ -3502,17 +3505,17 @@
 static int msmfb_handle_buf_sync_ioctl(struct msm_fb_data_type *mfd,
 						struct mdp_buf_sync *buf_sync)
 {
-	int i, fence_cnt = 0, ret;
+	int i, fence_cnt = 0, ret = 0;
 	int acq_fen_fd[MDP_MAX_FENCE_FD];
 	struct sync_fence *fence;
 
-	if ((buf_sync->acq_fen_fd_cnt == 0) ||
-		(buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
 		(mfd->timeline == NULL))
 		return -EINVAL;
 
-	ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
-			buf_sync->acq_fen_fd_cnt * sizeof(int));
+	if (buf_sync->acq_fen_fd_cnt)
+		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+				buf_sync->acq_fen_fd_cnt * sizeof(int));
 	if (ret) {
 		pr_err("%s:copy_from_user failed", __func__);
 		return ret;
@@ -3531,6 +3534,10 @@
 	fence_cnt = i;
 	if (ret)
 		goto buf_sync_err_1;
+	mfd->acq_fen_cnt = fence_cnt;
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+		msm_fb_wait_for_fence(mfd);
+
 	mfd->cur_rel_sync_pt = sw_sync_pt_create(mfd->timeline,
 			mfd->timeline_value + 2);
 	if (mfd->cur_rel_sync_pt == NULL) {
@@ -3557,7 +3564,6 @@
 		pr_err("%s:copy_to_user failed", __func__);
 		goto buf_sync_err_2;
 	}
-	mfd->acq_fen_cnt = buf_sync->acq_fen_fd_cnt;
 	mutex_unlock(&mfd->sync_mutex);
 	return ret;
 buf_sync_err_2:
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 281c72a..235248c 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -25,12 +25,12 @@
  * struct devfreq_dev_status - Data given from devfreq user device to
  *			     governors. Represents the performance
  *			     statistics.
- * @total_time		The total time represented by this instance of
+ * @total_time:		The total time represented by this instance of
  *			devfreq_dev_status
- * @busy_time		The time that the device was working among the
+ * @busy_time:		The time that the device was working among the
  *			total_time.
- * @current_frequency	The operating frequency.
- * @private_data	An entry not specified by the devfreq framework.
+ * @current_frequency:	The operating frequency.
+ * @private_data:	An entry not specified by the devfreq framework.
  *			A device and a specific governor may have their
  *			own protocol with private_data. However, because
  *			this is governor-specific, a governor using this
@@ -54,23 +54,27 @@
 
 /**
  * struct devfreq_dev_profile - Devfreq's user device profile
- * @initial_freq	The operating frequency when devfreq_add_device() is
+ * @initial_freq:	The operating frequency when devfreq_add_device() is
  *			called.
- * @polling_ms		The polling interval in ms. 0 disables polling.
- * @target		The device should set its operating frequency at
+ * @polling_ms:		The polling interval in ms. 0 disables polling.
+ * @target:		The device should set its operating frequency at
  *			freq or lowest-upper-than-freq value. If freq is
  *			higher than any operable frequency, set maximum.
  *			Before returning, target function should set
  *			freq at the current frequency.
  *			The "flags" parameter's possible values are
  *			explained above with "DEVFREQ_FLAG_*" macros.
- * @get_dev_status	The device should provide the current performance
+ * @get_dev_status:	The device should provide the current performance
  *			status to devfreq, which is used by governors.
- * @exit		An optional callback that is called when devfreq
+ * @get_cur_freq:	The device should provide the current frequency
+ *			at which it is operating.
+ * @exit:		An optional callback that is called when devfreq
  *			is removing the devfreq object due to error or
  *			from devfreq_remove_device() call. If the user
  *			has registered devfreq->nb at a notifier-head,
  *			this is the time to unregister it.
+ * @freq_table:	Optional list of frequencies to support statistics.
+ * @max_state:	The size of freq_table.
  */
 struct devfreq_dev_profile {
 	unsigned long initial_freq;
@@ -79,63 +83,63 @@
 	int (*target)(struct device *dev, unsigned long *freq, u32 flags);
 	int (*get_dev_status)(struct device *dev,
 			      struct devfreq_dev_status *stat);
+	int (*get_cur_freq)(struct device *dev, unsigned long *freq);
 	void (*exit)(struct device *dev);
+
+	unsigned int *freq_table;
+	unsigned int max_state;
 };
 
 /**
  * struct devfreq_governor - Devfreq policy governor
- * @name		Governor's name
- * @get_target_freq	Returns desired operating frequency for the device.
+ * @node:		list node - contains registered devfreq governors
+ * @name:		Governor's name
+ * @get_target_freq:	Returns desired operating frequency for the device.
  *			Basically, get_target_freq will run
  *			devfreq_dev_profile.get_dev_status() to get the
  *			status of the device (load = busy_time / total_time).
  *			If no_central_polling is set, this callback is called
  *			only with update_devfreq() notified by OPP.
- * @init		Called when the devfreq is being attached to a device
- * @exit		Called when the devfreq is being removed from a
- *			device. Governor should stop any internal routines
- *			before return because related data may be
- *			freed after exit().
- * @no_central_polling	Do not use devfreq's central polling mechanism.
- *			When this is set, devfreq will not call
- *			get_target_freq with devfreq_monitor(). However,
- *			devfreq will call get_target_freq with
- *			devfreq_update() notified by OPP framework.
+ * @event_handler:      Callback for devfreq core framework to notify events
+ *                      to governors. Events include per device governor
+ *                      init and exit, opp changes out of devfreq, suspend
+ *                      and resume of per device devfreq during device idle.
  *
  * Note that the callbacks are called with devfreq->lock locked by devfreq.
  */
 struct devfreq_governor {
+	struct list_head node;
+
 	const char name[DEVFREQ_NAME_LEN];
 	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
-	int (*init)(struct devfreq *this);
-	void (*exit)(struct devfreq *this);
-	const bool no_central_polling;
+	int (*event_handler)(struct devfreq *devfreq,
+				unsigned int event, void *data);
 };
 
 /**
  * struct devfreq - Device devfreq structure
- * @node	list node - contains the devices with devfreq that have been
+ * @node:	list node - contains the devices with devfreq that have been
  *		registered.
- * @lock	a mutex to protect accessing devfreq.
- * @dev		device registered by devfreq class. dev.parent is the device
+ * @lock:	a mutex to protect accessing devfreq.
+ * @dev:	device registered by devfreq class. dev.parent is the device
  *		using devfreq.
- * @profile	device-specific devfreq profile
- * @governor	method how to choose frequency based on the usage.
- * @nb		notifier block used to notify devfreq object that it should
+ * @profile:	device-specific devfreq profile
+ * @governor:	method how to choose frequency based on the usage.
+ * @governor_name:	devfreq governor name for use with this devfreq
+ * @nb:		notifier block used to notify devfreq object that it should
  *		reevaluate operable frequencies. Devfreq users may use
  *		devfreq.nb to the corresponding register notifier call chain.
- * @polling_jiffies	interval in jiffies.
- * @previous_freq	previously configured frequency value.
- * @next_polling	the number of remaining jiffies to poll with
- *			"devfreq_monitor" executions to reevaluate
- *			frequency/voltage of the device. Set by
- *			profile's polling_ms interval.
- * @data	Private data of the governor. The devfreq framework does not
+ * @work:	delayed work for load monitoring.
+ * @previous_freq:	previously configured frequency value.
+ * @data:	Private data of the governor. The devfreq framework does not
  *		touch this.
- * @being_removed	a flag to mark that this object is being removed in
- *			order to prevent trying to remove the object multiple times.
- * @min_freq	Limit minimum frequency requested by user (0: none)
- * @max_freq	Limit maximum frequency requested by user (0: none)
+ * @min_freq:	Limit minimum frequency requested by user (0: none)
+ * @max_freq:	Limit maximum frequency requested by user (0: none)
+ * @stop_polling:	 devfreq polling status of a device.
+ * @total_trans:	Number of devfreq transitions
+ * @trans_table:	Statistics of devfreq transitions
+ * @time_in_state:	Statistics of devfreq states
+ * @last_stat_updated:	The last time the statistics were updated
  *
  * This structure stores the devfreq information for a give device.
  *
@@ -152,26 +156,33 @@
 	struct device dev;
 	struct devfreq_dev_profile *profile;
 	const struct devfreq_governor *governor;
+	char governor_name[DEVFREQ_NAME_LEN];
 	struct notifier_block nb;
+	struct delayed_work work;
 
-	unsigned long polling_jiffies;
 	unsigned long previous_freq;
-	unsigned int next_polling;
 
 	void *data; /* private data for governors */
 
-	bool being_removed;
-
 	unsigned long min_freq;
 	unsigned long max_freq;
+	bool stop_polling;
+
+	/* information for device frequency transition */
+	unsigned int total_trans;
+	unsigned int *trans_table;
+	unsigned long *time_in_state;
+	unsigned long last_stat_updated;
 };
 
 #if defined(CONFIG_PM_DEVFREQ)
 extern struct devfreq *devfreq_add_device(struct device *dev,
 				  struct devfreq_dev_profile *profile,
-				  const struct devfreq_governor *governor,
+				  const char *governor_name,
 				  void *data);
 extern int devfreq_remove_device(struct devfreq *devfreq);
+extern int devfreq_suspend_device(struct devfreq *devfreq);
+extern int devfreq_resume_device(struct devfreq *devfreq);
 
 /* Helper functions for devfreq user device driver with OPP. */
 extern struct opp *devfreq_recommended_opp(struct device *dev,
@@ -181,23 +192,13 @@
 extern int devfreq_unregister_opp_notifier(struct device *dev,
 					   struct devfreq *devfreq);
 
-#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE
-extern const struct devfreq_governor devfreq_powersave;
-#endif
-#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE
-extern const struct devfreq_governor devfreq_performance;
-#endif
-#ifdef CONFIG_DEVFREQ_GOV_USERSPACE
-extern const struct devfreq_governor devfreq_userspace;
-#endif
 #ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND
-extern const struct devfreq_governor devfreq_simple_ondemand;
 /**
  * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
  *	and devfreq_add_device
- * @ upthreshold	If the load is over this value, the frequency jumps.
+ * @upthreshold:	If the load is over this value, the frequency jumps.
  *			Specify 0 to use the default. Valid value = 0 to 100.
- * @ downdifferential	If the load is under upthreshold - downdifferential,
+ * @downdifferential:	If the load is under upthreshold - downdifferential,
  *			the governor may consider slowing the frequency down.
  *			Specify 0 to use the default. Valid value = 0 to 100.
  *			downdifferential < upthreshold must hold.
@@ -214,7 +215,7 @@
 #else /* !CONFIG_PM_DEVFREQ */
 static struct devfreq *devfreq_add_device(struct device *dev,
 					  struct devfreq_dev_profile *profile,
-					  struct devfreq_governor *governor,
+					  const char *governor_name,
 					  void *data)
 {
 	return NULL;
@@ -225,6 +226,16 @@
 	return 0;
 }
 
+static int devfreq_suspend_device(struct devfreq *devfreq)
+{
+	return 0;
+}
+
+static int devfreq_resume_device(struct devfreq *devfreq)
+{
+	return 0;
+}
+
 static struct opp *devfreq_recommended_opp(struct device *dev,
 					   unsigned long *freq, u32 flags)
 {
@@ -243,11 +254,6 @@
 	return -EINVAL;
 }
 
-#define devfreq_powersave	NULL
-#define devfreq_performance	NULL
-#define devfreq_userspace	NULL
-#define devfreq_simple_ondemand	NULL
-
 #endif /* CONFIG_PM_DEVFREQ */
 
 #endif /* __LINUX_DEVFREQ_H__ */
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 2cea256..6e50578 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -189,28 +189,28 @@
 /* Events associated with each demux filter */
 enum dmx_event {
 	/* New PES packet is ready to be consumed */
-	DMX_EVENT_NEW_PES,
+	DMX_EVENT_NEW_PES = 0x00000001,
 
 	/* New section is ready to be consumed */
-	DMX_EVENT_NEW_SECTION,
+	DMX_EVENT_NEW_SECTION = 0x00000002,
 
 	/* New recording chunk is ready to be consumed */
-	DMX_EVENT_NEW_REC_CHUNK,
+	DMX_EVENT_NEW_REC_CHUNK = 0x00000004,
 
 	/* New PCR value is ready */
-	DMX_EVENT_NEW_PCR,
+	DMX_EVENT_NEW_PCR = 0x00000008,
 
 	/* Overflow */
-	DMX_EVENT_BUFFER_OVERFLOW,
+	DMX_EVENT_BUFFER_OVERFLOW = 0x00000010,
 
 	/* Section was dropped due to CRC error */
-	DMX_EVENT_SECTION_CRC_ERROR,
+	DMX_EVENT_SECTION_CRC_ERROR = 0x00000020,
 
 	/* End-of-stream, no more data from this filter */
-	DMX_EVENT_EOS,
+	DMX_EVENT_EOS = 0x00000040,
 
 	/* New Elementary Stream data is ready */
-	DMX_EVENT_NEW_ES_DATA
+	DMX_EVENT_NEW_ES_DATA = 0x00000080
 };
 
 /* Flags passed in filter events */
@@ -552,7 +552,6 @@
 	int handle;
 };
 
-
 struct dmx_decoder_buffers {
 	/*
 	 * Specify if linear buffer support is requested. If set, buffers_num
@@ -587,6 +586,35 @@
 	__u32 key_ladder_id;
 };
 
+struct dmx_events_mask {
+	/*
+	 * Bitmask of events to be disabled (dmx_event).
+	 * Disabled events will not be notified to the user.
+	 * By default all events are enabled except for
+	 * DMX_EVENT_NEW_ES_DATA.
+	 * Overflow event can't be disabled.
+	 */
+	__u32 disable_mask;
+
+	/*
+	 * Bitmask of events that will not wake up the user
+	 * when the user calls poll with the POLLPRI flag.
+	 * Events used as a wake-up source must not be set in
+	 * disable_mask; otherwise they cannot act as a
+	 * wake-up source.
+	 * By default all enabled events are set as wake-up events.
+	 * The overflow event cannot be disabled as a wake-up source.
+	 */
+	__u32 no_wakeup_mask;
+
+	/*
+	 * Number of ready wake-up events required to trigger
+	 * a wake-up when the user calls poll with the POLLPRI flag.
+	 * The default is 1.
+	 */
+	__u32 wakeup_threshold;
+};
+
 #define DMX_START                _IO('o', 41)
 #define DMX_STOP                 _IO('o', 42)
 #define DMX_SET_FILTER           _IOW('o', 43, struct dmx_sct_filter_params)
@@ -611,6 +639,8 @@
 #define DMX_SET_DECODER_BUFFER	 _IOW('o', 63, struct dmx_decoder_buffers)
 #define DMX_REUSE_DECODER_BUFFER _IO('o', 64)
 #define DMX_SET_SECURE_MODE	_IOW('o', 65, struct dmx_secure_mode)
+#define DMX_SET_EVENTS_MASK	_IOW('o', 66, struct dmx_events_mask)
+#define DMX_GET_EVENTS_MASK	_IOR('o', 67, struct dmx_events_mask)
 
 
 #endif /*_DVBDMX_H_*/
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 4e62b4f..b7d393f 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -20,6 +20,8 @@
 #define KGSL_CONTEXT_TRASH_STATE	0x00000020
 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
 #define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
+#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
+
 
 #define KGSL_CONTEXT_INVALID 0xffffffff
 
diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h
index cc864f0..35279bf 100644
--- a/include/linux/msm_vidc_dec.h
+++ b/include/linux/msm_vidc_dec.h
@@ -282,7 +282,8 @@
 	VDEC_CODECTYPE_MPEG1 = 0x9,
 	VDEC_CODECTYPE_MPEG2 = 0xa,
 	VDEC_CODECTYPE_VC1 = 0xb,
-	VDEC_CODECTYPE_VC1_RCV = 0xc
+	VDEC_CODECTYPE_VC1_RCV = 0xc,
+	VDEC_CODECTYPE_HEVC = 0xd,
 };
 
 enum vdec_mpeg2_profile {
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 2a4e5fa..214e0eb 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -48,6 +48,14 @@
 
 struct srcu_notifier_head *opp_get_notifier(struct device *dev);
 
+#ifdef CONFIG_OF
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_OF */
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index a1d0445..74b09cb 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -402,6 +402,7 @@
 #define V4L2_PIX_FMT_DIVX_311  v4l2_fourcc('D', 'I', 'V', '3') /* DIVX311     */
 #define V4L2_PIX_FMT_DIVX      v4l2_fourcc('D', 'I', 'V', 'X') /* DIVX        */
 #define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* ON2 VP8 stream */
+#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* for HEVC stream */
 
 /*  Vendor-specific formats   */
 #define V4L2_PIX_FMT_CPIA1    v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -1851,6 +1852,8 @@
 };
 #define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_GOB		\
 		(V4L2_CID_MPEG_MSM_VIDC_BASE+27)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE	\
+	(V4L2_CID_MPEG_MSM_VIDC_BASE+28)
 /*  Camera class control IDs */
 #define V4L2_CID_CAMERA_CLASS_BASE 	(V4L2_CTRL_CLASS_CAMERA | 0x900)
 #define V4L2_CID_CAMERA_CLASS 		(V4L2_CTRL_CLASS_CAMERA | 1)
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index c53d604..f632ad6 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -8,6 +8,7 @@
 
 enum core_id {
 	MSM_VIDC_CORE_0 = 0,
+	MSM_VIDC_CORE_1,      /* for Q6 core */
 	MSM_VIDC_CORES_MAX,
 };
 
@@ -45,6 +46,7 @@
 int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
 int msm_vidc_wait(void *instance);
 int msm_vidc_s_parm(void *instance, struct v4l2_streamparm *a);
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
 #endif
 struct msm_vidc_interlace_payload {
 	unsigned int format;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ae34bf5..d1e73a4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -61,6 +61,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
 	.clock_base =
 	{
 		{
@@ -1619,8 +1620,6 @@
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	raw_spin_lock_init(&cpu_base->lock);
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 		timerqueue_init_head(&cpu_base->clock_base[i].active);
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index 6943ba0..2b4e21f 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -41,8 +41,9 @@
 #define MSM_SLIM_0_RX_MAX_CHANNELS		2
 #define MSM_SLIM_0_TX_MAX_CHANNELS		4
 
-#define BTSCO_RATE_8KHZ 8000
-#define BTSCO_RATE_16KHZ 16000
+#define SAMPLE_RATE_8KHZ 8000
+#define SAMPLE_RATE_16KHZ 16000
+#define SAMPLE_RATE_48KHZ 48000
 
 #define BOTTOM_SPK_AMP_POS	0x1
 #define BOTTOM_SPK_AMP_NEG	0x2
@@ -67,6 +68,7 @@
 enum {
 	SLIM_1_RX_1 = 145, /* BT-SCO and USB TX */
 	SLIM_1_TX_1 = 146, /* BT-SCO and USB RX */
+	SLIM_1_TX_2 = 147, /* USB RX */
 	SLIM_3_RX_1 = 151, /* External echo-cancellation ref */
 	SLIM_3_RX_2 = 152, /* External echo-cancellation ref */
 	SLIM_3_TX_1 = 153, /* HDMI RX */
@@ -90,8 +92,11 @@
 static int msm_slim_0_tx_ch = 1;
 static int msm_slim_3_rx_ch = 1;
 
-static int msm_btsco_rate = BTSCO_RATE_8KHZ;
+static int msm_slim_1_rate = SAMPLE_RATE_8KHZ;
 static int msm_btsco_ch = 1;
+static int msm_slim_1_rx_ch = 1;
+static int msm_slim_1_tx_ch = 1;
+
 static int hdmi_rate_variable;
 static int rec_mode = INCALL_REC_MONO;
 
@@ -651,11 +656,14 @@
 	SOC_ENUM_SINGLE_EXT(2, hdmi_rate),
 };
 
-static const char *btsco_rate_text[] = {"8000", "16000"};
-static const struct soc_enum msm_btsco_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
+static const char * const slim1_rate_text[] = {"8000", "16000", "48000"};
+static const struct soc_enum msm_slim_1_rate_enum[] = {
+	SOC_ENUM_SINGLE_EXT(3, slim1_rate_text),
 };
-
+static const char * const slim1_tx_ch_text[] = {"One", "Two"};
+static const struct soc_enum msm_slim_1_tx_ch_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, slim1_tx_ch_text),
+};
 static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
@@ -671,7 +679,7 @@
 	msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1;
 
 	pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
-			msm_slim_0_rx_ch);
+		 msm_slim_0_rx_ch);
 	return 1;
 }
 
@@ -694,6 +702,27 @@
 	return 1;
 }
 
+static int msm_slim_1_tx_ch_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_slim_1_tx_ch  = %d\n", __func__,
+		 msm_slim_1_tx_ch);
+
+	ucontrol->value.integer.value[0] = msm_slim_1_tx_ch - 1;
+	return 0;
+}
+
+static int msm_slim_1_tx_ch_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	msm_slim_1_tx_ch = ucontrol->value.integer.value[0] + 1;
+
+	pr_debug("%s: msm_slim_1_tx_ch = %d\n", __func__,
+		 msm_slim_1_tx_ch);
+
+	return 1;
+}
+
 static int msm_slim_3_rx_ch_get(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
@@ -713,31 +742,35 @@
 	return 1;
 }
 
-static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
+static int msm_slim_1_rate_get(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
-	pr_debug("%s: msm_btsco_rate  = %d", __func__,
-					msm_btsco_rate);
-	ucontrol->value.integer.value[0] = msm_btsco_rate;
+	pr_debug("%s: msm_slim_1_rate  = %d", __func__,
+		 msm_slim_1_rate);
+
+	ucontrol->value.integer.value[0] = msm_slim_1_rate;
 	return 0;
 }
 
-static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
+static int msm_slim_1_rate_put(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
 	switch (ucontrol->value.integer.value[0]) {
 	case 8000:
-		msm_btsco_rate = BTSCO_RATE_8KHZ;
+		msm_slim_1_rate = SAMPLE_RATE_8KHZ;
 		break;
 	case 16000:
-		msm_btsco_rate = BTSCO_RATE_16KHZ;
+		msm_slim_1_rate = SAMPLE_RATE_16KHZ;
+		break;
+	case 48000:
+		msm_slim_1_rate = SAMPLE_RATE_48KHZ;
 		break;
 	default:
-		msm_btsco_rate = BTSCO_RATE_8KHZ;
+		msm_slim_1_rate = SAMPLE_RATE_8KHZ;
 		break;
 	}
-	pr_debug("%s: msm_btsco_rate = %d\n", __func__,
-					msm_btsco_rate);
+	pr_debug("%s: msm_slim_1_rate = %d\n", __func__,
+		 msm_slim_1_rate);
 	return 0;
 }
 
@@ -780,8 +813,10 @@
 		msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
 	SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2],
 		msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
-	SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
-		msm_btsco_rate_get, msm_btsco_rate_put),
+	SOC_ENUM_EXT("SLIM_1_TX Channels", msm_slim_1_tx_ch_enum[0],
+		      msm_slim_1_tx_ch_get, msm_slim_1_tx_ch_put),
+	SOC_ENUM_EXT("SLIM_1 SampleRate", msm_slim_1_rate_enum[0],
+		      msm_slim_1_rate_get, msm_slim_1_rate_put),
 	SOC_SINGLE_EXT("Incall Rec Mode", SND_SOC_NOPM, 0, 1, 0,
 			msm_incall_rec_mode_get, msm_incall_rec_mode_put),
 	SOC_ENUM_EXT("SLIM_3_RX Channels", msm_enum[1],
@@ -1001,7 +1036,7 @@
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
-	unsigned int rx_ch = SLIM_1_RX_1, tx_ch = SLIM_1_TX_1;
+	unsigned int rx_ch = SLIM_1_RX_1, tx_ch[2] = {SLIM_1_TX_1, SLIM_1_TX_2};
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		pr_debug("%s: APQ BT/USB TX -> SLIMBUS_1_RX -> MDM TX shared ch %d\n",
@@ -1015,10 +1050,11 @@
 			goto end;
 		}
 	} else {
-		pr_debug("%s: MDM RX -> SLIMBUS_1_TX -> APQ BT/USB Rx shared ch %d\n",
-			__func__, tx_ch);
+		pr_debug("%s: MDM RX ->SLIMBUS_1_TX ->APQ BT/USB Rx shared ch %d %d\n",
+			  __func__, tx_ch[0], tx_ch[1]);
 
-		ret = snd_soc_dai_set_channel_map(cpu_dai, 1, &tx_ch, 0, 0);
+		ret = snd_soc_dai_set_channel_map(cpu_dai, msm_slim_1_tx_ch,
+						  tx_ch, 0, 0);
 		if (ret < 0) {
 			pr_err("%s: Erorr %d setting SLIM_1 TX channel map\n",
 				__func__, ret);
@@ -1358,16 +1394,46 @@
 					struct snd_pcm_hw_params *params)
 {
 	struct snd_interval *rate = hw_param_interval(params,
-					SNDRV_PCM_HW_PARAM_RATE);
+						      SNDRV_PCM_HW_PARAM_RATE);
 
 	struct snd_interval *channels = hw_param_interval(params,
-					SNDRV_PCM_HW_PARAM_CHANNELS);
+						SNDRV_PCM_HW_PARAM_CHANNELS);
 
-	rate->min = rate->max = msm_btsco_rate;
+	rate->min = rate->max = msm_slim_1_rate;
 	channels->min = channels->max = msm_btsco_ch;
 
 	return 0;
 }
+static int msm_slim_1_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+
+	struct snd_interval *channels = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	rate->min = rate->max = msm_slim_1_rate;
+	channels->min = channels->max = msm_slim_1_rx_ch;
+
+	return 0;
+}
+
+static int msm_slim_1_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+
+	struct snd_interval *channels = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	rate->min = rate->max = msm_slim_1_rate;
+	channels->min = channels->max = msm_slim_1_tx_ch;
+
+	return 0;
+}
+
 static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
 					struct snd_pcm_hw_params *params)
 {
@@ -1969,7 +2035,7 @@
 		.codec_dai_name = "msm-stub-rx",
 		.no_pcm = 1,
 		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
-		.be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
+		.be_hw_params_fixup = msm_slim_1_rx_be_hw_params_fixup,
 		.ops = &msm_slimbus_1_be_ops,
 		.ignore_pmdown_time = 1, /* this dainlink has playback support */
 
@@ -1983,7 +2049,7 @@
 		.codec_dai_name = "msm-stub-tx",
 		.no_pcm = 1,
 		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
-		.be_hw_params_fixup =  msm_btsco_be_hw_params_fixup,
+		.be_hw_params_fixup =  msm_slim_1_tx_be_hw_params_fixup,
 		.ops = &msm_slimbus_1_be_ops,
 	},
 	/* Ultrasound TX Back End DAI Link */
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index 328e287..4dc6505 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -86,6 +86,7 @@
 static struct mutex cdc_mclk_mutex;
 static struct clk *codec_clk;
 static int clk_users;
+static int vdd_spkr_gpio = -1;
 
 static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
 					bool dapm)
@@ -149,6 +150,30 @@
 	return 0;
 }
 
+static int msm8226_vdd_spkr_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	pr_debug("%s: event = %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (vdd_spkr_gpio >= 0) {
+			gpio_direction_output(vdd_spkr_gpio, 1);
+			pr_debug("%s: Enabled 5V external supply for speaker\n",
+					__func__);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (vdd_spkr_gpio >= 0) {
+			gpio_direction_output(vdd_spkr_gpio, 0);
+			pr_debug("%s: Disabled 5V external supply for speaker\n",
+					__func__);
+		}
+		break;
+	}
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget msm8226_dapm_widgets[] = {
 
 	SND_SOC_DAPM_SUPPLY("MCLK",  SND_SOC_NOPM, 0, 0,
@@ -165,6 +190,9 @@
 	SND_SOC_DAPM_MIC("Digital Mic4", NULL),
 	SND_SOC_DAPM_MIC("Digital Mic5", NULL),
 	SND_SOC_DAPM_MIC("Digital Mic6", NULL),
+
+	SND_SOC_DAPM_SUPPLY("EXT_VDD_SPKR",  SND_SOC_NOPM, 0, 0,
+	msm8226_vdd_spkr_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 };
 
 static const char *const slim0_rx_ch_text[] = {"One", "Two"};
@@ -1104,19 +1132,44 @@
 		goto err;
 	}
 
+	vdd_spkr_gpio = of_get_named_gpio(pdev->dev.of_node,
+				"qcom,cdc-vdd-spkr-gpios", 0);
+	if (vdd_spkr_gpio < 0) {
+		dev_err(&pdev->dev,
+			"Looking up %s property in node %s failed %d\n",
+			"qcom, cdc-vdd-spkr-gpios",
+			pdev->dev.of_node->full_name, vdd_spkr_gpio);
+	} else {
+		ret = gpio_request(vdd_spkr_gpio, "TAPAN_CODEC_VDD_SPKR");
+		if (ret) {
+			/* GPIO to enable EXT VDD exists, but failed request */
+			dev_err(card->dev,
+					"%s: Failed to request tapan vdd spkr gpio %d\n",
+					__func__, vdd_spkr_gpio);
+			goto err;
+		}
+	}
+
 	ret = msm8226_prepare_codec_mclk(card);
 	if (ret)
-		goto err;
+		goto err_vdd_spkr;
 
 	ret = snd_soc_register_card(card);
 	if (ret) {
 		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
 			ret);
-		goto err;
+		goto err_vdd_spkr;
 	}
 	mutex_init(&cdc_mclk_mutex);
 
 	return 0;
+
+err_vdd_spkr:
+	if (vdd_spkr_gpio >= 0) {
+		gpio_free(vdd_spkr_gpio);
+		vdd_spkr_gpio = -1;
+	}
+
 err:
 	if (pdata->mclk_gpio > 0) {
 		dev_dbg(&pdev->dev, "%s free gpio %d\n",
@@ -1134,6 +1187,8 @@
 	struct msm8226_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 
 	gpio_free(pdata->mclk_gpio);
+	gpio_free(vdd_spkr_gpio);
+	vdd_spkr_gpio = -1;
 	snd_soc_unregister_card(card);
 
 	return 0;
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 916be0b..5cf1b90 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -485,7 +485,7 @@
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-	case SNDRV_PCM_FMTBIT_SPECIAL:
+	case SNDRV_PCM_FORMAT_SPECIAL:
 		dai_data->port_config.i2s.bit_width = 16;
 		break;
 	case SNDRV_PCM_FORMAT_S24_LE:
@@ -561,7 +561,7 @@
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-	case SNDRV_PCM_FMTBIT_SPECIAL:
+	case SNDRV_PCM_FORMAT_SPECIAL:
 		dai_data->port_config.slim_sch.bit_width = 16;
 		break;
 	case SNDRV_PCM_FORMAT_S24_LE: