Merge "msm: mdss: add error check for iommu attach API"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
index 15b94ca..bbc9f08 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
@@ -88,6 +88,18 @@
 - <consumer_supply_name>-supply = <&phandle_of_regulator>: consumer_supply_name
 			is the name that's defined in thermal driver.
 			phandle_of_regulator is defined by regulator device tree.
+- qcom,default-temp:    Default CPU temperature limit for the SoC. This is an
+			optional property. If it is not defined, a full truth
+			table is required in qcom,efuse-temperature-map along
+			with valid efuse info in qcom,efuse-data; otherwise the
+			feature will be disabled.
+- qcom,efuse-data:      Efuse data used to identify the device part for the CPU
+			temperature limit recommendation. It expects the following
+			values, in order: efuse address, efuse size, row number to
+			be read, starting bit of the row used to identify the
+			device part, and the number of bits to read from the start
+			bit (bit mask).
+- qcom,efuse-temperature-map: Truth table of <efuse value, temperature> pairs
+			for different parts. If qcom,default-temp is defined,
+			only the pairs that deviate from the default temperature
+			need to be specified.
 
 Optional child nodes
 - qti,pmic-opt-curr-temp: Threshold temperature for requesting optimum current (request
@@ -147,6 +159,9 @@
 		qti,pmic-opt-curr-temp-hysteresis = <10>;
 		qti,pmic-opt-curr-regs = "vdd-dig";
 		vdd-dig-supply=<&pm8841_s2_floor_corner>
+		qcom,default-temp = <80>;
+		qcom,efuse-data = <0xfc4b8000 0x1000 23 30 0x3>;
+		qcom,efuse-temperature-map = <0x0 80>, <0x1 70>, <0x2 80>, <0x3 80>;
 
 		qcom,vdd-dig-rstr{
 			qcom,vdd-rstr-reg = "vdd-dig";
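
For reference, an annotated sketch of the three new efuse properties, using the
values from the example above (the field breakdown follows the property
descriptions and is illustrative only):

		qcom,default-temp = <80>;	/* default CPU temp limit */
		/* <efuse address, efuse size, row, start bit, bit mask> */
		qcom,efuse-data = <0xfc4b8000 0x1000 23 30 0x3>;
		/* only pairs deviating from qcom,default-temp are needed */
		qcom,efuse-temperature-map = <0x1 70>;
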
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index f6b4923..3144045 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -172,6 +172,8 @@
 					horizontal back porch (HBP) blanking period.
 - qcom,mdss-dsi-hsa-power-mode:		Boolean to determine DSI lane state during
 					horizontal sync active (HSA) mode.
+- qcom,mdss-dsi-last-line-interleave:	Boolean to determine whether the last
+					line interleave flag needs to be enabled.
 - qcom,mdss-dsi-bllp-eof-power-mode:	Boolean to determine DSI lane state during
 					blanking low power period (BLLP) EOF mode.
 - qcom,mdss-dsi-bllp-power-mode:	Boolean to determine DSI lane state during
@@ -347,6 +349,7 @@
 		qcom,mdss-dsi-hsa-power-mode;
 		qcom,mdss-dsi-bllp-eof-power-mode;
 		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-last-line-interleave;
 		qcom,mdss-dsi-traffic-mode = <0>;
 		qcom,mdss-dsi-virtual-channel-id = <0>;
 		qcom,mdss-dsi-color-order = <0>;
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
index 749c594..1aca300 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -52,13 +52,15 @@
 RGB Led is a tri-colored led, Red, Blue & Green.
 
 Required properties for RGB led:
-- qcom,mode: mode the led should operate in, options "pwm" and "lpg"
-- qcom,pwm-channel: pwm channel the led will operate on
+- qcom,mode: mode the led should operate in; options are "pwm" and "lpg". "manual" mode is not supported for the RGB led.
 
 Required properties for PWM mode only:
+- qcom,pwm-channel: pwm channel the led will operate on
 - qcom,pwm-us: time the pwm device will modulate at (us)
 
 Required properties for LPG mode only:
+- qcom,pwm-channel: pwm channel the led will operate on
+- qcom,pwm-us: time the pwm device will modulate at (us)
 - qcom,duty-pcts: array of values for duty cycle to go through
 - qcom,start-idx: starting point duty-pcts array
 
@@ -93,6 +95,7 @@
 
 Required properties for LPG mode only:
 - qcom,pwm-channel: pwm channel the led will operate on
+- qcom,pwm-us: time the pwm device will modulate at (us)
 - qcom,duty-pcts: array of values for duty cycle to go through
 - qcom,start-idx: starting point duty-pcts array
 
@@ -106,9 +109,7 @@
 and the required rows are enabled by specifying values in the properties.
 
 Required properties for keypad backlight:
-- qcom,mode: mode the led should operate in, options "pwm" and "lpg"
-- qcom,pwm-channel: pwm channel the led will operate on
-- qcom,pwm-us: time the pwm device will modulate at (us)
+- qcom,mode: mode the led should operate in; options are "pwm" and "lpg". "manual" mode is not supported for the keypad backlight.
 - qcom,row-id: specify the id of the row. Supported values are 0 to 3.
 
 Optional properties for keypad backlight:
@@ -118,9 +119,12 @@
 - qcom,always-on: specify if the module has to be always on
 
 Required properties for PWM mode only:
+- qcom,pwm-channel: pwm channel the led will operate on
 - qcom,pwm-us: time the pwm device will modulate at (us)
 
 Required properties for LPG mode only:
+- qcom,pwm-channel: pwm channel the led will operate on
+- qcom,pwm-us: time the pwm device will modulate at (us)
 - qcom,duty-pcts: array of values for duty cycle to go through
 - qcom,start-idx: starting point duty-pcts array
 
@@ -201,6 +205,7 @@
 			linux,name = "led:rgb_green";
 			qcom,mode = "lpg";
 			qcom,pwm-channel = <5>;
+			qcom,pwm-us = <1000>;
 			qcom,duty-ms = <20>;
 			qcom,start-idx = <1>;
 			qcom,idx-len = <10>;
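
The example above uses LPG mode; in PWM mode the per-mode required properties
reduce to the channel and the modulation period. A minimal fragment (common LED
properties omitted, values illustrative):

			qcom,mode = "pwm";
			qcom,pwm-channel = <4>;	/* pwm channel the led runs on */
			qcom,pwm-us = <1000>;	/* modulation period in us */
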
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index f256d78..da54138 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -171,6 +171,22 @@
     - 0 -> MASTER 0
     - 1 -> MASTER 1
 
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+    actuator
+    - "cam_vaf"
+- qcom,cam-vreg-type : should contain regulator type for regulators mentioned in
+    qcom,cam-vreg-name property (in the same order)
+    - 0 for LDO and 1 for LVS
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+   for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+   for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+   required from the regulators mentioned in the qcom,cam-vreg-name property
+   (in the same order).
+- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+
 Example:
 
    qcom,cci@0xfda0c000 {
@@ -200,8 +216,14 @@
         actuator0: qcom,actuator@18 {
                 cell-index = <0>;
                 reg = <0x18>;
-                compatible = "qcom,actuator";
-                qcom,cci-master = <0>;
+		compatible = "qcom,actuator";
+		qcom,cci-master = <0>;
+		cam_vaf-supply = <&pm8941_l23>;
+		qcom,cam-vreg-name = "cam_vaf";
+		qcom,cam-vreg-type = <0>;
+		qcom,cam-vreg-min-voltage = <3000000>;
+		qcom,cam-vreg-max-voltage = <3000000>;
+		qcom,cam-vreg-op-mode = <100000>;
         };
 
        qcom,s5k3l1yx@6e {
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
index 32f6a24..3ee245e 100644
--- a/Documentation/devicetree/bindings/memory.txt
+++ b/Documentation/devicetree/bindings/memory.txt
@@ -36,6 +36,7 @@
 	reg = <(baseaddr) (size)>;
 	(linux,contiguous-region);
 	(linux,default-contiguous-region);
+	(linux,reserve-region);
 	(linux,memory-limit);
         label = (unique_name);
 };
@@ -49,6 +50,9 @@
 linux,default-contiguous-region: property indicating that the region
 		is the default region for all contiguous memory
 		allocations, Linux specific (optional)
+linux,reserve-region: property indicating that the contiguous memory will
+		not be given back to the system allocator. The memory will
+		always be available for contiguous use.
 linux,memory-limit: property specifying an upper bound on the physical address
 		of the region if the region is placed dynamically. If no limit
 		is specified, the region may be placed anywhere in the physical
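
As a sketch, a reserved contiguous region using the new flag would follow the
node template above (node name, base address and size are illustrative):

	reserved_region: region@0 {
		reg = <0x0 0x400000>;
		linux,contiguous-region;
		linux,reserve-region;
		label = "reserved_region";
	};
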
diff --git a/Documentation/devicetree/bindings/sound/voice-svc.txt b/Documentation/devicetree/bindings/sound/voice-svc.txt
index deca7f5..bb8649f 100644
--- a/Documentation/devicetree/bindings/sound/voice-svc.txt
+++ b/Documentation/devicetree/bindings/sound/voice-svc.txt
@@ -1,4 +1,6 @@
 * Voice Service binding
+Provides an interface to access the voice services
+exposed by the DSP over the APR interface.
 
 Required properties:
 - compatible : "qcom,msm-voice-svc"
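
A minimal node for this binding, matching the one added to msm8974.dtsi later
in this patch:

	qcom,msm-voice-svc {
		compatible = "qcom,msm-voice-svc";
	};
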
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
index 5201827..8eb9b64 100644
--- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -27,6 +27,15 @@
 should be performed during boot up.
 - qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value,
 using a smaller count for this buffer will reduce the memory usage.
+- qcom,wcnss-pm : <Core rail LDO#, PA rail LDO#, XO settling time,
+                   RPM power collapse enabled, standalone power collapse enabled>
+        Power management parameters for LDO configuration. Example values:
+        11     -  WCN CORE rail LDO number
+        21     -  WCN PA rail LDO number
+        1200   -  WCN XO settling time (usec)
+        1      -  WCN RPM power collapse enabled
+        1      -  WCN standalone power collapse enabled
+
 
 Example:
 
@@ -51,4 +60,5 @@
         qcom,has-48mhz-xo;
         qcom,has-pronto-hw;
         qcom,wcnss-adc_tm = <&pm8226_adc_tm>;
+	qcom,wcnss-pm = <11 21 1200 1 1>;
     };
diff --git a/arch/arm/boot/dts/msm8226-mdss.dtsi b/arch/arm/boot/dts/msm8226-mdss.dtsi
index 5776926..cedec5f 100644
--- a/arch/arm/boot/dts/msm8226-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss.dtsi
@@ -76,7 +76,13 @@
 				     <0x0D8 0x00000707>,
 				     <0x124 0x00000003>;
 		qcom,mdp-settings = <0x02E0 0x000000A5>,
-				    <0x02E4 0x00000055>;
+				    <0x02E4 0x00000055>,
+				    <0x03AC 0xC0000CCC>,
+				    <0x03B4 0xC0000000>,
+				    <0x03BC 0x00C00C00>,
+				    <0x04A8 0x0CCCC000>,
+				    <0x04B0 0xC0000000>,
+				    <0x04B8 0xC0000000>;
 
 		/* buffer parameters to calculate prefill bandwidth */
 		qcom,mdss-prefill-outstanding-buffer-bytes = <1024>;
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index 14fe237..787c9fb 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -80,11 +80,18 @@
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
 			<1190400000 7>;
 		qcom,speed2-bin-v2 =
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
+			<1190400000 7>,
+			<1305600000 8>,
+			<1344000000 9>,
 			<1401600000 10>;
 		qcom,speed1-bin-v2 =
 			<         0 0>,
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 90dae06..d243b78 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -895,6 +895,9 @@
 		qcom,hotplug-temp-hysteresis = <20>;
 		qcom,cpu-sensors = "tsens_tz_sensor5", "tsens_tz_sensor5",
 				"tsens_tz_sensor5", "tsens_tz_sensor5";
+		qcom,default-temp = <80>;
+		qcom,efuse-data = <0xfc4b8000 0x1000 23 30 0x3>;
+		qcom,efuse-temperature-map = <0x1 70>;
 		qcom,vdd-restriction-temp = <5>;
 		qcom,vdd-restriction-temp-hysteresis = <10>;
 		vdd-dig-supply = <&pm8110_s1_floor_corner>;
diff --git a/arch/arm/boot/dts/msm8926.dtsi b/arch/arm/boot/dts/msm8926.dtsi
index 394f4a9..1919022 100644
--- a/arch/arm/boot/dts/msm8926.dtsi
+++ b/arch/arm/boot/dts/msm8926.dtsi
@@ -53,11 +53,18 @@
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
 			<1190400000 7>;
 		qcom,speed2-bin-v1 =
 			<         0 0>,
 			< 384000000 2>,
 			< 787200000 4>,
+			< 998400000 5>,
+			<1094400000 6>,
+			<1190400000 7>,
+			<1305600000 8>,
+			<1344000000 9>,
 			<1401600000 10>;
 		qcom,speed1-bin-v1 =
 			<         0 0>,
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index d83a235..3fef47a 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -35,6 +35,7 @@
 		qcom,mdss-ab-factor = <2 1>;		/* 2 times    */
 		qcom,mdss-ib-factor = <6 5>;		/* 1.2 times  */
 		qcom,mdss-clk-factor = <5 4>;		/* 1.25 times */
+		qcom,mdss-ib-factor-overlap = <7 4>;	/* 1.75 times  */
 
 		qcom,max-clk-rate = <320000000>;
 		qcom,mdss-pipe-vig-off = <0x00001200 0x00001600
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index e0f2ef2..4eb26a2 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -335,7 +335,7 @@
 		qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 
 		qcom,msm-bus,name = "sdcc1";
-		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-cases = <9>;
 		qcom,msm-bus,num-paths = <1>;
 		qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
 				<78 512 1600 3200>,    /* 400 KB/s*/
@@ -344,8 +344,11 @@
 				<78 512 200000 400000>, /* 50 MB/s */
 				<78 512 400000 800000>, /* 100 MB/s */
 				<78 512 800000 1600000>, /* 200 MB/s */
+				<78 512 800000 1600000>, /* 400 MB/s */
 				<78 512 2048000 4096000>; /* Max. bandwidth */
-		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+						100000000 200000000 400000000
+						4294967295>;
 		qcom,dat1-mpm-int = <42>;
 		status = "disable";
 	};
@@ -498,7 +501,7 @@
 		qcom,cpu-dma-latency-us = <200>;
 
 		qcom,msm-bus,name = "sdhc1";
-		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-cases = <9>;
 		qcom,msm-bus,num-paths = <1>;
 		qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
 				<78 512 1600 3200>,    /* 400 KB/s*/
@@ -507,8 +510,11 @@
 				<78 512 200000 400000>, /* 50 MB/s */
 				<78 512 400000 800000>, /* 100 MB/s */
 				<78 512 800000 1600000>, /* 200 MB/s */
+				<78 512 800000 1600000>, /* 400 MB/s */
 				<78 512 2048000 4096000>; /* Max. bandwidth */
-		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+						100000000 200000000 400000000
+						4294967295>;
 		qcom,dat1-mpm-int = <42>;
 		status = "disable";
 	};
@@ -1732,6 +1738,10 @@
 		compatible = "qti,msm-pcm-loopback";
 	};
 
+	qcom,msm-voice-svc {
+		compatible = "qcom,msm-voice-svc";
+	};
+
 	qcom,msm-dai-q6 {
 		compatible = "qcom,msm-dai-q6";
 		qcom,msm-dai-q6-sb-0-rx {
diff --git a/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts b/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts
index 5a01945..b9f2574 100644
--- a/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ab-pm8941-cdp.dts
@@ -25,3 +25,8 @@
 	qcom,pad-pull-on = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 	qcom,pad-pull-off = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 };
+
+&ehci {
+	hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+	qcom,vdd-voltage-level = <1 2 3 5 7>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
index f80551e..99c4ec7 100644
--- a/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
@@ -26,3 +26,10 @@
 	qcom,pad-pull-off = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
 };
+
+&ehci {
+	status = "ok";
+	qcom,usb2-enable-uicc;
+	hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+	qcom,vdd-voltage-level = <1 2 3 5 7>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-ab-pm8941.dtsi
index a44bc56..d0bf90a 100644
--- a/arch/arm/boot/dts/msm8974pro-ab-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-ab-pm8941.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,3 +22,29 @@
 		      <217 0x10000>,
 		      <218 0x10000>;
 };
+
+&soc {
+	i2c@f9928000 { /* BLSP1 QUP6 */
+		nfc-nci@e {
+			compatible = "qcom,nfc-nci";
+			reg = <0x0e>;
+			qcom,irq-gpio = <&msmgpio 57 0x00>;
+			qcom,dis-gpio = <&msmgpio 13 0x00>;
+			qcom,clk-src = "BBCLK2";
+			interrupt-parent = <&msmgpio>;
+			interrupts = <57 0>;
+			qcom,clk-gpio = <&pm8941_gpios 32 0>;
+		};
+	};
+};
+
+&pm8941_gpios {
+	gpio@df00 {
+		/* NFC clk request */
+		qcom,mode = <0>;                /* QPNP_PIN_MODE_DIG_IN */
+		qcom,pull = <5>;                /* QPNP_PIN_PULL_NO */
+		qcom,vin-sel = <2>;             /* QPNP_PIN_VIN2 */
+		qcom,src-sel = <2>;             /* QPNP_PIN_SEL_FUNC_1 */
+		qcom,master-en = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts
index 76d0121..a25b8ca 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ac-pm8941-mtp.dts
@@ -25,3 +25,8 @@
 	qcom,pad-pull-on = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 	qcom,pad-pull-off = <0x0 0x3 0x3 0x1>; /* no-pull, pull-up, pull-up, pull-down */
 };
+
+&ehci {
+	status = "ok";
+	qcom,usb2-enable-uicc;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts
index 8a4ad45..0fe8813 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941-mtp.dts
@@ -120,3 +120,8 @@
 		qcom,thermal-node;
 	};
 };
+
+&ehci {
+	status = "ok";
+	qcom,usb2-enable-uicc;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
index 89939e6..b502078 100644
--- a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
@@ -71,6 +71,11 @@
 	qcom,utmi-clk-rate = <24000000>;
 };
 
+&ehci {
+	hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+	qcom,vdd-voltage-level = <1 2 3 5 7>;
+};
+
 &krait_regulator_pmic {
 	status = "ok";
 
diff --git a/arch/arm/boot/dts/msm8974pro-pma8084.dtsi b/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
index ab4ffb5..28ddda2 100644
--- a/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
@@ -118,6 +118,8 @@
 		HSUSB_VDDCX-supply = <&pma8084_s2>;
 		HSUSB_1p8-supply = <&pma8084_l6>;
 		HSUSB_3p3-supply = <&pma8084_l24>;
+		hsusb_vdd_dig-supply = <&pma8084_s2_corner>;
+		qcom,vdd-voltage-level = <1 2 3 5 7>;
 	};
 
 	qcom,gdsc@fd8c4024 {
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index d398f72..bb4c619 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -1593,12 +1593,12 @@
 		<26 512 0 3680000>, <89 604 0 6224000>,
 		/* Nominal / Nominal */
 		<26 512 0 4912000>, <89 604 0 6224000>,
-		/* low Turbo / Nominal */
-		<26 512 0 6400000>, <89 604 0 6224000>,
+		/* Turbo / Nominal */
+		<26 512 0 7464000>, <89 604 0 6224000>,
+		/* low Nominal / low Turbo */
+		<26 512 0 3680000>, <89 604 0 7398000>,
 		/* Nominal / low Turbo */
 		<26 512 0 4912000>, <89 604 0 7398000>,
-		/* low Turbo / low Turbo */
-		<26 512 0 6400000>, <89 604 0 7398000>,
 		/* Turbo / low Turbo */
 		<26 512 0 7464000>, <89 604 0 7398000>,
 		/* Nominal / Turbo */
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
index 1fb5fd3..1c338f7 100644
--- a/arch/arm/kernel/perf_event_msm_krait.c
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, 2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -454,37 +454,29 @@
 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
 
-	/*
-	 * Set event (if destined for PMNx counters)
-	 * We don't need to set the event if it's a cycle count
-	 */
-	if (idx != ARMV7_IDX_CYCLE_COUNTER) {
-		val = hwc->config_base;
-		val &= KRAIT_EVENT_MASK;
+	val = hwc->config_base;
+	val &= KRAIT_EVENT_MASK;
 
-		if (val < 0x40) {
-			armv7_pmnc_write_evtsel(idx, hwc->config_base);
-		} else {
-			event = get_krait_evtinfo(val, &evtinfo);
+	/* set event for ARM-architected events, and filter for CC */
+	if ((val < 0x40) || (idx == ARMV7_IDX_CYCLE_COUNTER)) {
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+	} else {
+		event = get_krait_evtinfo(val, &evtinfo);
 
-			if (event == -EINVAL)
-				goto krait_out;
+		if (event == -EINVAL)
+			goto krait_out;
 
-			/* Restore Mode-exclusion bits */
-			event |= (hwc->config_base & KRAIT_MODE_EXCL_MASK);
+		/* Restore Mode-exclusion bits */
+		event |= (hwc->config_base & KRAIT_MODE_EXCL_MASK);
 
-			/*
-			 * Set event (if destined for PMNx counters)
-			 * We don't need to set the event if it's a cycle count
-			 */
-			armv7_pmnc_write_evtsel(idx, event);
-			val = 0x0;
-			asm volatile("mcr p15, 0, %0, c9, c15, 0" : :
-				"r" (val));
-			val = evtinfo.group_setval;
-			gr = evtinfo.groupcode;
-			krait_evt_setup(gr, val, evtinfo.armv7_evt_type);
-		}
+		/* Set event (if destined for PMNx counters) */
+		armv7_pmnc_write_evtsel(idx, event);
+		val = 0x0;
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : :
+			"r" (val));
+		val = evtinfo.group_setval;
+		gr = evtinfo.groupcode;
+		krait_evt_setup(gr, val, evtinfo.armv7_evt_type);
 	}
 
 	/* Enable interrupt for this counter */
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 6b551cd..3721809 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1837,22 +1837,6 @@
 	  Support for receiving handset events like headset detect,
 	  headset switch and clamshell state.
 
-config MSM_RMT_STORAGE_CLIENT
-	depends on (ARCH_MSM && MSM_ONCRPCROUTER)
-	default n
-	bool "Remote Storage RPC client"
-	help
-	  Provide RPC mechanism for remote processors to access storage
-	  device on apps processor.
-
-config MSM_RMT_STORAGE_CLIENT_STATS
-	depends on (MSM_RMT_STORAGE_CLIENT && DEBUG_FS)
-	default n
-	bool "Remote storage RPC client performance statistics"
-	help
-	  Collects performance statistics and shows this information
-	  through a debugfs file rmt_storage_stats.
-
 config MSM_DALRPC
 	bool "DAL RPC support"
 	default n
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 5f7f0d9..c945b10 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -314,7 +314,6 @@
 
 obj-$(CONFIG_HTC_PWRSINK) += htc_pwrsink.o
 obj-$(CONFIG_HTC_HEADSET) += htc_headset.o
-obj-$(CONFIG_MSM_RMT_STORAGE_CLIENT) += rmt_storage_client.o
 obj-$(CONFIG_MSM_RPM) += rpm.o rpm_resources.o
 obj-$(CONFIG_MSM_LPM_TEST) += test-lpm.o
 obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o lpm_levels.o
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 7b13bbc..a923846 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -149,8 +149,6 @@
 			"msm-tsens", NULL),
 	OF_DEV_AUXDATA("qcom,qcedev", 0xFD440000, \
 			"qcedev.0", NULL),
-	OF_DEV_AUXDATA("qcom,qcrypto", 0xFD440000, \
-			"qcrypto.0", NULL),
 	OF_DEV_AUXDATA("qcom,hsic-host", 0xF9A00000, \
 			"msm_hsic_host", NULL),
 	OF_DEV_AUXDATA("qcom,hsic-smsc-hub", 0, "msm_smsc_hub",
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 7a7c008..e16bcb1 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -3497,8 +3497,36 @@
 					"fda08400.qcom,csid"),
 
 	/* ISPIF clocks */
+
 	CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
-		"fda0a000.qcom,ispif"),
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_clk", camss_csi0_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c,
+					"fda0a000.qcom,ispif"),
+
+	CLK_LOOKUP("csi1_ahb_clk", camss_csi1_ahb_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi1_src_clk", csi1_clk_src.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi1_phy_clk", camss_csi1phy_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi1_clk", camss_csi1_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi1_pix_clk", camss_csi1pix_clk.c,
+					"fda0a000.qcom,ispif"),
+	CLK_LOOKUP("csi1_rdi_clk", camss_csi1rdi_clk.c,
+					"fda0a000.qcom,ispif"),
 	CLK_LOOKUP("camss_vfe_vfe_clk", camss_vfe_vfe0_clk.c,
 		"fda0a000.qcom,ispif"),
 	CLK_LOOKUP("camss_csi_vfe_clk", camss_csi_vfe0_clk.c,
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index b730091..1847bf4 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4976,10 +4976,16 @@
 	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c,     "qcedev.0"),
 	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,         "qcedev.0"),
 
-	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "qcrypto.0"),
-	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "qcrypto.0"),
-	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "qcrypto.0"),
-	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "qcrypto.0"),
+
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "fd440000.qcom,qcrypto"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "fd440000.qcom,qcrypto"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "fd440000.qcom,qcrypto"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "fd440000.qcom,qcrypto"),
+
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "fd440000.qcom,qcrypto1"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "fd440000.qcom,qcrypto1"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "fd440000.qcom,qcrypto1"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "fd440000.qcom,qcrypto1"),
 
 	CLK_LOOKUP("core_clk",     gcc_ce1_clk.c,         "qseecom"),
 	CLK_LOOKUP("iface_clk",    gcc_ce1_ahb_clk.c,     "qseecom"),
diff --git a/arch/arm/mach-msm/cpufreq.c b/arch/arm/mach-msm/cpufreq.c
index c81720e..a5240cc 100644
--- a/arch/arm/mach-msm/cpufreq.c
+++ b/arch/arm/mach-msm/cpufreq.c
@@ -329,24 +329,40 @@
 	 * before the CPU is brought up.
 	 */
 	case CPU_DEAD:
-	case CPU_UP_CANCELED:
 		if (is_clk) {
 			clk_disable_unprepare(cpu_clk[cpu]);
 			clk_disable_unprepare(l2_clk);
 			update_l2_bw(NULL);
 		}
 		break;
+	case CPU_UP_CANCELED:
+		if (is_clk) {
+			clk_unprepare(cpu_clk[cpu]);
+			clk_unprepare(l2_clk);
+			update_l2_bw(NULL);
+		}
+		break;
 	case CPU_UP_PREPARE:
 		if (is_clk) {
-			rc = clk_prepare_enable(l2_clk);
+			rc = clk_prepare(l2_clk);
 			if (rc < 0)
 				return NOTIFY_BAD;
-			rc = clk_prepare_enable(cpu_clk[cpu]);
+			rc = clk_prepare(cpu_clk[cpu]);
 			if (rc < 0)
 				return NOTIFY_BAD;
 			update_l2_bw(&cpu);
 		}
 		break;
+	case CPU_STARTING:
+		if (is_clk) {
+			rc = clk_enable(l2_clk);
+			if (rc < 0)
+				return NOTIFY_BAD;
+			rc = clk_enable(cpu_clk[cpu]);
+			if (rc < 0)
+				return NOTIFY_BAD;
+		}
+		break;
 	default:
 		break;
 	}
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 00aedb6..c94fa0e 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -206,7 +206,7 @@
 int check_notifier(int);
 const char *get_name(int);
 int get_tz_id(int);
-int ocmem_enable_sec_program(int);
+int ocmem_restore_sec_program(int);
 int ocmem_enable_dump(enum ocmem_client, unsigned long, unsigned long);
 int ocmem_disable_dump(enum ocmem_client, unsigned long, unsigned long);
 int check_id(int);
diff --git a/arch/arm/mach-msm/include/mach/qdsp6v2/apr_us.h b/arch/arm/mach-msm/include/mach/qdsp6v2/apr_us.h
index 661d496..8de3d60 100644
--- a/arch/arm/mach-msm/include/mach/qdsp6v2/apr_us.h
+++ b/arch/arm/mach-msm/include/mach/qdsp6v2/apr_us.h
@@ -45,6 +45,28 @@
 
 #define USM_STREAM_CMD_CLOSE				0x0001230A
 
+#define USM_STREAM_CMD_SET_PARAM			0x00012731
+struct usm_stream_cmd_set_param {
+	struct apr_hdr hdr;
+	u32            buf_addr_lsw;
+	u32            buf_addr_msw;
+	u32            mem_map_handle;
+	u32            buf_size;
+	u32            module_id;
+	u32            param_id;
+} __packed;
+
+#define USM_STREAM_CMD_GET_PARAM			0x00012732
+struct usm_stream_cmd_get_param {
+	struct apr_hdr hdr;
+	u32            buf_addr_lsw;
+	u32            buf_addr_msw;
+	u32            mem_map_handle;
+	u32            buf_size;
+	u32            module_id;
+	u32            param_id;
+} __packed;
+
 /* Encoder configuration definitions */
 #define USM_STREAM_CMD_SET_ENC_PARAM			0x0001230B
 /* Decoder configuration definitions */
diff --git a/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h b/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h
index faf50d2..9944353 100644
--- a/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h
+++ b/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h
@@ -39,6 +39,15 @@
 #define US_GET_VERSION  _IOWR(USF_IOCTL_MAGIC, 9, \
 				struct us_version_info_type)
 
+#define US_SET_TX_STREAM_PARAM   _IOW(USF_IOCTL_MAGIC, 10, \
+				struct us_stream_param_type)
+#define US_GET_TX_STREAM_PARAM  _IOWR(USF_IOCTL_MAGIC, 11, \
+				struct us_stream_param_type)
+#define US_SET_RX_STREAM_PARAM   _IOW(USF_IOCTL_MAGIC, 12, \
+				struct us_stream_param_type)
+#define US_GET_RX_STREAM_PARAM  _IOWR(USF_IOCTL_MAGIC, 13, \
+				struct us_stream_param_type)
+
 /* Special timeout values */
 #define USF_NO_WAIT_TIMEOUT	0x00000000
 /* Infinitive */
@@ -130,6 +139,8 @@
 	uint16_t params_data_size;
 /* Pointer to the parameters */
 	uint8_t *params_data;
+/* Max size of buffer for get and set parameter */
+	uint32_t max_get_set_param_buf_size;
 };
 
 struct us_input_info_type {
@@ -273,4 +284,15 @@
 	char *pbuf;
 };
 
+struct us_stream_param_type {
+/* Id of module */
+	uint32_t module_id;
+/* Id of parameter */
+	uint32_t param_id;
+/* Size of memory of the parameter buffer */
+	uint32_t buf_size;
+/* Pointer to the memory of the parameter buffer */
+	uint8_t __user *pbuf;
+};
+
 #endif /* __USF_H__ */
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 99e54b7..4a8cc39 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -788,6 +788,7 @@
 	struct device   *dev = &pdev->dev;
 	struct clk *ocmem_core_clk = NULL;
 	struct clk *ocmem_iface_clk = NULL;
+	int rc;
 
 	if (!pdev->dev.of_node) {
 		dev_info(dev, "Missing Configuration in Device Tree\n");
@@ -830,10 +831,23 @@
 
 	platform_set_drvdata(pdev, ocmem_pdata);
 
+	rc = ocmem_enable_core_clock();
+	if (rc < 0)
+		goto core_clk_fail;
+
+	rc = ocmem_enable_iface_clock();
+	if (rc < 0)
+		goto iface_clk_fail;
+
 	/* Parameter to be updated based on TZ */
 	/* Allow the OCMEM CSR to be programmed */
-	if (ocmem_enable_sec_program(OCMEM_SECURE_DEV_ID))
+	if (ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID)) {
+		ocmem_disable_iface_clock();
+		ocmem_disable_core_clock();
 		return -EBUSY;
+	}
+	ocmem_disable_iface_clock();
+	ocmem_disable_core_clock();
 
 	if (ocmem_debugfs_init(pdev))
 		dev_err(dev, "ocmem: No debugfs node available\n");
@@ -860,6 +874,11 @@
 
 	dev_dbg(dev, "initialized successfully\n");
 	return 0;
+
+iface_clk_fail:
+	ocmem_disable_core_clock();
+core_clk_fail:
+	return rc;
 }
 
 static int __devexit msm_ocmem_remove(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
index f753391..9c2afb9 100644
--- a/arch/arm/mach-msm/ocmem_core.c
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -583,7 +583,7 @@
 	return 0;
 }
 
-int ocmem_enable_sec_program(int sec_id)
+int ocmem_restore_sec_program(int sec_id)
 {
 	return 0;
 }
@@ -702,7 +702,7 @@
 	return rc;
 }
 
-int ocmem_enable_sec_program(int sec_id)
+int ocmem_restore_sec_program(int sec_id)
 {
 	int rc, scm_ret = 0;
 	struct msm_scm_sec_cfg {
@@ -953,7 +953,6 @@
 	unsigned end_m = num_banks;
 	unsigned long region_offset = 0;
 	struct ocmem_hw_region *region;
-	int rc = 0;
 
 	if (offset < 0)
 		return -EINVAL;
@@ -974,14 +973,6 @@
 		(region_end >= num_regions))
 			return -EINVAL;
 
-	rc = ocmem_enable_core_clock();
-
-	if (rc < 0) {
-		pr_err("ocmem: Power transistion request for client %s (id: %d) failed\n",
-				get_name(id), id);
-		return rc;
-	}
-
 	mutex_lock(&region_ctrl_lock);
 
 	for (i = region_start; i <= region_end; i++) {
@@ -1035,11 +1026,10 @@
 
 	}
 	mutex_unlock(&region_ctrl_lock);
-	ocmem_disable_core_clock();
+
 	return 0;
 invalid_transition:
 	mutex_unlock(&region_ctrl_lock);
-	ocmem_disable_core_clock();
 	pr_err("ocmem_core: Invalid state transition detected for %d\n", id);
 	pr_err("ocmem_core: Offset %lx Len %lx curr_state %x new_state %x\n",
 			offset, len, curr_state, new_state);
@@ -1122,9 +1112,37 @@
 
 static int ocmem_power_show(struct seq_file *f, void *dummy)
 {
+	int rc = 0;
+
+	rc = ocmem_enable_core_clock();
+
+	if (rc < 0)
+		goto core_clock_fail;
+
+	rc = ocmem_enable_iface_clock();
+
+	if (rc < 0)
+		goto iface_clock_fail;
+
+	rc = ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID);
+	if (rc < 0) {
+		pr_err("ocmem: Failed to restore security programming\n");
+		goto restore_config_fail;
+	}
 	ocmem_power_show_sw_state(f, dummy);
 	ocmem_power_show_hw_state(f, dummy);
+
+	ocmem_disable_iface_clock();
+	ocmem_disable_core_clock();
+
 	return 0;
+
+restore_config_fail:
+	ocmem_disable_iface_clock();
+iface_clock_fail:
+	ocmem_disable_core_clock();
+core_clock_fail:
+	return -EINVAL;
 }
 
 static int ocmem_power_open(struct inode *inode, struct file *file)
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index 8e25bb2..78279ed 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -634,17 +634,11 @@
 {
 	int rc = 0;
 
-	rc = ocmem_enable_core_clock();
+	rc = ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID);
 
-	if (rc < 0)
-		goto core_clock_fail;
-
-
-	if (is_iface_access(req->owner)) {
-		rc = ocmem_enable_iface_clock();
-
-		if (rc < 0)
-			goto iface_clock_fail;
+	if (rc < 0) {
+		pr_err("ocmem: Failed to restore security programming\n");
+		goto lock_failed;
 	}
 
 	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
@@ -670,11 +664,6 @@
 process_map_fail:
 	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
 lock_failed:
-	if (is_iface_access(req->owner))
-		ocmem_disable_iface_clock();
-iface_clock_fail:
-	ocmem_disable_core_clock();
-core_clock_fail:
 	pr_err("ocmem: Failed to map ocmem request\n");
 	return rc;
 }
@@ -698,9 +687,6 @@
 		goto unlock_failed;
 	}
 
-	if (is_iface_access(req->owner))
-		ocmem_disable_iface_clock();
-	ocmem_disable_core_clock();
 	pr_debug("ocmem: Unmapped request %p\n", req);
 	return 0;
 
@@ -1345,9 +1331,21 @@
 	if (rc < 0)
 		return -EINVAL;
 
+	rc = ocmem_enable_core_clock();
+
+	if (rc < 0)
+		goto core_clock_fail;
+
+	if (is_iface_access(req->owner)) {
+		rc = ocmem_enable_iface_clock();
+
+		if (rc < 0)
+			goto iface_clock_fail;
+	}
+
 	rc = process_map(req, req->req_start, req->req_end);
 	if (rc < 0)
-		return -EINVAL;
+		goto map_error;
 
 	offset = phys_to_offset(req->req_start);
 
@@ -1366,7 +1364,14 @@
 		BUG();
 	}
 	return 0;
+
 power_ctl_error:
+map_error:
+	if (is_iface_access(req->owner))
+		ocmem_disable_iface_clock();
+iface_clock_fail:
+	ocmem_disable_core_clock();
+core_clock_fail:
 	return -EINVAL;
 }
 
@@ -1536,6 +1541,10 @@
 				}
 			}
 
+			if (is_iface_access(req->owner))
+				ocmem_disable_iface_clock();
+			ocmem_disable_core_clock();
+
 			rc = do_free(req);
 			if (rc < 0) {
 				pr_err("ocmem: Failed to free %p\n", req);
@@ -1556,6 +1565,9 @@
 				pr_err("Failed to switch OFF memory macros\n");
 				goto free_fail;
 			}
+			if (is_iface_access(req->owner))
+				ocmem_disable_iface_clock();
+			ocmem_disable_core_clock();
 		}
 
 		/* free the allocation */
@@ -1658,6 +1670,10 @@
 		rc = process_unmap(req, req->req_start, req->req_end);
 		if (rc < 0)
 			return -EINVAL;
+
+		if (is_iface_access(req->owner))
+			ocmem_disable_iface_clock();
+		ocmem_disable_core_clock();
 	} else
 		return -EINVAL;
 
@@ -1768,6 +1784,9 @@
 			rc = process_unmap(req, req->req_start, req->req_end);
 			if (rc < 0)
 				goto shrink_fail;
+			if (is_iface_access(req->owner))
+				ocmem_disable_iface_clock();
+			ocmem_disable_core_clock();
 		}
 		rc = do_free(req);
 		if (rc < 0)
@@ -2254,6 +2273,17 @@
 
 	if (req->req_sz != 0) {
 
+		rc = ocmem_enable_core_clock();
+
+		if (rc < 0)
+			goto core_clock_fail;
+
+		if (is_iface_access(req->owner)) {
+			rc = ocmem_enable_iface_clock();
+
+			if (rc < 0)
+				goto iface_clock_fail;
+		}
 		rc = process_map(req, req->req_start, req->req_end);
 		if (rc < 0)
 			goto map_error;
@@ -2275,6 +2305,11 @@
 map_error:
 	handle->req = NULL;
 	do_free(req);
+	if (is_iface_access(req->owner))
+		ocmem_disable_iface_clock();
+iface_clock_fail:
+	ocmem_disable_core_clock();
+core_clock_fail:
 do_allocate_error:
 	ocmem_destroy_req(req);
 	return -EINVAL;
@@ -2303,6 +2338,17 @@
 	inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);
 
 	if (req->req_sz != 0) {
+		rc = ocmem_enable_core_clock();
+
+		if (rc < 0)
+			goto core_clock_fail;
+
+		if (is_iface_access(req->owner)) {
+			rc = ocmem_enable_iface_clock();
+
+			if (rc < 0)
+				goto iface_clock_fail;
+		}
 
 		rc = process_map(req, req->req_start, req->req_end);
 		if (rc < 0)
@@ -2333,6 +2379,11 @@
 map_error:
 	handle->req = NULL;
 	do_free(req);
+	if (is_iface_access(req->owner))
+		ocmem_disable_iface_clock();
+iface_clock_fail:
+	ocmem_disable_core_clock();
+core_clock_fail:
 do_allocate_error:
 	ocmem_destroy_req(req);
 	return -EINVAL;
diff --git a/arch/arm/mach-msm/perf_debug.c b/arch/arm/mach-msm/perf_debug.c
index 3a87c78..c4f7d45 100644
--- a/arch/arm/mach-msm/perf_debug.c
+++ b/arch/arm/mach-msm/perf_debug.c
@@ -48,6 +48,7 @@
 	"23 msm: perf: Fix cpu id logic in tracectr notifier\n"
 	"24 msm: perf: tracectr: Initialize cnts after hotplug\n"
 	"25 Perf: Reset pmu after hotplug\n"
+	"26 msm: perf: set filter bits for cycle counter on krait\n"
 ;
 
 static ssize_t desc_read(struct file *fp, char __user *buf,
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_aac.c
index caeb79d..d95c356 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_aac.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -232,7 +232,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
-
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
 		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
@@ -261,11 +266,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_aac_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c b/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
index fc023c1..5d57293 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -91,7 +91,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
-
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
 		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
@@ -119,11 +124,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_amrnb_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c b/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c
index 256da4d..d2728b7 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -93,6 +93,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
@@ -121,11 +127,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_amrwb_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c b/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c
index 544bf9c..8169914 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -170,6 +170,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
@@ -198,11 +204,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 	config_debug_fs(audio);
 	pr_debug("%s: AMRWBPLUS dec success mode[%d]session[%d]\n", __func__,
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_evrc.c b/arch/arm/mach-msm/qdsp6v2/audio_evrc.c
index 3498e69..2de2178 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_evrc.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_evrc.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -99,6 +99,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
@@ -127,11 +133,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_evrc_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_mp3.c b/arch/arm/mach-msm/qdsp6v2/audio_mp3.c
index d2f0270..9132486 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_mp3.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_mp3.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -98,6 +98,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
@@ -127,11 +133,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_mp3_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
index 0a8ce8e..ecea6d0 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -252,6 +252,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
@@ -281,11 +287,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_multi_aac_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c b/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c
index 4993226..acfcb65 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -104,6 +104,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
@@ -132,11 +138,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_qcelp_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_wma.c b/arch/arm/mach-msm/qdsp6v2/audio_wma.c
index 5e3de86..5a64a69 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_wma.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_wma.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -138,6 +138,12 @@
 		kfree(audio);
 		return -ENOMEM;
 	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
 		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
@@ -166,11 +172,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_wma_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c b/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c
index ce49cac..7d0edf0 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -198,6 +198,12 @@
 		return -ENOMEM;
 	}
 
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
 	/* open in T/NT mode */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
 		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
@@ -226,11 +232,6 @@
 		rc = -EACCES;
 		goto fail;
 	}
-	rc = audio_aio_open(audio, file);
-	if (rc < 0) {
-		pr_err("audio_aio_open rc=%d\n", rc);
-		goto fail;
-	}
 
 #ifdef CONFIG_DEBUG_FS
 	snprintf(name, sizeof name, "msm_wmapro_%04x", audio->ac->session);
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/q6usm.h b/arch/arm/mach-msm/qdsp6v2/ultrasound/q6usm.h
index 7ab4ec3..58a6431 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/q6usm.h
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/q6usm.h
@@ -65,6 +65,14 @@
 	spinlock_t	dsp_lock;
 	/* extended parameters, related to q6 variants */
 	void		*ext;
+	/* physical address of parameter buffer */
+	dma_addr_t	param_phys;
+	/* buffer which stores the parameter data */
+	u8		*param_buf;
+	/* size of parameter buffer */
+	uint32_t	param_buf_size;
+	/* parameter buffer memory handle */
+	void		*param_buf_mem_handle;
 };
 
 struct us_client {
@@ -88,6 +96,8 @@
 int q6usm_cmd(struct us_client *usc, int cmd);
 int q6usm_us_client_buf_alloc(unsigned int dir, struct us_client *usc,
 			      unsigned int bufsz, unsigned int bufcnt);
+int q6usm_us_param_buf_alloc(unsigned int dir, struct us_client *usc,
+			      unsigned int bufsz);
 int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg);
 int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg);
 int q6usm_read(struct us_client *usc, uint32_t read_ind);
@@ -104,5 +114,9 @@
 int q6usm_set_us_detection(struct us_client *usc,
 			   struct usm_session_cmd_detect_info *detect_info,
 			   uint16_t detect_info_size);
+int q6usm_set_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size);
+int q6usm_get_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size);
 
 #endif /* __Q6_USM_H__ */
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
index 192aaf9..1c42020 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
@@ -27,8 +27,8 @@
 #include "usfcdev.h"
 
 /* The driver version*/
-#define DRV_VERSION "1.6.1"
-#define USF_VERSION_ID 0x0161
+#define DRV_VERSION "1.7.1"
+#define USF_VERSION_ID 0x0171
 
 /* Standard timeout in the asynchronous ops */
 #define USF_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */
@@ -36,6 +36,8 @@
 /* Undefined USF device */
 #define USF_UNDEF_DEV_ID 0xffff
 
+/* TX memory mapping flag */
+#define USF_VM_READ 1
 /* RX memory mapping flag */
 #define USF_VM_WRITE 2
 
@@ -973,6 +975,13 @@
 		return rc;
 	}
 
+	rc = q6usm_us_param_buf_alloc(OUT, usf_xx->usc,
+			config_tx.us_xx_info.max_get_set_param_buf_size);
+	if (rc) {
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+		return rc;
+	}
+
 	rc = q6usm_enc_cfg_blk(usf_xx->usc,
 			       &usf_xx->encdec_cfg);
 	if (!rc &&
@@ -1027,6 +1036,13 @@
 		return rc;
 	}
 
+	rc = q6usm_us_param_buf_alloc(IN, usf_xx->usc,
+			config_rx.us_xx_info.max_get_set_param_buf_size);
+	if (rc) {
+		(void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+		return rc;
+	}
+
 	rc = q6usm_dec_cfg_blk(usf_xx->usc,
 			       &usf_xx->encdec_cfg);
 	if (rc)
@@ -1270,6 +1286,120 @@
 	return rc;
 } /* usf_get_version */
 
+static int usf_set_stream_param(struct usf_xx_type *usf_xx,
+				unsigned long arg, int dir)
+{
+	struct us_stream_param_type set_stream_param;
+	struct us_client *usc = usf_xx->usc;
+	struct us_port_data *port = &usc->port[dir];
+	int rc = 0;
+
+	if (port->param_buf == NULL) {
+		pr_err("%s: parameter buffer is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	rc = copy_from_user(&set_stream_param,
+			(struct us_stream_param_type __user *) arg,
+			sizeof(set_stream_param));
+
+	if (rc) {
+		pr_err("%s: copy set_stream_param from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	if (set_stream_param.buf_size > port->param_buf_size) {
+		pr_err("%s: buf_size (%d) > maximum buf size (%d)\n",
+			__func__, set_stream_param.buf_size,
+			port->param_buf_size);
+		return -EINVAL;
+	}
+
+	if (set_stream_param.buf_size == 0) {
+		pr_err("%s: buf_size is 0\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = copy_from_user(port->param_buf,
+			(uint8_t __user *) set_stream_param.pbuf,
+			set_stream_param.buf_size);
+	if (rc) {
+		pr_err("%s: copy param buf from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = q6usm_set_us_stream_param(dir, usc, set_stream_param.module_id,
+					set_stream_param.param_id,
+					set_stream_param.buf_size);
+	if (rc) {
+		pr_err("%s: q6usm_set_us_stream_param failed; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	return rc;
+} /* usf_set_stream_param */
+
+static int usf_get_stream_param(struct usf_xx_type *usf_xx,
+				unsigned long arg, int dir)
+{
+	struct us_stream_param_type get_stream_param;
+	struct us_client *usc = usf_xx->usc;
+	struct us_port_data *port = &usc->port[dir];
+	int rc = 0;
+
+	if (port->param_buf == NULL) {
+		pr_err("%s: parameter buffer is null\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	rc = copy_from_user(&get_stream_param,
+			(struct us_stream_param_type __user *) arg,
+			sizeof(get_stream_param));
+
+	if (rc) {
+		pr_err("%s: copy get_stream_param from user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	if (get_stream_param.buf_size > port->param_buf_size) {
+		pr_err("%s: buf_size (%d) > maximum buf size (%d)\n",
+			__func__, get_stream_param.buf_size,
+			port->param_buf_size);
+		return -EINVAL;
+	}
+
+	if (get_stream_param.buf_size == 0) {
+		pr_err("%s: buf_size is 0\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = q6usm_get_us_stream_param(dir, usc, get_stream_param.module_id,
+					get_stream_param.param_id,
+					get_stream_param.buf_size);
+	if (rc) {
+		pr_err("%s: q6usm_get_us_stream_param failed; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	rc = copy_to_user((uint8_t __user *) get_stream_param.pbuf,
+			port->param_buf,
+			get_stream_param.buf_size);
+	if (rc) {
+		pr_err("%s: copy param buf to user; rc=%d\n",
+			__func__, rc);
+		return -EFAULT;
+	}
+
+	return rc;
+} /* usf_get_stream_param */
+
 static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	int rc = 0;
@@ -1402,6 +1532,26 @@
 		break;
 	} /* US_GET_VERSION */
 
+	case US_SET_TX_STREAM_PARAM: {
+		rc = usf_set_stream_param(&usf->usf_tx, arg, OUT);
+		break;
+	} /* US_SET_TX_STREAM_PARAM */
+
+	case US_GET_TX_STREAM_PARAM: {
+		rc = usf_get_stream_param(&usf->usf_tx, arg, OUT);
+		break;
+	} /* US_GET_TX_STREAM_PARAM */
+
+	case US_SET_RX_STREAM_PARAM: {
+		rc = usf_set_stream_param(&usf->usf_rx, arg, IN);
+		break;
+	} /* US_SET_RX_STREAM_PARAM */
+
+	case US_GET_RX_STREAM_PARAM: {
+		rc = usf_get_stream_param(&usf->usf_rx, arg, IN);
+		break;
+	} /* US_GET_RX_STREAM_PARAM */
+
 	default:
 		pr_err("%s: unsupported IOCTL command [%d]\n",
 		       __func__,
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
index af3c1f5..1a1d587 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
@@ -53,7 +53,7 @@
 
 static struct usm_mmap this_mmap;
 
-static void q6usm_add_mmaphdr(struct us_client *usc, struct apr_hdr *hdr,
+static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
 			      uint32_t pkt_size, bool cmd_flg, u32 token)
 {
 	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
@@ -68,20 +68,20 @@
 	return;
 }
 
-static int q6usm_memory_map(struct us_client *usc, uint32_t buf_add, int dir,
-		     uint32_t bufsz, uint32_t bufcnt)
+static int q6usm_memory_map(uint32_t buf_add, int dir, uint32_t bufsz,
+		uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
 {
 	struct usm_cmd_memory_map_region mem_region_map;
 	int rc = 0;
 
-	if ((usc == NULL) || (usc->apr == NULL) || (this_mmap.apr == NULL)) {
+	if (this_mmap.apr == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
 		return -EINVAL;
 	}
 
-	q6usm_add_mmaphdr(usc, &mem_region_map.hdr,
+	q6usm_add_mmaphdr(&mem_region_map.hdr,
 			  sizeof(struct usm_cmd_memory_map_region), true,
-			  ((usc->session << 8) | dir));
+			  ((session << 8) | dir));
 
 	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
 	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
@@ -108,32 +108,29 @@
 		rc = -ETIME;
 		pr_err("%s: timeout. waited for memory_map\n", __func__);
 	} else {
-		struct us_port_data *port = &usc->port[dir];
-
-		*((uint32_t *)(port->ext)) = this_mmap.mem_handle;
+		*mem_handle = this_mmap.mem_handle;
 		rc = 0;
 	}
 fail_cmd:
 	return rc;
 }
 
-int q6usm_memory_unmap(struct us_client *usc, uint32_t buf_add, int dir)
+int q6usm_memory_unmap(uint32_t buf_add, int dir, uint32_t session,
+			uint32_t mem_handle)
 {
 	struct usm_cmd_memory_unmap_region mem_unmap;
-	struct us_port_data *port = &usc->port[dir];
 	int rc = 0;
 
-	if ((usc == NULL) || (usc->apr == NULL) || (this_mmap.apr == NULL)) {
+	if (this_mmap.apr == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
 		return -EINVAL;
 	}
 
-	port = &usc->port[dir];
-	q6usm_add_mmaphdr(usc, &mem_unmap.hdr,
+	q6usm_add_mmaphdr(&mem_unmap.hdr,
 			  sizeof(struct usm_cmd_memory_unmap_region), true,
-			  ((usc->session << 8) | dir));
+			  ((session << 8) | dir));
 	mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
-	mem_unmap.mem_map_handle = *((uint32_t *)(port->ext));
+	mem_unmap.mem_map_handle = mem_handle;
 
 	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
 	if (rc < 0) {
@@ -209,7 +206,8 @@
 		return 0;
 	}
 
-	rc = q6usm_memory_unmap(usc, port->phys, dir);
+	rc = q6usm_memory_unmap(port->phys, dir, usc->session,
+				*((uint32_t *)port->ext));
 	pr_debug("%s: data[%p]phys[%p][%p]\n", __func__,
 		 (void *)port->data, (void *)port->phys, (void *)&port->phys);
 	/* 4K boundary is required by the API with QDSP6 */
@@ -224,6 +222,45 @@
 	return rc;
 }
 
+int q6usm_us_param_buf_free(unsigned int dir,
+			struct us_client *usc)
+{
+	struct us_port_data *port;
+	int rc = 0;
+	uint32_t size = 0;
+
+	if ((usc == NULL) ||
+		((dir != IN) && (dir != OUT)))
+		return -EINVAL;
+
+	mutex_lock(&usc->cmd_lock);
+	port = &usc->port[dir];
+	if (port == NULL) {
+		mutex_unlock(&usc->cmd_lock);
+		return -EINVAL;
+	}
+
+	if (port->param_buf == NULL) {
+		mutex_unlock(&usc->cmd_lock);
+		return 0;
+	}
+
+	rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
+				*((uint32_t *)port->param_buf_mem_handle));
+	pr_debug("%s: data[%p]phys[%p][%p]\n", __func__,
+		 (void *)port->param_buf, (void *)port->param_phys,
+		 (void *)&port->param_phys);
+	/* 4K boundary is required by the API with QDSP6 */
+	size = (port->param_buf_size + MEM_4K_OFFSET) & MEM_4K_MASK;
+	dma_free_coherent(NULL, size, port->param_buf, port->param_phys);
+	port->param_buf = NULL;
+	port->param_phys = 0;
+	port->param_buf_size = 0;
+
+	mutex_unlock(&usc->cmd_lock);
+	return rc;
+}
+
 void q6usm_us_client_free(struct us_client *usc)
 {
 	int loopcnt = 0;
@@ -240,6 +277,7 @@
 			continue;
 		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
 		q6usm_us_client_buf_free(loopcnt, usc);
+		q6usm_us_param_buf_free(loopcnt, usc);
 	}
 	q6usm_session_free(usc);
 	apr_deregister(usc->apr);
@@ -279,7 +317,7 @@
 		pr_err("%s: us_client allocation failed\n", __func__);
 		return NULL;
 	}
-	p_mem_handle = kzalloc(sizeof(uint32_t) * 2, GFP_KERNEL);
+	p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
 	if (p_mem_handle == NULL) {
 		pr_err("%s: p_mem_handle allocation failed\n", __func__);
 		kfree(usc);
@@ -320,6 +358,7 @@
 		mutex_init(&usc->port[lcnt].lock);
 		spin_lock_init(&usc->port[lcnt].dsp_lock);
 		usc->port[lcnt].ext = (void *)p_mem_handle++;
+		usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
 		pr_err("%s: usc->port[%d].ext=%p;\n",
 		       __func__, lcnt, usc->port[lcnt].ext);
 	}
@@ -372,11 +411,71 @@
 		 (void *)&port->phys);
 
 	size = (size + MEM_4K_OFFSET) & MEM_4K_MASK;
-	rc = q6usm_memory_map(usc, port->phys, dir, size, 1);
+	rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
+				(uint32_t *)port->ext);
 	if (rc < 0) {
 		pr_err("%s: CMD Memory_map failed\n", __func__);
 		mutex_unlock(&usc->cmd_lock);
 		q6usm_us_client_buf_free(dir, usc);
+		q6usm_us_param_buf_free(dir, usc);
+	} else {
+		mutex_unlock(&usc->cmd_lock);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+int q6usm_us_param_buf_alloc(unsigned int dir,
+			struct us_client *usc,
+			unsigned int bufsz)
+{
+	int rc = 0;
+	struct us_port_data *port = NULL;
+	unsigned int size = 0;
+
+	if ((usc == NULL) ||
+		((dir != IN) && (dir != OUT)) ||
+		(usc->session <= 0 || usc->session > SESSION_MAX)) {
+		pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
+			__func__, dir, bufsz);
+		return -EINVAL;
+	}
+
+	mutex_lock(&usc->cmd_lock);
+
+	port = &usc->port[dir];
+
+	if (bufsz == 0) {
+		pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
+			__func__);
+		port->param_buf = NULL;
+		mutex_unlock(&usc->cmd_lock);
+		return rc;
+	}
+
+	port->param_buf = dma_alloc_coherent(NULL, bufsz,
+					&(port->param_phys), GFP_KERNEL);
+	if (port->param_buf == NULL) {
+		pr_err("%s: Parameter buffer allocation failed\n", __func__);
+		mutex_unlock(&usc->cmd_lock);
+		return -ENOMEM;
+	}
+
+	port->param_buf_size = bufsz;
+	pr_debug("%s: param_buf[%p]; param_phys[%p]; [%p]\n", __func__,
+		 (void *)port->param_buf,
+		 (void *)port->param_phys,
+		 (void *)&port->param_phys);
+
+	size = (bufsz + MEM_4K_OFFSET) & MEM_4K_MASK;
+	rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
+			usc->session, (uint32_t *)port->param_buf_mem_handle);
+	if (rc < 0) {
+		pr_err("%s: CMD Memory_map failed\n", __func__);
+		mutex_unlock(&usc->cmd_lock);
+		q6usm_us_client_buf_free(dir, usc);
+		q6usm_us_param_buf_free(dir, usc);
 	} else {
 		mutex_unlock(&usc->cmd_lock);
 		rc = 0;
@@ -471,6 +570,8 @@
 			case USM_STREAM_CMD_SET_ENC_PARAM:
 			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
 			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
+			case USM_STREAM_CMD_SET_PARAM:
+			case USM_STREAM_CMD_GET_PARAM:
 				if (atomic_read(&usc->cmd_state)) {
 					atomic_set(&usc->cmd_state, 0);
 					wake_up(&usc->cmd_wait);
@@ -1230,6 +1331,98 @@
 	return rc;
 }
 
+int q6usm_set_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size)
+{
+	int rc = 0;
+	struct usm_stream_cmd_set_param cmd_set_param;
+	struct us_port_data *port = NULL;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port = &usc->port[dir];
+
+	q6usm_add_hdr(usc, &cmd_set_param.hdr,
+		(sizeof(cmd_set_param) - APR_HDR_SIZE), true);
+
+	cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
+	cmd_set_param.buf_size = buf_size;
+	cmd_set_param.buf_addr_msw = upper_32_bits(port->param_phys);
+	cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
+	cmd_set_param.mem_map_handle =
+			*((uint32_t *)(port->param_buf_mem_handle));
+	cmd_set_param.module_id = module_id;
+	cmd_set_param.param_id = param_id;
+	cmd_set_param.hdr.token = 0;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
+
+	if (rc < 0) {
+		pr_err("%s:write op[0x%x];rc[%d]\n",
+			__func__, cmd_set_param.hdr.opcode, rc);
+	}
+
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
+			__func__, Q6USM_TIMEOUT_JIFFIES);
+	} else
+		rc = 0;
+
+	return rc;
+}
+
+int q6usm_get_us_stream_param(int dir, struct us_client *usc,
+		uint32_t module_id, uint32_t param_id, uint32_t buf_size)
+{
+	int rc = 0;
+	struct usm_stream_cmd_get_param cmd_get_param;
+	struct us_port_data *port = NULL;
+
+	if ((usc == NULL) || (usc->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port = &usc->port[dir];
+
+	q6usm_add_hdr(usc, &cmd_get_param.hdr,
+			(sizeof(cmd_get_param) - APR_HDR_SIZE), true);
+
+	cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
+	cmd_get_param.buf_size = buf_size;
+	cmd_get_param.buf_addr_msw = upper_32_bits(port->param_phys);
+	cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
+	cmd_get_param.mem_map_handle =
+			*((uint32_t *)(port->param_buf_mem_handle));
+	cmd_get_param.module_id = module_id;
+	cmd_get_param.param_id = param_id;
+	cmd_get_param.hdr.token = 0;
+
+	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
+
+	if (rc < 0) {
+		pr_err("%s:write op[0x%x];rc[%d]\n",
+			__func__, cmd_get_param.hdr.opcode, rc);
+	}
+
+	rc = wait_event_timeout(usc->cmd_wait,
+				(atomic_read(&usc->cmd_state) == 0),
+				Q6USM_TIMEOUT_JIFFIES);
+	if (!rc) {
+		rc = -ETIME;
+		pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
+			__func__, Q6USM_TIMEOUT_JIFFIES);
+	} else
+		rc = 0;
+
+	return rc;
+}
+
 static int __init q6usm_init(void)
 {
 	pr_debug("%s\n", __func__);
diff --git a/arch/arm/mach-msm/rmt_storage_client.c b/arch/arm/mach-msm/rmt_storage_client.c
deleted file mode 100644
index 550624c..0000000
--- a/arch/arm/mach-msm/rmt_storage_client.c
+++ /dev/null
@@ -1,1839 +0,0 @@
-/* Copyright (c) 2009-2013, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/miscdevice.h>
-#include <linux/wait.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/wakelock.h>
-#include <linux/rmt_storage_client.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <mach/msm_rpcrouter.h>
-#ifdef CONFIG_MSM_SDIO_SMEM
-#include <mach/sdio_smem.h>
-#endif
-#include <mach/msm_smem.h>
-
-enum {
-	RMT_STORAGE_EVNT_OPEN = 0,
-	RMT_STORAGE_EVNT_CLOSE,
-	RMT_STORAGE_EVNT_WRITE_BLOCK,
-	RMT_STORAGE_EVNT_GET_DEV_ERROR,
-	RMT_STORAGE_EVNT_WRITE_IOVEC,
-	RMT_STORAGE_EVNT_SEND_USER_DATA,
-	RMT_STORAGE_EVNT_READ_IOVEC,
-	RMT_STORAGE_EVNT_ALLOC_RMT_BUF,
-} rmt_storage_event;
-
-struct shared_ramfs_entry {
-	uint32_t client_id;	/* Client id to uniquely identify a client */
-	uint32_t base_addr;	/* Base address of shared RAMFS memory */
-	uint32_t size;		/* Size of the shared RAMFS memory */
-	uint32_t client_sts;	/* This will be initialized to 1 when
-				   remote storage RPC client is ready
-				   to process requests */
-};
-struct shared_ramfs_table {
-	uint32_t magic_id;	/* Identify RAMFS details in SMEM */
-	uint32_t version;	/* Version of shared_ramfs_table */
-	uint32_t entries;	/* Total number of valid entries   */
-	/* List all entries */
-	struct shared_ramfs_entry ramfs_entry[MAX_RAMFS_TBL_ENTRIES];
-};
-
-struct rmt_storage_client_info {
-	unsigned long cids;
-	struct list_head shrd_mem_list; /* List of shared memory entries */
-	int open_excl;
-	atomic_t total_events;
-	wait_queue_head_t event_q;
-	struct list_head event_list;
-	struct list_head client_list;	/* List of remote storage clients */
-	/* Lock to protect lists */
-	spinlock_t lock;
-	/* Wakelock to be acquired when processing requests from modem */
-	struct wake_lock wlock;
-	atomic_t wcount;
-	struct workqueue_struct *workq;
-};
-
-struct rmt_storage_kevent {
-	struct list_head list;
-	struct rmt_storage_event event;
-};
-
-/* Remote storage server on modem */
-struct rmt_storage_srv {
-	uint32_t prog;
-	int sync_token;
-	struct platform_driver plat_drv;
-	struct msm_rpc_client *rpc_client;
-	struct delayed_work restart_work;
-};
-
-/* Remote storage client on modem */
-struct rmt_storage_client {
-	uint32_t handle;
-	uint32_t sid;			/* Storage ID */
-	char path[MAX_PATH_NAME];
-	struct rmt_storage_srv *srv;
-	struct list_head list;
-};
-
-struct rmt_shrd_mem {
-	struct list_head list;
-	struct rmt_shrd_mem_param param;
-	struct shared_ramfs_entry *smem_info;
-	struct rmt_storage_srv *srv;
-};
-
-static struct rmt_storage_srv *rmt_storage_get_srv(uint32_t prog);
-static uint32_t rmt_storage_get_sid(const char *path);
-#ifdef CONFIG_MSM_SDIO_SMEM
-static void rmt_storage_sdio_smem_work(struct work_struct *work);
-#endif
-
-static struct rmt_storage_client_info *rmc;
-struct rmt_storage_srv *rmt_srv;
-
-#ifdef CONFIG_MSM_SDIO_SMEM
-DECLARE_DELAYED_WORK(sdio_smem_work, rmt_storage_sdio_smem_work);
-#endif
-
-#ifdef CONFIG_MSM_SDIO_SMEM
-#define MDM_LOCAL_BUF_SZ	0xC0000
-static struct sdio_smem_client *sdio_smem;
-#endif
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-struct rmt_storage_op_stats {
-	unsigned long count;
-	ktime_t start;
-	ktime_t min;
-	ktime_t max;
-	ktime_t total;
-};
-struct rmt_storage_stats {
-       char path[MAX_PATH_NAME];
-       struct rmt_storage_op_stats rd_stats;
-       struct rmt_storage_op_stats wr_stats;
-};
-static struct rmt_storage_stats client_stats[MAX_NUM_CLIENTS];
-static struct dentry *stats_dentry;
-#endif
-
-#define MSM_RMT_STORAGE_APIPROG	0x300000A7
-#define MDM_RMT_STORAGE_APIPROG	0x300100A7
-
-#define RMT_STORAGE_OP_FINISH_PROC              2
-#define RMT_STORAGE_REGISTER_OPEN_PROC          3
-#define RMT_STORAGE_REGISTER_WRITE_IOVEC_PROC   4
-#define RMT_STORAGE_REGISTER_CB_PROC            5
-#define RMT_STORAGE_UN_REGISTER_CB_PROC         6
-#define RMT_STORAGE_FORCE_SYNC_PROC             7
-#define RMT_STORAGE_GET_SYNC_STATUS_PROC        8
-#define RMT_STORAGE_REGISTER_READ_IOVEC_PROC    9
-#define RMT_STORAGE_REGISTER_ALLOC_RMT_BUF_PROC 10
-
-#define RMT_STORAGE_OPEN_CB_TYPE_PROC           1
-#define RMT_STORAGE_WRITE_IOVEC_CB_TYPE_PROC    2
-#define RMT_STORAGE_EVENT_CB_TYPE_PROC          3
-#define RMT_STORAGE_READ_IOVEC_CB_TYPE_PROC     4
-#define RMT_STORAGE_ALLOC_RMT_BUF_CB_TYPE_PROC  5
-
-#define RAMFS_INFO_MAGICNUMBER		0x654D4D43
-#define RAMFS_INFO_VERSION		0x00000001
-#define RAMFS_DEFAULT			0xFFFFFFFF
-
-/* MSM EFS*/
-#define RAMFS_MODEMSTORAGE_ID		0x4D454653
-#define RAMFS_SHARED_EFS_RAM_BASE	0x46100000
-#define RAMFS_SHARED_EFS_RAM_SIZE	(3 * 1024 * 1024)
-
-/* MDM EFS*/
-#define RAMFS_MDM_STORAGE_ID		0x4D4583A1
-/* SSD */
-#define RAMFS_SSD_STORAGE_ID		0x00535344
-#define RAMFS_SHARED_SSD_RAM_BASE	0x42E00000
-#define RAMFS_SHARED_SSD_RAM_SIZE	0x2000
-
-static struct rmt_storage_client *rmt_storage_get_client(uint32_t handle)
-{
-	struct rmt_storage_client *rs_client;
-	list_for_each_entry(rs_client, &rmc->client_list, list)
-		if (rs_client->handle == handle)
-			return rs_client;
-	return NULL;
-}
-
-static struct rmt_storage_client *
-rmt_storage_get_client_by_path(const char *path)
-{
-	struct rmt_storage_client *rs_client;
-	list_for_each_entry(rs_client, &rmc->client_list, list)
-		if (!strncmp(path, rs_client->path, MAX_PATH_NAME))
-			return rs_client;
-	return NULL;
-}
-
-static struct rmt_shrd_mem_param *rmt_storage_get_shrd_mem(uint32_t sid)
-{
-	struct rmt_shrd_mem *shrd_mem;
-	struct rmt_shrd_mem_param *shrd_mem_param = NULL;
-
-	spin_lock(&rmc->lock);
-	list_for_each_entry(shrd_mem, &rmc->shrd_mem_list, list)
-		if (shrd_mem->param.sid == sid)
-			shrd_mem_param = &shrd_mem->param;
-	spin_unlock(&rmc->lock);
-
-	return shrd_mem_param;
-}
-
-static int rmt_storage_add_shrd_mem(uint32_t sid, uint32_t start,
-				    uint32_t size, void *base,
-				    struct shared_ramfs_entry *smem_info,
-				    struct rmt_storage_srv *srv)
-{
-	struct rmt_shrd_mem *shrd_mem;
-
-	shrd_mem = kzalloc(sizeof(struct rmt_shrd_mem), GFP_KERNEL);
-	if (!shrd_mem)
-		return -ENOMEM;
-	shrd_mem->param.sid = sid;
-	shrd_mem->param.start = start;
-	shrd_mem->param.size = size;
-	shrd_mem->param.base = base;
-	shrd_mem->smem_info = smem_info;
-	shrd_mem->srv = srv;
-
-	spin_lock(&rmc->lock);
-	list_add(&shrd_mem->list, &rmc->shrd_mem_list);
-	spin_unlock(&rmc->lock);
-	return 0;
-}
-
-static struct msm_rpc_client *rmt_storage_get_rpc_client(uint32_t handle)
-{
-	struct rmt_storage_client *rs_client;
-
-	rs_client = rmt_storage_get_client(handle);
-	if (!rs_client)
-		return NULL;
-	return rs_client->srv->rpc_client;
-}
-
-static int rmt_storage_validate_iovec(uint32_t handle,
-				      struct rmt_storage_iovec_desc *xfer)
-{
-	struct rmt_storage_client *rs_client;
-	struct rmt_shrd_mem_param *shrd_mem;
-
-	rs_client = rmt_storage_get_client(handle);
-	if (!rs_client)
-		return -EINVAL;
-	shrd_mem = rmt_storage_get_shrd_mem(rs_client->sid);
-	if (!shrd_mem)
-		return -EINVAL;
-
-	if ((xfer->data_phy_addr < shrd_mem->start) ||
-	    ((xfer->data_phy_addr + RAMFS_BLOCK_SIZE * xfer->num_sector) >
-	     (shrd_mem->start + shrd_mem->size)))
-		return -EINVAL;
-	return 0;
-}
-
-static int rmt_storage_send_sts_arg(struct msm_rpc_client *client,
-				struct msm_rpc_xdr *xdr, void *data)
-{
-	struct rmt_storage_send_sts *args = data;
-
-	xdr_send_uint32(xdr, &args->handle);
-	xdr_send_uint32(xdr, &args->err_code);
-	xdr_send_uint32(xdr, &args->data);
-	return 0;
-}
-
-static void put_event(struct rmt_storage_client_info *rmc,
-			struct rmt_storage_kevent *kevent)
-{
-	spin_lock(&rmc->lock);
-	list_add_tail(&kevent->list, &rmc->event_list);
-	spin_unlock(&rmc->lock);
-}
-
-static struct rmt_storage_kevent *get_event(struct rmt_storage_client_info *rmc)
-{
-	struct rmt_storage_kevent *kevent = NULL;
-
-	spin_lock(&rmc->lock);
-	if (!list_empty(&rmc->event_list)) {
-		kevent = list_first_entry(&rmc->event_list,
-			struct rmt_storage_kevent, list);
-		list_del(&kevent->list);
-	}
-	spin_unlock(&rmc->lock);
-	return kevent;
-}
-
-static int rmt_storage_event_open_cb(struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	uint32_t cid, len, event_type;
-	char *path;
-	int ret;
-	struct rmt_storage_srv *srv;
-	struct rmt_storage_client *rs_client = NULL;
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	struct rmt_storage_stats *stats;
-#endif
-
-	srv = rmt_storage_get_srv(event_args->usr_data);
-	if (!srv)
-		return -EINVAL;
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_OPEN)
-		return -1;
-
-	pr_info("%s: open callback received\n", __func__);
-
-	ret = xdr_recv_bytes(xdr, (void **)&path, &len);
-	if (ret || !path) {
-		pr_err("%s: Invalid path\n", __func__);
-		if (!ret)
-			ret = -1;
-		goto free_rs_client;
-	}
-
-	rs_client = rmt_storage_get_client_by_path(path);
-	if (rs_client) {
-		pr_debug("%s: Handle %d found for %s\n",
-			__func__, rs_client->handle, path);
-		event_args->id = RMT_STORAGE_NOOP;
-		cid = rs_client->handle;
-		goto end_open_cb;
-	}
-
-	rs_client = kzalloc(sizeof(struct rmt_storage_client), GFP_KERNEL);
-	if (!rs_client) {
-		pr_err("%s: Error allocating rmt storage client\n", __func__);
-		ret = -ENOMEM;
-		goto free_path;
-	}
-
-	memcpy(event_args->path, path, len);
-	rs_client->sid = rmt_storage_get_sid(event_args->path);
-	if (!rs_client->sid) {
-		pr_err("%s: No storage id found for %s\n", __func__,
-		       event_args->path);
-		ret = -EINVAL;
-		goto free_path;
-	}
-	strncpy(rs_client->path, event_args->path, MAX_PATH_NAME);
-
-	cid = find_first_zero_bit(&rmc->cids, sizeof(rmc->cids) * 8);
-	if (cid > MAX_NUM_CLIENTS) {
-		pr_err("%s: Max clients are reached\n", __func__);
-		cid = 0;
-		return cid;
-	}
-	__set_bit(cid, &rmc->cids);
-	pr_info("open partition %s handle=%d\n", event_args->path, cid);
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	stats = &client_stats[cid - 1];
-	memcpy(stats->path, event_args->path, len);
-	memset(stats->rd_stats, 0, sizeof(struct rmt_storage_op_stats));
-	memset(stats->wr_stats, 0, sizeof(struct rmt_storage_op_stats));
-	stats->rd_stats.min.tv64 = KTIME_MAX;
-	stats->wr_stats.min.tv64 = KTIME_MAX;
-#endif
-	event_args->id = RMT_STORAGE_OPEN;
-	event_args->sid = rs_client->sid;
-	event_args->handle = cid;
-
-	rs_client->handle = event_args->handle;
-	rs_client->srv = srv;
-	INIT_LIST_HEAD(&rs_client->list);
-	spin_lock(&rmc->lock);
-	list_add_tail(&rs_client->list, &rmc->client_list);
-	spin_unlock(&rmc->lock);
-
-end_open_cb:
-	kfree(path);
-	return cid;
-
-free_path:
-	kfree(path);
-free_rs_client:
-	kfree(rs_client);
-	return ret;
-}
-
-struct rmt_storage_close_args {
-	uint32_t handle;
-};
-
-struct rmt_storage_rw_block_args {
-	uint32_t handle;
-	uint32_t data_phy_addr;
-	uint32_t sector_addr;
-	uint32_t num_sector;
-};
-
-struct rmt_storage_get_err_args {
-	uint32_t handle;
-};
-
-struct rmt_storage_user_data_args {
-	uint32_t handle;
-	uint32_t data;
-};
-
-struct rmt_storage_event_params {
-	uint32_t type;
-	union {
-		struct rmt_storage_close_args close;
-		struct rmt_storage_rw_block_args block;
-		struct rmt_storage_get_err_args get_err;
-		struct rmt_storage_user_data_args user_data;
-	} params;
-};
-
-static int rmt_storage_parse_params(struct msm_rpc_xdr *xdr,
-		struct rmt_storage_event_params *event)
-{
-	xdr_recv_uint32(xdr, &event->type);
-
-	switch (event->type) {
-	case RMT_STORAGE_EVNT_CLOSE: {
-		struct rmt_storage_close_args *args;
-		args = &event->params.close;
-
-		xdr_recv_uint32(xdr, &args->handle);
-		break;
-	}
-
-	case RMT_STORAGE_EVNT_WRITE_BLOCK: {
-		struct rmt_storage_rw_block_args *args;
-		args = &event->params.block;
-
-		xdr_recv_uint32(xdr, &args->handle);
-		xdr_recv_uint32(xdr, &args->data_phy_addr);
-		xdr_recv_uint32(xdr, &args->sector_addr);
-		xdr_recv_uint32(xdr, &args->num_sector);
-		break;
-	}
-
-	case RMT_STORAGE_EVNT_GET_DEV_ERROR: {
-		struct rmt_storage_get_err_args *args;
-		args = &event->params.get_err;
-
-		xdr_recv_uint32(xdr, &args->handle);
-		break;
-	}
-
-	case RMT_STORAGE_EVNT_SEND_USER_DATA: {
-		struct rmt_storage_user_data_args *args;
-		args = &event->params.user_data;
-
-		xdr_recv_uint32(xdr, &args->handle);
-		xdr_recv_uint32(xdr, &args->data);
-		break;
-	}
-
-	default:
-		pr_err("%s: unknown event %d\n", __func__, event->type);
-		return -1;
-	}
-	return 0;
-}
-
-static int rmt_storage_event_close_cb(struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_event_params *event;
-	struct rmt_storage_close_args *close;
-	struct rmt_storage_client *rs_client;
-	uint32_t event_type;
-	int ret;
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_CLOSE)
-		return -1;
-
-	pr_debug("%s: close callback received\n", __func__);
-	ret = xdr_recv_pointer(xdr, (void **)&event,
-			sizeof(struct rmt_storage_event_params),
-			rmt_storage_parse_params);
-
-	if (ret || !event)
-		return -1;
-
-	close = &event->params.close;
-	event_args->handle = close->handle;
-	event_args->id = RMT_STORAGE_CLOSE;
-	__clear_bit(event_args->handle, &rmc->cids);
-	rs_client = rmt_storage_get_client(event_args->handle);
-	if (rs_client) {
-		list_del(&rs_client->list);
-		kfree(rs_client);
-	}
-	kfree(event);
-	return RMT_STORAGE_NO_ERROR;
-}
-
-static int rmt_storage_event_write_block_cb(
-		struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_event_params *event;
-	struct rmt_storage_rw_block_args *write_block;
-	struct rmt_storage_iovec_desc *xfer;
-	uint32_t event_type;
-	int ret;
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_WRITE_BLOCK)
-		return -1;
-
-	pr_debug("%s: write block callback received\n", __func__);
-	ret = xdr_recv_pointer(xdr, (void **)&event,
-			sizeof(struct rmt_storage_event_params),
-			rmt_storage_parse_params);
-
-	if (ret || !event)
-		return -1;
-
-	write_block = &event->params.block;
-	event_args->handle = write_block->handle;
-	xfer = &event_args->xfer_desc[0];
-	xfer->sector_addr = write_block->sector_addr;
-	xfer->data_phy_addr = write_block->data_phy_addr;
-	xfer->num_sector = write_block->num_sector;
-
-	ret = rmt_storage_validate_iovec(event_args->handle, xfer);
-	if (ret)
-		return -1;
-	event_args->xfer_cnt = 1;
-	event_args->id = RMT_STORAGE_WRITE;
-
-	if (atomic_inc_return(&rmc->wcount) == 1)
-		wake_lock(&rmc->wlock);
-
-	pr_debug("sec_addr = %u, data_addr = %x, num_sec = %d\n\n",
-		xfer->sector_addr, xfer->data_phy_addr,
-		xfer->num_sector);
-
-	kfree(event);
-	return RMT_STORAGE_NO_ERROR;
-}
-
-static int rmt_storage_event_get_err_cb(struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_event_params *event;
-	struct rmt_storage_get_err_args *get_err;
-	uint32_t event_type;
-	int ret;
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_GET_DEV_ERROR)
-		return -1;
-
-	pr_debug("%s: get err callback received\n", __func__);
-	ret = xdr_recv_pointer(xdr, (void **)&event,
-			sizeof(struct rmt_storage_event_params),
-			rmt_storage_parse_params);
-
-	if (ret || !event)
-		return -1;
-
-	get_err = &event->params.get_err;
-	event_args->handle = get_err->handle;
-	kfree(event);
-	/* Not implemented */
-	return -1;
-
-}
-
-static int rmt_storage_event_user_data_cb(struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_event_params *event;
-	struct rmt_storage_user_data_args *user_data;
-	uint32_t event_type;
-	int ret;
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_SEND_USER_DATA)
-		return -1;
-
-	pr_info("%s: send user data callback received\n", __func__);
-	ret = xdr_recv_pointer(xdr, (void **)&event,
-			sizeof(struct rmt_storage_event_params),
-			rmt_storage_parse_params);
-
-	if (ret || !event)
-		return -1;
-
-	user_data = &event->params.user_data;
-	event_args->handle = user_data->handle;
-	event_args->usr_data = user_data->data;
-	event_args->id = RMT_STORAGE_SEND_USER_DATA;
-
-	kfree(event);
-	return RMT_STORAGE_NO_ERROR;
-}
-
-static int rmt_storage_event_write_iovec_cb(
-		struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_iovec_desc *xfer;
-	uint32_t i, ent, event_type;
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	struct rmt_storage_stats *stats;
-#endif
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_WRITE_IOVEC)
-		return -EINVAL;
-
-	pr_info("%s: write iovec callback received\n", __func__);
-	xdr_recv_uint32(xdr, &event_args->handle);
-	xdr_recv_uint32(xdr, &ent);
-	pr_debug("handle = %d\n", event_args->handle);
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	stats = &client_stats[event_args->handle - 1];
-	stats->wr_stats.start = ktime_get();
-#endif
-	for (i = 0; i < ent; i++) {
-		xfer = &event_args->xfer_desc[i];
-		xdr_recv_uint32(xdr, &xfer->sector_addr);
-		xdr_recv_uint32(xdr, &xfer->data_phy_addr);
-		xdr_recv_uint32(xdr, &xfer->num_sector);
-
-		if (rmt_storage_validate_iovec(event_args->handle, xfer))
-			return -EINVAL;
-
-		pr_debug("sec_addr = %u, data_addr = %x, num_sec = %d\n",
-			xfer->sector_addr, xfer->data_phy_addr,
-			xfer->num_sector);
-	}
-	xdr_recv_uint32(xdr, &event_args->xfer_cnt);
-	event_args->id = RMT_STORAGE_WRITE;
-	if (atomic_inc_return(&rmc->wcount) == 1)
-		wake_lock(&rmc->wlock);
-
-	pr_debug("iovec transfer count = %d\n\n", event_args->xfer_cnt);
-	return RMT_STORAGE_NO_ERROR;
-}
-
-static int rmt_storage_event_read_iovec_cb(
-		struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_iovec_desc *xfer;
-	uint32_t i, ent, event_type;
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	struct rmt_storage_stats *stats;
-#endif
-
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_READ_IOVEC)
-		return -EINVAL;
-
-	pr_info("%s: read iovec callback received\n", __func__);
-	xdr_recv_uint32(xdr, &event_args->handle);
-	xdr_recv_uint32(xdr, &ent);
-	pr_debug("handle = %d\n", event_args->handle);
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	stats = &client_stats[event_args->handle - 1];
-	stats->rd_stats.start = ktime_get();
-#endif
-	for (i = 0; i < ent; i++) {
-		xfer = &event_args->xfer_desc[i];
-		xdr_recv_uint32(xdr, &xfer->sector_addr);
-		xdr_recv_uint32(xdr, &xfer->data_phy_addr);
-		xdr_recv_uint32(xdr, &xfer->num_sector);
-
-		if (rmt_storage_validate_iovec(event_args->handle, xfer))
-			return -EINVAL;
-
-		pr_debug("sec_addr = %u, data_addr = %x, num_sec = %d\n",
-			xfer->sector_addr, xfer->data_phy_addr,
-			xfer->num_sector);
-	}
-	xdr_recv_uint32(xdr, &event_args->xfer_cnt);
-	event_args->id = RMT_STORAGE_READ;
-	if (atomic_inc_return(&rmc->wcount) == 1)
-		wake_lock(&rmc->wlock);
-
-	pr_debug("iovec transfer count = %d\n\n", event_args->xfer_cnt);
-	return RMT_STORAGE_NO_ERROR;
-}
-
-#ifdef CONFIG_MSM_SDIO_SMEM
-static int sdio_smem_cb(int event)
-{
-	pr_debug("%s: Received event %d\n", __func__, event);
-
-	switch (event) {
-	case SDIO_SMEM_EVENT_READ_DONE:
-		pr_debug("Read done\n");
-		break;
-	case SDIO_SMEM_EVENT_READ_ERR:
-		pr_err("Read overflow\n");
-		return -EIO;
-	default:
-		pr_err("Unhandled event\n");
-	}
-	return 0;
-}
-
-static int rmt_storage_sdio_smem_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct rmt_shrd_mem_param *shrd_mem;
-
-	sdio_smem = container_of(pdev, struct sdio_smem_client, plat_dev);
-
-	/* SDIO SMEM is supported only for MDM */
-	shrd_mem = rmt_storage_get_shrd_mem(RAMFS_MDM_STORAGE_ID);
-	if (!shrd_mem) {
-		pr_err("%s: No shared mem entry for sid=0x%08x\n",
-		       __func__, (uint32_t)RAMFS_MDM_STORAGE_ID);
-		return -ENOMEM;
-	}
-	sdio_smem->buf = __va(shrd_mem->start);
-	sdio_smem->size = shrd_mem->size;
-	sdio_smem->cb_func = sdio_smem_cb;
-	ret = sdio_smem_register_client();
-	if (ret)
-		pr_info("%s: Error (%d) registering sdio_smem client\n",
-			__func__, ret);
-	return ret;
-}
-
-static int rmt_storage_sdio_smem_remove(struct platform_device *pdev)
-{
-	sdio_smem_unregister_client();
-	queue_delayed_work(rmc->workq, &sdio_smem_work, 0);
-	return 0;
-}
-
-static int sdio_smem_drv_registered;
-static struct platform_driver sdio_smem_drv = {
-	.probe		= rmt_storage_sdio_smem_probe,
-	.remove		= rmt_storage_sdio_smem_remove,
-	.driver		= {
-		.name	= "SDIO_SMEM_CLIENT",
-		.owner	= THIS_MODULE,
-	},
-};
-
-static void rmt_storage_sdio_smem_work(struct work_struct *work)
-{
-	platform_driver_unregister(&sdio_smem_drv);
-	sdio_smem_drv_registered = 0;
-}
-#endif
-
-static int rmt_storage_event_alloc_rmt_buf_cb(
-		struct rmt_storage_event *event_args,
-		struct msm_rpc_xdr *xdr)
-{
-	struct rmt_storage_client *rs_client;
-	struct rmt_shrd_mem_param *shrd_mem;
-	uint32_t event_type, handle, size;
-#ifdef CONFIG_MSM_SDIO_SMEM
-	int ret;
-#endif
-	xdr_recv_uint32(xdr, &event_type);
-	if (event_type != RMT_STORAGE_EVNT_ALLOC_RMT_BUF)
-		return -EINVAL;
-
-	pr_info("%s: Alloc rmt buf callback received\n", __func__);
-	xdr_recv_uint32(xdr, &handle);
-	xdr_recv_uint32(xdr, &size);
-
-	pr_debug("%s: handle=0x%x size=0x%x\n", __func__, handle, size);
-
-	rs_client = rmt_storage_get_client(handle);
-	if (!rs_client) {
-		pr_err("%s: Unable to find client for handle=%d\n",
-		       __func__, handle);
-		return -EINVAL;
-	}
-
-	rs_client->sid = rmt_storage_get_sid(rs_client->path);
-	if (!rs_client->sid) {
-		pr_err("%s: No storage id found for %s\n",
-		       __func__, rs_client->path);
-		return -EINVAL;
-	}
-
-	shrd_mem = rmt_storage_get_shrd_mem(rs_client->sid);
-	if (!shrd_mem) {
-		pr_err("%s: No shared memory entry found\n",
-		       __func__);
-		return -ENOMEM;
-	}
-	if (shrd_mem->size < size) {
-		pr_err("%s: Size mismatch for handle=%d\n",
-		       __func__, rs_client->handle);
-		return -EINVAL;
-	}
-	pr_debug("%s: %d bytes at phys=0x%x for handle=%d found\n",
-		__func__, size, shrd_mem->start, rs_client->handle);
-
-#ifdef CONFIG_MSM_SDIO_SMEM
-	if (rs_client->srv->prog == MDM_RMT_STORAGE_APIPROG) {
-		if (!sdio_smem_drv_registered) {
-			ret = platform_driver_register(&sdio_smem_drv);
-			if (!ret)
-				sdio_smem_drv_registered = 1;
-			else
-				pr_err("%s: Cant register sdio smem client\n",
-				       __func__);
-		}
-	}
-#endif
-	event_args->id = RMT_STORAGE_NOOP;
-	return (int)shrd_mem->start;
-}
-
-static int handle_rmt_storage_call(struct msm_rpc_client *client,
-				struct rpc_request_hdr *req,
-				struct msm_rpc_xdr *xdr)
-{
-	int rc;
-	uint32_t result = RMT_STORAGE_NO_ERROR;
-	uint32_t rpc_status = RPC_ACCEPTSTAT_SUCCESS;
-	struct rmt_storage_event *event_args;
-	struct rmt_storage_kevent *kevent;
-
-	kevent = kzalloc(sizeof(struct rmt_storage_kevent), GFP_KERNEL);
-	if (!kevent) {
-		rpc_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
-		goto out;
-	}
-	event_args = &kevent->event;
-
-	switch (req->procedure) {
-	case RMT_STORAGE_OPEN_CB_TYPE_PROC:
-		/* client created in cb needs a ref. to its server */
-		event_args->usr_data = client->prog;
-		/* fall through */
-
-	case RMT_STORAGE_WRITE_IOVEC_CB_TYPE_PROC:
-		/* fall through */
-
-	case RMT_STORAGE_READ_IOVEC_CB_TYPE_PROC:
-		/* fall through */
-
-	case RMT_STORAGE_ALLOC_RMT_BUF_CB_TYPE_PROC:
-		/* fall through */
-
-	case RMT_STORAGE_EVENT_CB_TYPE_PROC: {
-		uint32_t cb_id;
-		int (*cb_func)(struct rmt_storage_event *event_args,
-				struct msm_rpc_xdr *xdr);
-
-		xdr_recv_uint32(xdr, &cb_id);
-		cb_func = msm_rpc_get_cb_func(client, cb_id);
-
-		if (!cb_func) {
-			rpc_status = RPC_ACCEPTSTAT_GARBAGE_ARGS;
-			kfree(kevent);
-			goto out;
-		}
-
-		rc = cb_func(event_args, xdr);
-		if (IS_ERR_VALUE(rc)) {
-			pr_err("%s: Invalid parameters received\n", __func__);
-			if (req->procedure == RMT_STORAGE_OPEN_CB_TYPE_PROC)
-				result = 0; /* bad handle to signify err */
-			else
-				result = RMT_STORAGE_ERROR_PARAM;
-			kfree(kevent);
-			goto out;
-		}
-		result = (uint32_t) rc;
-		break;
-	}
-
-	default:
-		kfree(kevent);
-		pr_err("%s: unknown procedure %d\n", __func__, req->procedure);
-		rpc_status = RPC_ACCEPTSTAT_PROC_UNAVAIL;
-		goto out;
-	}
-
-	if (kevent->event.id != RMT_STORAGE_NOOP) {
-		put_event(rmc, kevent);
-		atomic_inc(&rmc->total_events);
-		wake_up(&rmc->event_q);
-	} else
-		kfree(kevent);
-
-out:
-	pr_debug("%s: Sending result=0x%x\n", __func__, result);
-	xdr_start_accepted_reply(xdr, rpc_status);
-	xdr_send_uint32(xdr, &result);
-	rc = xdr_send_msg(xdr);
-	if (rc)
-		pr_err("%s: send accepted reply failed: %d\n", __func__, rc);
-
-	return rc;
-}
-
-static int rmt_storage_open(struct inode *ip, struct file *fp)
-{
-	int ret = 0;
-
-	spin_lock(&rmc->lock);
-	if (!rmc->open_excl)
-		rmc->open_excl = 1;
-	else
-		ret = -EBUSY;
-	spin_unlock(&rmc->lock);
-
-	return ret;
-}
-
-static int rmt_storage_release(struct inode *ip, struct file *fp)
-{
-	spin_lock(&rmc->lock);
-	rmc->open_excl = 0;
-	spin_unlock(&rmc->lock);
-
-	return 0;
-}
-
-static long rmt_storage_ioctl(struct file *fp, unsigned int cmd,
-			    unsigned long arg)
-{
-	int ret = 0;
-	struct rmt_storage_kevent *kevent;
-	struct rmt_storage_send_sts status;
-	static struct msm_rpc_client *rpc_client;
-	struct rmt_shrd_mem_param usr_shrd_mem, *shrd_mem;
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	struct rmt_storage_stats *stats;
-	struct rmt_storage_op_stats *op_stats;
-	ktime_t curr_stat;
-#endif
-
-	switch (cmd) {
-
-	case RMT_STORAGE_SHRD_MEM_PARAM:
-		pr_debug("%s: get shared memory parameters ioctl\n", __func__);
-		if (copy_from_user(&usr_shrd_mem, (void __user *)arg,
-				sizeof(struct rmt_shrd_mem_param))) {
-			pr_err("%s: copy from user failed\n\n", __func__);
-			ret = -EFAULT;
-			break;
-		}
-
-		shrd_mem = rmt_storage_get_shrd_mem(usr_shrd_mem.sid);
-		if (!shrd_mem) {
-			pr_err("%s: invalid sid (0x%x)\n", __func__,
-			       usr_shrd_mem.sid);
-			ret = -EFAULT;
-			break;
-		}
-
-		if (copy_to_user((void __user *)arg, shrd_mem,
-			sizeof(struct rmt_shrd_mem_param))) {
-			pr_err("%s: copy to user failed\n\n", __func__);
-			ret = -EFAULT;
-		}
-		break;
-
-	case RMT_STORAGE_WAIT_FOR_REQ:
-		pr_debug("%s: wait for request ioctl\n", __func__);
-		if (atomic_read(&rmc->total_events) == 0) {
-			ret = wait_event_interruptible(rmc->event_q,
-				atomic_read(&rmc->total_events) != 0);
-		}
-		if (ret < 0)
-			break;
-		atomic_dec(&rmc->total_events);
-
-		kevent = get_event(rmc);
-		WARN_ON(kevent == NULL);
-		if (copy_to_user((void __user *)arg, &kevent->event,
-			sizeof(struct rmt_storage_event))) {
-			pr_err("%s: copy to user failed\n\n", __func__);
-			ret = -EFAULT;
-		}
-		kfree(kevent);
-		break;
-
-	case RMT_STORAGE_SEND_STATUS:
-		pr_info("%s: send status ioctl\n", __func__);
-		if (copy_from_user(&status, (void __user *)arg,
-				sizeof(struct rmt_storage_send_sts))) {
-			pr_err("%s: copy from user failed\n\n", __func__);
-			ret = -EFAULT;
-			if (atomic_dec_return(&rmc->wcount) == 0)
-				wake_unlock(&rmc->wlock);
-			break;
-		}
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-		stats = &client_stats[status.handle - 1];
-		if (status.xfer_dir == RMT_STORAGE_WRITE)
-			op_stats = &stats->wr_stats;
-		else
-			op_stats = &stats->rd_stats;
-		curr_stat = ktime_sub(ktime_get(), op_stats->start);
-		op_stats->total = ktime_add(op_stats->total, curr_stat);
-		op_stats->count++;
-		if (curr_stat.tv64 < stats->min.tv64)
-			op_stats->min = curr_stat;
-		if (curr_stat.tv64 > stats->max.tv64)
-			op_stats->max = curr_stat;
-#endif
-		pr_debug("%s: \thandle=%d err_code=%d data=0x%x\n", __func__,
-			status.handle, status.err_code, status.data);
-		rpc_client = rmt_storage_get_rpc_client(status.handle);
-		if (rpc_client)
-			ret = msm_rpc_client_req2(rpc_client,
-				RMT_STORAGE_OP_FINISH_PROC,
-				rmt_storage_send_sts_arg,
-				&status, NULL, NULL, -1);
-		else
-			ret = -EINVAL;
-		if (ret < 0)
-			pr_err("%s: send status failed with ret val = %d\n",
-				__func__, ret);
-		if (atomic_dec_return(&rmc->wcount) == 0)
-			wake_unlock(&rmc->wlock);
-		break;
-
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-struct rmt_storage_sync_recv_arg {
-	int data;
-};
-
-static int rmt_storage_receive_sync_arg(struct msm_rpc_client *client,
-				struct msm_rpc_xdr *xdr, void *data)
-{
-	struct rmt_storage_sync_recv_arg *args = data;
-	struct rmt_storage_srv *srv;
-
-	srv = rmt_storage_get_srv(client->prog);
-	if (!srv)
-		return -EINVAL;
-	xdr_recv_int32(xdr, &args->data);
-	srv->sync_token = args->data;
-	return 0;
-}
-
-static int rmt_storage_force_sync(struct msm_rpc_client *client)
-{
-	struct rmt_storage_sync_recv_arg args;
-	int rc;
-	rc = msm_rpc_client_req2(client,
-			RMT_STORAGE_FORCE_SYNC_PROC, NULL, NULL,
-			rmt_storage_receive_sync_arg, &args, -1);
-	if (rc) {
-		pr_err("%s: force sync RPC req failed: %d\n", __func__, rc);
-		return rc;
-	}
-	return 0;
-}
-
-struct rmt_storage_sync_sts_arg {
-	int token;
-};
-
-static int rmt_storage_send_sync_sts_arg(struct msm_rpc_client *client,
-				struct msm_rpc_xdr *xdr, void *data)
-{
-	struct rmt_storage_sync_sts_arg *req = data;
-
-	xdr_send_int32(xdr, &req->token);
-	return 0;
-}
-
-static int rmt_storage_receive_sync_sts_arg(struct msm_rpc_client *client,
-				struct msm_rpc_xdr *xdr, void *data)
-{
-	struct rmt_storage_sync_recv_arg *args = data;
-
-	xdr_recv_int32(xdr, &args->data);
-	return 0;
-}
-
-static int rmt_storage_get_sync_status(struct msm_rpc_client *client)
-{
-	struct rmt_storage_sync_recv_arg recv_args;
-	struct rmt_storage_sync_sts_arg send_args;
-	struct rmt_storage_srv *srv;
-	int rc;
-
-	srv = rmt_storage_get_srv(client->prog);
-	if (!srv)
-		return -EINVAL;
-
-	if (srv->sync_token < 0)
-		return -EINVAL;
-
-	send_args.token = srv->sync_token;
-	rc = msm_rpc_client_req2(client,
-			RMT_STORAGE_GET_SYNC_STATUS_PROC,
-			rmt_storage_send_sync_sts_arg, &send_args,
-			rmt_storage_receive_sync_sts_arg, &recv_args, -1);
-	if (rc) {
-		pr_err("%s: sync status RPC req failed: %d\n", __func__, rc);
-		return rc;
-	}
-	return recv_args.data;
-}
-
-static int rmt_storage_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	unsigned long vsize = vma->vm_end - vma->vm_start;
-	int ret = -EINVAL;
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				 vsize, vma->vm_page_prot);
-	if (ret < 0)
-		pr_err("%s: failed with return val %d\n", __func__, ret);
-	return ret;
-}
-
-struct rmt_storage_reg_cb_args {
-	uint32_t event;
-	uint32_t cb_id;
-};
-
-static int rmt_storage_arg_cb(struct msm_rpc_client *client,
-		struct msm_rpc_xdr *xdr, void *data)
-{
-	struct rmt_storage_reg_cb_args *args = data;
-
-	xdr_send_uint32(xdr, &args->event);
-	xdr_send_uint32(xdr, &args->cb_id);
-	return 0;
-}
-
-static int rmt_storage_reg_cb(struct msm_rpc_client *client,
-			      uint32_t proc, uint32_t event, void *callback)
-{
-	struct rmt_storage_reg_cb_args args;
-	int rc, cb_id;
-	int retries = 10;
-
-	cb_id = msm_rpc_add_cb_func(client, callback);
-	if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
-		return cb_id;
-
-	args.event = event;
-	args.cb_id = cb_id;
-
-	while (retries) {
-		rc = msm_rpc_client_req2(client, proc, rmt_storage_arg_cb,
-					 &args, NULL, NULL, -1);
-		if (rc != -ETIMEDOUT)
-			break;
-		retries--;
-		udelay(1000);
-	}
-	if (rc)
-		pr_err("%s: Failed to register callback for event %d\n",
-				__func__, event);
-	return rc;
-}
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-static int rmt_storage_stats_open(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
-static ssize_t rmt_storage_stats_read(struct file *file, char __user *ubuf,
-		size_t count, loff_t *ppos)
-{
-	uint32_t tot_clients;
-	char buf[512];
-	int max, j, i = 0;
-	struct rmt_storage_stats *stats;
-
-	max = sizeof(buf) - 1;
-	tot_clients = find_first_zero_bit(&rmc->cids, sizeof(rmc->cids)) - 1;
-
-	for (j = 0; j < tot_clients; j++) {
-		stats = &client_stats[j];
-		i += scnprintf(buf + i, max - i, "stats for partition %s:\n",
-				stats->path);
-		i += scnprintf(buf + i, max - i, "Min read time: %lld us\n",
-				ktime_to_us(stats->rd_stats.min));
-		i += scnprintf(buf + i, max - i, "Max read time: %lld us\n",
-				ktime_to_us(stats->rd_stats.max));
-		i += scnprintf(buf + i, max - i, "Total read time: %lld us\n",
-				ktime_to_us(stats->rd_stats.total));
-		i += scnprintf(buf + i, max - i, "Total read requests: %ld\n",
-				stats->rd_stats.count);
-		if (stats->count)
-			i += scnprintf(buf + i, max - i,
-				"Avg read time: %lld us\n",
-				div_s64(ktime_to_us(stats->total),
-				stats->rd_stats.count));
-
-		i += scnprintf(buf + i, max - i, "Min write time: %lld us\n",
-				ktime_to_us(stats->wr_stats.min));
-		i += scnprintf(buf + i, max - i, "Max write time: %lld us\n",
-				ktime_to_us(stats->wr_stats.max));
-		i += scnprintf(buf + i, max - i, "Total write time: %lld us\n",
-				ktime_to_us(stats->wr_stats.total));
-		i += scnprintf(buf + i, max - i, "Total read requests: %ld\n",
-				stats->wr_stats.count);
-		if (stats->count)
-			i += scnprintf(buf + i, max - i,
-				"Avg write time: %lld us\n",
-				div_s64(ktime_to_us(stats->total),
-				stats->wr_stats.count));
-	}
-	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
-}
-
-static const struct file_operations debug_ops = {
-	.owner = THIS_MODULE,
-	.open = rmt_storage_stats_open,
-	.read = rmt_storage_stats_read,
-};
-#endif
-
-const struct file_operations rmt_storage_fops = {
-	.owner = THIS_MODULE,
-	.open = rmt_storage_open,
-	.unlocked_ioctl	 = rmt_storage_ioctl,
-	.mmap = rmt_storage_mmap,
-	.release = rmt_storage_release,
-};
-
-static struct miscdevice rmt_storage_device = {
-	.minor = MISC_DYNAMIC_MINOR,
-	.name = "rmt_storage",
-	.fops = &rmt_storage_fops,
-};
-
-static int rmt_storage_get_ramfs(struct rmt_storage_srv *srv)
-{
-	struct shared_ramfs_table *ramfs_table;
-	struct shared_ramfs_entry *ramfs_entry;
-	int index, ret;
-
-	if (srv->prog != MSM_RMT_STORAGE_APIPROG)
-		return 0;
-
-	ramfs_table = smem_alloc(SMEM_SEFS_INFO,
-			sizeof(struct shared_ramfs_table));
-
-	if (!ramfs_table) {
-		pr_err("%s: No RAMFS table in SMEM\n", __func__);
-		return -ENOENT;
-	}
-
-	if ((ramfs_table->magic_id != (u32) RAMFS_INFO_MAGICNUMBER) ||
-	    (ramfs_table->version != (u32) RAMFS_INFO_VERSION)) {
-		pr_err("%s: Magic / Version mismatch:, "
-		       "magic_id=%#x, format_version=%#x\n", __func__,
-		       ramfs_table->magic_id, ramfs_table->version);
-		return -ENOENT;
-	}
-
-	for (index = 0; index < ramfs_table->entries; index++) {
-		ramfs_entry = &ramfs_table->ramfs_entry[index];
-		if (!ramfs_entry->client_id ||
-		    ramfs_entry->client_id == (u32) RAMFS_DEFAULT)
-			break;
-
-		pr_info("%s: RAMFS entry: addr = 0x%08x, size = 0x%08x\n",
-			__func__, ramfs_entry->base_addr, ramfs_entry->size);
-
-		ret = rmt_storage_add_shrd_mem(ramfs_entry->client_id,
-					       ramfs_entry->base_addr,
-					       ramfs_entry->size,
-					       NULL,
-					       ramfs_entry,
-					       srv);
-		if (ret) {
-			pr_err("%s: Error (%d) adding shared mem\n",
-			       __func__, ret);
-			return ret;
-		}
-	}
-	return 0;
-}
-
-static ssize_t
-show_force_sync(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	struct platform_device *pdev;
-	struct rpcsvr_platform_device *rpc_pdev;
-	struct rmt_storage_srv *srv;
-
-	pdev = container_of(dev, struct platform_device, dev);
-	rpc_pdev = container_of(pdev, struct rpcsvr_platform_device, base);
-	srv = rmt_storage_get_srv(rpc_pdev->prog);
-	if (!srv) {
-		pr_err("%s: Unable to find prog=0x%x\n", __func__,
-		       rpc_pdev->prog);
-		return -EINVAL;
-	}
-
-	return rmt_storage_force_sync(srv->rpc_client);
-}
-
-/* Returns -EINVAL for invalid sync token and an error value for any failure
- * in RPC call. Upon success, it returns a sync status of 1 (sync done)
- * or 0 (sync still pending).
- */
-static ssize_t
-show_sync_sts(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct platform_device *pdev;
-	struct rpcsvr_platform_device *rpc_pdev;
-	struct rmt_storage_srv *srv;
-
-	pdev = container_of(dev, struct platform_device, dev);
-	rpc_pdev = container_of(pdev, struct rpcsvr_platform_device, base);
-	srv = rmt_storage_get_srv(rpc_pdev->prog);
-	if (!srv) {
-		pr_err("%s: Unable to find prog=0x%x\n", __func__,
-		       rpc_pdev->prog);
-		return -EINVAL;
-	}
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			rmt_storage_get_sync_status(srv->rpc_client));
-}
-
-/*
- * Initiate the remote storage force sync and wait until
- * sync status is done or maximum 4 seconds in the reboot notifier.
- * Usually RMT storage sync is not taking more than 2 seconds
- * for encryption and sync.
- */
-#define MAX_GET_SYNC_STATUS_TRIES 200
-#define RMT_SLEEP_INTERVAL_MS 20
-static int rmt_storage_reboot_call(
-	struct notifier_block *this, unsigned long code, void *cmd)
-{
-	int ret, count = 0;
-
-	/*
-	 * In recovery mode RMT daemon is not available,
-	 * so return from reboot notifier without initiating
-	 * force sync.
-	 */
-	spin_lock(&rmc->lock);
-	if (!rmc->open_excl) {
-		spin_unlock(&rmc->lock);
-		msm_rpc_unregister_client(rmt_srv->rpc_client);
-		return NOTIFY_DONE;
-	}
-
-	spin_unlock(&rmc->lock);
-	switch (code) {
-	case SYS_RESTART:
-	case SYS_HALT:
-	case SYS_POWER_OFF:
-		pr_info("%s: Sending force-sync RPC request\n", __func__);
-		ret = rmt_storage_force_sync(rmt_srv->rpc_client);
-		if (ret)
-			break;
-
-		do {
-			count++;
-			msleep(RMT_SLEEP_INTERVAL_MS);
-			ret = rmt_storage_get_sync_status(rmt_srv->rpc_client);
-		} while (ret != 1 && count < MAX_GET_SYNC_STATUS_TRIES);
-
-		if (ret == 1)
-			pr_info("%s: Final-sync successful\n", __func__);
-		else
-			pr_err("%s: Final-sync failed\n", __func__);
-
-		/*
-		 * Check if any ongoing efs_sync triggered just before force
-		 * sync is pending. If so, wait for 4sec for completing efs_sync
-		 * before unregistring client.
-		 */
-		count = 0;
-		while (count < MAX_GET_SYNC_STATUS_TRIES) {
-			if (atomic_read(&rmc->wcount) == 0) {
-				break;
-			} else {
-				count++;
-				msleep(RMT_SLEEP_INTERVAL_MS);
-			}
-		}
-		if (atomic_read(&rmc->wcount))
-			pr_err("%s: Efs_sync still incomplete\n", __func__);
-
-		pr_info("%s: Un-register RMT storage client\n", __func__);
-		msm_rpc_unregister_client(rmt_srv->rpc_client);
-		break;
-
-	default:
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-/*
- * For the RMT storage sync, RPC channels are required. If we do not
- * give max priority to RMT storage reboot notifier, RPC channels may get
- * closed before RMT storage sync completed if RPC reboot notifier gets
- * executed before this remotefs reboot notifier. Hence give the maximum
- * priority to this reboot notifier.
- */
-static struct notifier_block rmt_storage_reboot_notifier = {
-	.notifier_call = rmt_storage_reboot_call,
-	.priority = INT_MAX,
-};
-
-static int rmt_storage_init_ramfs(struct rmt_storage_srv *srv)
-{
-	struct shared_ramfs_table *ramfs_table;
-
-	if (srv->prog != MSM_RMT_STORAGE_APIPROG)
-		return 0;
-
-	ramfs_table = smem_alloc(SMEM_SEFS_INFO,
-				 sizeof(struct shared_ramfs_table));
-
-	if (!ramfs_table) {
-		pr_err("%s: No RAMFS table in SMEM\n", __func__);
-		return -ENOENT;
-	}
-
-	if (ramfs_table->magic_id == RAMFS_INFO_MAGICNUMBER) {
-		pr_debug("RAMFS table already filled... skipping %s", \
-			__func__);
-		return 0;
-	}
-
-	ramfs_table->ramfs_entry[0].client_id  = RAMFS_MODEMSTORAGE_ID;
-	ramfs_table->ramfs_entry[0].base_addr  = RAMFS_SHARED_EFS_RAM_BASE;
-	ramfs_table->ramfs_entry[0].size       = RAMFS_SHARED_EFS_RAM_SIZE;
-	ramfs_table->ramfs_entry[0].client_sts = RAMFS_DEFAULT;
-
-	ramfs_table->ramfs_entry[1].client_id  = RAMFS_SSD_STORAGE_ID;
-	ramfs_table->ramfs_entry[1].base_addr  = RAMFS_SHARED_SSD_RAM_BASE;
-	ramfs_table->ramfs_entry[1].size       = RAMFS_SHARED_SSD_RAM_SIZE;
-	ramfs_table->ramfs_entry[1].client_sts = RAMFS_DEFAULT;
-
-	ramfs_table->entries  = 2;
-	ramfs_table->version  = RAMFS_INFO_VERSION;
-	ramfs_table->magic_id = RAMFS_INFO_MAGICNUMBER;
-
-	return 0;
-}
-
-static void rmt_storage_set_client_status(struct rmt_storage_srv *srv,
-					  int enable)
-{
-	struct rmt_shrd_mem *shrd_mem;
-
-	spin_lock(&rmc->lock);
-	list_for_each_entry(shrd_mem, &rmc->shrd_mem_list, list)
-		if (shrd_mem->srv->prog == srv->prog)
-			if (shrd_mem->smem_info)
-				shrd_mem->smem_info->client_sts = !!enable;
-	spin_unlock(&rmc->lock);
-}
-
-static DEVICE_ATTR(force_sync, S_IRUGO | S_IWUSR, show_force_sync, NULL);
-static DEVICE_ATTR(sync_sts, S_IRUGO | S_IWUSR, show_sync_sts, NULL);
-static struct attribute *dev_attrs[] = {
-	&dev_attr_force_sync.attr,
-	&dev_attr_sync_sts.attr,
-	NULL,
-};
-static struct attribute_group dev_attr_grp = {
-	.attrs = dev_attrs,
-};
-
-static void handle_restart_teardown(struct msm_rpc_client *client)
-{
-	struct rmt_storage_srv *srv;
-
-	srv = rmt_storage_get_srv(client->prog);
-	if (!srv)
-		return;
-	pr_debug("%s: Modem restart for 0x%08x\n", __func__, srv->prog);
-	cancel_delayed_work_sync(&srv->restart_work);
-}
-
-#define RESTART_WORK_DELAY_MS	1000
-
-static void handle_restart_setup(struct msm_rpc_client *client)
-{
-	struct rmt_storage_srv *srv;
-
-	srv = rmt_storage_get_srv(client->prog);
-	if (!srv)
-		return;
-	pr_debug("%s: Scheduling restart for 0x%08x\n", __func__, srv->prog);
-	queue_delayed_work(rmc->workq, &srv->restart_work,
-			msecs_to_jiffies(RESTART_WORK_DELAY_MS));
-}
-
-static int rmt_storage_reg_callbacks(struct msm_rpc_client *client)
-{
-	int ret;
-
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_OPEN_PROC,
-				 RMT_STORAGE_EVNT_OPEN,
-				 rmt_storage_event_open_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_CB_PROC,
-				 RMT_STORAGE_EVNT_CLOSE,
-				 rmt_storage_event_close_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_CB_PROC,
-				 RMT_STORAGE_EVNT_WRITE_BLOCK,
-				 rmt_storage_event_write_block_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_CB_PROC,
-				 RMT_STORAGE_EVNT_GET_DEV_ERROR,
-				 rmt_storage_event_get_err_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_WRITE_IOVEC_PROC,
-				 RMT_STORAGE_EVNT_WRITE_IOVEC,
-				 rmt_storage_event_write_iovec_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_READ_IOVEC_PROC,
-				 RMT_STORAGE_EVNT_READ_IOVEC,
-				 rmt_storage_event_read_iovec_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_CB_PROC,
-				 RMT_STORAGE_EVNT_SEND_USER_DATA,
-				 rmt_storage_event_user_data_cb);
-	if (ret)
-		return ret;
-	ret = rmt_storage_reg_cb(client,
-				 RMT_STORAGE_REGISTER_ALLOC_RMT_BUF_PROC,
-				 RMT_STORAGE_EVNT_ALLOC_RMT_BUF,
-				 rmt_storage_event_alloc_rmt_buf_cb);
-	if (ret)
-		pr_info("%s: Unable (%d) registering aloc_rmt_buf\n",
-			__func__, ret);
-
-	pr_debug("%s: Callbacks (re)registered for 0x%08x\n\n", __func__,
-		 client->prog);
-	return 0;
-}
-
-static void rmt_storage_restart_work(struct work_struct *work)
-{
-	struct rmt_storage_srv *srv;
-	int ret;
-
-	srv = container_of((struct delayed_work *)work,
-			   struct rmt_storage_srv, restart_work);
-	if (!rmt_storage_get_srv(srv->prog)) {
-		pr_err("%s: Invalid server\n", __func__);
-		return;
-	}
-
-	ret = rmt_storage_reg_callbacks(srv->rpc_client);
-	if (!ret)
-		return;
-
-	pr_err("%s: Error (%d) re-registering callbacks for0x%08x\n",
-	       __func__, ret, srv->prog);
-
-	if (!msm_rpc_client_in_reset(srv->rpc_client))
-		queue_delayed_work(rmc->workq, &srv->restart_work,
-				msecs_to_jiffies(RESTART_WORK_DELAY_MS));
-}
-
-static int rmt_storage_probe(struct platform_device *pdev)
-{
-	struct rpcsvr_platform_device *dev;
-	int ret;
-
-	dev = container_of(pdev, struct rpcsvr_platform_device, base);
-	rmt_srv = rmt_storage_get_srv(dev->prog);
-
-	if (!rmt_srv) {
-		pr_err("%s: Invalid prog = %#x\n", __func__, dev->prog);
-		return -ENXIO;
-	}
-
-	rmt_storage_init_ramfs(rmt_srv);
-	rmt_storage_get_ramfs(rmt_srv);
-
-	INIT_DELAYED_WORK(&rmt_srv->restart_work, rmt_storage_restart_work);
-
-	/* Client Registration */
-	rmt_srv->rpc_client = msm_rpc_register_client2("rmt_storage",
-						   dev->prog, dev->vers, 1,
-						   handle_rmt_storage_call);
-	if (IS_ERR(rmt_srv->rpc_client)) {
-		pr_err("%s: Unable to register client (prog %.8x vers %.8x)\n",
-				__func__, dev->prog, dev->vers);
-		ret = PTR_ERR(rmt_srv->rpc_client);
-		return ret;
-	}
-
-	ret = msm_rpc_register_reset_callbacks(rmt_srv->rpc_client,
-		handle_restart_teardown,
-		handle_restart_setup);
-	if (ret)
-		goto unregister_client;
-
-	pr_info("%s: Remote storage RPC client (0x%x)initialized\n",
-		__func__, dev->prog);
-
-	/* register server callbacks */
-	ret = rmt_storage_reg_callbacks(rmt_srv->rpc_client);
-	if (ret)
-		goto unregister_client;
-
-	/* For targets that poll SMEM, set status to ready */
-	rmt_storage_set_client_status(rmt_srv, 1);
-
-	ret = register_reboot_notifier(&rmt_storage_reboot_notifier);
-	if (ret) {
-		pr_err("%s: Failed to register reboot notifier", __func__);
-		goto unregister_client;
-	}
-
-	ret = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
-	if (ret)
-		pr_err("%s: Failed to create sysfs node: %d\n", __func__, ret);
-
-	return 0;
-
-unregister_client:
-	msm_rpc_unregister_client(rmt_srv->rpc_client);
-	return ret;
-}
-
-static void rmt_storage_client_shutdown(struct platform_device *pdev)
-{
-	struct rpcsvr_platform_device *dev;
-	struct rmt_storage_srv *srv;
-
-	dev = container_of(pdev, struct rpcsvr_platform_device, base);
-	srv = rmt_storage_get_srv(dev->prog);
-	rmt_storage_set_client_status(srv, 0);
-}
-
-static void rmt_storage_destroy_rmc(void)
-{
-	wake_lock_destroy(&rmc->wlock);
-}
-
-static void __init rmt_storage_init_client_info(void)
-{
-	/* Initialization */
-	init_waitqueue_head(&rmc->event_q);
-	spin_lock_init(&rmc->lock);
-	atomic_set(&rmc->total_events, 0);
-	INIT_LIST_HEAD(&rmc->event_list);
-	INIT_LIST_HEAD(&rmc->client_list);
-	INIT_LIST_HEAD(&rmc->shrd_mem_list);
-	/* The client expects a non-zero return value for
-	 * its open requests. Hence reserve 0 bit.  */
-	__set_bit(0, &rmc->cids);
-	atomic_set(&rmc->wcount, 0);
-	wake_lock_init(&rmc->wlock, WAKE_LOCK_SUSPEND, "rmt_storage");
-}
-
-static struct rmt_storage_srv msm_srv = {
-	.prog = MSM_RMT_STORAGE_APIPROG,
-	.plat_drv = {
-		.probe	  = rmt_storage_probe,
-		.shutdown = rmt_storage_client_shutdown,
-		.driver	  = {
-			.name	= "rs300000a7",
-			.owner	= THIS_MODULE,
-		},
-	},
-};
-
-static struct rmt_storage_srv mdm_srv = {
-	.prog = MDM_RMT_STORAGE_APIPROG,
-	.plat_drv = {
-		.probe	  = rmt_storage_probe,
-		.shutdown = rmt_storage_client_shutdown,
-		.driver	  = {
-			.name	= "rs300100a7",
-			.owner	= THIS_MODULE,
-		},
-	},
-};
-
-static struct rmt_storage_srv *rmt_storage_get_srv(uint32_t prog)
-{
-	if (prog == MSM_RMT_STORAGE_APIPROG)
-		return &msm_srv;
-	if (prog == MDM_RMT_STORAGE_APIPROG)
-		return &mdm_srv;
-	return NULL;
-}
-
-
-static uint32_t rmt_storage_get_sid(const char *path)
-{
-	if (!strncmp(path, "/boot/modem_fs1", MAX_PATH_NAME))
-		return RAMFS_MODEMSTORAGE_ID;
-	if (!strncmp(path, "/boot/modem_fs2", MAX_PATH_NAME))
-		return RAMFS_MODEMSTORAGE_ID;
-	if (!strncmp(path, "/boot/modem_fsg", MAX_PATH_NAME))
-		return RAMFS_MODEMSTORAGE_ID;
-	if (!strncmp(path, "/q6_fs1_parti_id_0x59", MAX_PATH_NAME))
-		return RAMFS_MDM_STORAGE_ID;
-	if (!strncmp(path, "/q6_fs2_parti_id_0x5A", MAX_PATH_NAME))
-		return RAMFS_MDM_STORAGE_ID;
-	if (!strncmp(path, "/q6_fsg_parti_id_0x5B", MAX_PATH_NAME))
-		return RAMFS_MDM_STORAGE_ID;
-	if (!strncmp(path, "ssd", MAX_PATH_NAME))
-		return RAMFS_SSD_STORAGE_ID;
-	return 0;
-}
-
-static int __init rmt_storage_init(void)
-{
-#ifdef CONFIG_MSM_SDIO_SMEM
-	void *mdm_local_buf;
-#endif
-	int ret = 0;
-
-	rmc = kzalloc(sizeof(struct rmt_storage_client_info), GFP_KERNEL);
-	if (!rmc) {
-		pr_err("%s: Unable to allocate memory\n", __func__);
-		return  -ENOMEM;
-	}
-	rmt_storage_init_client_info();
-
-	ret = platform_driver_register(&msm_srv.plat_drv);
-	if (ret) {
-		pr_err("%s: Unable to register MSM RPC driver\n", __func__);
-		goto rmc_free;
-	}
-
-	ret = platform_driver_register(&mdm_srv.plat_drv);
-	if (ret) {
-		pr_err("%s: Unable to register MDM RPC driver\n", __func__);
-		goto unreg_msm_rpc;
-	}
-
-	ret = misc_register(&rmt_storage_device);
-	if (ret) {
-		pr_err("%s: Unable to register misc device %d\n", __func__,
-				MISC_DYNAMIC_MINOR);
-		goto unreg_mdm_rpc;
-	}
-
-#ifdef CONFIG_MSM_SDIO_SMEM
-	mdm_local_buf = kzalloc(MDM_LOCAL_BUF_SZ, GFP_KERNEL);
-	if (!mdm_local_buf) {
-		pr_err("%s: Unable to allocate shadow mem\n", __func__);
-		ret = -ENOMEM;
-		goto unreg_misc;
-	}
-
-	ret = rmt_storage_add_shrd_mem(RAMFS_MDM_STORAGE_ID,
-				       __pa(mdm_local_buf),
-				       MDM_LOCAL_BUF_SZ,
-				       NULL, NULL, &mdm_srv);
-	if (ret) {
-		pr_err("%s: Unable to add shadow mem entry\n", __func__);
-		goto free_mdm_local_buf;
-	}
-
-	pr_debug("%s: Shadow memory at %p (phys=%lx), %d bytes\n", __func__,
-		 mdm_local_buf, __pa(mdm_local_buf), MDM_LOCAL_BUF_SZ);
-#endif
-
-	rmc->workq = create_singlethread_workqueue("rmt_storage");
-	if (!rmc->workq)
-		return -ENOMEM;
-
-#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
-	stats_dentry = debugfs_create_file("rmt_storage_stats", 0444, 0,
-					NULL, &debug_ops);
-	if (!stats_dentry)
-		pr_err("%s: Failed to create stats debugfs file\n", __func__);
-#endif
-	return 0;
-
-#ifdef CONFIG_MSM_SDIO_SMEM
-free_mdm_local_buf:
-	kfree(mdm_local_buf);
-unreg_misc:
-	misc_deregister(&rmt_storage_device);
-#endif
-unreg_mdm_rpc:
-	platform_driver_unregister(&mdm_srv.plat_drv);
-unreg_msm_rpc:
-	platform_driver_unregister(&msm_srv.plat_drv);
-rmc_free:
-	rmt_storage_destroy_rmc();
-	kfree(rmc);
-	return ret;
-}
-
-module_init(rmt_storage_init);
-MODULE_DESCRIPTION("Remote Storage RPC Client");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 935bb9a..7ad6401 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -280,7 +280,8 @@
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     bool no_kernel_mapping, const void *caller);
+				     const void *caller,
+				     struct dma_attrs *attrs);
 
 struct dma_pool {
 	size_t size;
@@ -321,7 +322,7 @@
 
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-						false, atomic_pool_init);
+						atomic_pool_init, NULL);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
 					   &page, NULL);
@@ -508,19 +509,22 @@
 #define NO_KERNEL_MAPPING_DUMMY	0x2222
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     bool no_kernel_mapping,
-				     const void *caller)
+				     const void *caller,
+				     struct dma_attrs *attrs)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
 	void *ptr;
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+							attrs);
 
 	page = dma_alloc_from_contiguous(dev, count, order);
 	if (!page)
 		return NULL;
 
-	__dma_clear_buffer(page, size);
+	if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+		__dma_clear_buffer(page, size);
 
 	if (!PageHighMem(page)) {
 		__dma_remap(page, size, prot, no_kernel_mapping);
@@ -601,7 +605,7 @@
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, const void *caller,
-			 bool no_kernel_mapping)
+			 struct dma_attrs *attrs)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -642,7 +646,7 @@
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else
 		addr = __alloc_from_contiguous(dev, size, prot, &page,
-						no_kernel_mapping, caller);
+						caller, attrs);
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -659,14 +663,12 @@
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
-	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
-					attrs);
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot,
-			   __builtin_return_address(0), no_kernel_mapping);
+			   __builtin_return_address(0), attrs);
 }
 
 /*
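The net effect of threading struct dma_attrs down into __alloc_from_contiguous() is that a caller of dma_alloc_attrs() can now suppress both the kernel mapping and the initial memset of a CMA buffer. A minimal caller-side sketch, assuming a valid struct device *dev and a size in bytes (illustrative only, not part of the patch):

	struct dma_attrs attrs;
	dma_addr_t handle;
	void *cookie;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);	/* attribute added by this series */

	cookie = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;
	/* with NO_KERNEL_MAPPING the return value is an opaque cookie; use the
	 * buffer only through its dma_addr_t and free it with the same attrs */
	dma_free_attrs(dev, size, cookie, handle, &attrs);
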
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3d52735..947e229 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1519,8 +1519,25 @@
 				const struct mem_type *type)
 {
 	pte_t *pte, *start_pte;
+	pmd_t *base_pmd;
 
-	start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+	base_pmd = pmd_offset(
+			pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+	if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
+		start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Following is needed when new pte is allocated for pmd[1]
+		 * cases, which may happen when base (start) address falls
+		 * under pmd[1].
+		 */
+		if (addr & SECTION_SIZE)
+			start_pte += pte_index(addr);
+#endif
+	} else {
+		start_pte = pte_offset_kernel(base_pmd, addr);
+	}
 
 	pte = start_pte;
 
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 2adcbbc..4745406 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -47,6 +47,7 @@
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	bool in_system;
 	struct mutex lock;
 };
 
@@ -60,6 +61,7 @@
 	unsigned long size;
 	struct cma *cma;
 	const char *name;
+	bool to_system;
 } cma_areas[MAX_CMA_AREAS];
 static unsigned cma_area_count;
 
@@ -170,7 +172,7 @@
 }
 
 static __init struct cma *cma_create_area(unsigned long base_pfn,
-				     unsigned long count)
+				     unsigned long count, bool system)
 {
 	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
 	struct cma *cma;
@@ -184,14 +186,17 @@
 
 	cma->base_pfn = base_pfn;
 	cma->count = count;
+	cma->in_system = system;
 	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
 	if (!cma->bitmap)
 		goto no_mem;
 
-	ret = cma_activate_area(base_pfn, count);
-	if (ret)
-		goto error;
+	if (cma->in_system) {
+		ret = cma_activate_area(base_pfn, count);
+		if (ret)
+			goto error;
+	}
 	mutex_init(&cma->lock);
 
 	pr_debug("%s: returned %p\n", __func__, (void *)cma);
@@ -214,6 +219,7 @@
 	unsigned long len;
 	__be32 *prop;
 	char *name;
+	bool in_system;
 	phys_addr_t limit = MEMBLOCK_ALLOC_ANYWHERE;
 
 	if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
@@ -227,6 +233,8 @@
 	size = be32_to_cpu(prop[1]);
 
 	name = of_get_flat_dt_prop(node, "label", NULL);
+	in_system =
+		of_get_flat_dt_prop(node, "linux,reserve-region", NULL) ? 0 : 1;
 
 	prop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
 	if (prop)
@@ -234,7 +242,8 @@
 
 	pr_info("Found %s, memory base %lx, size %ld MiB, limit %pa\n", uname,
 		(unsigned long)base, (unsigned long)size / SZ_1M, &limit);
-	dma_contiguous_reserve_area(size, &base, limit, name);
+	dma_contiguous_reserve_area(size, &base, limit, name,
+					in_system);
 
 	return 0;
 }
@@ -275,8 +284,8 @@
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)sel_size / SZ_1M);
 
-		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
-		    == 0)
+		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL,
+		    true) == 0)
 			dma_contiguous_def_base = base;
 	}
 #ifdef CONFIG_OF
@@ -299,7 +308,8 @@
  * devices.
  */
 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
-				       phys_addr_t limit, const char *name)
+				       phys_addr_t limit, const char *name,
+				       bool to_system)
 {
 	phys_addr_t base = *res_base;
 	phys_addr_t alignment;
@@ -352,6 +362,7 @@
 	cma_areas[cma_area_count].base = base;
 	cma_areas[cma_area_count].size = size;
 	cma_areas[cma_area_count].name = name;
+	cma_areas[cma_area_count].to_system = to_system;
 	cma_area_count++;
 	*res_base = base;
 
@@ -434,8 +445,9 @@
 	for (i = 0; i < cma_area_count; i++) {
 		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
 		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;
+		bool system = cma_areas[i].to_system;
 
-		cma = cma_create_area(base, count);
+		cma = cma_create_area(base, count, system);
 		if (!IS_ERR(cma))
 			cma_areas[i].cma = cma;
 	}
@@ -485,7 +497,7 @@
 	unsigned long mask, pfn, pageno, start = 0;
 	struct cma *cma = dev_get_cma_area(dev);
 	struct page *page = NULL;
-	int ret;
+	int ret = 0;
 	int tries = 0;
 
 	if (!cma || !cma->count)
@@ -521,7 +533,8 @@
 
 		pfn = cma->base_pfn + pageno;
 		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		if (cma->in_system)
+			ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -573,7 +586,8 @@
 
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-	free_contig_range(pfn, count);
+	if (cma->in_system)
+		free_contig_range(pfn, count);
 	clear_cma_bitmap(cma, pfn, count);
 
 	return true;
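With the new in_system/to_system flag, a contiguous region tagged with "linux,reserve-region" in the device tree is kept out of the page allocator entirely: cma_activate_area() is skipped and allocations are tracked only in the CMA bitmap, so alloc_contig_range()/free_contig_range() are bypassed for it. A hedged sketch of reserving such a region through the updated interface (region name and size are placeholders):

	phys_addr_t base = 0;	/* let memblock choose the base */

	/* last argument false => pages stay outside the system memory map */
	if (dma_contiguous_reserve_area(SZ_16M, &base, MEMBLOCK_ALLOC_ANYWHERE,
					"secure_heap", false))
		pr_err("cma: unable to reserve secure_heap region\n");
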
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index ba13ec1..93e932a 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1508,8 +1508,7 @@
 		return err;
 	}
 	if (pkt_type == CALLBACK_DATA_TYPE) {
-		if (payload_size > driver->itemsize ||
-				payload_size <= MIN_SIZ_ALLOW) {
+		if (payload_size > driver->itemsize) {
 			pr_err("diag: Dropping packet, invalid packet size. Current payload size %d\n",
 				payload_size);
 			driver->dropped_count++;
@@ -1541,6 +1540,11 @@
 			return ret;
 		}
 		/* The packet is for the remote processor */
+		if (payload_size <= MIN_SIZ_ALLOW) {
+			pr_err("diag: Integer underflow in %s, payload size: %d",
+				__func__, payload_size);
+			return -EBADMSG;
+		}
 		token_offset = 4;
 		payload_size -= 4;
 		buf += 4;
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index 0c7c9e0..936541f 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -640,7 +640,6 @@
 	char *hdr;
 	char *bufp;
 	uint32_t read_data;
-	int i;
 
 	memwidth = BMVAL(tmc_readl(drvdata, CORESIGHT_DEVID), 8, 10);
 	if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
@@ -654,16 +653,22 @@
 
 	bufp = drvdata->buf;
 	while (1) {
-		for (i = 0; i < memwords; i++) {
-			read_data = tmc_readl_no_log(drvdata, TMC_RRD);
-			if (read_data == 0xFFFFFFFF)
-				goto out;
-			memcpy(bufp, &read_data, BYTES_PER_WORD);
-			bufp += BYTES_PER_WORD;
+		read_data = tmc_readl_no_log(drvdata, TMC_RRD);
+		if (read_data == 0xFFFFFFFF)
+			goto out;
+		if ((bufp - drvdata->buf) >= drvdata->size) {
+			dev_err(drvdata->dev, "ETF-ETB end marker missing\n");
+			goto out;
 		}
+		memcpy(bufp, &read_data, BYTES_PER_WORD);
+		bufp += BYTES_PER_WORD;
 	}
 
 out:
+	if ((bufp - drvdata->buf) % (memwords * BYTES_PER_WORD))
+		dev_dbg(drvdata->dev, "ETF-ETB data is not %lx bytes aligned\n",
+			(unsigned long) memwords * BYTES_PER_WORD);
+
 	if (drvdata->aborting) {
 		hdr = drvdata->buf - PAGE_SIZE;
 		*(uint32_t *)(hdr + TMC_ETFETB_DUMP_MAGIC_OFF) =
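The rewritten drain loop checks every word against drvdata->size rather than trusting the 0xFFFFFFFF end marker to arrive within a full memwidth burst, so a missing marker can no longer overrun the capture buffer. The shape of the fix, as a generic sketch (register and buffer names are placeholders):

	char *bufp = buf;
	u32 val;

	while ((bufp - buf) < bufsize) {
		val = readl_relaxed(fifo_reg);
		if (val == 0xFFFFFFFF)		/* end-of-trace marker */
			break;
		memcpy(bufp, &val, sizeof(val));
		bufp += sizeof(val);
	}
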
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 8037187..d11a831 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver.
  *
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -422,7 +422,7 @@
 {
 	unsigned n;
 
-	n = len  / sizeof(uint32_t) ;
+	n = len  / sizeof(uint32_t);
 	for (; n > 0; n--) {
 		*iv =  ((*b << 24)      & 0xff000000) |
 				(((*(b+1)) << 16) & 0xff0000)   |
@@ -436,12 +436,12 @@
 	if (n == 3) {
 		*iv = ((*b << 24) & 0xff000000) |
 				(((*(b+1)) << 16) & 0xff0000)   |
-				(((*(b+2)) << 8) & 0xff00)     ;
+				(((*(b+2)) << 8) & 0xff00);
 	} else if (n == 2) {
 		*iv = ((*b << 24) & 0xff000000) |
-				(((*(b+1)) << 16) & 0xff0000)   ;
+				(((*(b+1)) << 16) & 0xff0000);
 	} else if (n == 1) {
-		*iv = ((*b << 24) & 0xff000000) ;
+		*iv = ((*b << 24) & 0xff000000);
 	}
 }
 
@@ -1819,7 +1819,7 @@
 	}
 };
 
-static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req)
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req)
 {
 	uint32_t cfg;
 	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];
@@ -1846,7 +1846,7 @@
 	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
 		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
 	else
-		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ;
+		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE);
 
 	if (req->direction == QCE_OTA_DIR_DOWNLINK)
 		cfg |= 1 << CRYPTO_F9_DIRECTION;
@@ -1885,7 +1885,7 @@
 	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
 		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
 	else
-		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ;
+		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ);
 	if (key_stream_mode)
 		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
 	if (req->direction == QCE_OTA_DIR_DOWNLINK)
@@ -1932,6 +1932,9 @@
 	return 0;
 };
 
+struct qce_pm_table qce_pm_table = {NULL, NULL};
+EXPORT_SYMBOL(qce_pm_table);
+
 int qce_aead_req(void *handle, struct qce_req *q_req)
 {
 	struct qce_device *pce_dev = (struct qce_device *) handle;
@@ -2555,7 +2558,7 @@
 	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
 			cipher_size);
 	if (rc)
-		goto bad ;
+		goto bad;
 
 	/* setup for callback, and issue command to adm */
 	pce_dev->areq = cookie;
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index ff90db7..6ba5063 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -166,6 +166,13 @@
 	unsigned int  flags;
 };
 
+struct qce_pm_table {
+	int (*suspend)(void *handle);
+	int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
 void *qce_open(struct platform_device *pdev, int *rc);
 int qce_close(void *handle);
 int qce_aead_req(void *handle, struct qce_req *req);
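qce_pm_table gives the qcrypto front end one hook for quiescing whichever back end is built in: the legacy qce.c above exports an empty {NULL, NULL} table, while qce50.c below installs real handlers that disconnect and re-establish the SPS/BAM pipes around suspend. Callers are expected to probe the pointers first, roughly as follows (sketch; pengine->qce stands for the handle returned by qce_open()):

	/* platform suspend path */
	if (qce_pm_table.suspend)
		qce_pm_table.suspend(pengine->qce);

	/* platform resume path */
	if (qce_pm_table.resume)
		qce_pm_table.resume(pengine->qce);
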
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index f9f6646..ad7dd31 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -127,7 +127,7 @@
 {
 	unsigned n;
 
-	n = len  / sizeof(uint32_t) ;
+	n = len  / sizeof(uint32_t);
 	for (; n > 0; n--) {
 		*iv =  ((*b << 24)      & 0xff000000) |
 				(((*(b+1)) << 16) & 0xff0000)   |
@@ -141,12 +141,12 @@
 	if (n == 3) {
 		*iv = ((*b << 24) & 0xff000000) |
 				(((*(b+1)) << 16) & 0xff0000)   |
-				(((*(b+2)) << 8) & 0xff00)     ;
+				(((*(b+2)) << 8) & 0xff00);
 	} else if (n == 2) {
 		*iv = ((*b << 24) & 0xff000000) |
-				(((*(b+1)) << 16) & 0xff0000)   ;
+				(((*(b+1)) << 16) & 0xff0000);
 	} else if (n == 1) {
-		*iv = ((*b << 24) & 0xff000000) ;
+		*iv = ((*b << 24) & 0xff000000);
 	}
 }
 
@@ -2645,7 +2645,7 @@
 	struct sps_bam_props bam = {0};
 	struct bam_registration_info *pbam = NULL;
 	struct bam_registration_info *p;
-	uint32_t bam_cfg = 0 ;
+	uint32_t bam_cfg = 0;
 
 
 	mutex_lock(&bam_register_lock);
@@ -2955,7 +2955,7 @@
 	(*cmd_ptr)->mask = 0xFFFFFFFF;
 	if (populate != NULL)
 		*populate = *cmd_ptr;
-	(*cmd_ptr)++ ;
+	(*cmd_ptr)++;
 }
 
 static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
@@ -4405,6 +4405,65 @@
 	return rc;
 }
 
+static int _qce_suspend(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_sps.consumer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	sps_pipe_info = pce_dev->ce_sps.producer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	qce_disable_clk(pce_dev);
+	return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info;
+	int rc;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_sps.consumer.pipe;
+	sps_connect_info = &pce_dev->ce_sps.consumer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
+			(u32)sps_pipe_info, rc);
+		return rc;
+	}
+	sps_pipe_info = pce_dev->ce_sps.producer.pipe;
+	sps_connect_info = &pce_dev->ce_sps.producer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc)
+		pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
+			(u32)sps_pipe_info, rc);
+
+	pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
+	pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
+
+	qce_disable_clk(pce_dev);
+	return rc;
+}
+
+struct qce_pm_table qce_pm_table  = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
 int qce_aead_req(void *handle, struct qce_req *q_req)
 {
 	struct qce_device *pce_dev;
@@ -5469,5 +5528,6 @@
 }
 EXPORT_SYMBOL(qce_hw_support);
 
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index d7f0bcb..c247189 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -58,6 +58,14 @@
 
 #define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
 
+enum qcrypto_bus_state {
+	BUS_NO_BANDWIDTH = 0,
+	BUS_HAS_BANDWIDTH,
+	BUS_BANDWIDTH_RELEASING,
+	BUS_BANDWIDTH_ALLOCATING,
+	BUS_SUSPENDED,
+};
+
 struct crypto_stat {
 	u64 aead_sha1_aes_enc;
 	u64 aead_sha1_aes_dec;
@@ -114,10 +122,19 @@
 	u32 unit;
 	u32 ce_device;
 	unsigned int signature;
-	uint32_t high_bw_req_count;
-	bool     high_bw_req;
-	struct timer_list bw_scale_down_timer;
-	struct work_struct low_bw_req_ws;
+
+	enum qcrypto_bus_state bw_state;
+	bool   high_bw_req;
+	struct timer_list bw_reaper_timer;
+	struct work_struct bw_reaper_ws;
+	struct work_struct bw_allocate_ws;
+
+	/* engine execution sequence number */
+	u32    active_seq;
+	/* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+	u32    last_active_seq;
+
+	bool   check_flag;
 };
 
 struct crypto_priv {
@@ -127,7 +144,7 @@
 	/* CE features/algorithms supported by HW engine*/
 	struct ce_hw_support ce_support;
 
-	/* the lock protects queue and req*/
+	/* the lock protects crypto queue and req */
 	spinlock_t lock;
 
 	/* list of  registered algorithms */
@@ -141,13 +158,13 @@
 	struct list_head engine_list; /* list of  qcrypto engines */
 	int32_t total_units;   /* total units of engines */
 	struct mutex engine_lock;
+
 	struct crypto_engine *next_engine; /* next assign engine */
 	struct crypto_queue req_queue;	/*
 					 * request queue for those requests
 					 * that waiting for an available
 					 * engine.
 					 */
-
 };
 static struct crypto_priv qcrypto_dev;
 static struct crypto_engine *_qcrypto_static_assign_engine(
@@ -433,11 +450,12 @@
 {
 	int ret = 0;
 
-	if (high_bw_req && pengine->high_bw_req == false) {
+	if (high_bw_req) {
+		pm_stay_awake(&pengine->pdev->dev);
 		ret = qce_enable_clk(pengine->qce);
 		if (ret) {
 			pr_err("%s Unable enable clk\n", __func__);
-			return;
+			goto clk_err;
 		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
@@ -445,16 +463,18 @@
 			pr_err("%s Unable to set to high bandwidth\n",
 						__func__);
 			qce_disable_clk(pengine->qce);
-			return;
+			goto clk_err;
 		}
-		pengine->high_bw_req = true;
-	} else if (high_bw_req == false && pengine->high_bw_req == true) {
+
+
+	} else {
+
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 0);
 		if (ret) {
 			pr_err("%s Unable to set to low bandwidth\n",
 						__func__);
-			return;
+			goto clk_err;
 		}
 		ret = qce_disable_clk(pengine->qce);
 		if (ret) {
@@ -464,67 +484,119 @@
 			if (ret)
 				pr_err("%s Unable to set to high bandwidth\n",
 						__func__);
-			return;
+			goto clk_err;
 		}
-		pengine->high_bw_req = false;
+		pm_relax(&pengine->pdev->dev);
 	}
+	return;
+clk_err:
+	pm_relax(&pengine->pdev->dev);
+	return;
+
 }
 
-static void qcrypto_bw_scale_down_timer_callback(unsigned long data)
+static void qcrypto_bw_reaper_timer_callback(unsigned long data)
 {
 	struct crypto_engine *pengine = (struct crypto_engine *)data;
 
-	schedule_work(&pengine->low_bw_req_ws);
+	schedule_work(&pengine->bw_reaper_ws);
 
 	return;
 }
 
 static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
 {
-	del_timer_sync(&(pengine->bw_scale_down_timer));
-	pengine->bw_scale_down_timer.data =
+	pengine->bw_reaper_timer.data =
 			(unsigned long)(pengine);
-	pengine->bw_scale_down_timer.expires = jiffies +
+	pengine->bw_reaper_timer.expires = jiffies +
 			msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
-	add_timer(&(pengine->bw_scale_down_timer));
+	add_timer(&(pengine->bw_reaper_timer));
 }
 
-static void qcrypto_ce_bw_scaling_req(struct crypto_priv *cp,
-				 bool high_bw_req)
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
 {
-	struct crypto_engine *pengine;
-
-	if (cp->platform_support.bus_scale_table == NULL)
-		return;
-	mutex_lock(&cp->engine_lock);
-	list_for_each_entry(pengine, &cp->engine_list, elist) {
-		if (high_bw_req) {
-			if (pengine->high_bw_req_count == 0)
-				qcrypto_ce_set_bus(pengine, true);
-			pengine->high_bw_req_count++;
-		} else {
-			pengine->high_bw_req_count--;
-			if (pengine->high_bw_req_count == 0)
-				qcrypto_bw_set_timeout(pengine);
-		}
-	}
-	mutex_unlock(&cp->engine_lock);
-}
-
-static void qcrypto_low_bw_req_work(struct work_struct *work)
-{
-	struct crypto_engine *pengine = container_of(work,
-				struct crypto_engine, low_bw_req_ws);
-
-	mutex_lock(&pengine->pcp->engine_lock);
-	if (pengine->high_bw_req_count == 0)
-		qcrypto_ce_set_bus(pengine, false);
-	mutex_unlock(&pengine->pcp->engine_lock);
+	schedule_work(&pengine->bw_allocate_ws);
 }
 
 static int _start_qcrypto_process(struct crypto_priv *cp,
 					struct crypto_engine *pengine);
 
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_allocate_ws);
+	unsigned long flags;
+	struct crypto_priv *cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qcrypto_ce_set_bus(pengine, true);
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_HAS_BANDWIDTH;
+	pengine->high_bw_req = false;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp, pengine);
+};
+
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_reaper_ws);
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	u32    active_seq;
+	bool restart = false;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	active_seq = pengine->active_seq;
+	if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+		(active_seq == pengine->last_active_seq)) {
+
+		/* check if engine is stuck */
+		if (pengine->req) {
+			if (pengine->check_flag)
+				dev_err(&pengine->pdev->dev,
+				"The engine appears to be stuck seq %d req %p.\n",
+				active_seq, pengine->req);
+			pengine->check_flag = false;
+			goto ret;
+		}
+		if (cp->platform_support.bus_scale_table == NULL)
+			goto ret;
+		pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		qcrypto_ce_set_bus(pengine, false);
+
+		spin_lock_irqsave(&cp->lock, flags);
+
+		if (pengine->high_bw_req == true) {
+			/* we got request while we are disabling clock */
+			pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+
+			qcrypto_ce_set_bus(pengine, true);
+
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_HAS_BANDWIDTH;
+			pengine->high_bw_req = false;
+			restart = true;
+		} else
+			pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+ret:
+	pengine->last_active_seq = active_seq;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (restart)
+		_start_qcrypto_process(cp, pengine);
+	qcrypto_bw_set_timeout(pengine);
+}
+
 static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
 {
 	int i;
@@ -625,7 +697,6 @@
 			return -ENODEV;
 	} else
 		ctx->pengine = NULL;
-	qcrypto_ce_bw_scaling_req(ctx->cp, true);
 	INIT_LIST_HEAD(&ctx->rsp_queue);
 	return 0;
 };
@@ -650,7 +721,6 @@
 			return -ENODEV;
 	} else
 		sha_ctx->pengine = NULL;
-	qcrypto_ce_bw_scaling_req(sha_ctx->cp, true);
 	INIT_LIST_HEAD(&sha_ctx->rsp_queue);
 	return 0;
 };
@@ -665,7 +735,6 @@
 		ahash_request_free(sha_ctx->ahash_req);
 		sha_ctx->ahash_req = NULL;
 	}
-	qcrypto_ce_bw_scaling_req(sha_ctx->cp, false);
 };
 
 
@@ -716,7 +785,6 @@
 
 	if (!list_empty(&ctx->rsp_queue))
 		pr_err("_qcrypto__cra_ablkcipher_exit: requests still outstanding");
-	qcrypto_ce_bw_scaling_req(ctx->cp, false);
 };
 
 static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
@@ -725,7 +793,6 @@
 
 	if (!list_empty(&ctx->rsp_queue))
 		pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
-	qcrypto_ce_bw_scaling_req(ctx->cp, false);
 };
 
 static int _disp_stats(int id)
@@ -875,8 +942,9 @@
 	cp->total_units--;
 
 	tasklet_kill(&pengine->done_tasklet);
-	cancel_work_sync(&pengine->low_bw_req_ws);
-	del_timer_sync(&pengine->bw_scale_down_timer);
+	cancel_work_sync(&pengine->bw_reaper_ws);
+	cancel_work_sync(&pengine->bw_allocate_ws);
+	del_timer_sync(&pengine->bw_reaper_timer);
 
 	if (pengine->bus_scale_handle != 0)
 		msm_bus_scale_unregister_client(pengine->bus_scale_handle);
@@ -1856,6 +1924,8 @@
 	arsp->async_req = async_req;
 	pengine->req = async_req;
 	pengine->arsp = arsp;
+	pengine->active_seq++;
+	pengine->check_flag = true;
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 	if (backlog_eng)
@@ -1923,16 +1993,40 @@
 	}
 
 	spin_lock_irqsave(&cp->lock, flags);
+
 	if (pengine) {
 		ret = crypto_enqueue_request(&pengine->req_queue, req);
 	} else {
 		ret = crypto_enqueue_request(&cp->req_queue, req);
 		pengine = _avail_eng(cp);
 	}
+	if (pengine) {
+		switch (pengine->bw_state) {
+		case BUS_NO_BANDWIDTH:
+			if (pengine->high_bw_req == false) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+			pengine = NULL;
+			break;
+		case BUS_HAS_BANDWIDTH:
+			break;
+		case BUS_BANDWIDTH_RELEASING:
+			pengine->high_bw_req = true;
+			pengine = NULL;
+			break;
+		case BUS_BANDWIDTH_ALLOCATING:
+			pengine = NULL;
+			break;
+		case BUS_SUSPENDED:
+		default:
+			pengine = NULL;
+			break;
+		}
+	}
 	spin_unlock_irqrestore(&cp->lock, flags);
 	if (pengine)
 		_start_qcrypto_process(cp, pengine);
-
 	return ret;
 }
 
@@ -4163,12 +4257,17 @@
 	pengine->req = NULL;
 	pengine->signature = 0xdeadbeef;
 
-	pengine->high_bw_req_count = 0;
+	init_timer(&(pengine->bw_reaper_timer));
+	INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+	pengine->bw_reaper_timer.function =
+			qcrypto_bw_reaper_timer_callback;
+	INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
 	pengine->high_bw_req = false;
-	init_timer(&(pengine->bw_scale_down_timer));
-	INIT_WORK(&pengine->low_bw_req_ws, qcrypto_low_bw_req_work);
-	pengine->bw_scale_down_timer.function =
-			qcrypto_bw_scale_down_timer_callback;
+	pengine->active_seq = 0;
+	pengine->last_active_seq = 0;
+	pengine->check_flag = false;
+	qcrypto_bw_set_timeout(pengine);
+
 	tasklet_init(&pengine->done_tasklet, req_done, (unsigned long)pengine);
 	crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
 
@@ -4208,7 +4307,9 @@
 				platform_support->bus_scale_table;
 		cp->platform_support.sha_hmac = platform_support->sha_hmac;
 	}
+
 	pengine->bus_scale_handle = 0;
+
 	if (cp->platform_support.bus_scale_table != NULL) {
 		pengine->bus_scale_handle =
 			msm_bus_scale_register_client(
@@ -4220,6 +4321,9 @@
 			rc =  -ENOMEM;
 			goto err;
 		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	} else {
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
 	}
 
 	if (cp->total_units != 1) {
@@ -4486,12 +4590,12 @@
 	return rc;
 };
 
-
 static int  _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	int ret = 0;
 	struct crypto_engine *pengine;
 	struct crypto_priv *cp;
+	unsigned long flags;
 
 	pengine = platform_get_drvdata(pdev);
 	if (!pengine)
@@ -4504,42 +4608,39 @@
 	cp = pengine->pcp;
 	if (!cp->ce_support.clk_mgmt_sus_res)
 		return 0;
-
-	mutex_lock(&cp->engine_lock);
-
-	if (pengine->high_bw_req) {
-		del_timer_sync(&(pengine->bw_scale_down_timer));
-		ret = msm_bus_scale_client_update_request(
-				pengine->bus_scale_handle, 0);
-		if (ret) {
-			dev_err(&pdev->dev, "%s Unable to set to low bandwidth\n",
-					__func__);
-			mutex_unlock(&cp->engine_lock);
-			return ret;
-		}
-		ret = qce_disable_clk(pengine->qce);
-		if (ret) {
-			pr_err("%s Unable disable clk\n", __func__);
-			ret = msm_bus_scale_client_update_request(
-				pengine->bus_scale_handle, 1);
-			if (ret)
-				dev_err(&pdev->dev,
-					"%s Unable to set to high bandwidth\n",
-					__func__);
-			mutex_unlock(&cp->engine_lock);
-			return ret;
-		}
+	spin_lock_irqsave(&cp->lock, flags);
+	switch (pengine->bw_state) {
+	case BUS_NO_BANDWIDTH:
+		if (pengine->high_bw_req == false)
+			pengine->bw_state = BUS_SUSPENDED;
+		else
+			ret = -EBUSY;
+		break;
+	case BUS_HAS_BANDWIDTH:
+	case BUS_BANDWIDTH_RELEASING:
+	case BUS_BANDWIDTH_ALLOCATING:
+	case BUS_SUSPENDED:
+	default:
+		ret = -EBUSY;
+		break;
 	}
 
-	mutex_unlock(&cp->engine_lock);
-	return 0;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (ret)
+		return ret;
+	else {
+		if (qce_pm_table.suspend)
+			qce_pm_table.suspend(pengine->qce);
+		return 0;
+	}
 }
 
 static int  _qcrypto_resume(struct platform_device *pdev)
 {
-	int ret = 0;
 	struct crypto_engine *pengine;
 	struct crypto_priv *cp;
+	unsigned long flags;
+	bool restart = false;
 
 	pengine = platform_get_drvdata(pdev);
 
@@ -4550,33 +4651,26 @@
 	if (!cp->ce_support.clk_mgmt_sus_res)
 		return 0;
 
-	mutex_lock(&cp->engine_lock);
-	if (pengine->high_bw_req) {
-		ret = qce_enable_clk(pengine->qce);
-		if (ret) {
-			dev_err(&pdev->dev, "%s Unable to enable clk\n",
-				__func__);
-			mutex_unlock(&cp->engine_lock);
-			return ret;
-		}
-		ret = msm_bus_scale_client_update_request(
-				pengine->bus_scale_handle, 1);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"%s Unable to set to high bandwidth\n",
-				__func__);
-			qce_disable_clk(pengine->qce);
-			mutex_unlock(&cp->engine_lock);
-			return ret;
-		}
-		pengine->bw_scale_down_timer.data =
-					(unsigned long)(pengine);
-		pengine->bw_scale_down_timer.expires = jiffies +
-			msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
-		add_timer(&(pengine->bw_scale_down_timer));
-	}
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->bw_state == BUS_SUSPENDED) {
+		pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+		spin_unlock_irqrestore(&cp->lock, flags);
 
-	mutex_unlock(&cp->engine_lock);
+		if (qce_pm_table.resume)
+			qce_pm_table.resume(pengine->qce);
+
+		qcrypto_ce_set_bus(pengine, true);
+
+		spin_lock_irqsave(&cp->lock, flags);
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+		pengine->high_bw_req = false;
+		restart = true;
+		pengine->active_seq++;
+		pengine->check_flag = true;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (restart)
+		_start_qcrypto_process(cp, pengine);
 
 	return 0;
 }
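The per-engine bandwidth handling above is now an explicit state machine (BUS_NO_BANDWIDTH, BUS_HAS_BANDWIDTH, BUS_BANDWIDTH_RELEASING, BUS_BANDWIDTH_ALLOCATING, BUS_SUSPENDED) driven from the request path, the reaper work item and suspend/resume, replacing the old per-tfm refcount and scale-down timer. Condensed, the decision taken under cp->lock when a request is queued is (a restatement of the code above, not new logic):

	switch (pengine->bw_state) {
	case BUS_HAS_BANDWIDTH:
		break;				/* dispatch immediately */
	case BUS_NO_BANDWIDTH:
		if (!pengine->high_bw_req) {	/* first request: vote for bandwidth */
			qcrypto_ce_bw_allocate_req(pengine);
			pengine->high_bw_req = true;
		}
		pengine = NULL;			/* allocate work will dispatch */
		break;
	case BUS_BANDWIDTH_RELEASING:
		pengine->high_bw_req = true;	/* reaper will re-vote and restart */
		pengine = NULL;
		break;
	default:				/* ALLOCATING or SUSPENDED */
		pengine = NULL;
		break;
	}
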
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index da68d05..048dc56 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -136,6 +136,7 @@
 	}
 
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
 
 	cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
 								&attrs);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index f230033..12fa799 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -627,7 +627,8 @@
 }
 
 static inline void refcount_group(struct adreno_perfcount_group *group,
-	unsigned int reg, unsigned int flags, unsigned int *lo)
+	unsigned int reg, unsigned int flags,
+	unsigned int *lo, unsigned int *hi)
 {
 	if (flags & PERFCOUNTER_FLAG_KERNEL)
 		group->regs[reg].kernelcount++;
@@ -636,6 +637,9 @@
 
 	if (lo)
 		*lo = group->regs[reg].offset;
+
+	if (hi)
+		*hi = group->regs[reg].offset_hi;
 }
 
 /**
@@ -643,7 +647,8 @@
  * @adreno_dev: Adreno device to configure
  * @groupid: Desired performance counter group
  * @countable: Countable desired to be in a counter
- * @offset: Return offset of the countable
+ * @offset: Return offset of the LO counter assigned
+ * @offset_hi: Return offset of the HI counter assigned
  * @flags: Used to setup kernel perf counters
  *
  * Try to place a countable in an available counter.  If the countable is
@@ -653,7 +658,7 @@
 
 int adreno_perfcounter_get(struct adreno_device *adreno_dev,
 	unsigned int groupid, unsigned int countable, unsigned int *offset,
-	unsigned int flags)
+	unsigned int *offset_hi, unsigned int flags)
 {
 	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
 	struct adreno_perfcount_group *group;
@@ -663,6 +668,8 @@
 	/* always clear return variables */
 	if (offset)
 		*offset = 0;
+	if (offset_hi)
+		*offset_hi = 0;
 
 	if (NULL == counters)
 		return -EINVAL;
@@ -684,7 +691,8 @@
 		/* If it is already reserved, just increase the refcounts */
 		if ((group->regs[countable].kernelcount != 0) ||
 			(group->regs[countable].usercount != 0)) {
-				refcount_group(group, countable, flags, offset);
+				refcount_group(group, countable, flags,
+					offset, offset_hi);
 				return 0;
 		}
 
@@ -700,7 +708,8 @@
 
 		for (i = 0; i < group->reg_count; i++) {
 			if (group->regs[i].countable == countable) {
-				refcount_group(group, i, flags, offset);
+				refcount_group(group, i, flags,
+					offset, offset_hi);
 				return 0;
 			} else if (group->regs[i].countable ==
 			KGSL_PERFCOUNTER_NOT_USED) {
@@ -733,6 +742,8 @@
 
 	if (offset)
 		*offset = group->regs[empty].offset;
+	if (offset_hi)
+		*offset_hi = group->regs[empty].offset_hi;
 
 	return ret;
 }
@@ -3352,7 +3363,8 @@
 		if (result)
 			break;
 		result = adreno_perfcounter_get(adreno_dev, get->groupid,
-			get->countable, &get->offset, PERFCOUNTER_FLAG_NONE);
+			get->countable, &get->offset, &get->offset_hi,
+			PERFCOUNTER_FLAG_NONE);
 		kgsl_active_count_put(device);
 		break;
 	}
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 3d9206b..c690801 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -240,6 +240,7 @@
 	unsigned int kernelcount;
 	unsigned int usercount;
 	unsigned int offset;
+	unsigned int offset_hi;
 	int load_bit;
 	unsigned int select;
 	uint64_t value;
@@ -530,7 +531,7 @@
 
 int adreno_perfcounter_get(struct adreno_device *adreno_dev,
 	unsigned int groupid, unsigned int countable, unsigned int *offset,
-	unsigned int flags);
+	unsigned int *offset_hi, unsigned int flags);
 
 int adreno_perfcounter_put(struct adreno_device *adreno_dev,
 	unsigned int groupid, unsigned int countable, unsigned int flags);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 2025d73..b0851a2 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -3814,141 +3814,158 @@
 
 static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_CP_0_LO,
-		0, A3XX_CP_PERFCOUNTER_SELECT },
+		A3XX_RBBM_PERFCTR_CP_0_HI, 0, A3XX_CP_PERFCOUNTER_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO,
-		1, A3XX_RBBM_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_RBBM_0_HI, 1, A3XX_RBBM_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO,
-		2, A3XX_RBBM_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_RBBM_1_HI, 2, A3XX_RBBM_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_0_LO,
-		3, A3XX_PC_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_PC_0_HI, 3, A3XX_PC_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_1_LO,
-		4, A3XX_PC_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_PC_1_HI, 4, A3XX_PC_PERFCOUNTER1_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_2_LO,
-		5, A3XX_PC_PERFCOUNTER2_SELECT },
+		A3XX_RBBM_PERFCTR_PC_2_HI, 5, A3XX_PC_PERFCOUNTER2_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_3_LO,
-		6, A3XX_PC_PERFCOUNTER3_SELECT },
+		A3XX_RBBM_PERFCTR_PC_3_HI, 6, A3XX_PC_PERFCOUNTER3_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_0_LO,
-		7, A3XX_VFD_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_VFD_0_HI, 7, A3XX_VFD_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_1_LO,
-		8, A3XX_VFD_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_VFD_1_HI, 8, A3XX_VFD_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO,
-		9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_HLSQ_0_HI, 9,
+		A3XX_HLSQ_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO,
-		10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_HLSQ_1_HI, 10,
+		A3XX_HLSQ_PERFCOUNTER1_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO,
-		11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
+		A3XX_RBBM_PERFCTR_HLSQ_2_HI, 11,
+		A3XX_HLSQ_PERFCOUNTER2_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO,
-		12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
+		A3XX_RBBM_PERFCTR_HLSQ_3_HI, 12,
+		A3XX_HLSQ_PERFCOUNTER3_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO,
-		13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
+		A3XX_RBBM_PERFCTR_HLSQ_4_HI, 13,
+		A3XX_HLSQ_PERFCOUNTER4_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO,
-		14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
+		A3XX_RBBM_PERFCTR_HLSQ_5_HI, 14,
+		A3XX_HLSQ_PERFCOUNTER5_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_0_LO,
-		15, A3XX_VPC_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_VPC_0_HI, 15, A3XX_VPC_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_1_LO,
-		16, A3XX_VPC_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_VPC_1_HI, 16, A3XX_VPC_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_0_LO,
-		17, A3XX_GRAS_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_TSE_0_HI, 17, A3XX_GRAS_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_1_LO,
-		18, A3XX_GRAS_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_TSE_1_HI, 18, A3XX_GRAS_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_0_LO,
-		19, A3XX_GRAS_PERFCOUNTER2_SELECT },
+		A3XX_RBBM_PERFCTR_RAS_0_HI, 19, A3XX_GRAS_PERFCOUNTER2_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_1_LO,
-		20, A3XX_GRAS_PERFCOUNTER3_SELECT },
+		A3XX_RBBM_PERFCTR_RAS_1_HI, 20, A3XX_GRAS_PERFCOUNTER3_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO,
-		21, A3XX_UCHE_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_UCHE_0_HI, 21,
+		A3XX_UCHE_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO,
-		22, A3XX_UCHE_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_UCHE_1_HI, 22,
+		A3XX_UCHE_PERFCOUNTER1_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO,
-		23, A3XX_UCHE_PERFCOUNTER2_SELECT },
+		A3XX_RBBM_PERFCTR_UCHE_2_HI, 23,
+		A3XX_UCHE_PERFCOUNTER2_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO,
-		24, A3XX_UCHE_PERFCOUNTER3_SELECT },
+		A3XX_RBBM_PERFCTR_UCHE_3_HI, 24,
+		A3XX_UCHE_PERFCOUNTER3_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO,
-		25, A3XX_UCHE_PERFCOUNTER4_SELECT },
+		A3XX_RBBM_PERFCTR_UCHE_4_HI, 25,
+		A3XX_UCHE_PERFCOUNTER4_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO,
-		26, A3XX_UCHE_PERFCOUNTER5_SELECT },
+		A3XX_RBBM_PERFCTR_UCHE_5_HI, 26,
+		A3XX_UCHE_PERFCOUNTER5_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_0_LO,
-		27, A3XX_TP_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_TP_0_HI, 27, A3XX_TP_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_1_LO,
-		28, A3XX_TP_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_TP_1_HI, 28, A3XX_TP_PERFCOUNTER1_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_2_LO,
-		29, A3XX_TP_PERFCOUNTER2_SELECT },
+		A3XX_RBBM_PERFCTR_TP_2_HI, 29, A3XX_TP_PERFCOUNTER2_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_3_LO,
-		30, A3XX_TP_PERFCOUNTER3_SELECT },
+		A3XX_RBBM_PERFCTR_TP_3_HI, 30, A3XX_TP_PERFCOUNTER3_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_4_LO,
-		31, A3XX_TP_PERFCOUNTER4_SELECT },
+		A3XX_RBBM_PERFCTR_TP_4_HI, 31, A3XX_TP_PERFCOUNTER4_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_5_LO,
-		32, A3XX_TP_PERFCOUNTER5_SELECT },
+		A3XX_RBBM_PERFCTR_TP_5_HI, 32, A3XX_TP_PERFCOUNTER5_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_0_LO,
-		33, A3XX_SP_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_SP_0_HI, 33, A3XX_SP_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_1_LO,
-		34, A3XX_SP_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_SP_1_HI, 34, A3XX_SP_PERFCOUNTER1_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_2_LO,
-		35, A3XX_SP_PERFCOUNTER2_SELECT },
+		A3XX_RBBM_PERFCTR_SP_2_HI, 35, A3XX_SP_PERFCOUNTER2_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_3_LO,
-		36, A3XX_SP_PERFCOUNTER3_SELECT },
+		A3XX_RBBM_PERFCTR_SP_3_HI, 36, A3XX_SP_PERFCOUNTER3_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_4_LO,
-		37, A3XX_SP_PERFCOUNTER4_SELECT },
+		A3XX_RBBM_PERFCTR_SP_4_HI, 37, A3XX_SP_PERFCOUNTER4_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_5_LO,
-		38, A3XX_SP_PERFCOUNTER5_SELECT },
+		A3XX_RBBM_PERFCTR_SP_5_HI, 38, A3XX_SP_PERFCOUNTER5_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_6_LO,
-		39, A3XX_SP_PERFCOUNTER6_SELECT },
+		A3XX_RBBM_PERFCTR_SP_6_HI, 39, A3XX_SP_PERFCOUNTER6_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_7_LO,
-		40, A3XX_SP_PERFCOUNTER7_SELECT },
+		A3XX_RBBM_PERFCTR_SP_7_HI, 40, A3XX_SP_PERFCOUNTER7_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_0_LO,
-		41, A3XX_RB_PERFCOUNTER0_SELECT },
+		A3XX_RBBM_PERFCTR_RB_0_HI, 41, A3XX_RB_PERFCOUNTER0_SELECT },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_1_LO,
-		42, A3XX_RB_PERFCOUNTER1_SELECT },
+		A3XX_RBBM_PERFCTR_RB_1_HI, 42, A3XX_RB_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO,
-		-1, 0 },
+		A3XX_RBBM_PERFCTR_PWR_0_HI, -1, 0 },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO,
-		-1, 0 },
+		A3XX_RBBM_PERFCTR_PWR_1_HI, -1, 0 },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO, -1, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO,
+		A3XX_VBIF_PERF_CNT0_HI, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO,
+		A3XX_VBIF_PERF_CNT1_HI, -1, 0 },
 };
 static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO, -1, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO, -1, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO,
+		A3XX_VBIF_PERF_PWR_CNT0_HI, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO,
+		A3XX_VBIF_PERF_PWR_CNT1_HI, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO,
+		A3XX_VBIF_PERF_PWR_CNT2_HI, -1, 0 },
 };
 
 static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
@@ -3983,14 +4000,11 @@
 	int ret = 0;
 
 	if (*lo == 0) {
-		*hi = 0;
 
 		ret = adreno_perfcounter_get(adreno_dev, group, countable,
-			lo, PERFCOUNTER_FLAG_KERNEL);
+			lo, hi, PERFCOUNTER_FLAG_KERNEL);
 
-		if (ret == 0)
-			*hi = *lo + 1;
-		else {
+		if (ret) {
 			struct kgsl_device *device = &adreno_dev->dev;
 
 			KGSL_DRV_ERR(device,
@@ -4106,18 +4120,18 @@
 
 	/* Reserve and start countable 1 in the PWR perfcounter group */
 	ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
-			NULL, PERFCOUNTER_FLAG_KERNEL);
+			NULL, NULL, PERFCOUNTER_FLAG_KERNEL);
 
 	if (device->pwrctrl.bus_control) {
 		/* VBIF waiting for RAM */
 		ret |= adreno_perfcounter_get(adreno_dev,
 				KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 0,
-				NULL, PERFCOUNTER_FLAG_KERNEL);
+				NULL, NULL, PERFCOUNTER_FLAG_KERNEL);
 		/* VBIF DDR cycles */
 		ret |= adreno_perfcounter_get(adreno_dev,
 				KGSL_PERFCOUNTER_GROUP_VBIF,
 				VBIF_AXI_TOTAL_BEATS,
-				&adreno_dev->ram_cycles_lo,
+				&adreno_dev->ram_cycles_lo, NULL,
 				PERFCOUNTER_FLAG_KERNEL);
 	}
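With offset_hi now recorded per counter, consumers stop assuming that the HI half of a performance counter sits one register above the LO half, which does not hold for every block. A hedged sketch of assembling the 64-bit value once both offsets have been handed back by adreno_perfcounter_get() (kgsl_regread() is the existing register accessor; rollover between the two reads is ignored here):

	unsigned int lo = 0, hi = 0;
	uint64_t value;

	kgsl_regread(device, offset, &lo);
	kgsl_regread(device, offset_hi, &hi);
	value = ((uint64_t)hi << 32) | lo;
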
 
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 38e0af8..45075a5 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -146,7 +146,7 @@
 				entry->offset, data_offset);
 		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
 				gpuaddr + data_offset, data_offset);
-		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset_hi,
 				gpuaddr + data_offset, data_offset);
 
 		/* skip over post_ib counter data */
@@ -185,7 +185,7 @@
 
 		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
 				gpuaddr + data_offset, data_offset);
-		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+		IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset_hi,
 				gpuaddr + data_offset, data_offset);
 	}
 
@@ -281,7 +281,7 @@
 
 static bool _add_to_assignments_list(struct adreno_profile *profile,
 		const char *str, unsigned int groupid, unsigned int countable,
-		unsigned int offset)
+		unsigned int offset, unsigned int offset_hi)
 {
 	struct adreno_profile_assigns_list *entry;
 
@@ -295,6 +295,7 @@
 	entry->countable = countable;
 	entry->groupid = groupid;
 	entry->offset = offset;
+	entry->offset_hi = offset_hi;
 
 	strlcpy(entry->name, str, sizeof(entry->name));
 
@@ -576,7 +577,7 @@
 		unsigned int groupid, unsigned int countable)
 {
 	struct adreno_profile *profile = &adreno_dev->profile;
-	unsigned int offset;
+	unsigned int offset, offset_hi;
 	const char *name = NULL;
 
 	name = adreno_perfcounter_get_name(adreno_dev, groupid);
@@ -588,13 +589,13 @@
 		return;
 
 	/* add to perf counter allocation, if fail skip it */
-	if (adreno_perfcounter_get(adreno_dev, groupid,
-				countable, &offset, PERFCOUNTER_FLAG_NONE))
+	if (adreno_perfcounter_get(adreno_dev, groupid, countable,
+				&offset, &offset_hi, PERFCOUNTER_FLAG_NONE))
 		return;
 
 	/* add to assignments list, put counter back if error */
 	if (!_add_to_assignments_list(profile, name, groupid,
-				countable, offset))
+				countable, offset, offset_hi))
 		adreno_perfcounter_put(adreno_dev, groupid,
 				countable, PERFCOUNTER_FLAG_KERNEL);
 }
diff --git a/drivers/gpu/msm/adreno_profile.h b/drivers/gpu/msm/adreno_profile.h
index 7e1ccb2..dfd22d8 100644
--- a/drivers/gpu/msm/adreno_profile.h
+++ b/drivers/gpu/msm/adreno_profile.h
@@ -27,7 +27,8 @@
 	char name[25];
 	unsigned int groupid;
 	unsigned int countable;
-	unsigned int offset;   /* LO offset,  HI offset is +1 */
+	unsigned int offset;    /* LO offset */
+	unsigned int offset_hi; /* HI offset */
 };
 
 struct adreno_profile {
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index a96ab2b..5d78879 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -64,14 +64,8 @@
 	struct sg_table *table;
 };
 
-static void kgsl_put_process_private(struct kgsl_device *device,
-			 struct kgsl_process_private *private);
-
 static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
 
-static void
-kgsl_put_process_private(struct kgsl_device *device,
-			 struct kgsl_process_private *private);
 /**
  * kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
  * device: KGSL device
@@ -352,8 +346,8 @@
 {
 	int ret;
 	struct kgsl_process_private *process = dev_priv->process_priv;
-	
-	ret = kref_get_unless_zero(&process->refcount);
+
+	ret = kgsl_process_private_get(process);
 	if (!ret)
 		return -EBADF;
 
@@ -392,7 +386,7 @@
 	return ret;
 
 err_put_proc_priv:
-	kgsl_put_process_private(dev_priv->device, process);
+	kgsl_process_private_put(process);
 	return ret;
 }
 
@@ -415,7 +409,7 @@
 
 	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
 	spin_unlock(&entry->priv->mem_lock);
-	kgsl_put_process_private(entry->dev_priv->device, entry->priv);
+	kgsl_process_private_put(entry->priv);
 
 	entry->priv = NULL;
 }
@@ -473,7 +467,7 @@
 	 * the context is destroyed. This will also prevent the pagetable
 	 * from being destroyed
 	 */
-	if (!kref_get_unless_zero(&dev_priv->process_priv->refcount))
+	if (!kgsl_process_private_get(dev_priv->process_priv))
 		goto fail_free_id;
 	context->device = dev_priv->device;
 	context->dev_priv = dev_priv;
@@ -568,8 +562,7 @@
 	}
 	write_unlock(&device->context_lock);
 	kgsl_sync_timeline_destroy(context);
-	kgsl_put_process_private(device,
-				context->proc_priv);
+	kgsl_process_private_put(context->proc_priv);
 
 	device->ftbl->drawctxt_destroy(context);
 }
@@ -811,9 +804,8 @@
 	return;
 }
 
-static void
-kgsl_put_process_private(struct kgsl_device *device,
-			 struct kgsl_process_private *private)
+void
+kgsl_process_private_put(struct kgsl_process_private *private)
 {
 	mutex_lock(&kgsl_driver.process_mutex);
 
@@ -828,13 +820,32 @@
 }
 
 /**
- * find_process_private() - Helper function to search for process private
- * @cur_dev_priv: Pointer to device private structure which contains pointers
- * to device and process_private structs.
+ * kgsl_process_private_find() - Find the process associated with the specified
+ * name
+ * @name: pid_t of the process to search for
+ * Return the process struct for the given ID.
+ */
+struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
+{
+	struct kgsl_process_private *p, *private = NULL;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	list_for_each_entry(p, &kgsl_driver.process_list, list) {
+		if (p->pid == pid) {
+			if (kgsl_process_private_get(p))
+				private = p;
+			break;
+		}
+	}
+	mutex_unlock(&kgsl_driver.process_mutex);
+	return private;
+}
+
+/**
+ * kgsl_process_private_new() - Helper function to search for process private
  * Returns: Pointer to the found/newly created private struct
  */
-static struct kgsl_process_private *
-kgsl_find_process_private(struct kgsl_device_private *cur_dev_priv)
+static struct kgsl_process_private *kgsl_process_private_new(void)
 {
 	struct kgsl_process_private *private;
 
@@ -842,18 +853,16 @@
 	mutex_lock(&kgsl_driver.process_mutex);
 	list_for_each_entry(private, &kgsl_driver.process_list, list) {
 		if (private->pid == task_tgid_nr(current)) {
-			kref_get(&private->refcount);
+			if (!kgsl_process_private_get(private))
+				private = NULL;
 			goto done;
 		}
 	}
 
 	/* no existing process private found for this dev_priv, create one */
 	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
-	if (private == NULL) {
-		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
-			sizeof(struct kgsl_process_private));
+	if (private == NULL)
 		goto done;
-	}
 
 	kref_init(&private->refcount);
 
@@ -875,11 +884,11 @@
  * NULL if pagetable creation for this process private obj failed.
  */
 static struct kgsl_process_private *
-kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
+kgsl_get_process_private(struct kgsl_device *device)
 {
 	struct kgsl_process_private *private;
 
-	private = kgsl_find_process_private(cur_dev_priv);
+	private = kgsl_process_private_new();
 
 	if (!private)
 		return NULL;
@@ -894,15 +903,15 @@
 
 	if ((!private->pagetable) && kgsl_mmu_enabled()) {
 		unsigned long pt_name;
-		struct kgsl_mmu *mmu = &cur_dev_priv->device->mmu;
 
 		pt_name = task_tgid_nr(current);
-		private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name);
+		private->pagetable =
+			kgsl_mmu_getpagetable(&device->mmu, pt_name);
 		if (private->pagetable == NULL)
 			goto error;
 	}
 
-	if (kgsl_process_init_sysfs(cur_dev_priv->device, private))
+	if (kgsl_process_init_sysfs(device, private))
 		goto error;
 	if (kgsl_process_init_debugfs(private))
 		goto error;
@@ -915,7 +924,7 @@
 
 error:
 	mutex_unlock(&private->process_private_mutex);
-	kgsl_put_process_private(cur_dev_priv->device, private);
+	kgsl_process_private_put(private);
 	return NULL;
 }
 
@@ -1009,7 +1018,7 @@
 
 	kfree(dev_priv);
 
-	kgsl_put_process_private(device, private);
+	kgsl_process_private_put(private);
 
 	pm_runtime_put(device->parentdev);
 	return result;
@@ -1099,7 +1108,7 @@
 	 * after the first start so that the global pagetable mappings
 	 * are set up before we create the per-process pagetable.
 	 */
-	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+	dev_priv->process_priv = kgsl_get_process_private(device);
 	if (dev_priv->process_priv ==  NULL) {
 		result = -ENOMEM;
 		goto err_stop;
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index c1b3139..5645628 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -247,7 +247,7 @@
 
 static void print_mem_entry(struct seq_file *s, struct kgsl_mem_entry *entry)
 {
-	char flags[6];
+	char flags[7];
 	char usage[16];
 	struct kgsl_memdesc *m = &entry->memdesc;
 
@@ -256,11 +256,12 @@
 	flags[2] = get_alignflag(m);
 	flags[3] = get_cacheflag(m);
 	flags[4] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
-	flags[5] = '\0';
+	flags[5] = (m->useraddr) ? 'Y' : 'N';
+	flags[6] = '\0';
 
 	kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
 
-	seq_printf(s, "%pK %pK %8zd %5d %5s %10s %16s %5d\n",
+	seq_printf(s, "%pK %pK %8zd %5d %6s %10s %16s %5d\n",
 			(unsigned long *) m->gpuaddr,
 			(unsigned long *) m->useraddr,
 			m->size, entry->id, flags,
@@ -274,7 +275,7 @@
 	struct kgsl_process_private *private = s->private;
 	int next = 0;
 
-	seq_printf(s, "%8s %8s %8s %5s %5s %10s %16s %5s\n",
+	seq_printf(s, "%8s %8s %8s %5s %6s %10s %16s %5s\n",
 		   "gpuaddr", "useraddr", "size", "id", "flags", "type",
 		   "usage", "sglen");
 
@@ -303,14 +304,38 @@
 
 static int process_mem_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, process_mem_print, inode->i_private);
+	int ret;
+	pid_t pid = (pid_t) (unsigned long) inode->i_private;
+	struct kgsl_process_private *private = NULL;
+
+	private = kgsl_process_private_find(pid);
+
+	if (!private)
+		return -ENODEV;
+
+	ret = single_open(file, process_mem_print, private);
+	if (ret)
+		kgsl_process_private_put(private);
+
+	return ret;
+}
+
+static int process_mem_release(struct inode *inode, struct file *file)
+{
+	struct kgsl_process_private *private =
+		((struct seq_file *)file->private_data)->private;
+
+	if (private)
+		kgsl_process_private_put(private);
+
+	return single_release(inode, file);
 }
 
 static const struct file_operations process_mem_fops = {
 	.open = process_mem_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = single_release,
+	.release = process_mem_release,
 };
 
 
@@ -349,8 +374,8 @@
 	 * So if debugfs is disabled in kernel, return as
 	 * success.
 	 */
-	dentry = debugfs_create_file("mem", 0444, private->debug_root, private,
-			    &process_mem_fops);
+	dentry = debugfs_create_file("mem", 0444, private->debug_root,
+		(void *) ((unsigned long) private->pid), &process_mem_fops);
 
 	if (IS_ERR(dentry)) {
 		ret = PTR_ERR(dentry);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 47801a4..1e6fbc9 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -697,6 +697,27 @@
 void kgsl_cmdbatch_destroy_object(struct kref *kref);
 
 /**
+ * kgsl_process_private_get() - increment the refcount on a kgsl_process_private
+ *   struct
+ * @process: Pointer to the KGSL process_private
+ *
+ * Returns 0 if the structure is invalid and a reference count could not be
+ * obtained, nonzero otherwise.
+ */
+static inline int kgsl_process_private_get(struct kgsl_process_private *process)
+{
+	int ret = 0;
+	if (process != NULL)
+		ret = kref_get_unless_zero(&process->refcount);
+	return ret;
+}
+
+void kgsl_process_private_put(struct kgsl_process_private *private);
+
+
+struct kgsl_process_private *kgsl_process_private_find(pid_t pid);
+
+/**
  * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
  * @cmdbatch: Pointer to the command batch object
  */
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 37a11d2..96b058b 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -814,17 +814,22 @@
 		return -EINVAL;
 
 	scale_voltage = (adc_code -
-		chan_properties->adc_graph[CALIB_ABSOLUTE].adc_gnd)
-		* chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+		chan_properties->adc_graph[chan_properties->calib_type].adc_gnd)
+		* chan_properties->adc_graph[chan_properties->calib_type].dx;
 	if (scale_voltage < 0) {
 		negative_offset = 1;
 		scale_voltage = -scale_voltage;
 	}
 	do_div(scale_voltage,
-		chan_properties->adc_graph[CALIB_ABSOLUTE].dy);
+		chan_properties->adc_graph[chan_properties->calib_type].dy);
 	if (negative_offset)
 		scale_voltage = -scale_voltage;
-	scale_voltage += chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+
+	if (chan_properties->calib_type == CALIB_ABSOLUTE)
+		scale_voltage +=
+		chan_properties->adc_graph[chan_properties->calib_type].dx;
+	else
+		scale_voltage *= 1000;
 
 	if (scale_voltage < 0) {
 		if (adc_properties->bipolar) {
@@ -1190,6 +1195,7 @@
 		adc_channel_list[i].adc_scale_fn = post_scaling;
 		adc_channel_list[i].hw_settle_time = hw_settle_time;
 		adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+		adc_channel_list[i].calib_type = calib_type;
 		i++;
 	}
 
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 346a72d..ee69840 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -600,6 +600,7 @@
 					QPNP_VBAT_COEFF_25;
 			break;
 		}
+		break;
 	case QPNP_REV_ID_8110_2_0:
 		switch (vadc->id) {
 		case COMP_ID_SMIC:
@@ -713,6 +714,7 @@
 			temp_var = 0;
 			break;
 		}
+		break;
 	case QPNP_REV_ID_8110_2_0:
 		switch (vadc->id) {
 		case COMP_ID_SMIC:
@@ -1173,6 +1175,8 @@
 		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
 	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
 		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+	vadc->adc->amux_prop->chan_prop->calib_type =
+		vadc->adc->adc_channels[dt_index].calib_type;
 
 	scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
 	if (scale_type >= SCALE_NONE) {
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 1704105..545e68f 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -193,6 +193,7 @@
 	int                          wr_sz;
 	struct msm_i2c_platform_data *pdata;
 	enum msm_i2c_state           pwr_state;
+	atomic_t		     xfer_progress;
 	struct mutex                 mlock;
 	void                         *complete;
 	int                          i2c_gpios[ARRAY_SIZE(i2c_rsrcs)];
@@ -230,8 +231,10 @@
 	uint32_t op_flgs = 0;
 	int err = 0;
 
-	if (pm_runtime_suspended(dev->dev))
+	if (atomic_read(&dev->xfer_progress) != 1) {
+		dev_err(dev->dev, "irq:%d when PM suspended\n", irq);
 		return IRQ_NONE;
+	}
 
 	status = readl_relaxed(dev->base + QUP_I2C_STATUS);
 	status1 = readl_relaxed(dev->base + QUP_ERROR_FLAGS);
@@ -1002,6 +1005,7 @@
 	if (dev->pdata->clk_ctl_xfer)
 		i2c_qup_pm_resume_clk(dev);
 
+	atomic_set(&dev->xfer_progress, 1);
 	/* Initialize QUP registers during first transfer */
 	if (dev->clk_ctl == 0) {
 		int fs_div;
@@ -1305,6 +1309,7 @@
 	dev->cnt = 0;
 	if (dev->pdata->clk_ctl_xfer)
 		i2c_qup_pm_suspend_clk(dev);
+	atomic_set(&dev->xfer_progress, 0);
 	mutex_unlock(&dev->mlock);
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
@@ -1655,6 +1660,7 @@
 
 	mutex_init(&dev->mlock);
 	dev->pwr_state = MSM_I2C_PM_SUSPENDED;
+	atomic_set(&dev->xfer_progress, 0);
 	/* If the same AHB clock is used on Modem side
 	 * switch it on here itself and don't switch it
 	 * on and off during suspend and resume.
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 0c22694..915f5e7 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -81,8 +81,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called hbtp_input.
 
-endif
-
 config INPUT_PCSPKR
 	tristate "PC Speaker support"
 	depends on PCSPKR_PLATFORM
@@ -724,3 +722,5 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called CM36283.
+
+endif
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index 0659b77..0413472 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -1755,7 +1755,7 @@
 				return -EINVAL;
 			}
 			rc = pwm_lut_config(pwm_cfg->pwm_dev,
-				PM_PWM_PERIOD_MIN, /* ignored by hardware */
+				pwm_cfg->pwm_period_us,
 				pwm_cfg->duty_cycles->duty_pcts,
 				pwm_cfg->lut_params);
 			if (rc < 0) {
@@ -2862,7 +2862,7 @@
 	else
 		return rc;
 
-	if (pwm_cfg->mode == PWM_MODE) {
+	if (pwm_cfg->mode != MANUAL_MODE) {
 		rc = of_property_read_u32(node, "qcom,pwm-us", &val);
 		if (!rc)
 			pwm_cfg->pwm_period_us = val;
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 937fb8c..3a3c370 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -3871,7 +3871,12 @@
 	if (ret > 0) {
 		dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
 		spin_lock_irq(&dmxdevfilter->dev->lock);
-		dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+		/*
+		 * Updating the events in case of overflow might remove the
+		 * overflow event, so avoid that.
+		 */
+		if (dmxdevfilter->buffer.error != -EOVERFLOW)
+			dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
 		spin_unlock_irq(&dmxdevfilter->dev->lock);
 
 		/*
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index f9855c0..4e73c5a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -675,7 +675,7 @@
 	struct msm_isp_bufq *bufq = NULL;
 	CDBG("%s: E\n", __func__);
 
-	if (!buf_request->num_buf) {
+	if (!buf_request->num_buf || buf_request->num_buf > VIDEO_MAX_FRAME) {
 		pr_err("Invalid buffer request\n");
 		return rc;
 	}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 80a0073..d35869c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -52,8 +52,82 @@
 	{}
 };
 
+#define MAX_OVERFLOW_COUNTERS  15
+#define OVERFLOW_LENGTH 512
+#define OVERFLOW_BUFFER_LENGTH 32
 static struct msm_isp_buf_mgr vfe_buf_mgr;
+static int msm_isp_enable_debugfs(struct msm_isp_statistics *stats);
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+	"imgmaster0_overflow_cnt",
+	"imgmaster1_overflow_cnt",
+	"imgmaster2_overflow_cnt",
+	"imgmaster3_overflow_cnt",
+	"imgmaster4_overflow_cnt",
+	"imgmaster5_overflow_cnt",
+	"imgmaster6_overflow_cnt",
+	"be_overflow_cnt",
+	"bg_overflow_cnt",
+	"bf_overflow_cnt",
+	"awb_overflow_cnt",
+	"rs_overflow_cnt",
+	"cs_overflow_cnt",
+	"ihist_overflow_cnt",
+	"skinbhist_overflow_cnt",
+};
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
 
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char name[OVERFLOW_LENGTH] = {0};
+	int *ptr;
+	char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+	struct msm_isp_statistics  *stats = (struct msm_isp_statistics *)
+		t_file->private_data;
+	ptr = (int *)(stats);
+	for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+		strlcat(name, stats_str[i], sizeof(name));
+		strlcat(name, "     ", sizeof(name));
+		snprintf(buffer, sizeof(buffer), "%d", ptr[i]);
+		strlcat(name, buffer, sizeof(name));
+		strlcat(name, "\r\n", sizeof(name));
+	}
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, name, strlen(name));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+	const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct msm_isp_statistics *stats = (struct msm_isp_statistics *)
+		t_file->private_data;
+	memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+	return t_size_t;
+}
+
+static const struct file_operations vfe_debugfs_error = {
+	.open = vfe_debugfs_statistics_open,
+	.read = vfe_debugfs_statistics_read,
+	.write = vfe_debugfs_statistics_write,
+};
+
+static int msm_isp_enable_debugfs(struct msm_isp_statistics *stats)
+{
+	struct dentry *debugfs_base;
+	debugfs_base = debugfs_create_dir("msm_isp", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+	if (!debugfs_create_file("stats", S_IRUGO | S_IWUSR, debugfs_base,
+		stats, &vfe_debugfs_error))
+		return -ENOMEM;
+	return 0;
+}
 static int __devinit vfe_probe(struct platform_device *pdev)
 {
 	struct vfe_device *vfe_dev;
@@ -73,6 +147,7 @@
 	};
 
 	vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
+	vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
 	if (!vfe_dev) {
 		pr_err("%s: no enough memory\n", __func__);
 		return -ENOMEM;
@@ -144,6 +219,7 @@
 		kfree(vfe_dev);
 		return -EINVAL;
 	}
+	msm_isp_enable_debugfs(vfe_dev->stats);
 	vfe_dev->buf_mgr->ops->register_ctx(vfe_dev->buf_mgr,
 		&vfe_dev->iommu_ctx[0], vfe_dev->hw_info->num_iommu_ctx);
 	vfe_dev->vfe_open_cnt = 0;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 334a293..69c5190 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -24,7 +24,6 @@
 #include <media/msmb_isp.h>
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
-
 #include "msm_buf_mgr.h"
 
 #define MAX_IOMMU_CTX 2
@@ -148,13 +147,13 @@
 	uint32_t (*get_wm_mask) (uint32_t irq_status0, uint32_t irq_status1);
 	uint32_t (*get_comp_mask) (uint32_t irq_status0, uint32_t irq_status1);
 	uint32_t (*get_pingpong_status) (struct vfe_device *vfe_dev);
-	long (*halt) (struct vfe_device *vfe_dev);
+	long (*halt) (struct vfe_device *vfe_dev, uint32_t blocking);
 };
 
 struct msm_vfe_core_ops {
 	void (*reg_update) (struct vfe_device *vfe_dev);
 	long (*reset_hw) (struct vfe_device *vfe_dev,
-		enum msm_isp_reset_type reset_type);
+		enum msm_isp_reset_type reset_type, uint32_t blocking);
 	int (*init_hw) (struct vfe_device *vfe_dev);
 	void (*init_hw_reg) (struct vfe_device *vfe_dev);
 	void (*release_hw) (struct vfe_device *vfe_dev);
@@ -168,6 +167,12 @@
 	int (*get_platform_data) (struct vfe_device *vfe_dev);
 	void (*get_error_mask) (uint32_t *error_mask0, uint32_t *error_mask1);
 	void (*process_error_status) (struct vfe_device *vfe_dev);
+	void (*get_overflow_mask) (uint32_t *overflow_mask);
+	void (*get_irq_mask) (struct vfe_device *vfe_dev,
+		uint32_t *irq0_mask, uint32_t *irq1_mask);
+	void (*restore_irq_mask) (struct vfe_device *vfe_dev);
+	void (*get_halt_restart_mask) (uint32_t *irq0_mask,
+		uint32_t *irq1_mask);
 };
 struct msm_vfe_stats_ops {
 	int (*get_stats_idx) (enum msm_isp_stats_type stats_type);
@@ -299,6 +304,15 @@
 	uint32_t runtime_num_burst_capture;
 	uint8_t runtime_framedrop_update;
 	uint32_t runtime_output_format;
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+
+};
+
+enum msm_vfe_overflow_state {
+	NO_OVERFLOW,
+	OVERFLOW_DETECTED,
+	HALT_REQUESTED,
+	RESTART_REQUESTED,
 };
 
 struct msm_vfe_axi_composite_info {
@@ -315,6 +329,7 @@
 	uint32_t width;
 	long pixel_clock;
 	uint32_t input_format;/*V4L2 pix format with bayer pattern*/
+	uint32_t last_updt_frm_id;
 };
 
 enum msm_wm_ub_cfg_type {
@@ -381,6 +396,7 @@
 	atomic_t stats_comp_mask;
 	uint16_t stream_handle_cnt;
 	atomic_t stats_update;
+	uint32_t stats_mask;
 };
 
 struct msm_vfe_tasklet_queue_cmd {
@@ -394,6 +410,9 @@
 #define MSM_VFE_TASKLETQ_SIZE 200
 
 struct msm_vfe_error_info {
+	atomic_t overflow_state;
+	uint32_t overflow_recover_irq_mask0;
+	uint32_t overflow_recover_irq_mask1;
 	uint32_t error_mask0;
 	uint32_t error_mask1;
 	uint32_t violation_status;
@@ -409,6 +428,24 @@
 	uint32_t frame_id;
 };
 
+struct msm_isp_statistics {
+	int32_t imagemaster0_overflow;
+	int32_t imagemaster1_overflow;
+	int32_t imagemaster2_overflow;
+	int32_t imagemaster3_overflow;
+	int32_t imagemaster4_overflow;
+	int32_t imagemaster5_overflow;
+	int32_t imagemaster6_overflow;
+	int32_t be_overflow;
+	int32_t bg_overflow;
+	int32_t bf_overflow;
+	int32_t awb_overflow;
+	int32_t rs_overflow;
+	int32_t cs_overflow;
+	int32_t ihist_overflow;
+	int32_t skinbhist_overflow;
+};
+
 struct vfe_device {
 	struct platform_device *pdev;
 	struct msm_sd_subdev subdev;
@@ -460,6 +497,7 @@
 	void __iomem *p_avtimer_msw;
 	void __iomem *p_avtimer_lsw;
 	uint8_t ignore_error;
+	struct msm_isp_statistics *stats;
 };
 
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 4beb3c3..ba1e58c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -255,7 +255,6 @@
 static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
 {
 	uint32_t error_status1 = vfe_dev->error_info.error_mask1;
-
 	if (error_status1 & BIT(0))
 		pr_err("%s: camif error status: 0x%x\n",
 			__func__, vfe_dev->error_info.camif_status);
@@ -275,34 +274,62 @@
 		pr_err("%s: violation\n", __func__);
 		msm_vfe32_process_violation_status(vfe_dev);
 	}
-	if (error_status1 & BIT(8))
+	if (error_status1 & BIT(8)) {
+		vfe_dev->stats->imagemaster0_overflow++;
 		pr_err("%s: image master 0 bus overflow\n", __func__);
-	if (error_status1 & BIT(9))
+	}
+	if (error_status1 & BIT(9)) {
+		vfe_dev->stats->imagemaster1_overflow++;
 		pr_err("%s: image master 1 bus overflow\n", __func__);
-	if (error_status1 & BIT(10))
+	}
+	if (error_status1 & BIT(10)) {
+		vfe_dev->stats->imagemaster2_overflow++;
 		pr_err("%s: image master 2 bus overflow\n", __func__);
-	if (error_status1 & BIT(11))
+	}
+	if (error_status1 & BIT(11)) {
+		vfe_dev->stats->imagemaster3_overflow++;
 		pr_err("%s: image master 3 bus overflow\n", __func__);
-	if (error_status1 & BIT(12))
+	}
+	if (error_status1 & BIT(12)) {
+		vfe_dev->stats->imagemaster4_overflow++;
 		pr_err("%s: image master 4 bus overflow\n", __func__);
-	if (error_status1 & BIT(13))
+	}
+	if (error_status1 & BIT(13)) {
+		vfe_dev->stats->imagemaster5_overflow++;
 		pr_err("%s: image master 5 bus overflow\n", __func__);
-	if (error_status1 & BIT(14))
+	}
+	if (error_status1 & BIT(14)) {
+		vfe_dev->stats->imagemaster6_overflow++;
 		pr_err("%s: image master 6 bus overflow\n", __func__);
-	if (error_status1 & BIT(15))
+	}
+	if (error_status1 & BIT(15)) {
+		vfe_dev->stats->bg_overflow++;
 		pr_err("%s: status ae/bg bus overflow\n", __func__);
-	if (error_status1 & BIT(16))
+	}
+	if (error_status1 & BIT(16)) {
+		vfe_dev->stats->bf_overflow++;
 		pr_err("%s: status af/bf bus overflow\n", __func__);
-	if (error_status1 & BIT(17))
+	}
+	if (error_status1 & BIT(17)) {
+		vfe_dev->stats->awb_overflow++;
 		pr_err("%s: status awb bus overflow\n", __func__);
-	if (error_status1 & BIT(18))
+	}
+	if (error_status1 & BIT(18)) {
+		vfe_dev->stats->rs_overflow++;
 		pr_err("%s: status rs bus overflow\n", __func__);
-	if (error_status1 & BIT(19))
+	}
+	if (error_status1 & BIT(19)) {
+		vfe_dev->stats->cs_overflow++;
 		pr_err("%s: status cs bus overflow\n", __func__);
-	if (error_status1 & BIT(20))
+	}
+	if (error_status1 & BIT(20)) {
+		vfe_dev->stats->ihist_overflow++;
 		pr_err("%s: status ihist bus overflow\n", __func__);
-	if (error_status1 & BIT(21))
+	}
+	if (error_status1 & BIT(21)) {
+		vfe_dev->stats->skinbhist_overflow++;
 		pr_err("%s: status skin bhist bus overflow\n", __func__);
+	}
 	if (error_status1 & BIT(22))
 		pr_err("%s: axi error\n", __func__);
 }
@@ -366,19 +393,25 @@
 };
 
 static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev ,
-				enum msm_isp_reset_type reset_type)
+	enum msm_isp_reset_type reset_type, uint32_t blocking)
 {
 
 	uint32_t rst_val;
+	long rc = 0;
 	if (reset_type >= ISP_RST_MAX) {
 		pr_err("%s: Error Invalid parameter\n", __func__);
 		reset_type = ISP_RST_HARD;
 	}
 	rst_val = msm_vfe32_reset_values[reset_type];
 	init_completion(&vfe_dev->reset_complete);
-	msm_camera_io_w_mb(rst_val, vfe_dev->vfe_base + 0x4);
-	return wait_for_completion_timeout(
-	   &vfe_dev->reset_complete, msecs_to_jiffies(50));
+	if (blocking) {
+		msm_camera_io_w_mb(rst_val, vfe_dev->vfe_base + 0x4);
+		rc = wait_for_completion_timeout(
+		   &vfe_dev->reset_complete, msecs_to_jiffies(50));
+	} else {
+		msm_camera_io_w_mb(0x3EF, vfe_dev->vfe_base + 0x4);
+	}
+	return rc;
 }
 
 static void msm_vfe32_axi_reload_wm(
@@ -632,6 +665,7 @@
 		val &= 0xFFFFFF3F;
 		val = val | bus_en << 7 | vfe_en << 6;
 		msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
+		msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x1E0);
 		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
 		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
 	} else if (update_state == DISABLE_CAMIF) {
@@ -864,12 +898,15 @@
 		VFE32_PING_PONG_BASE(wm_idx, pingpong_status));
 }
 
-static long msm_vfe32_axi_halt(struct vfe_device *vfe_dev)
+static long msm_vfe32_axi_halt(struct vfe_device *vfe_dev,
+	uint32_t blocking)
 {
 	uint32_t halt_mask;
-	uint32_t axi_busy_flag = true;
-
+	uint32_t axi_busy_flag = false;
 	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
+	if (blocking) {
+		axi_busy_flag = true;
+	}
 	while (axi_busy_flag) {
 		if (msm_camera_io_r(
 			vfe_dev->vfe_base + 0x1DC) & 0x1)
@@ -1040,6 +1077,33 @@
 	return (irq_status0 >> 13) & 0x7F;
 }
 
+static void msm_vfe32_get_overflow_mask(uint32_t *overflow_mask)
+{
+	*overflow_mask = 0x002FFF7E;
+}
+
+static void msm_vfe32_get_irq_mask(struct vfe_device *vfe_dev,
+	uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+	*irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+	*irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
+}
+
+static void msm_vfe32_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+		vfe_dev->vfe_base + 0x1C);
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+		vfe_dev->vfe_base + 0x20);
+}
+
+static void msm_vfe32_get_halt_restart_mask(uint32_t *irq0_mask,
+	uint32_t *irq1_mask)
+{
+	*irq0_mask = 0x0;
+	*irq1_mask = 0x01800000;
+}
+
 static uint32_t msm_vfe32_stats_get_comp_mask(uint32_t irq_status0,
 	uint32_t irq_status1)
 {
@@ -1193,6 +1257,11 @@
 			.release_hw = msm_vfe32_release_hardware,
 			.get_platform_data = msm_vfe32_get_platform_data,
 			.get_error_mask = msm_vfe32_get_error_mask,
+			.get_overflow_mask = msm_vfe32_get_overflow_mask,
+			.get_irq_mask = msm_vfe32_get_irq_mask,
+			.restore_irq_mask = msm_vfe32_restore_irq_mask,
+			.get_halt_restart_mask =
+				msm_vfe32_get_halt_restart_mask,
 			.process_error_status = msm_vfe32_process_error_status,
 		},
 		.stats_ops = {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 827dbb0..e817680 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -350,6 +350,8 @@
 	msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x2C);
 	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
 	msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
+	msm_camera_io_w(vfe_dev->stats_data.stats_mask,
+		vfe_dev->vfe_base + 0x44);
 }
 
 static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev,
@@ -480,44 +482,74 @@
 		pr_err_ratelimited("%s: violation\n", __func__);
 		msm_vfe40_process_violation_status(vfe_dev);
 	}
-	if (error_status1 & (1 << 9))
+	if (error_status1 & (1 << 9)) {
+		vfe_dev->stats->imagemaster0_overflow++;
 		pr_err_ratelimited("%s: image master 0 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 10))
+	}
+	if (error_status1 & (1 << 10)) {
+		vfe_dev->stats->imagemaster1_overflow++;
 		pr_err_ratelimited("%s: image master 1 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 11))
+	}
+	if (error_status1 & (1 << 11)) {
+		vfe_dev->stats->imagemaster2_overflow++;
 		pr_err_ratelimited("%s: image master 2 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 12))
+	}
+	if (error_status1 & (1 << 12)) {
+		vfe_dev->stats->imagemaster3_overflow++;
 		pr_err_ratelimited("%s: image master 3 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 13))
+	}
+	if (error_status1 & (1 << 13)) {
+		vfe_dev->stats->imagemaster4_overflow++;
 		pr_err_ratelimited("%s: image master 4 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 14))
+	}
+	if (error_status1 & (1 << 14)) {
+		vfe_dev->stats->imagemaster5_overflow++;
 		pr_err_ratelimited("%s: image master 5 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 15))
+	}
+	if (error_status1 & (1 << 15)) {
+		vfe_dev->stats->imagemaster6_overflow++;
 		pr_err_ratelimited("%s: image master 6 bus overflow\n",
 			__func__);
-	if (error_status1 & (1 << 16))
+	}
+	if (error_status1 & (1 << 16)) {
+		vfe_dev->stats->be_overflow++;
 		pr_err_ratelimited("%s: status be bus overflow\n", __func__);
-	if (error_status1 & (1 << 17))
+	}
+	if (error_status1 & (1 << 17)) {
+		vfe_dev->stats->bg_overflow++;
 		pr_err_ratelimited("%s: status bg bus overflow\n", __func__);
-	if (error_status1 & (1 << 18))
+	}
+	if (error_status1 & (1 << 18)) {
+		vfe_dev->stats->bf_overflow++;
 		pr_err_ratelimited("%s: status bf bus overflow\n", __func__);
-	if (error_status1 & (1 << 19))
+	}
+	if (error_status1 & (1 << 19)) {
+		vfe_dev->stats->awb_overflow++;
 		pr_err_ratelimited("%s: status awb bus overflow\n", __func__);
-	if (error_status1 & (1 << 20))
+	}
+	if (error_status1 & (1 << 20)) {
+		vfe_dev->stats->rs_overflow++;
 		pr_err_ratelimited("%s: status rs bus overflow\n", __func__);
-	if (error_status1 & (1 << 21))
+	}
+	if (error_status1 & (1 << 21)) {
+		vfe_dev->stats->cs_overflow++;
 		pr_err_ratelimited("%s: status cs bus overflow\n", __func__);
-	if (error_status1 & (1 << 22))
+	}
+	if (error_status1 & (1 << 22)) {
+		vfe_dev->stats->ihist_overflow++;
 		pr_err_ratelimited("%s: status ihist bus overflow\n", __func__);
-	if (error_status1 & (1 << 23))
+	}
+	if (error_status1 & (1 << 23)) {
+		vfe_dev->stats->skinbhist_overflow++;
 		pr_err_ratelimited("%s: status skin bhist bus overflow\n",
 			__func__);
+	}
 }
 
 static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
@@ -590,18 +622,24 @@
 
 
 static long msm_vfe40_reset_hardware(struct vfe_device *vfe_dev ,
-				enum msm_isp_reset_type reset_type)
+	enum msm_isp_reset_type reset_type, uint32_t blocking)
 {
 	uint32_t rst_val;
+	long rc = 0;
 	if (reset_type >= ISP_RST_MAX) {
 		pr_err("%s: Error Invalid parameter\n", __func__);
 		reset_type = ISP_RST_HARD;
 	}
 	rst_val = msm_vfe40_reset_values[reset_type];
 	init_completion(&vfe_dev->reset_complete);
-	msm_camera_io_w_mb(rst_val, vfe_dev->vfe_base + 0xC);
-	return wait_for_completion_timeout(
-		&vfe_dev->reset_complete, msecs_to_jiffies(50));
+	if (blocking) {
+		msm_camera_io_w_mb(rst_val, vfe_dev->vfe_base + 0xC);
+		rc = wait_for_completion_timeout(
+			&vfe_dev->reset_complete, msecs_to_jiffies(50));
+	} else {
+		msm_camera_io_w_mb(0x1EF, vfe_dev->vfe_base + 0xC);
+	}
+	return rc;
 }
 
 static void msm_vfe40_axi_reload_wm(
@@ -894,6 +932,7 @@
 		val &= 0xFFFFFF3F;
 		val = val | bus_en << 7 | vfe_en << 6;
 		msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+		msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x2F4);
 		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2F4);
 		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
 	} else if (update_state == DISABLE_CAMIF) {
@@ -1135,16 +1174,24 @@
 		VFE40_PING_PONG_BASE(wm_idx, pingpong_status));
 }
 
-static long msm_vfe40_axi_halt(struct vfe_device *vfe_dev)
+static long msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
+	uint32_t blocking)
 {
-	uint32_t halt_mask;
-	halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
-	halt_mask |= (1 << 8);
-	msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x2C);
+	long rc = 0;
+	/* Keep only restart mask and halt mask */
+	msm_camera_io_w(BIT(31), vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w(BIT(8), vfe_dev->vfe_base + 0x2C);
+	/* Clear IRQ Status */
+	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+	msm_camera_io_w(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
 	init_completion(&vfe_dev->halt_complete);
 	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
-	return wait_for_completion_interruptible_timeout(
-		&vfe_dev->halt_complete, msecs_to_jiffies(500));
+	if (blocking) {
+		atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+		rc = wait_for_completion_interruptible_timeout(
+			&vfe_dev->halt_complete, msecs_to_jiffies(500));
+	}
+	return rc;
 }
 
 static uint32_t msm_vfe40_get_wm_mask(
@@ -1153,6 +1200,33 @@
 	return (irq_status0 >> 8) & 0x7F;
 }
 
+static void msm_vfe40_get_overflow_mask(uint32_t *overflow_mask)
+{
+	*overflow_mask = 0x00FFFE7E;
+}
+
+static void msm_vfe40_get_irq_mask(struct vfe_device *vfe_dev,
+	uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+	*irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+	*irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+}
+
+static void msm_vfe40_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+		vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+		vfe_dev->vfe_base + 0x2C);
+}
+
+static void msm_vfe40_get_halt_restart_mask(uint32_t *irq0_mask,
+	uint32_t *irq1_mask)
+{
+	*irq0_mask = BIT(31);
+	*irq1_mask = BIT(8);
+}
+
 static uint32_t msm_vfe40_get_comp_mask(
 	uint32_t irq_status0, uint32_t irq_status1)
 {
@@ -1200,6 +1274,7 @@
 	else
 		comp_mask &= ~stats_mask;
 	msm_camera_io_w(comp_mask << 16, vfe_dev->vfe_base + 0x44);
+	vfe_dev->stats_data.stats_mask = (comp_mask << 16);
 }
 
 static void msm_vfe40_stats_cfg_wm_irq_mask(
@@ -1489,6 +1564,11 @@
 			.release_hw = msm_vfe40_release_hardware,
 			.get_platform_data = msm_vfe40_get_platform_data,
 			.get_error_mask = msm_vfe40_get_error_mask,
+			.get_overflow_mask = msm_vfe40_get_overflow_mask,
+			.get_irq_mask = msm_vfe40_get_irq_mask,
+			.restore_irq_mask = msm_vfe40_restore_irq_mask,
+			.get_halt_restart_mask =
+				msm_vfe40_get_halt_restart_mask,
 			.process_error_status = msm_vfe40_process_error_status,
 		},
 		.stats_ops = {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 508bcec..4c3a3d5 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -20,6 +20,9 @@
 
 #define HANDLE_TO_IDX(handle) (handle & 0xFF)
 
+#define MSM_ISP_MIN_AB 450000000
+#define MSM_ISP_MIN_IB 900000000
+
 int msm_isp_axi_create_stream(
 	struct msm_vfe_axi_shared_data *axi_data,
 	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
@@ -475,7 +478,8 @@
 
 	framedrop_period = msm_isp_get_framedrop_period(
 			stream_cfg_cmd->frame_skip_pattern);
-
+	stream_info->frame_skip_pattern =
+			stream_cfg_cmd->frame_skip_pattern;
 	if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
 		stream_info->framedrop_pattern = 0x0;
 	else
@@ -1069,6 +1073,8 @@
 	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
 	uint32_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
 	uint32_t num_pix_streams = 0;
+	uint32_t num_rdi_streams = 0;
+	uint32_t total_streams   = 0;
 	uint64_t total_bandwidth = 0;
 
 	for (i = 0; i < MAX_NUM_STREAM; i++) {
@@ -1080,6 +1086,7 @@
 				num_pix_streams++;
 			} else {
 				total_rdi_bandwidth += stream_info->bandwidth;
+				num_rdi_streams++;
 			}
 		}
 	}
@@ -1089,10 +1096,17 @@
 			((unsigned long)axi_data->src_info[VFE_PIX_0].
 			pixel_clock) * ISP_DEFAULT_FORMAT_FACTOR / ISP_Q2;
 	total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
-
+	total_streams = num_pix_streams + num_rdi_streams;
+	if (total_streams == 1) {
+		rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+			(total_bandwidth - MSM_ISP_MIN_AB),
+			(total_bandwidth *
+			ISP_BUS_UTILIZATION_FACTOR / ISP_Q2 - MSM_ISP_MIN_IB));
+	} else {
 	rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
 		total_bandwidth, total_bandwidth *
 		ISP_BUS_UTILIZATION_FACTOR / ISP_Q2);
+	}
 	if (rc < 0)
 		pr_err("%s: update failed\n", __func__);
 
@@ -1318,9 +1332,9 @@
 	if (cur_stream_cnt == 0) {
 		vfe_dev->ignore_error = 1;
 		if (camif_update == DISABLE_CAMIF_IMMEDIATELY) {
-			vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev);
+			vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
 		}
-		vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, ISP_RST_HARD);
+		vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, ISP_RST_HARD, 1);
 		vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
 		vfe_dev->ignore_error = 0;
 	}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index 7d282bd..2314300 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -37,6 +37,10 @@
 	struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
 
+void msm_isp_calculate_framedrop(
+		struct msm_vfe_axi_shared_data *axi_data,
+		struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+
 int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
 int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
 int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index cb46e9c..b1521df 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -130,7 +130,10 @@
 	mutex_lock(&bandwidth_mgr_mutex);
 	if (!isp_bandwidth_mgr.use_count ||
 		!isp_bandwidth_mgr.bus_client) {
-		pr_err("%s: bandwidth manager inactive\n", __func__);
+		pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
+			__func__, isp_bandwidth_mgr.use_count,
+			isp_bandwidth_mgr.bus_client);
+		mutex_unlock(&bandwidth_mgr_mutex);
 		return -EINVAL;
 	}
 
@@ -166,8 +169,11 @@
 		return;
 	}
 
-	if (!isp_bandwidth_mgr.bus_client)
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__);
+		mutex_unlock(&bandwidth_mgr_mutex);
 		return;
+	}
 
 	msm_bus_scale_client_update_request(
 	   isp_bandwidth_mgr.bus_client, 0);
@@ -631,6 +637,33 @@
 		}
 		break;
 	}
+	case VFE_HW_UPDATE_LOCK: {
+		uint32_t update_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
+			|| update_id == *cfg_data) {
+			pr_err("hw update lock failed, acquire id %u\n",
+				*cfg_data);
+			pr_err("hw update lock failed, current id %lu\n",
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+			pr_err("hw update lock failed, last id %u\n",
+				update_id);
+			return -EINVAL;
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_UNLOCK: {
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
+			!= *cfg_data) {
+			pr_err("hw update across frame boundary, begin id %u\n",
+				*cfg_data);
+			pr_err("hw update across frame boundary, end id %lu\n",
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+			vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		}
+		break;
+	}
 	case VFE_READ: {
 		int i;
 		uint32_t *data_ptr = cfg_data +
@@ -720,7 +753,7 @@
 	}
 
 	for (i = 0; i < proc_cmd->num_cfg; i++)
-		msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
+		rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
 			cfg_data, proc_cmd->cmd_len);
 
 	if (copy_to_user(proc_cmd->cfg_data,
@@ -981,6 +1014,125 @@
 	vfe_dev->error_info.error_count++;
 }
 
+static inline void msm_isp_process_overflow_irq(
+	struct vfe_device *vfe_dev,
+	uint32_t *irq_status0, uint32_t *irq_status1)
+{
+	uint32_t overflow_mask;
+	uint32_t halt_restart_mask0, halt_restart_mask1;
+	/*Mask out all other irqs if recovery is started*/
+	if (atomic_read(&vfe_dev->error_info.overflow_state) !=
+		NO_OVERFLOW) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			get_halt_restart_mask(&halt_restart_mask0,
+				&halt_restart_mask1);
+		*irq_status0 &= halt_restart_mask0;
+		*irq_status1 &= halt_restart_mask1;
+		return;
+	}
+
+	/*Check if any overflow bit is set*/
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_overflow_mask(&overflow_mask);
+	overflow_mask &= *irq_status1;
+	if (overflow_mask) {
+		pr_warning("%s: Bus overflow detected: 0x%x\n",
+			__func__, overflow_mask);
+		atomic_set(&vfe_dev->error_info.overflow_state,
+			OVERFLOW_DETECTED);
+		pr_warning("%s: Start bus overflow recovery\n", __func__);
+		/*Store current IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+			&vfe_dev->error_info.overflow_recover_irq_mask0,
+			&vfe_dev->error_info.overflow_recover_irq_mask1);
+		/*Stop CAMIF Immediately*/
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+		/*Halt the hardware & Clear all other IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
+		/*Update overflow state*/
+		atomic_set(&vfe_dev->error_info.overflow_state, HALT_REQUESTED);
+		*irq_status0 = 0;
+		*irq_status1 = 0;
+	}
+}
+
+static inline void msm_isp_reset_burst_count(
+	struct vfe_device *vfe_dev)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_stream_request_cmd framedrop_info;
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state != ACTIVE)
+			continue;
+		if (stream_info->stream_type == BURST_STREAM &&
+			stream_info->num_burst_capture != 0) {
+			framedrop_info.burst_count =
+				stream_info->num_burst_capture;
+			framedrop_info.frame_skip_pattern =
+				stream_info->frame_skip_pattern;
+			framedrop_info.init_frame_drop = 0;
+			msm_isp_calculate_framedrop(&vfe_dev->axi_data,
+				&framedrop_info);
+		}
+	}
+}
+
+static void msm_isp_process_overflow_recovery(
+	struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1)
+{
+	uint32_t halt_restart_mask0, halt_restart_mask1;
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_halt_restart_mask(&halt_restart_mask0,
+							  &halt_restart_mask1);
+	irq_status0 &= halt_restart_mask0;
+	irq_status1 &= halt_restart_mask1;
+	if (irq_status0 == 0 && irq_status1 == 0)
+		return;
+
+	switch (atomic_read(&vfe_dev->error_info.overflow_state)) {
+	case HALT_REQUESTED: {
+		pr_err("%s: Halt done, Restart Pending\n", __func__);
+		/*Reset the hardware*/
+		vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+			ISP_RST_SOFT, 0);
+		/*Update overflow state*/
+		atomic_set(&vfe_dev->error_info.overflow_state,
+				RESTART_REQUESTED);
+	}
+		break;
+	case RESTART_REQUESTED: {
+		pr_err("%s: Restart done, Resuming\n", __func__);
+		/* Reset the burst stream frame drop pattern. If a bus
+		 * overflow happens during a burst shot, the framedrop
+		 * pattern might be updated after the reg update to skip
+		 * all frames following the burst shot. The burst shot
+		 * might not have completed because of the overflow, so
+		 * the framedrop pattern needs to change back to the
+		 * original settings in order to recover from overflow.
+		 */
+		msm_isp_reset_burst_count(vfe_dev);
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			reload_wm(vfe_dev, 0xFFFFFFFF);
+		vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+		vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev);
+		memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+		atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, ENABLE_CAMIF);
+	}
+		break;
+	case NO_OVERFLOW:
+	case OVERFLOW_DETECTED:
+	default:
+		break;
+	}
+}
+
 irqreturn_t msm_isp_process_irq(int irq_num, void *data)
 {
 	unsigned long flags;
@@ -991,6 +1143,8 @@
 
 	vfe_dev->hw_info->vfe_ops.irq_ops.
 		read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+	msm_isp_process_overflow_irq(vfe_dev,
+		&irq_status0, &irq_status1);
 	vfe_dev->hw_info->vfe_ops.core_ops.
 		get_error_mask(&error_mask0, &error_mask1);
 	error_mask0 &= irq_status0;
@@ -1055,6 +1209,13 @@
 		irq_status1 = queue_cmd->vfeInterruptStatus1;
 		ts = queue_cmd->ts;
 		spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+		if (atomic_read(&vfe_dev->error_info.overflow_state) !=
+			NO_OVERFLOW) {
+			pr_err("Overflow detected, kicking off recovery\n");
+			msm_isp_process_overflow_recovery(vfe_dev,
+				irq_status0, irq_status1);
+			continue;
+		}
 		ISP_DBG("%s: status0: 0x%x status1: 0x%x\n",
 			__func__, irq_status0, irq_status1);
 		irq_ops->process_reset_irq(vfe_dev,
@@ -1105,7 +1266,10 @@
 		return -EBUSY;
 	}
 
-	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, ISP_RST_HARD);
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+		ISP_RST_HARD, 1);
 	if (rc <= 0) {
 		pr_err("%s: reset timeout\n", __func__);
 		mutex_unlock(&vfe_dev->core_mutex);
@@ -1157,7 +1321,7 @@
 		return -ENODEV;
 	}
 
-	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev);
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
 	if (rc <= 0)
 		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
 
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index a08fad7..8f99ff6 100755
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -64,6 +64,25 @@
 		false : true;
 }
 
+static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
+	{"ispif_ahb_clk", NO_SET_RATE},
+	{"camss_top_ahb_clk", NO_SET_RATE},
+	{"csi0_ahb_clk", NO_SET_RATE},
+	{"csi0_src_clk", NO_SET_RATE},
+	{"csi0_phy_clk", NO_SET_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_ahb_clk", NO_SET_RATE},
+	{"csi1_src_clk", NO_SET_RATE},
+	{"csi1_phy_clk", NO_SET_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"camss_vfe_vfe_clk", NO_SET_RATE},
+	{"camss_csi_vfe_clk", NO_SET_RATE},
+};
+
 static struct msm_cam_clk_info ispif_8974_ahb_clk_info[] = {
 	{"ispif_ahb_clk", -1},
 };
@@ -98,13 +117,26 @@
 	int rc = 0;
 	long timeout = 0;
 	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+	ispif->clk_idx = 0;
 
 	rc = msm_cam_clk_enable(&ispif->pdev->dev,
 		ispif_8974_reset_clk_info, reset_clk,
 		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
 	if (rc < 0) {
-		pr_err("%s: cannot enable clock, error = %d",
-			__func__, rc);
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set if device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set if device is 8974 */
+		ispif->clk_idx = 1;
 	}
 
 	init_completion(&ispif->reset_complete[VFE0]);
@@ -121,11 +153,19 @@
 	timeout = wait_for_completion_timeout(
 			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
 	CDBG("%s: VFE0 done\n", __func__);
+
 	if (timeout <= 0) {
 		pr_err("%s: VFE0 reset wait timeout\n", __func__);
-		msm_cam_clk_enable(&ispif->pdev->dev,
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
 			ispif_8974_reset_clk_info, reset_clk,
 			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			rc = msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8626_reset_clk_info, reset_clk1,
+				ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+			if (rc < 0)
+				pr_err("%s: cannot disable clock\n", __func__);
+		}
 		return -ETIMEDOUT;
 	}
 
@@ -143,13 +183,26 @@
 		}
 	}
 
-	rc = msm_cam_clk_enable(&ispif->pdev->dev,
-		ispif_8974_reset_clk_info, reset_clk,
-		ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
-	if (rc < 0) {
-		pr_err("%s: cannot disable clock, error = %d",
-			__func__, rc);
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
 	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
 	return rc;
 }
 
@@ -941,11 +994,7 @@
 		goto error_ahb;
 	}
 
-	if (of_device_is_compatible(ispif->pdev->dev.of_node,
-				    "qcom,ispif-v3.0")) {
-		/* currently HW reset is implemented for 8974 only */
-		msm_ispif_reset_hw(ispif);
-	}
+	msm_ispif_reset_hw(ispif);
 
 	rc = msm_ispif_reset(ispif);
 	if (rc == 0) {
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index 45e7354..10dbfb6 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -60,5 +60,6 @@
 	struct clk *ahb_clk;
 	struct completion reset_complete[VFE_MAX];
 	uint32_t hw_num_isps;
+	uint32_t clk_idx;
 };
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index a53ac38..d3a848a 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -37,6 +37,7 @@
 #include <media/msmb_camera.h>
 #include <media/msmb_generic_buf_mgr.h>
 #include <media/msmb_pproc.h>
+#include <mach/clk-provider.h>
 #include "msm_cpp.h"
 #include "msm_isp_util.h"
 #include "msm_camera_io_util.h"
@@ -49,6 +50,7 @@
 #define CONFIG_MSM_CPP_DBG 0
 
 #define CPP_CMD_TIMEOUT_MS 300
+#define MSM_CPP_CORE_CLK_IDX 4
 #define MSM_MICRO_IFACE_CLK_IDX 7
 
 #define MSM_CPP_NOMINAL_CLOCK 266670000
@@ -98,6 +100,8 @@
 	qcmd;			 \
 })
 
+#define MSM_CPP_MAX_TIMEOUT_TRIAL 3
+
 static void msm_queue_init(struct msm_device_queue *queue, const char *name)
 {
 	CPP_DBG("E\n");
@@ -135,6 +139,23 @@
 	{"cpp_bus_clk", -1},
 	{"micro_iface_clk", -1},
 };
+
+#define msm_cpp_empty_list(queue, member) do { \
+	unsigned long flags; \
+	struct msm_queue_cmd *qcmd = NULL; \
+	if (queue) { \
+		spin_lock_irqsave(&queue->lock, flags); \
+		while (!list_empty(&queue->list)) { \
+			queue->len--; \
+			qcmd = list_first_entry(&queue->list, \
+				struct msm_queue_cmd, member); \
+			list_del_init(&qcmd->member); \
+			kfree(qcmd); \
+		} \
+		spin_unlock_irqrestore(&queue->lock, flags); \
+	} \
+} while (0)
+
 static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev);
 static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin);
 static void cpp_timer_callback(unsigned long data);
@@ -147,6 +168,14 @@
 	writel_relaxed((data), cpp_base + MSM_CPP_MICRO_FIFO_RX_DATA);
 }
 
+static void msm_cpp_clear_timer(struct cpp_device *cpp_dev)
+{
+	atomic_set(&cpp_timer.used, 0);
+	del_timer(&cpp_timer.cpp_timer);
+	cpp_timer.data.processed_frame = NULL;
+	cpp_dev->timeout_trial_cnt = 0;
+}
+
 static uint32_t msm_cpp_read(void __iomem *cpp_base)
 {
 	uint32_t tmp, retry = 0;
@@ -572,7 +601,6 @@
 	uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
 	struct cpp_device *cpp_dev = (struct cpp_device *) data;
 	struct msm_cpp_tasklet_queue_cmd *queue_cmd;
-	struct msm_cpp_timer_t *timer = NULL;
 
 	while (atomic_read(&cpp_dev->irq_cnt)) {
 		spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
@@ -601,19 +629,13 @@
 					CPP_DBG("Frame done!!\n");
 					/* delete CPP timer */
 					CPP_DBG("delete timer.\n");
-					timer = &cpp_timer;
-					atomic_set(&timer->used, 0);
-					del_timer(&timer->cpp_timer);
-					timer->data.processed_frame = NULL;
+					msm_cpp_clear_timer(cpp_dev);
 					msm_cpp_notify_frame_done(cpp_dev);
 				} else if (msg_id ==
 					MSM_CPP_MSG_ID_FRAME_NACK) {
 					pr_err("NACK error from hw!!\n");
 					CPP_DBG("delete timer.\n");
-					timer = &cpp_timer;
-					atomic_set(&timer->used, 0);
-					del_timer(&timer->cpp_timer);
-					timer->data.processed_frame = NULL;
+					msm_cpp_clear_timer(cpp_dev);
 					msm_cpp_notify_frame_done(cpp_dev);
 				}
 				i += cmd_len + 2;
@@ -622,6 +644,27 @@
 	}
 }
 
+static void cpp_get_clk_freq_tbl(struct clk *clk, struct cpp_hw_info *hw_info)
+{
+	uint32_t count;
+	signed long freq_tbl_entry = 0;
+
+	if ((clk == NULL) || (hw_info == NULL) || (clk->ops == NULL) ||
+		(clk->ops->list_rate == NULL)) {
+		pr_err("Bad parameter\n");
+		return;
+	}
+
+	for (count = 0; count < MAX_FREQ_TBL; count++) {
+		freq_tbl_entry = clk->ops->list_rate(clk, count);
+		if (freq_tbl_entry >= 0)
+			hw_info->freq_tbl[count] = freq_tbl_entry;
+		else
+			break;
+	}
+
+	hw_info->freq_tbl_count = count;
+}
 static int cpp_init_hardware(struct cpp_device *cpp_dev)
 {
 	int rc = 0;
@@ -630,7 +673,6 @@
 		pr_err("%s: Bandwidth registration Failed!\n", __func__);
 		goto bus_scale_register_failed;
 	}
-	msm_isp_update_bandwidth(ISP_CPP, 981345600, 1066680000);
 
 	if (cpp_dev->fs_cpp == NULL) {
 		cpp_dev->fs_cpp =
@@ -735,11 +777,15 @@
 	pr_debug("CPP HW Version: 0x%x\n", cpp_dev->hw_info.cpp_hw_version);
 	cpp_dev->hw_info.cpp_hw_caps =
 		msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4);
+	cpp_get_clk_freq_tbl(cpp_dev->cpp_clk[MSM_CPP_CORE_CLK_IDX],
+		&cpp_dev->hw_info);
 	pr_debug("CPP HW Caps: 0x%x\n", cpp_dev->hw_info.cpp_hw_caps);
 	msm_camera_io_w(0x1, cpp_dev->vbif_base + 0x4);
 	cpp_dev->taskletq_idx = 0;
 	atomic_set(&cpp_dev->irq_cnt, 0);
 	msm_cpp_create_buff_queue(cpp_dev, MSM_CPP_MAX_BUFF_QUEUE);
+	pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
+	cpp_dev->stream_cnt = 0;
 	if (cpp_dev->is_firmware_loaded == 1) {
 		disable_irq(cpp_dev->irq->start);
 		cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
@@ -763,7 +809,6 @@
 	regulator_disable(cpp_dev->fs_cpp);
 	regulator_put(cpp_dev->fs_cpp);
 fs_failed:
-	msm_isp_update_bandwidth(ISP_CPP, 0, 0);
 	msm_isp_deinit_bandwidth_mgr(ISP_CPP);
 bus_scale_register_failed:
 	return rc;
@@ -790,7 +835,11 @@
 	regulator_disable(cpp_dev->fs_cpp);
 	regulator_put(cpp_dev->fs_cpp);
 	cpp_dev->fs_cpp = NULL;
-	msm_isp_update_bandwidth(ISP_CPP, 0, 0);
+	if (cpp_dev->stream_cnt > 0) {
+		pr_debug("error: stream count active\n");
+		msm_isp_update_bandwidth(ISP_CPP, 0, 0);
+	}
+	cpp_dev->stream_cnt = 0;
 	msm_isp_deinit_bandwidth_mgr(ISP_CPP);
 }
 
@@ -919,9 +968,19 @@
 {
 	uint32_t i;
 	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+	struct msm_device_queue *processing_q = NULL;
+	struct msm_device_queue *eventData_q = NULL;
+
+	if (!cpp_dev) {
+		pr_err("failed: cpp_dev %p\n", cpp_dev);
+		return -EINVAL;
+	}
 
 	mutex_lock(&cpp_dev->mutex);
 
+	processing_q = &cpp_dev->processing_q;
+	eventData_q = &cpp_dev->eventData_q;
+
 	if (cpp_dev->cpp_open_cnt == 0) {
 		mutex_unlock(&cpp_dev->mutex);
 		return 0;
@@ -973,9 +1032,12 @@
 		pr_debug("DEBUG_R1: 0x%x\n",
 			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
 		msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
+		msm_cpp_clear_timer(cpp_dev);
 		cpp_deinit_mem(cpp_dev);
 		iommu_detach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
 		cpp_release_hardware(cpp_dev);
+		msm_cpp_empty_list(processing_q, list_frame);
+		msm_cpp_empty_list(eventData_q, list_eventdata);
 		cpp_dev->state = CPP_STATE_OFF;
 	}
 
@@ -1005,7 +1067,7 @@
 	struct v4l2_event v4l2_evt;
 	struct msm_queue_cmd *frame_qcmd = NULL;
 	struct msm_queue_cmd *event_qcmd = NULL;
-	struct msm_cpp_frame_info_t *processed_frame;
+	struct msm_cpp_frame_info_t *processed_frame = NULL;
 	struct msm_device_queue *queue = &cpp_dev->processing_q;
 	struct msm_buf_mngr_info buff_mgr_info;
 	int rc = 0;
@@ -1097,8 +1159,9 @@
 
 	pr_err("cpp_timer_callback called. (jiffies=%lu)\n",
 		jiffies);
-	if (!work) {
-		pr_err("Invalid work:%p\n", work);
+	if (!work || cpp_timer.data.cpp_dev->state != CPP_STATE_ACTIVE) {
+		pr_err("Invalid work:%p or state:%d\n", work,
+			cpp_timer.data.cpp_dev->state);
 		return;
 	}
 	if (!atomic_read(&cpp_timer.used)) {
@@ -1122,6 +1185,14 @@
 		return;
 	}
 
+	if (cpp_timer.data.cpp_dev->timeout_trial_cnt >=
+		MSM_CPP_MAX_TIMEOUT_TRIAL) {
+		pr_info("Max trial reached\n");
+		msm_cpp_notify_frame_done(cpp_timer.data.cpp_dev);
+		cpp_timer.data.cpp_dev->timeout_trial_cnt = 0;
+		return;
+	}
+
 	this_frame = cpp_timer.data.processed_frame;
 	pr_err("Starting timer to fire in %d ms. (jiffies=%lu)\n",
 		CPP_CMD_TIMEOUT_MS, jiffies);
@@ -1138,6 +1209,7 @@
 	for (i = 0; i < this_frame->msg_len; i++)
 		msm_cpp_write(this_frame->cpp_cmd_msg[i],
 			cpp_timer.data.cpp_dev->base);
+	cpp_timer.data.cpp_dev->timeout_trial_cnt++;
 	return;
 }
 
@@ -1394,6 +1466,26 @@
 	return rc;
 }
 
+void msm_cpp_clean_queue(struct cpp_device *cpp_dev)
+{
+	struct msm_queue_cmd *frame_qcmd = NULL;
+	struct msm_cpp_frame_info_t *processed_frame = NULL;
+	struct msm_device_queue *queue = NULL;
+
+	while (cpp_dev->processing_q.len) {
+		pr_info("queue len:%d\n", cpp_dev->processing_q.len);
+		queue = &cpp_dev->processing_q;
+		frame_qcmd = msm_dequeue(queue, list_frame);
+		if (frame_qcmd) {
+			processed_frame = frame_qcmd->command;
+			kfree(frame_qcmd);
+			if (processed_frame)
+				kfree(processed_frame->cpp_cmd_msg);
+			kfree(processed_frame);
+		}
+	}
+}
+
 long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
 			unsigned int cmd, void *arg)
 {
@@ -1551,6 +1643,23 @@
 
 		kfree(k_stream_buff_info.buffer_info);
 		kfree(u_stream_buff_info);
+		if (cpp_dev->stream_cnt == 0) {
+			rc = msm_isp_update_bandwidth(ISP_CPP, 981345600,
+				1066680000);
+			if (rc < 0) {
+				pr_err("Bandwidth Set Failed!\n");
+				msm_isp_update_bandwidth(ISP_CPP, 0, 0);
+				mutex_unlock(&cpp_dev->mutex);
+				return -EINVAL;
+			}
+			cpp_dev->state = CPP_STATE_ACTIVE;
+			msm_cpp_clear_timer(cpp_dev);
+			msm_cpp_clean_queue(cpp_dev);
+		}
+		if (cmd != VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO) {
+			cpp_dev->stream_cnt++;
+			pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
+		}
 		break;
 	}
 	case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO: {
@@ -1558,7 +1667,7 @@
 		struct msm_cpp_buff_queue_info_t *buff_queue_info;
 
 		if ((ioctl_ptr->len == 0) ||
-		    (ioctl_ptr->len > sizeof(uint32_t)))
+			(ioctl_ptr->len > sizeof(uint32_t)))
 			return -EINVAL;
 
 		rc = (copy_from_user(&identity,
@@ -1583,6 +1692,21 @@
 		rc = msm_cpp_free_buff_queue_entry(cpp_dev,
 			buff_queue_info->session_id,
 			buff_queue_info->stream_id);
+		if (cpp_dev->stream_cnt > 0) {
+			cpp_dev->stream_cnt--;
+			pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
+			if (cpp_dev->stream_cnt == 0) {
+				rc = msm_isp_update_bandwidth(ISP_CPP, 0, 0);
+				if (rc < 0)
+					pr_err("Bandwidth Reset Failed!\n");
+				cpp_dev->state = CPP_STATE_IDLE;
+				msm_cpp_clear_timer(cpp_dev);
+				msm_cpp_clean_queue(cpp_dev);
+			}
+		} else {
+			pr_err("error: stream count underflow %d\n",
+				cpp_dev->stream_cnt);
+		}
 		break;
 	}
 	case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
@@ -1610,7 +1734,9 @@
 		break;
 	}
 	case VIDIOC_MSM_CPP_SET_CLOCK: {
-		long clock_rate = 0;
+		struct msm_cpp_clock_settings_t clock_settings;
+		unsigned long clock_rate = 0;
+		CPP_DBG("VIDIOC_MSM_CPP_SET_CLOCK\n");
 		if (ioctl_ptr->len == 0) {
 			pr_err("ioctl_ptr->len is 0\n");
 			mutex_unlock(&cpp_dev->mutex);
@@ -1623,13 +1749,13 @@
 			return -EINVAL;
 		}
 
-		if (ioctl_ptr->len > sizeof(clock_rate)) {
+		if (ioctl_ptr->len != sizeof(struct msm_cpp_clock_settings_t)) {
 			pr_err("Not valid ioctl_ptr->len\n");
 			mutex_unlock(&cpp_dev->mutex);
 			return -EINVAL;
 		}
 
-		rc = (copy_from_user(&clock_rate,
+		rc = (copy_from_user(&clock_settings,
 			(void __user *)ioctl_ptr->ioctl_ptr,
 			ioctl_ptr->len) ? -EFAULT : 0);
 		if (rc) {
@@ -1638,27 +1764,37 @@
 			return -EINVAL;
 		}
 
-		if (clock_rate > 0) {
-			clock_rate =
-				clk_round_rate(cpp_dev->cpp_clk[4], clock_rate);
-			CPP_DBG("clk:%ld\n", clock_rate);
-			clk_set_rate(cpp_dev->cpp_clk[4], clock_rate);
-			rc = msm_isp_update_bandwidth(ISP_CPP, clock_rate * 4,
-				clock_rate * 6);
+		if (clock_settings.clock_rate > 0) {
+			rc = msm_isp_update_bandwidth(ISP_CPP,
+				clock_settings.avg,
+				clock_settings.inst);
 			if (rc < 0) {
 				pr_err("Bandwidth Set Failed!\n");
 				msm_isp_update_bandwidth(ISP_CPP, 0, 0);
 				mutex_unlock(&cpp_dev->mutex);
 				return -EINVAL;
 			}
+			clock_rate = clk_round_rate(
+				cpp_dev->cpp_clk[MSM_CPP_CORE_CLK_IDX],
+				clock_settings.clock_rate);
+			if (clock_rate != clock_settings.clock_rate)
+				pr_err("clock rate differs from settings\n");
+			clk_set_rate(cpp_dev->cpp_clk[MSM_CPP_CORE_CLK_IDX],
+				clock_rate);
 		}
-
 		break;
 	}
 	case MSM_SD_SHUTDOWN: {
 		mutex_unlock(&cpp_dev->mutex);
+		pr_info("shutdown cpp node. open cnt:%d\n",
+			cpp_dev->cpp_open_cnt);
+
+		if (atomic_read(&cpp_timer.used))
+			pr_info("Timer state not cleared\n");
+
 		while (cpp_dev->cpp_open_cnt != 0)
 			cpp_close_node(sd, NULL);
+		mutex_lock(&cpp_dev->mutex);
 		rc = 0;
 		break;
 	}
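
The stream-buffer handling above follows a first-on/last-off pattern: the first enqueued stream votes for ISP bandwidth, marks the device active and clears the timer and processing queue, and the last dequeued stream drops the vote and returns the device to idle. A minimal sketch of that pattern in isolation follows; the names and the helper vote_bandwidth() (standing in for msm_isp_update_bandwidth()) are placeholders, not the driver's API.

	/* Illustrative sketch only; not the msm_cpp driver code. */
	struct stream_ctx {
		unsigned int stream_cnt;	/* registered streams */
		int active;			/* device state */
	};

	static int vote_bandwidth(unsigned long avg, unsigned long peak)
	{
		return 0;			/* assume the vote succeeds */
	}

	static int stream_register(struct stream_ctx *ctx)
	{
		if (ctx->stream_cnt == 0) {	/* first stream: vote and activate */
			if (vote_bandwidth(981345600UL, 1066680000UL) < 0)
				return -1;
			ctx->active = 1;
		}
		ctx->stream_cnt++;
		return 0;
	}

	static void stream_deregister(struct stream_ctx *ctx)
	{
		if (ctx->stream_cnt == 0)	/* guard against underflow */
			return;
		if (--ctx->stream_cnt == 0) {	/* last stream: drop the vote */
			vote_bandwidth(0, 0);
			ctx->active = 0;
		}
	}
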
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
index cf22e6c..bd73ab2 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -161,6 +161,13 @@
 	struct cpp_device *cpp_dev;
 };
 
+struct msm_cpp_clock_settings_t {
+	long clock_rate;
+	uint64_t avg;
+	uint64_t inst;
+};
+
+
 struct cpp_device {
 	struct platform_device *pdev;
 	struct msm_sd_subdev msm_sd;
@@ -182,6 +189,8 @@
 	char *fw_name_bin;
 	struct workqueue_struct *timer_wq;
 	struct msm_cpp_work_t *work;
+	uint8_t stream_cnt;
+	uint8_t timeout_trial_cnt;
 
 	int domain_num;
 	struct iommu_domain *domain;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index 7805930..cce6525 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -27,6 +27,10 @@
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
 #endif
 
+
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl);
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl);
+
 static struct msm_actuator msm_vcm_actuator_table;
 static struct msm_actuator msm_piezo_actuator_table;
 
@@ -156,6 +160,7 @@
 					settings[i].i2c_operation);
 				break;
 			}
+			break;
 		}
 		case MSM_ACT_POLL: {
 			switch (settings[i].data_type) {
@@ -178,6 +183,7 @@
 					settings[i].i2c_operation);
 				break;
 			}
+			break;
 		}
 		}
 
@@ -302,6 +308,21 @@
 	if (dest_step_pos == a_ctrl->curr_step_pos)
 		return rc;
 
+	if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) ||
+		(sign_dir < MSM_ACTUATOR_MOVE_SIGNED_FAR)) {
+		pr_err("Invalid sign_dir = %d\n", sign_dir);
+		return -EFAULT;
+	}
+	if ((dir > MOVE_FAR) || (dir < MOVE_NEAR)) {
+		pr_err("Invalid direction = %d\n", dir);
+		return -EFAULT;
+	}
+	if (dest_step_pos > a_ctrl->total_steps) {
+		pr_err("Step pos greater than total steps = %d\n",
+		dest_step_pos);
+		return -EFAULT;
+	}
+	curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
 	a_ctrl->i2c_tbl_index = 0;
 	CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
 		a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
@@ -372,6 +393,12 @@
 	kfree(a_ctrl->step_position_table);
 	a_ctrl->step_position_table = NULL;
 
+	if (set_info->af_tuning_params.total_steps
+		>  MAX_ACTUATOR_AF_TOTAL_STEPS) {
+		pr_err("Max actuator total steps exceeded = %d\n",
+		set_info->af_tuning_params.total_steps);
+		return -EFAULT;
+	}
 	/* Fill step position table */
 	a_ctrl->step_position_table =
 		kmalloc(sizeof(uint16_t) *
@@ -424,6 +451,31 @@
 	return rc;
 }
 
+static int32_t msm_actuator_vreg_control(struct msm_actuator_ctrl_t *a_ctrl,
+							int config)
+{
+	int rc = 0, i, cnt;
+	struct msm_actuator_vreg *vreg_cfg;
+
+	vreg_cfg = &a_ctrl->vreg_cfg;
+	cnt = vreg_cfg->num_vreg;
+	if (!cnt)
+		return 0;
+
+	if (cnt >= MSM_ACTUATOT_MAX_VREGS) {
+		pr_err("%s failed %d cnt %d\n", __func__, __LINE__, cnt);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		rc = msm_camera_config_single_vreg(&(a_ctrl->pdev->dev),
+			&vreg_cfg->cam_vreg[i],
+			(struct regulator **)&vreg_cfg->data[i],
+			config);
+	}
+	return rc;
+}
+
 static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
 {
 	int32_t rc = 0;
@@ -433,6 +485,11 @@
 		if (!rc)
 			gpio_free(a_ctrl->vcm_pwd);
 	}
+	rc = msm_actuator_vreg_control(a_ctrl, 0);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
 
 	kfree(a_ctrl->step_position_table);
 	a_ctrl->step_position_table = NULL;
@@ -500,12 +557,19 @@
 		pr_err("Actuator function table not found\n");
 		return rc;
 	}
-
-	a_ctrl->region_size = set_info->af_tuning_params.region_size;
-	if (a_ctrl->region_size > MAX_ACTUATOR_REGION) {
+	if (set_info->af_tuning_params.total_steps
+		>  MAX_ACTUATOR_AF_TOTAL_STEPS) {
+		pr_err("Max actuator total steps exceeded = %d\n",
+		set_info->af_tuning_params.total_steps);
+		return -EFAULT;
+	}
+	if (set_info->af_tuning_params.region_size
+		> MAX_ACTUATOR_REGION) {
 		pr_err("MAX_ACTUATOR_REGION is exceeded.\n");
 		return -EFAULT;
 	}
+
+	a_ctrl->region_size = set_info->af_tuning_params.region_size;
 	a_ctrl->pwd_step = set_info->af_tuning_params.pwd_step;
 	a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
 
@@ -555,7 +619,9 @@
 		return -EFAULT;
 	}
 
-	if (set_info->actuator_params.init_setting_size) {
+	if (set_info->actuator_params.init_setting_size &&
+		set_info->actuator_params.init_setting_size
+		<= MAX_ACTUATOR_REG_TBL_SIZE) {
 		if (a_ctrl->func_tbl->actuator_init_focus) {
 			init_settings = kmalloc(sizeof(struct reg_settings_t) *
 				(set_info->actuator_params.init_setting_size),
@@ -639,6 +705,13 @@
 		if (rc < 0)
 			pr_err("actuator_set_position failed %d\n", rc);
 		break;
+
+	case CFG_ACTUATOR_POWERUP:
+		rc = msm_actuator_power_up(a_ctrl);
+		if (rc < 0)
+			pr_err("Failed actuator power up %d\n", rc);
+		break;
+
 	default:
 		break;
 	}
@@ -762,6 +835,13 @@
 
 	CDBG("vcm info: %x %x\n", a_ctrl->vcm_pwd,
 		a_ctrl->vcm_enable);
+
+	rc = msm_actuator_vreg_control(a_ctrl, 1);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+
 	if (a_ctrl->vcm_enable) {
 		rc = gpio_request(a_ctrl->vcm_pwd, "msm_actuator");
 		if (!rc) {
@@ -877,6 +957,7 @@
 	int32_t rc = 0;
 	struct msm_camera_cci_client *cci_client = NULL;
 	struct msm_actuator_ctrl_t *msm_actuator_t = NULL;
+	struct msm_actuator_vreg *vreg_cfg;
 	CDBG("Enter\n");
 
 	if (!pdev->dev.of_node) {
@@ -894,6 +975,7 @@
 		&pdev->id);
 	CDBG("cell-index %d, rc %d\n", pdev->id, rc);
 	if (rc < 0) {
+		kfree(msm_actuator_t);
 		pr_err("failed rc %d\n", rc);
 		return rc;
 	}
@@ -902,10 +984,23 @@
 		&msm_actuator_t->cci_master);
 	CDBG("qcom,cci-master %d, rc %d\n", msm_actuator_t->cci_master, rc);
 	if (rc < 0) {
+		kfree(msm_actuator_t);
 		pr_err("failed rc %d\n", rc);
 		return rc;
 	}
 
+	if (of_find_property((&pdev->dev)->of_node,
+			"qcom,cam-vreg-name", NULL)) {
+		vreg_cfg = &msm_actuator_t->vreg_cfg;
+		rc = msm_camera_get_dt_vreg_data((&pdev->dev)->of_node,
+			&vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+		if (rc < 0) {
+			kfree(msm_actuator_t);
+			pr_err("failed rc %d\n", rc);
+			return rc;
+		}
+	}
+
 	msm_actuator_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops;
 	msm_actuator_t->actuator_mutex = &msm_actuator_mutex;
 	msm_actuator_t->cam_name = pdev->id;
@@ -918,6 +1013,8 @@
 	msm_actuator_t->i2c_client.cci_client = kzalloc(sizeof(
 		struct msm_camera_cci_client), GFP_KERNEL);
 	if (!msm_actuator_t->i2c_client.cci_client) {
+		kfree(msm_actuator_t->vreg_cfg.cam_vreg);
+		kfree(msm_actuator_t);
 		pr_err("failed no memory\n");
 		return -ENOMEM;
 	}
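
The actuator changes above add range checks on caller-supplied values (sign_dir, dir, dest_step_pos, total_steps, region_size, init_setting_size) before they are used to index tables or size allocations. Below is a minimal sketch of that validate-before-use pattern; the limit and helper are illustrative, not the msm_actuator API.

	/* Illustrative sketch only. */
	#define MAX_TOTAL_STEPS	1024

	static int move_to_step(const unsigned short *step_table,
				unsigned int table_len, unsigned int dest_step)
	{
		if (!step_table || table_len > MAX_TOTAL_STEPS)
			return -1;		/* reject oversized tables */
		if (dest_step >= table_len)
			return -1;		/* reject out-of-range positions */
		return step_table[dest_step];	/* lookup is now safe */
	}
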
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
index 772b12e..b0d9430 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
@@ -18,10 +18,15 @@
 #include <media/v4l2-subdev.h>
 #include <media/msmb_camera.h>
 #include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+
 
 #define DEFINE_MSM_MUTEX(mutexname) \
 	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
 
+#define	MSM_ACTUATOT_MAX_VREGS (10)
+
 struct msm_actuator_ctrl_t;
 
 struct msm_actuator_func_tbl {
@@ -52,6 +57,12 @@
 	struct msm_actuator_func_tbl func_tbl;
 };
 
+struct msm_actuator_vreg {
+	struct camera_vreg_t *cam_vreg;
+	void *data[MSM_ACTUATOT_MAX_VREGS];
+	int num_vreg;
+};
+
 struct msm_actuator_ctrl_t {
 	struct i2c_driver *i2c_driver;
 	struct platform_driver *pdriver;
@@ -83,6 +94,7 @@
 	uint16_t i2c_tbl_index;
 	enum cci_i2c_master_t cci_master;
 	uint32_t subdev_id;
+	struct msm_actuator_vreg vreg_cfg;
 };
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 1ca85bd..a563f68 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -342,12 +342,6 @@
 	case HAL_EXTRADATA_RECOVERY_POINT_SEI:
 		ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
 		break;
-	case HAL_EXTRADATA_CLOSED_CAPTION_UD:
-		ret = HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_AFD_UD:
-		ret = HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA;
-		break;
 	case HAL_EXTRADATA_MULTISLICE_INFO:
 		ret = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO;
 		break;
@@ -372,6 +366,9 @@
 	case HAL_EXTRADATA_METADATA_MBI:
 		ret = HFI_PROPERTY_PARAM_VENC_MBI_DUMPING;
 		break;
+	case HAL_EXTRADATA_STREAM_USERDATA:
+		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
+		break;
 	default:
 		dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
 		break;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index f4ad985..c6fb382 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -49,7 +49,7 @@
 		vidc_err = VIDC_ERR_NOT_SUPPORTED;
 		break;
 	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
-		vidc_err = VIDC_ERR_MAX_CLIENT;
+		vidc_err = VIDC_ERR_MAX_CLIENTS;
 		break;
 	case HFI_ERR_SYS_SESSION_IN_USE:
 		vidc_err = VIDC_ERR_CLIENT_PRESENT;
@@ -75,6 +75,8 @@
 		vidc_err = VIDC_ERR_FAIL;
 		break;
 	}
+	if (vidc_err != HFI_ERR_NONE)
+		dprintk(VIDC_ERR, "HFI Error: %d\n", vidc_err);
 	return vidc_err;
 }
 
@@ -108,7 +110,8 @@
 	struct hfi_frame_size frame_sz;
 	u8 *data_ptr;
 	int prop_id;
-	dprintk(VIDC_DBG, "RECEIVED:EVENT_NOTIFY");
+	dprintk(VIDC_DBG, "RECEIVED: EVENT_NOTIFY[%u]: %d, 0x%x\n",
+		pkt->session_id, pkt->event_data1, pkt->event_data2);
 	if (sizeof(struct hfi_msg_event_notify_packet)
 		> pkt->size) {
 		dprintk(VIDC_ERR, "hal_process_session_init_done:bad_pkt_size");
@@ -248,27 +251,33 @@
 		hfi_process_sys_error(callback, device_id);
 		break;
 	case HFI_EVENT_SESSION_ERROR:
-		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR");
+		dprintk(VIDC_INFO,
+			"HFI_EVENT_SESSION_ERROR[%u]\n", pkt->session_id);
 		if (!validate_session_pkt(sessions, sess, session_lock))
 			hfi_process_session_error(callback, device_id, pkt);
 		break;
 	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
-		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED");
+		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED[%u]\n",
+			pkt->session_id);
 		if (!validate_session_pkt(sessions, sess, session_lock))
 			hfi_process_sess_evt_seq_changed(callback,
 				device_id, pkt);
 		break;
 	case HFI_EVENT_SESSION_PROPERTY_CHANGED:
-		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_PROPERTY_CHANGED");
+		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_PROPERTY_CHANGED[%u]\n",
+			pkt->session_id);
 		break;
 	case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
-		dprintk(VIDC_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE\n");
+		dprintk(VIDC_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE[%u]\n",
+			pkt->session_id);
 		if (!validate_session_pkt(sessions, sess, session_lock))
 			hfi_process_evt_release_buffer_ref(callback,
 				device_id, pkt);
 		break;
 	default:
-		dprintk(VIDC_WARN, "hal_process_event_notify:unkown_event_id");
+		dprintk(VIDC_WARN,
+			"hal_process_event_notify: unknown_event_id[%u]\n",
+			pkt->session_id);
 		break;
 	}
 }
@@ -758,7 +767,8 @@
 	struct msm_vidc_cb_cmd_done cmd_done;
 	struct buffer_requirements buff_req;
 
-	dprintk(VIDC_DBG, "Received SESSION_PROPERTY_INFO");
+	dprintk(VIDC_DBG, "Received SESSION_PROPERTY_INFO[%u]\n",
+		pkt->session_id);
 
 	if (pkt->size < sizeof(struct hfi_msg_session_property_info_packet)) {
 		dprintk(VIDC_ERR, "hal_process_session_prop_info:bad_pkt_size");
@@ -800,7 +810,8 @@
 	struct msm_vidc_cb_cmd_done cmd_done;
 	struct vidc_hal_session_init_done session_init_done;
 	struct hal_session *sess_close = NULL;
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_INIT_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_INIT_DONE[%u]\n",
+		pkt->session_id);
 	if (sizeof(struct hfi_msg_sys_session_init_done_packet)
 		> pkt->size) {
 		dprintk(VIDC_ERR, "hal_process_session_init_done:bad_pkt_size");
@@ -839,7 +850,8 @@
 		struct hfi_msg_session_load_resources_done_packet *pkt)
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_LOAD_RESOURCES_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_LOAD_RESOURCES_DONE[%u]\n",
+		pkt->session_id);
 
 	if (sizeof(struct hfi_msg_session_load_resources_done_packet) !=
 		pkt->size) {
@@ -865,7 +877,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_FLUSH_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_FLUSH_DONE[%u]\n",
+		pkt->session_id);
 
 	if (sizeof(struct hfi_msg_session_flush_done_packet) != pkt->size) {
 		dprintk(VIDC_ERR, "hal_process_session_flush_done: "
@@ -889,7 +902,8 @@
 {
 	struct msm_vidc_cb_data_done data_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_ETB_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_ETB_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size <
 		sizeof(struct hfi_msg_session_empty_buffer_done_packet)) {
@@ -930,7 +944,8 @@
 
 	session = (struct hal_session *)
 		((struct hal_session *)	pack->session_id)->session_id;
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_FTB_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_FTB_DONE[%u]\n",
+		pack->session_id);
 
 	memset(&data_done, 0, sizeof(struct msm_vidc_cb_data_done));
 
@@ -1028,7 +1043,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_START_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_START_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_session_start_done_packet)) {
@@ -1053,7 +1069,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_STOP_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_STOP_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_session_stop_done_packet)) {
@@ -1078,7 +1095,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_RELEASE_RESOURCES_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_RELEASE_RESOURCES_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_session_release_resources_done_packet)) {
@@ -1102,6 +1120,8 @@
 		struct hfi_msg_session_release_buffers_done_packet *pkt)
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_RELEASE_BUFFER_DONE[%u]\n",
+		pkt->session_id);
 	if (!pkt || pkt->size !=
 		sizeof(struct
 			   hfi_msg_session_release_buffers_done_packet)) {
@@ -1129,7 +1149,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_END_DONE");
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_END_DONE[%u]\n",
+		pkt->session_id);
 
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_sys_session_end_done_packet)) {
@@ -1154,8 +1175,8 @@
 {
 	struct msm_vidc_cb_cmd_done cmd_done;
 
-	dprintk(VIDC_DBG, "RECEIVED:SESSION_ABORT_DONE");
-
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_ABORT_DONE[%u]\n",
+		pkt->session_id);
 	if (!pkt || pkt->size !=
 		sizeof(struct hfi_msg_sys_session_abort_done_packet)) {
 		dprintk(VIDC_ERR, "%s: bad packet/packet size: %d",
@@ -1184,6 +1205,8 @@
 		dprintk(VIDC_ERR, "bad packet/packet size: %d", pkt->size);
 		return;
 	}
+	dprintk(VIDC_DBG, "RECEIVED: SESSION_GET_SEQ_HDR_DONE[%u]\n",
+		pkt->session_id);
 	memset(&data_done, 0, sizeof(struct msm_vidc_cb_data_done));
 	data_done.device_id = device_id;
 	data_done.size = sizeof(struct msm_vidc_cb_data_done);
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 176c612..c638415 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -319,7 +319,6 @@
 	}
 
 	INIT_LIST_HEAD(&core->instances);
-	mutex_init(&core->sync_lock);
 	mutex_init(&core->lock);
 
 	core->state = VIDC_CORE_UNINIT;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 8a0dfc2..f84a806 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -211,7 +211,7 @@
 		.name = "Extradata Type",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA,
 		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
 		.menu_skip_mask = ~(
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -224,8 +224,6 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
@@ -234,7 +232,8 @@
 			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 		.step = 0,
@@ -871,9 +870,8 @@
 		dprintk(VIDC_PROF, "reported fps changed for %p: %d->%d\n",
 				inst, inst->prop.fps, fps);
 		inst->prop.fps = fps;
-		mutex_lock(&inst->core->sync_lock);
+
 		msm_comm_scale_clocks_and_bus(inst);
-		mutex_unlock(&inst->core->sync_lock);
 	}
 exit:
 	return rc;
@@ -1229,9 +1227,8 @@
 			goto fail_start;
 		}
 	}
-	mutex_lock(&inst->core->sync_lock);
+
 	msm_comm_scale_clocks_and_bus(inst);
-	mutex_unlock(&inst->core->sync_lock);
 
 	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
 	if (rc) {
@@ -1338,9 +1335,7 @@
 		break;
 	}
 
-	mutex_lock(&inst->core->sync_lock);
 	msm_comm_scale_clocks_and_bus(inst);
-	mutex_unlock(&inst->core->sync_lock);
 
 	if (rc)
 		dprintk(VIDC_ERR,
@@ -1890,6 +1885,11 @@
 
 int msm_vdec_ctrl_deinit(struct msm_vidc_inst *inst)
 {
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
 	kfree(inst->ctrls);
 	kfree(inst->cluster);
 	v4l2_ctrl_handler_free(&inst->ctrl_handler);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 1ba5fcc..bcd13b8 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -157,7 +157,7 @@
 		.name = "IDR Period",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 1,
-		.maximum = 10*MAX_FRAME_RATE,
+		.maximum = INT_MAX,
 		.default_value = DEFAULT_FRAME_RATE,
 		.step = 1,
 		.menu_skip_mask = 0,
@@ -168,7 +168,7 @@
 		.name = "Intra Period",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 1,
-		.maximum = 10*MAX_FRAME_RATE,
+		.maximum = INT_MAX,
 		.default_value = DEFAULT_FRAME_RATE,
 		.step = 1,
 		.menu_skip_mask = 0,
@@ -657,8 +657,6 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
@@ -909,9 +907,23 @@
 	}
 	hdev = inst->core->device;
 
+	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to open instance\n");
+		return rc;
+	}
+
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+		return rc;
+	}
+
 	switch (q->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 		*num_planes = 1;
+
 		buff_req = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
 		if (buff_req) {
 			*num_buffers = buff_req->buffer_count_actual =
@@ -939,9 +951,18 @@
 		inst->fmts[CAPTURE_PORT]->num_planes = *num_planes;
 
 		for (i = 0; i < *num_planes; i++) {
+			int extra_idx = EXTRADATA_IDX(*num_planes);
+
 			sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
 					i, inst->prop.height[CAPTURE_PORT],
 					inst->prop.width[CAPTURE_PORT]);
+
+			if (extra_idx && i == extra_idx &&
+					extra_idx < VIDEO_MAX_PLANES) {
+				buff_req = get_buff_req_buffer(inst,
+						HAL_BUFFER_EXTRADATA_OUTPUT);
+				sizes[i] = buff_req->buffer_size;
+			}
 		}
 
 		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
@@ -951,26 +972,18 @@
 			property_id, &new_buf_count);
 		break;
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
-		if (rc) {
-			dprintk(VIDC_ERR, "Failed to open instance\n");
-			break;
-		}
-		rc = msm_comm_try_get_bufreqs(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to get buffer requirements: %d\n", rc);
-			break;
-		}
 		*num_planes = 1;
+
 		mutex_lock(&inst->lock);
 		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual =
 			max(*num_buffers, inst->buff_req.buffer[0].
 				buffer_count_actual);
 		mutex_unlock(&inst->lock);
+
 		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
 		new_buf_count.buffer_type = HAL_BUFFER_INPUT;
 		new_buf_count.buffer_count_actual = *num_buffers;
+
 		rc = call_hfi_op(hdev, session_set_property, inst->session,
 					property_id, &new_buf_count);
 		dprintk(VIDC_DBG, "size = %d, alignment = %d, count = %d\n",
@@ -982,6 +995,7 @@
 					i, inst->prop.height[OUTPUT_PORT],
 					inst->prop.width[OUTPUT_PORT]);
 		}
+
 		break;
 	default:
 		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
@@ -1062,9 +1076,7 @@
 		goto fail_start;
 	}
 
-	mutex_lock(&inst->core->sync_lock);
 	msm_comm_scale_clocks_and_bus(inst);
-	mutex_unlock(&inst->core->sync_lock);
 
 	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
 	if (rc) {
@@ -1141,9 +1153,7 @@
 		break;
 	}
 
-	mutex_lock(&inst->core->sync_lock);
 	msm_comm_scale_clocks_and_bus(inst);
-	mutex_unlock(&inst->core->sync_lock);
 
 	if (rc)
 		dprintk(VIDC_ERR,
@@ -1279,7 +1289,7 @@
 		default:
 			goto unknown_value;
 		}
-		/* H263 */
+	/* H263 */
 	case V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE:
 		switch (value) {
 		case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE:
@@ -1372,6 +1382,17 @@
 		default:
 			goto unknown_value;
 		}
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+		switch (value) {
+		case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
+			return HAL_H264_DB_MODE_DISABLE;
+		case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
+			return HAL_H264_DB_MODE_ALL_BOUNDARY;
+		case L_MODE:
+			return HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+		default:
+			goto unknown_value;
+		}
 	}
 
 unknown_value:
@@ -1980,23 +2001,58 @@
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
-		property_id =
-			HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
-		h264_db_control.mode = ctrl->val;
+	{
+		struct v4l2_ctrl *alpha, *beta;
+
+		alpha = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
+		beta = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
+
+		property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+		h264_db_control.slice_alpha_offset = alpha->val;
+		h264_db_control.slice_beta_offset = beta->val;
+		h264_db_control.mode = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+				ctrl->val);
 		pdata = &h264_db_control;
 		break;
+	}
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
-		property_id =
-			HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+	{
+		struct v4l2_ctrl *mode, *beta;
+
+		mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
+		beta = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
+
+		property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
 		h264_db_control.slice_alpha_offset = ctrl->val;
+		h264_db_control.slice_beta_offset = beta->val;
+		h264_db_control.mode = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+				mode->val);
 		pdata = &h264_db_control;
 		break;
+	}
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
-		property_id =
-			HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+	{
+		struct v4l2_ctrl *mode, *alpha;
+
+		mode = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
+		alpha = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
+		property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+		h264_db_control.slice_alpha_offset = alpha->val;
 		h264_db_control.slice_beta_offset = ctrl->val;
+		h264_db_control.mode = venc_v4l2_to_hal(
+				V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+				mode->val);
 		pdata = &h264_db_control;
 		break;
+	}
 	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
 		property_id =
 			HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER;
@@ -2498,9 +2554,8 @@
 			dprintk(VIDC_WARN,
 				"Failed to set frame rate %d\n", rc);
 		}
-		mutex_lock(&inst->core->sync_lock);
+
 		msm_comm_scale_clocks_and_bus(inst);
-		mutex_unlock(&inst->core->sync_lock);
 	}
 exit:
 	return rc;
@@ -3042,6 +3097,11 @@
 
 int msm_venc_ctrl_deinit(struct msm_vidc_inst *inst)
 {
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
 	kfree(inst->ctrls);
 	kfree(inst->cluster);
 	v4l2_ctrl_handler_free(&inst->ctrl_handler);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index bfa9025..cfc2eb8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1276,9 +1276,9 @@
 
 	setup_event_queue(inst, &core->vdev[session_type].vdev);
 
-	mutex_lock(&core->sync_lock);
+	mutex_lock(&core->lock);
 	list_add_tail(&inst->list, &core->instances);
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 	return inst;
 fail_init:
 	vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
@@ -1373,13 +1373,13 @@
 	}
 
 	core = inst->core;
-	mutex_lock(&core->sync_lock);
+	mutex_lock(&core->lock);
 	list_for_each_safe(ptr, next, &core->instances) {
 		temp = list_entry(ptr, struct msm_vidc_inst, list);
 		if (temp == inst)
 			list_del(&inst->list);
 	}
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 
 	if (inst->session_type == MSM_VIDC_DECODER)
 		msm_vdec_ctrl_deinit(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index d6cfbb3..4879d22 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -51,9 +51,10 @@
 		enum session_type type)
 {
 	struct msm_vidc_inst *inst = NULL;
+	bool wants_turbo = false;
 
+	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
-		bool wants_turbo = false;
 
 		mutex_lock(&inst->lock);
 		if (inst->session_type == type &&
@@ -64,10 +65,12 @@
 		mutex_unlock(&inst->lock);
 
 		if (wants_turbo)
-			return true;
+			break;
 	}
 
-	return false;
+	mutex_unlock(&core->lock);
+
+	return wants_turbo;
 }
 
 static bool is_thumbnail_session(struct msm_vidc_inst *inst)
@@ -117,6 +120,7 @@
 		dprintk(VIDC_ERR, "Invalid args: %p\n", core);
 		return -EINVAL;
 	}
+	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 		mutex_lock(&inst->lock);
 		if (inst->session_type == type &&
@@ -128,6 +132,7 @@
 		}
 		mutex_unlock(&inst->lock);
 	}
+	mutex_unlock(&core->lock);
 	return num_mbs_per_sec;
 }
 
@@ -391,8 +396,9 @@
 		&inst->completions[SESSION_MSG_INDEX(cmd)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "Wait interrupted or timeout: %d\n",
-				SESSION_MSG_INDEX(cmd));
+		dprintk(VIDC_ERR,
+			"%s: Wait interrupted or timeout[%u]: %d\n",
+			__func__, (u32)inst->session, SESSION_MSG_INDEX(cmd));
 		msm_comm_recover_from_session_error(inst);
 		rc = -EIO;
 	} else {
@@ -440,6 +446,21 @@
 	mutex_unlock(&inst->lock);
 }
 
+static void msm_comm_generate_max_clients_error(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	mutex_lock(&inst->sync_lock);
+	inst->session = NULL;
+	inst->state = MSM_VIDC_CORE_INVALID;
+	msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_MAX_CLIENTS);
+	dprintk(VIDC_WARN,
+			"%s: Too many clients\n", __func__);
+	mutex_unlock(&inst->sync_lock);
+}
+
 static void handle_session_init_done(enum command_response cmd, void *data)
 {
 	struct msm_vidc_cb_cmd_done *response = data;
@@ -477,7 +498,10 @@
 			dprintk(VIDC_ERR,
 				"Session init response from FW : 0x%x",
 				response->status);
-			msm_comm_generate_session_error(inst);
+			if (response->status == VIDC_ERR_MAX_CLIENTS)
+				msm_comm_generate_max_clients_error(inst);
+			else
+				msm_comm_generate_session_error(inst);
 		}
 		signal_session_msg_receipt(cmd, inst);
 	} else {
@@ -850,13 +874,12 @@
 	core = handler->core;
 	hdev = core->device;
 
-	mutex_lock(&core->sync_lock);
+	mutex_lock(&core->lock);
 	/*
 	* Restart the firmware to bring out of bad state.
 	*/
 	if ((core->state == VIDC_CORE_INVALID) &&
 		hdev->resurrect_fw) {
-		mutex_lock(&core->lock);
 		rc = call_hfi_op(hdev, resurrect_fw,
 				hdev->hfi_device_data);
 		if (rc) {
@@ -865,12 +888,11 @@
 				__func__, rc);
 		}
 		core->state = VIDC_CORE_LOADED;
-		mutex_unlock(&core->lock);
 	} else {
 		dprintk(VIDC_DBG,
 			"fw unloaded after sys error, no need to resurrect\n");
 	}
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 
 exit:
 	/* free sys error handler, allocated in handle_sys_err */
@@ -903,13 +925,11 @@
 	dprintk(VIDC_WARN, "SYS_ERROR %d received for core %p\n", cmd, core);
 	mutex_lock(&core->lock);
 	core->state = VIDC_CORE_INVALID;
-	mutex_unlock(&core->lock);
 
 	/*
 	* 1. Delete each instance session from hfi list
 	* 2. Notify all clients about hardware error.
 	*/
-	mutex_lock(&core->sync_lock);
 	list_for_each_entry(inst, &core->instances,
 			list) {
 		mutex_lock(&inst->lock);
@@ -931,7 +951,7 @@
 		msm_vidc_queue_v4l2_event(inst,
 				V4L2_EVENT_MSM_VIDC_SYS_ERROR);
 	}
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 
 
 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
@@ -1573,8 +1593,8 @@
 		&core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "Wait interrupted or timeout: %d\n",
-				SYS_MSG_INDEX(RELEASE_RESOURCE_DONE));
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout: %d\n",
+			__func__, SYS_MSG_INDEX(RELEASE_RESOURCE_DONE));
 		rc = -EIO;
 	}
 release_ocmem_failed:
@@ -1585,7 +1605,7 @@
 {
 	struct msm_vidc_core *core = inst->core;
 	int rc = 0;
-	mutex_lock(&core->sync_lock);
+	mutex_lock(&core->lock);
 	if (core->state >= VIDC_CORE_INIT_DONE) {
 		dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n",
 				core->id, core->state);
@@ -1596,21 +1616,19 @@
 		&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "Wait interrupted or timeout: %d\n",
-				SYS_MSG_INDEX(SYS_INIT_DONE));
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout: %d\n",
+			__func__, SYS_MSG_INDEX(SYS_INIT_DONE));
 		rc = -EIO;
 		goto exit;
 	} else {
-		mutex_lock(&core->lock);
 		core->state = VIDC_CORE_INIT_DONE;
-		mutex_unlock(&core->lock);
 	}
 	dprintk(VIDC_DBG, "SYS_INIT_DONE!!!\n");
 core_already_inited:
 	change_inst_state(inst, MSM_VIDC_CORE_INIT_DONE);
 	rc = 0;
 exit:
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 	return rc;
 }
 
@@ -1624,12 +1642,13 @@
 		return -EINVAL;
 	hdev = core->device;
 
-	mutex_lock(&core->sync_lock);
+	mutex_lock(&core->lock);
 	if (core->state >= VIDC_CORE_INIT) {
 		dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n",
 				core->id, core->state);
 		goto core_already_inited;
 	}
+	mutex_unlock(&core->lock);
 
 	rc = msm_comm_scale_bus(core, inst->session_type, DDR_MEM);
 	if (rc) {
@@ -1637,13 +1656,16 @@
 		goto fail_scale_bus;
 	}
 
+	mutex_lock(&core->lock);
 	if (core->state < VIDC_CORE_LOADED) {
 		rc = call_hfi_op(hdev, load_fw, hdev->hfi_device_data);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to load video firmware\n");
 			goto fail_load_fw;
 		}
+		core->state = VIDC_CORE_LOADED;
 	}
+	mutex_unlock(&core->lock);
 
 	rc = msm_comm_scale_clocks(core);
 	if (rc) {
@@ -1651,25 +1673,29 @@
 		goto fail_core_init;
 	}
 
-	init_completion(&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)]);
-	rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to init core, id = %d\n", core->id);
-		goto fail_core_init;
-	}
 	mutex_lock(&core->lock);
-	core->state = VIDC_CORE_INIT;
-	mutex_unlock(&core->lock);
+	if (core->state == VIDC_CORE_LOADED) {
+		init_completion(&core->completions
+			[SYS_MSG_INDEX(SYS_INIT_DONE)]);
+		rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to init core, id = %d\n",
+					core->id);
+			goto fail_core_init;
+		}
+		core->state = VIDC_CORE_INIT;
+	}
+
 core_already_inited:
 	change_inst_state(inst, MSM_VIDC_CORE_INIT);
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 	return rc;
 fail_core_init:
 	call_hfi_op(hdev, unload_fw, hdev->hfi_device_data);
 fail_load_fw:
 	msm_comm_unvote_buses(core, DDR_MEM);
 fail_scale_bus:
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 	return rc;
 }
 
@@ -1687,14 +1713,17 @@
 	core = inst->core;
 	hdev = core->device;
 
-	mutex_lock(&core->sync_lock);
+	mutex_lock(&core->lock);
 	if (core->state == VIDC_CORE_UNINIT) {
 		dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n",
 				core->id, core->state);
 		goto core_already_uninited;
 	}
+	mutex_unlock(&core->lock);
 
 	msm_comm_scale_clocks_and_bus(inst);
+
+	mutex_lock(&core->lock);
 	if (list_empty(&core->instances)) {
 		if (core->state > VIDC_CORE_INIT) {
 			if (core->resources.has_ocmem) {
@@ -1713,9 +1742,9 @@
 				goto exit;
 			}
 		}
-		mutex_lock(&core->lock);
+
 		core->state = VIDC_CORE_UNINIT;
-		mutex_unlock(&core->lock);
+
 		call_hfi_op(hdev, unload_fw, hdev->hfi_device_data);
 		if (core->resources.has_ocmem)
 			msm_comm_unvote_buses(core, DDR_MEM|OCMEM_MEM);
@@ -1726,7 +1755,7 @@
 core_already_uninited:
 	change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
 exit:
-	mutex_unlock(&core->sync_lock);
+	mutex_unlock(&core->lock);
 	return rc;
 }
 
@@ -1855,6 +1884,8 @@
 	struct msm_vidc_inst *temp;
 	dprintk(VIDC_ERR, "Running instances:\n");
 	dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s\n", "type", "w", "h", "fps");
+
+	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
 		mutex_lock(&temp->lock);
 		if (temp->state >= MSM_VIDC_OPEN_DONE &&
@@ -1867,6 +1898,7 @@
 		}
 		mutex_unlock(&temp->lock);
 	}
+	mutex_unlock(&core->lock);
 }
 
 static int msm_vidc_load_resources(int flipped_state,
@@ -1895,10 +1927,8 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&inst->core->sync_lock);
 	num_mbs_per_sec = msm_comm_get_load(inst->core, MSM_VIDC_DECODER);
 	num_mbs_per_sec += msm_comm_get_load(inst->core, MSM_VIDC_ENCODER);
-	mutex_unlock(&inst->core->sync_lock);
 
 	if (num_mbs_per_sec > inst->core->resources.max_load) {
 		dprintk(VIDC_ERR, "HW is overloaded, needed: %d max: %d\n",
@@ -1922,16 +1952,14 @@
 			inst->prop.width[OUTPUT_PORT]);
 		ocmem_sz = get_ocmem_requirement(
 			height, width);
-		mutex_lock(&inst->core->sync_lock);
 		rc = msm_comm_scale_bus(inst->core, inst->session_type,
 					OCMEM_MEM);
-		mutex_unlock(&inst->core->sync_lock);
 		if (!rc) {
-			mutex_lock(&inst->core->sync_lock);
+			mutex_lock(&inst->core->lock);
 			rc = call_hfi_op(hdev, alloc_ocmem,
 					hdev->hfi_device_data,
 					ocmem_sz);
-			mutex_unlock(&inst->core->sync_lock);
+			mutex_unlock(&inst->core->lock);
 			if (rc) {
 				dprintk(VIDC_WARN,
 				"Failed to allocate OCMEM. Performance will be impacted\n");
@@ -2543,13 +2571,6 @@
 			mutex_unlock(&inst->sync_lock);
 	} else {
 		int64_t time_usec = timeval_to_ns(&vb->v4l2_buf.timestamp);
-
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto err_no_mem;
-		}
 		do_div(time_usec, NSEC_PER_USEC);
 		memset(&frame_data, 0 , sizeof(struct vidc_frame_data));
 		frame_data.alloc_len = vb->v4l2_planes[0].length;
@@ -2693,7 +2714,8 @@
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
 		dprintk(VIDC_ERR,
-			"Wait interrupted or timeout: %d\n",
+			"%s: Wait interrupted or timeout[%u]: %d\n",
+			__func__, (u32)inst->session,
 			SESSION_MSG_INDEX(SESSION_PROPERTY_INFO));
 		inst->state = MSM_VIDC_CORE_INVALID;
 		msm_comm_recover_from_session_error(inst);
@@ -3240,12 +3262,6 @@
 	case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
 		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
 		break;
-	case V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD:
-		ret = HAL_EXTRADATA_CLOSED_CAPTION_UD;
-		break;
-	case V4L2_MPEG_VIDC_EXTRADATA_AFD_UD:
-		ret = HAL_EXTRADATA_AFD_UD;
-		break;
 	case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
 		ret = HAL_EXTRADATA_MULTISLICE_INFO;
 		break;
@@ -3273,6 +3289,9 @@
 	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
 		ret = HAL_EXTRADATA_METADATA_MBI;
 		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA:
+		ret = HAL_EXTRADATA_STREAM_USERDATA;
+		break;
 	default:
 		dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
 		break;
@@ -3336,20 +3355,16 @@
 	int num_mbs_per_sec = 0;
 
 	if (inst->state == MSM_VIDC_OPEN_DONE) {
-		mutex_lock(&inst->core->sync_lock);
 		num_mbs_per_sec = msm_comm_get_load(inst->core,
 			MSM_VIDC_DECODER);
 		num_mbs_per_sec += msm_comm_get_load(inst->core,
 			MSM_VIDC_ENCODER);
-		mutex_unlock(&inst->core->sync_lock);
 		if (num_mbs_per_sec > inst->core->resources.max_load) {
 			dprintk(VIDC_ERR,
 				"H/w is overloaded. needed: %d max: %d\n",
 				num_mbs_per_sec,
 				inst->core->resources.max_load);
-			mutex_lock(&inst->sync_lock);
 			msm_vidc_print_running_insts(inst->core);
-			mutex_unlock(&inst->sync_lock);
 			return -EINVAL;
 		}
 	}
@@ -3447,20 +3462,14 @@
 				capability->height.min);
 			rc = -ENOTSUPP;
 		}
-		if (msm_vp8_low_tier &&
-			inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
-			capability->width.max = DEFAULT_WIDTH;
-			capability->width.max = DEFAULT_HEIGHT;
-		}
-		if (!rc && (inst->prop.width[CAPTURE_PORT] >
-			capability->width.max)) {
-			dprintk(VIDC_ERR,
-				"Unsupported width = %u supported max width = %u\n",
-				inst->prop.width[CAPTURE_PORT],
-				capability->width.max);
-				rc = -ENOTSUPP;
-		}
 
+		if (!rc) {
+			rc = call_hfi_op(hdev, capability_check,
+					inst->fmts[OUTPUT_PORT]->fourcc,
+					inst->prop.width[CAPTURE_PORT],
+					&capability->width.max,
+					&capability->height.max);
+		}
 		if (!rc && (inst->prop.height[CAPTURE_PORT]
 			* inst->prop.width[CAPTURE_PORT] >
 			capability->width.max * capability->height.max)) {
@@ -3476,7 +3485,11 @@
 		mutex_lock(&inst->sync_lock);
 		inst->state = MSM_VIDC_CORE_INVALID;
 		mutex_unlock(&inst->sync_lock);
-		msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_SYS_ERROR);
+		msm_vidc_queue_v4l2_event(inst,
+					V4L2_EVENT_MSM_VIDC_HW_OVERLOAD);
+		dprintk(VIDC_WARN,
+			"%s: Hardware is overloaded\n", __func__);
+		wake_up(&inst->kernel_event_queue);
 	}
 	return rc;
 }
@@ -3526,8 +3539,9 @@
 		&inst->completions[SESSION_MSG_INDEX(SESSION_ABORT_DONE)],
 		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout: %d\n",
-			__func__, SESSION_MSG_INDEX(SESSION_ABORT_DONE));
+		dprintk(VIDC_ERR, "%s: Wait interrupted or timeout[%u]: %d\n",
+			__func__, (u32)inst->session,
+			SESSION_MSG_INDEX(SESSION_ABORT_DONE));
 		msm_comm_generate_sys_error(inst);
 	} else
 		change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
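
The msm_vidc changes above retire core->sync_lock and take the single core->lock around every traversal of core->instances (load calculation, turbo check, instance add/remove, error broadcast). A simplified, kernel-style sketch of that convention, with illustrative structure names, is:

	#include <linux/mutex.h>
	#include <linux/list.h>

	struct demo_core {
		struct mutex lock;		/* guards the instance list */
		struct list_head instances;
	};

	struct demo_inst {
		struct list_head list;
		int load;
	};

	static int demo_total_load(struct demo_core *core)
	{
		struct demo_inst *inst;
		int load = 0;

		mutex_lock(&core->lock);
		list_for_each_entry(inst, &core->instances, list)
			load += inst->load;	/* list is stable while held */
		mutex_unlock(&core->lock);

		return load;
	}
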
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 5a18265..475683c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -255,8 +255,8 @@
 		inst->session_type == MSM_VIDC_ENCODER ? "Encoder" : "Decoder");
 	write_str(&dbg_buf, "===============================\n");
 	write_str(&dbg_buf, "core: 0x%p\n", inst->core);
-	write_str(&dbg_buf, "height: %d\n", inst->prop.height);
-	write_str(&dbg_buf, "width: %d\n", inst->prop.width);
+	write_str(&dbg_buf, "height: %d\n", inst->prop.height[CAPTURE_PORT]);
+	write_str(&dbg_buf, "width: %d\n", inst->prop.width[CAPTURE_PORT]);
 	write_str(&dbg_buf, "fps: %d\n", inst->prop.fps);
 	write_str(&dbg_buf, "state: %d\n", inst->state);
 	write_str(&dbg_buf, "-----------Formats-------------\n");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index e9bf91d..1677e57 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -199,7 +199,7 @@
 
 struct msm_vidc_core {
 	struct list_head list;
-	struct mutex sync_lock, lock;
+	struct mutex lock;
 	int id;
 	void *device;
 	struct msm_video_device vdev[MSM_VIDC_MAX_DEVICES];
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
index 486d740..bbba29a 100644
--- a/drivers/media/platform/msm/vidc/q6_hfi.c
+++ b/drivers/media/platform/msm/vidc/q6_hfi.c
@@ -1318,6 +1318,24 @@
 	return rc;
 }
 
+int q6_hfi_capability_check(u32 fourcc, u32 width,
+				u32 *max_width, u32 *max_height)
+{
+	int rc = 0;
+	if (!max_width || !max_height) {
+		dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (width > *max_width) {
+		dprintk(VIDC_ERR,
+			"Unsupported width = %u supported max width = %u\n",
+			width, *max_width);
+		rc = -ENOTSUPP;
+	}
+	return rc;
+}
+
 static void q6_hfi_unload_fw(void *hfi_device_data)
 {
 	struct q6_hfi_device *device = hfi_device_data;
@@ -1372,6 +1390,7 @@
 	hdev->unset_ocmem = q6_hfi_unset_ocmem;
 	hdev->iommu_get_domain_partition = q6_hfi_iommu_get_domain_partition;
 	hdev->load_fw = q6_hfi_load_fw;
+	hdev->capability_check = q6_hfi_capability_check;
 	hdev->unload_fw = q6_hfi_unload_fw;
 	hdev->get_stride_scanline = q6_hfi_get_stride_scanline;
 }
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 37b1e72..30ee45d 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3739,45 +3739,34 @@
 
 int venus_hfi_get_core_capabilities(void)
 {
-	int i = 0, rc = 0, j = 0, venus_version_length = 0;
-	u32 smem_block_size = 0;
-	u8 *smem_table_ptr;
-	char version[256];
-	const u32 version_string_size = 128;
-	char venus_version[] = "VIDEO.VE.1.4";
-	u8 version_info[256];
-	const u32 smem_image_index_venus = 14 * 128;
-	/* Venus version is stored at 14th entry in smem table */
+	int rc = 0;
+	rc = HAL_VIDEO_ENCODER_ROTATION_CAPABILITY |
+		HAL_VIDEO_ENCODER_SCALING_CAPABILITY |
+		HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY |
+		HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY;
+	return rc;
+}
 
-	smem_table_ptr = smem_get_entry(SMEM_IMAGE_VERSION_TABLE,
-			&smem_block_size);
-	if (smem_table_ptr &&
-			((smem_image_index_venus + version_string_size) <=
-			smem_block_size)) {
-		memcpy(version_info, smem_table_ptr + smem_image_index_venus,
-				version_string_size);
-	} else {
-		dprintk(VIDC_ERR,
-			"%s: failed to read version info from smem table\n",
-			__func__);
+int venus_hfi_capability_check(u32 fourcc, u32 width,
+				u32 *max_width, u32 *max_height)
+{
+	int rc = 0;
+	if (!max_width || !max_height) {
+		dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
 		return -EINVAL;
 	}
 
-	while (version_info[i++] != 'V' && i < version_string_size)
-		;
+	if (msm_vp8_low_tier && fourcc == V4L2_PIX_FMT_VP8) {
+		*max_width = DEFAULT_WIDTH;
+		*max_height = DEFAULT_HEIGHT;
+	}
 
-	venus_version_length = strlen(venus_version);
-	for (i--, j = 0; i < version_string_size && j < venus_version_length;
-		i++)
-		version[j++] = version_info[i];
-	version[venus_version_length] = '\0';
-	dprintk(VIDC_DBG, "F/W version retrieved : %s\n", version);
-
-	if (strcmp((const char *)version, (const char *)venus_version) == 0)
-		rc = HAL_VIDEO_ENCODER_ROTATION_CAPABILITY |
-			HAL_VIDEO_ENCODER_SCALING_CAPABILITY |
-			HAL_VIDEO_ENCODER_DEINTERLACE_CAPABILITY |
-			HAL_VIDEO_DECODER_MULTI_STREAM_CAPABILITY;
+	if (width > *max_width) {
+		dprintk(VIDC_ERR,
+			"Unsupported width = %u supported max width = %u\n",
+			width, *max_width);
+		rc = -ENOTSUPP;
+	}
 	return rc;
 }
 
@@ -3943,6 +3932,7 @@
 	hdev->get_info = venus_hfi_get_info;
 	hdev->get_stride_scanline = venus_hfi_get_stride_scanline;
 	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
+	hdev->capability_check = venus_hfi_capability_check;
 	hdev->power_enable = venus_hfi_power_enable;
 }
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 75f583f..5566338 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -79,9 +79,8 @@
 #define HFI_EXTRADATA_FRAME_RATE			0x00000007
 #define HFI_EXTRADATA_PANSCAN_WINDOW		0x00000008
 #define HFI_EXTRADATA_RECOVERY_POINT_SEI	0x00000009
-#define HFI_EXTRADATA_CLOSED_CAPTION_UD		0x0000000A
-#define HFI_EXTRADATA_AFD_UD			0x0000000B
 #define HFI_EXTRADATA_MPEG2_SEQDISP		0x0000000D
+#define HFI_EXTRADATA_STREAM_USERDATA		0x0000000E
 #define HFI_EXTRADATA_FRAME_QP			0x0000000F
 #define HFI_EXTRADATA_FRAME_BITS_INFO		0x00000010
 #define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
@@ -190,10 +189,6 @@
 
 #define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY		\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00E)
-#define HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA	\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00F)
-#define HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA		\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x010)
 #define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA		\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
 #define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA		\
@@ -206,6 +201,8 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x015)
 #define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x016)
+#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA \
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x017)
 #define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x018)
 #define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA \
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index f0c57f1..62507a1 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -63,7 +63,7 @@
 	VIDC_ERR_BAD_HANDLE,
 	VIDC_ERR_NOT_SUPPORTED,
 	VIDC_ERR_BAD_STATE,
-	VIDC_ERR_MAX_CLIENT,
+	VIDC_ERR_MAX_CLIENTS,
 	VIDC_ERR_IFRAME_EXPECTED,
 	VIDC_ERR_HW_FATAL,
 	VIDC_ERR_BITSTREAM_ERR,
@@ -91,8 +91,6 @@
 	HAL_EXTRADATA_FRAME_RATE,
 	HAL_EXTRADATA_PANSCAN_WINDOW,
 	HAL_EXTRADATA_RECOVERY_POINT_SEI,
-	HAL_EXTRADATA_CLOSED_CAPTION_UD,
-	HAL_EXTRADATA_AFD_UD,
 	HAL_EXTRADATA_MULTISLICE_INFO,
 	HAL_EXTRADATA_INDEX,
 	HAL_EXTRADATA_NUM_CONCEALED_MB,
@@ -103,6 +101,7 @@
 	HAL_EXTRADATA_FRAME_BITS_INFO,
 	HAL_EXTRADATA_LTR_INFO,
 	HAL_EXTRADATA_METADATA_MBI,
+	HAL_EXTRADATA_STREAM_USERDATA,
 };
 
 enum hal_property {
@@ -1169,6 +1168,8 @@
 	int (*get_info) (void *dev, enum dev_info info);
 	int (*get_stride_scanline)(int color_fmt, int width,
 		int height,	int *stride, int *scanlines);
+	int (*capability_check)(u32 fourcc, u32 width,
+			u32 *max_width, u32 *max_height);
 	int (*session_clean)(void *sess);
 	int (*get_core_capabilities)(void);
 	int (*power_enable)(void *dev);
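
The capability check is wired in through the same function-pointer op table as the other HFI operations, so the q6 and venus targets can plug in different implementations. A minimal sketch of that pattern, with illustrative names rather than the vidc API, is:

	#include <linux/types.h>
	#include <linux/errno.h>

	struct demo_hfi_ops {
		int (*capability_check)(u32 fourcc, u32 width,
					u32 *max_width, u32 *max_height);
	};

	static int demo_capability_check(u32 fourcc, u32 width,
					 u32 *max_width, u32 *max_height)
	{
		if (!max_width || !max_height)
			return -EINVAL;
		if (width > *max_width)
			return -ENOTSUPP;	/* beyond the device limit */
		return 0;
	}

	static void demo_init_ops(struct demo_hfi_ops *ops)
	{
		ops->capability_check = demo_capability_check;	/* per-target hook */
	}
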
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 02441ec..07d3fde 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -3721,7 +3721,6 @@
 		}
 		saved_val = radio->mute_mode.hard_mute;
 		radio->mute_mode.hard_mute = ctrl->value;
-		radio->mute_mode.soft_mute = IOC_SFT_MUTE;
 		retval = hci_set_fm_mute_mode(
 				&radio->mute_mode,
 				radio->fm_hdev);
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index e5311ce..6cea218 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -513,13 +513,6 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * If the same number of buffers and memory access method is requested
-	 * then return immediately.
-	 */
-	if (q->memory == req->memory && req->count == q->num_buffers)
-		return 0;
-
 	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
 		/*
 		 * We already have buffers allocated, so first check if they
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index dbc7d5c..36bf643 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -698,11 +698,11 @@
 	struct mmc_context_info *context_info = &host->context_info;
 	bool pending_is_urgent = false;
 	bool is_urgent = false;
-	int err;
+	int err, ret;
 	unsigned long flags;
 
 	while (1) {
-		wait_io_event_interruptible(context_info->wait,
+		ret = wait_io_event_interruptible(context_info->wait,
 				(context_info->is_done_rcv ||
 				 context_info->is_new_req  ||
 				 context_info->is_urgent));
@@ -755,7 +755,7 @@
 				err = MMC_BLK_NEW_REQUEST;
 				break; /* return err */
 			}
-		} else {
+		} else if (context_info->is_urgent) {
 			/*
 			 * The case when block layer sent next urgent
 			 * notification before it receives end_io on
@@ -807,6 +807,11 @@
 				pending_is_urgent = true;
 				continue; /* wait for done/new/urgent event */
 			}
+		} else {
+			pr_warn("%s: mmc thread unblocked from waiting by signal, ret=%d\n",
+					mmc_hostname(host),
+					ret);
+			continue;
 		}
 	} /* while */
 	return err;
@@ -3234,7 +3239,8 @@
 	mmc_release_host(host);
 	mmc_rpm_release(host, &host->class_dev);
  out:
-	if (extend_wakelock)
+	/* only extend the wakelock if suspend has not started yet */
+	if (extend_wakelock && !host->rescan_disable)
 		wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
 
 	if (host->caps & MMC_CAP_NEEDS_POLL)
@@ -3619,16 +3625,15 @@
 			spin_unlock_irqrestore(&host->lock, flags);
 			break;
 		}
+
+		/* since it is suspending anyway, disable rescan */
+		host->rescan_disable = 1;
 		spin_unlock_irqrestore(&host->lock, flags);
 
 		/* Wait for pending detect work to be completed */
 		if (!(host->caps & MMC_CAP_NEEDS_POLL))
 			flush_work(&host->detect.work);
 
-		spin_lock_irqsave(&host->lock, flags);
-		host->rescan_disable = 1;
-		spin_unlock_irqrestore(&host->lock, flags);
-
 		/*
 		 * In some cases, the detect work might be scheduled
 		 * just before rescan_disable is set to true.
@@ -3636,6 +3641,13 @@
 		 */
 		cancel_delayed_work_sync(&host->detect);
 
+		/*
+		 * It is possible that the wake-lock has been acquired; since
+		 * the host is being suspended, release the wakelock
+		 */
+		if (wake_lock_active(&host->detect_wake_lock))
+			wake_unlock(&host->detect_wake_lock);
+
 		if (!host->bus_ops || host->bus_ops->suspend)
 			break;
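
The change above captures the return value of the interruptible wait so that a wake-up caused by a signal, rather than by one of the expected completion flags, is logged and the loop continues. A simplified sketch using the generic wait_event_interruptible() (the context structure below is illustrative):

	#include <linux/wait.h>
	#include <linux/printk.h>

	struct demo_ctx {
		wait_queue_head_t wait;
		bool done, new_req, urgent;
	};

	/* Returns 0 when a real event arrived, -ERESTARTSYS on a signal. */
	static int demo_wait_for_event(struct demo_ctx *ctx)
	{
		int ret;

		ret = wait_event_interruptible(ctx->wait,
				ctx->done || ctx->new_req || ctx->urgent);
		if (ret)
			pr_warn("wait interrupted by signal, ret=%d\n", ret);
		return ret;
	}
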
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c26c1bb..c1f366e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -66,6 +66,10 @@
 	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
 			      0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
 
+	/* Disable HPI feature for Kingston card */
+	MMC_FIXUP_EXT_CSD_REV("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+			add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+
 	/*
 	 * Some Hynix cards exhibit data corruption over reboots if cache is
 	 * enabled. Disable cache for all versions until a class of cards that
@@ -1260,8 +1264,7 @@
 		mmc_set_clock(host, (unsigned int) (*freq));
 	}
 
-	if ((mmc_card_hs400(card) || mmc_card_hs200(card))
-		&& card->host->ops->execute_tuning) {
+	if (mmc_card_hs200(card) && card->host->ops->execute_tuning) {
 		/*
 		 * We try to probe host driver for tuning for any
 		 * frequency, it is host driver responsibility to
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 179632c..783f512 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1399,7 +1399,11 @@
 	 */
 #ifdef CONFIG_MMC_PARANOID_SD_INIT
 	retries = 5;
-	while (retries) {
+	/*
+	 * Some bad cards may take a long time to initialize; give preference
+	 * to suspend in those cases.
+	 */
+	while (retries && !host->rescan_disable) {
 		err = mmc_sd_init_card(host, host->ocr, NULL);
 		if (err) {
 			retries--;
@@ -1417,6 +1421,9 @@
 		       mmc_hostname(host), err);
 		goto err;
 	}
+
+	if (host->rescan_disable)
+		goto err;
 #else
 	err = mmc_sd_init_card(host, host->ocr, NULL);
 	if (err)
@@ -1440,9 +1447,9 @@
 	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-
-	pr_err("%s: error %d whilst initialising SD card\n",
-		mmc_hostname(host), err);
+	if (err)
+		pr_err("%s: error %d whilst initialising SD card: rescan: %d\n",
+		       mmc_hostname(host), err, host->rescan_disable);
 
 	return err;
 }
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 074b6a8..2c96d3c 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -876,6 +876,14 @@
 		memset(data_buf, 0, size);
 		mmc_wait_for_req(mmc, &mrq);
 
+		/*
+		 * Wait for 146 MCLK cycles for the card to send out the data
+		 * and thus move to the TRANS state. As the MCLK is at least
+		 * 200MHz when tuning is performed, the maximum required delay
+		 * is 0.73us. To be on the safer side, a 1ms delay is given.
+		 */
+		if (cmd.error)
+			usleep_range(1000, 1200);
 		if (!cmd.error && !data.error &&
 			!memcmp(data_buf, tuning_block_pattern, size)) {
 			/* tuning is successful at this tuning point */
@@ -2897,7 +2905,6 @@
 	host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
 	host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
 	host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
-	host->quirks2 |= SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING;
 	host->quirks2 |= SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE;
 	host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
 	host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 32ff175..277aef5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2591,17 +2591,6 @@
 			host->cmd->error = -EILSEQ;
 	}
 
-	if (host->quirks2 & SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING) {
-		if ((host->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
-			(host->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
-			(host->cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
-			if (intmask & SDHCI_INT_CRC) {
-				sdhci_reset(host, SDHCI_RESET_CMD);
-				host->cmd->error = 0;
-			}
-		}
-	}
-
 	if (host->cmd->error) {
 		if (host->cmd->error == -EILSEQ)
 			host->flags |= SDHCI_NEEDS_RETUNING;
@@ -2631,17 +2620,6 @@
 		 * fall through and take the SDHCI_INT_RESPONSE */
 	}
 
-	if (host->quirks2 & SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING) {
-		if ((host->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
-			(host->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
-			(host->cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
-			if (intmask & SDHCI_INT_CRC) {
-				sdhci_finish_command(host);
-				return;
-			}
-		}
-	}
-
 	if (intmask & SDHCI_INT_RESPONSE)
 		sdhci_finish_command(host);
 }
@@ -2727,8 +2705,7 @@
 		host->data->error = -EIO;
 	}
 	if (host->data->error) {
-		if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT)) &&
-		    (host->quirks2 & SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING)) {
+		if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT)) {
 			command = SDHCI_GET_CMD(sdhci_readw(host,
 							    SDHCI_COMMAND));
 			if ((command != MMC_SEND_TUNING_BLOCK_HS400) &&
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index 4713d29..c066acd 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -51,6 +51,8 @@
 #define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP   BIT(5)
 #define WCNSS_PMU_CFG_IRIS_XO_CFG_STS      BIT(6) /* 1: in progress, 0: done */
 
+#define WCNSS_PMU_CFG_IRIS_RESET           BIT(7)
+#define WCNSS_PMU_CFG_IRIS_RESET_STS       BIT(8) /* 1: in progress, 0: done */
 #define WCNSS_PMU_CFG_IRIS_XO_READ         BIT(9)
 #define WCNSS_PMU_CFG_IRIS_XO_READ_STS     BIT(10)
 
@@ -258,6 +260,19 @@
 
 		writel_relaxed(reg, pmu_conf_reg);
 
+		/* Reset IRIS */
+		reg |= WCNSS_PMU_CFG_IRIS_RESET;
+		writel_relaxed(reg, pmu_conf_reg);
+
+		/* Wait for PMU_CFG.iris_reg_reset_sts */
+		while (readl_relaxed(pmu_conf_reg) &
+				WCNSS_PMU_CFG_IRIS_RESET_STS)
+			cpu_relax();
+
+		/* Reset iris reset bit */
+		reg &= ~WCNSS_PMU_CFG_IRIS_RESET;
+		writel_relaxed(reg, pmu_conf_reg);
+
 		/* Start IRIS XO configuration */
 		reg |= WCNSS_PMU_CFG_IRIS_XO_CFG;
 		writel_relaxed(reg, pmu_conf_reg);
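The IRIS reset added above is a three-step handshake on the PMU config register: assert WCNSS_PMU_CFG_IRIS_RESET, poll WCNSS_PMU_CFG_IRIS_RESET_STS until the hardware clears it, then de-assert the reset bit. A minimal sketch of the same sequence factored into a helper with a bounded poll (the helper name and retry limit are illustrative only, not part of this patch):

static int wcnss_iris_reset(void __iomem *pmu_conf_reg)
{
	u32 reg = readl_relaxed(pmu_conf_reg);
	int retries = 1000;		/* arbitrary illustrative bound */

	/* assert IRIS reset */
	reg |= WCNSS_PMU_CFG_IRIS_RESET;
	writel_relaxed(reg, pmu_conf_reg);

	/* hardware clears the status bit once the reset has completed */
	while ((readl_relaxed(pmu_conf_reg) & WCNSS_PMU_CFG_IRIS_RESET_STS) &&
			--retries)
		cpu_relax();

	/* de-assert IRIS reset */
	reg &= ~WCNSS_PMU_CFG_IRIS_RESET;
	writel_relaxed(reg, pmu_conf_reg);

	return retries ? 0 : -ETIMEDOUT;
}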
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index e03d5be..d06ec85 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -129,10 +129,10 @@
 #define WCNSS_TSTBUS_CTRL_WRFIFO	(0x04 << 1)
 #define WCNSS_TSTBUS_CTRL_RDFIFO	(0x05 << 1)
 #define WCNSS_TSTBUS_CTRL_CTRL		(0x07 << 1)
-#define WCNSS_TSTBUS_CTRL_AXIM_CFG0	(0x00 << 6)
-#define WCNSS_TSTBUS_CTRL_AXIM_CFG1	(0x01 << 6)
-#define WCNSS_TSTBUS_CTRL_CTRL_CFG0	(0x00 << 12)
-#define WCNSS_TSTBUS_CTRL_CTRL_CFG1	(0x01 << 12)
+#define WCNSS_TSTBUS_CTRL_AXIM_CFG0	(0x00 << 8)
+#define WCNSS_TSTBUS_CTRL_AXIM_CFG1	(0x01 << 8)
+#define WCNSS_TSTBUS_CTRL_CTRL_CFG0	(0x00 << 28)
+#define WCNSS_TSTBUS_CTRL_CTRL_CFG1	(0x01 << 28)
 
 #define MSM_PRONTO_CCPU_BASE			0xfb205050
 #define CCU_PRONTO_INVALID_ADDR_OFFSET		0x08
@@ -203,6 +203,7 @@
 #define	WCNSS_VBATT_LEVEL_IND         (WCNSS_CTRL_MSG_START + 8)
 #define	WCNSS_BUILD_VER_REQ           (WCNSS_CTRL_MSG_START + 9)
 #define	WCNSS_BUILD_VER_RSP           (WCNSS_CTRL_MSG_START + 10)
+#define	WCNSS_PM_CONFIG_REQ           (WCNSS_CTRL_MSG_START + 11)
 
 /* max 20mhz channel count */
 #define WCNSS_MAX_CH_NUM			45
@@ -377,6 +378,7 @@
 	struct delayed_work wcnss_work;
 	struct delayed_work vbatt_work;
 	struct work_struct wcnssctrl_version_work;
+	struct work_struct wcnss_pm_config_work;
 	struct work_struct wcnssctrl_nvbin_dnld_work;
 	struct work_struct wcnssctrl_rx_work;
 	struct wake_lock wcnss_wake_lock;
@@ -945,6 +947,7 @@
 		pr_debug("wcnss: opening WCNSS SMD channel :%s",
 				WCNSS_CTRL_CHANNEL);
 		schedule_work(&penv->wcnssctrl_version_work);
+		schedule_work(&penv->wcnss_pm_config_work);
 
 		break;
 
@@ -1800,6 +1803,52 @@
 	return;
 }
 
+static void wcnss_send_pm_config(struct work_struct *worker)
+{
+	struct smd_msg_hdr *hdr;
+	unsigned char *msg = NULL;
+	int rc, prop_len;
+	u32 *payload;
+
+	if (!of_find_property(penv->pdev->dev.of_node,
+				"qcom,wcnss-pm", &prop_len))
+		return;
+
+	msg = kmalloc((sizeof(struct smd_msg_hdr) + prop_len), GFP_KERNEL);
+
+	if (NULL == msg) {
+		pr_err("wcnss: %s: failed to allocate memory\n", __func__);
+		return;
+	}
+
+	payload = (u32 *)(msg + sizeof(struct smd_msg_hdr));
+
+	prop_len /= sizeof(int);
+
+	rc = of_property_read_u32_array(penv->pdev->dev.of_node,
+			"qcom,wcnss-pm", payload, prop_len);
+	if (rc < 0) {
+		pr_err("wcnss: property read failed\n");
+		kfree(msg);
+		return;
+	}
+
+	pr_debug("%s:size=%d: <%d, %d, %d, %d, %d>\n", __func__,
+			prop_len, *payload, *(payload+1), *(payload+2),
+			*(payload+3), *(payload+4));
+
+	hdr = (struct smd_msg_hdr *)msg;
+	hdr->msg_type = WCNSS_PM_CONFIG_REQ;
+	hdr->msg_len = sizeof(struct smd_msg_hdr) + prop_len;
+
+	rc = wcnss_smd_tx(msg, hdr->msg_len);
+	if (rc < 0)
+		pr_err("wcnss: smd tx failed\n");
+
+	kfree(msg);
+	return;
+}
+
 static DECLARE_RWSEM(wcnss_pm_sem);
 
 static void wcnss_nvbin_dnld(void)
@@ -2234,6 +2283,7 @@
 	}
 	INIT_WORK(&penv->wcnssctrl_rx_work, wcnssctrl_rx_handler);
 	INIT_WORK(&penv->wcnssctrl_version_work, wcnss_send_version_req);
+	INIT_WORK(&penv->wcnss_pm_config_work, wcnss_send_pm_config);
 	INIT_WORK(&penv->wcnssctrl_nvbin_dnld_work, wcnss_nvbin_dnld_main);
 
 	wake_lock_init(&penv->wcnss_wake_lock, WAKE_LOCK_SUSPEND, "wcnss");
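The wcnss_send_pm_config() handler added above forwards whatever u32 array sits in the qcom,wcnss-pm property to the firmware behind a WCNSS_PM_CONFIG_REQ header; its debug print assumes at least five cells. A hypothetical device-tree entry (the cell values are placeholders, not taken from any real board file):

	qcom,wcnss-pm = <11 21 1200 1 10>;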
diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
index 8cd4bd1..018c3a6 100644
--- a/drivers/nfc/nfc-nci.c
+++ b/drivers/nfc/nfc-nci.c
@@ -1169,10 +1169,7 @@
 	r = of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name);
 
 	if (strcmp(pdata->clk_src_name, "GPCLK2")) {
-		r = of_property_read_u32(np, "qcom,clk-gpio",
-					&pdata->clkreq_gpio);
-		if (r)
-			return -EINVAL;
+		pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,clk-gpio", 0);
 	}
 
 	if ((!strcmp(pdata->clk_src_name, "GPCLK")) ||
@@ -1186,6 +1183,7 @@
 			if ((!gpio_is_valid(pdata->irq_gpio_clk_req)))
 				return -EINVAL;
 	}
+
 	if (r)
 		return -EINVAL;
 	return r;
@@ -1196,7 +1194,6 @@
 {
 	int r = 0;
 	int irqn = 0;
-	struct device_node *node = client->dev.of_node;
 	struct qca199x_platform_data *platform_data;
 	struct qca199x_dev *qca199x_dev;
 
@@ -1348,9 +1345,6 @@
 	}
 
 	if (strcmp(platform_data->clk_src_name, "GPCLK2")) {
-		platform_data->clkreq_gpio =
-			of_get_named_gpio(node, "qcom,clk-gpio", 0);
-
 		if (gpio_is_valid(platform_data->clkreq_gpio)) {
 			r = gpio_request(platform_data->clkreq_gpio,
 				"nfc_clkreq_gpio");
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 0035349..b4c88cd 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -301,17 +301,9 @@
 		 */
 		ngd_slim_runtime_resume(dev->dev);
 	}
-	if ((txn->mc == (SLIM_MSG_CLK_PAUSE_SEQ_FLG |
-			SLIM_MSG_MC_RECONFIGURE_NOW)) &&
-			dev->state <= MSM_CTRL_IDLE) {
-		msm_slim_disconnect_endp(dev, &dev->rx_msgq,
-					&dev->use_rx_msgqs);
-		msm_slim_disconnect_endp(dev, &dev->tx_msgq,
-					&dev->use_tx_msgqs);
-		return msm_slim_qmi_power_request(dev, false);
-	}
+
 	else if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
-		return 0;
+		return -EPROTONOSUPPORT;
 
 	if (txn->mt == SLIM_MSG_MT_CORE &&
 		(txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
@@ -385,6 +377,7 @@
 	if (report_sat == false && dev->state != MSM_CTRL_AWAKE) {
 		SLIM_ERR(dev, "controller not ready\n");
 		mutex_unlock(&dev->tx_lock);
+		pm_runtime_set_suspended(dev->dev);
 		msm_slim_put_ctrl(dev);
 		return -EREMOTEIO;
 	}
@@ -1074,20 +1067,7 @@
 		ret = msm_slim_qmi_init(dev, false);
 		/* controller state should be in sync with framework state */
 		if (!ret) {
-			ret = slim_ctrl_clk_pause(&dev->ctrl, false,
-						SLIM_CLK_UNSPECIFIED);
 			complete(&dev->qmi.qmi_comp);
-			/*
-			 * Power-up won't be called if clock pause failed.
-			 * This can happen if ADSP SSR happened when audio
-			 * session is in progress. Framework will think that
-			 * clock pause failed so no need to wakeup controller.
-			 * Call power-up explicitly in that case, since slimbus
-			 * HW needs to be powered-on to be in sync with
-			 * framework state
-			 */
-			if (ret)
-				ngd_slim_power_up(dev, false);
 			if (!pm_runtime_enabled(dev->dev) ||
 					!pm_runtime_suspended(dev->dev))
 				ngd_slim_runtime_resume(dev->dev);
@@ -1105,10 +1085,25 @@
 	return ret;
 }
 
-static int ngd_clk_pause_wakeup(struct slim_controller *ctrl)
+static int ngd_slim_power_down(struct msm_slim_ctrl *dev)
 {
-	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
-	return ngd_slim_power_up(dev, false);
+	int i;
+	struct slim_controller *ctrl = &dev->ctrl;
+	mutex_lock(&ctrl->m_ctrl);
+	/* Pending response for a message */
+	for (i = 0; i < ctrl->last_tid; i++) {
+		if (ctrl->txnt[i]) {
+			SLIM_INFO(dev, "NGD down:txn-rsp for %d pending", i);
+			mutex_unlock(&ctrl->m_ctrl);
+			return -EBUSY;
+		}
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+				&dev->use_rx_msgqs);
+	msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+				&dev->use_tx_msgqs);
+	return msm_slim_qmi_power_request(dev, false);
 }
 
 static int ngd_slim_rx_msgq_thread(void *data)
@@ -1365,7 +1360,7 @@
 	dev->ctrl.allocbw = ngd_allocbw;
 	dev->ctrl.xfer_msg = ngd_xfer_msg;
 	dev->ctrl.xfer_user_msg = ngd_user_msg;
-	dev->ctrl.wakeup =  ngd_clk_pause_wakeup;
+	dev->ctrl.wakeup = NULL;
 	dev->ctrl.alloc_port = msm_alloc_port;
 	dev->ctrl.dealloc_port = msm_dealloc_port;
 	dev->ctrl.port_xfer = msm_slim_port_xfer;
@@ -1528,9 +1523,9 @@
 	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
 	int ret = 0;
 	if (dev->state >= MSM_CTRL_ASLEEP)
-		ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
+		ret = ngd_slim_power_up(dev, false);
 	if (ret) {
-		/* Did SSR cause this clock pause failure */
+		/* Did SSR cause this power up failure */
 		if (dev->state != MSM_CTRL_DOWN)
 			dev->state = MSM_CTRL_ASLEEP;
 		else
@@ -1548,10 +1543,10 @@
 	struct platform_device *pdev = to_platform_device(device);
 	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
 	int ret = 0;
-	ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
+	ret = ngd_slim_power_down(dev);
 	if (ret) {
 		if (ret != -EBUSY)
-			SLIM_INFO(dev, "clk pause not entered:%d\n", ret);
+			SLIM_INFO(dev, "slim resource not idle:%d\n", ret);
 		dev->state = MSM_CTRL_AWAKE;
 	} else {
 		dev->state = MSM_CTRL_ASLEEP;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index d670f8b..d8dc6f9 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -50,7 +50,7 @@
 
 static int msm_spi_pm_resume_runtime(struct device *device);
 static int msm_spi_pm_suspend_runtime(struct device *device);
-
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
 
 static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
 					struct platform_device *pdev)
@@ -135,6 +135,40 @@
 	}
 }
 
+static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+	int rc;
+
+	cs_num = dd->cur_msg->spi->chip_select;
+	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
+		(!(dd->cs_gpios[cs_num].valid)) &&
+		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
+		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
+				spi_cs_rsrcs[cs_num]);
+		if (rc) {
+			dev_err(dd->dev,
+				"gpio_request for pin %d failed,error %d\n",
+				dd->cs_gpios[cs_num].gpio_num, rc);
+			return rc;
+		}
+		dd->cs_gpios[cs_num].valid = 1;
+	}
+	return 0;
+}
+
+static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+
+	cs_num = dd->cur_msg->spi->chip_select;
+	if (dd->cs_gpios[cs_num].valid) {
+		gpio_free(dd->cs_gpios[cs_num].gpio_num);
+		dd->cs_gpios[cs_num].valid = 0;
+	}
+}
+
+
 /**
  * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
  * @clk the clock for which to find nearest lower rate
@@ -785,117 +819,190 @@
 	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
 }
 
+static int
+msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 prod_flags = 0;
+
+	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		prod_flags = (dd->write_buf)
+			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+
+	/*
+	 * enqueue read buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.prod.handle,
+			dd->cur_rx_transfer->rx_dma
+				+ dd->bam.curr_rx_bytes_recvd,
+			data_xfr_size, dd, prod_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue producer BAM transfer",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
+	*bytes_to_send -= data_xfr_size;
+	dd->bam.bam_rx_len -= data_xfr_size;
+
+	if (!(dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd)) {
+		struct spi_transfer *t = dd->cur_rx_transfer;
+		struct spi_transfer *next;
+		if (t->transfer_list.next != &dd->cur_msg->transfers) {
+			next = list_entry(t->transfer_list.next,
+					struct spi_transfer,
+					transfer_list);
+			dd->read_buf  = next->rx_buf;
+			dd->cur_rx_transfer = next;
+			dd->bam.curr_rx_bytes_recvd = 0;
+		}
+	}
+	return data_xfr_size;
+}
+
+static int
+msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 cons_flags = 0;
+
+	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+
+	/*
+	 * enqueue write buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.cons.handle,
+			dd->cur_tx_transfer->tx_dma
+				+ dd->bam.curr_tx_bytes_sent,
+			data_xfr_size, dd, cons_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue consumer BAM transfer",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_tx_bytes_sent	+= data_xfr_size;
+	*bytes_to_send	-= data_xfr_size;
+	dd->bam.bam_tx_len -= data_xfr_size;
+
+	if (!(dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent)) {
+		struct spi_transfer *t = dd->cur_tx_transfer;
+		struct spi_transfer *next;
+		if (t->transfer_list.next != &dd->cur_msg->transfers) {
+			next = list_entry(t->transfer_list.next,
+					struct spi_transfer,
+					transfer_list);
+			dd->write_buf = next->tx_buf;
+			dd->cur_tx_transfer = next;
+			dd->bam.curr_tx_bytes_sent = 0;
+		}
+	}
+	return data_xfr_size;
+}
+
+
 /**
  * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
  * using BAM.
  * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS byte at a single
  * transfer. Between transfer QUP must change to reset state. A loop is
- * issuing a single BAM transfer at a time. If another tsranfer is
- * required, it waits for the trasfer to finish, then moving to reset
- * state, and back to run state to issue the next transfer.
- * The function dose not wait for the last transfer to end, or if only
- * a single transfer is required, the function dose not wait for it to
- * end.
- * @timeout max time in jiffies to wait for a transfer to finish.
+ * issuing a single BAM transfer at a time.
  * @return zero on success
  */
 static int
-msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
+msm_spi_bam_begin_transfer(struct msm_spi *dd)
 {
-	u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
-	int ret;
-	/*
-	 * QUP must move to reset mode every 64K-1 bytes of transfer
-	 * (counter is 16 bit)
-	 */
-	if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
-		/* assert chip select unconditionally */
-		u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
-		if (!(spi_ioc & SPI_IO_C_FORCE_CS))
-			writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
-				dd->base + SPI_IO_CONTROL);
+	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
+	u32 n_words_xfr;
+	s32 ret = 0;
+	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 byte_count = 0;
+
+
+	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
+				dd->bytes_per_word);
+
+	msm_spi_set_mx_counts(dd, n_words_xfr);
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Failed to set QUP state to run",
+			__func__);
+		goto xfr_err;
 	}
 
-	/* Following flags are required since we are waiting on all transfers */
-	cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
-	/*
-	 * on a balanced transaction, BAM will set the flags on the producer
-	 * pipe based on the flags set on the consumer pipe
-	 */
-	prod_flags = (dd->write_buf) ? 0 : cons_flags;
-
-	while (dd->tx_bytes_remaining > 0) {
-		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
-		bytes_to_send = min_t(u32, dd->tx_bytes_remaining
-						, SPI_MAX_TRFR_BTWN_RESETS);
-		n_words_xfr = DIV_ROUND_UP(bytes_to_send
-						, dd->bytes_per_word);
-
-		msm_spi_set_mx_counts(dd, n_words_xfr);
-
-		ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
-		if (ret < 0) {
-			dev_err(dd->dev,
-				"%s: Failed to set QUP state to run",
-				__func__);
-			goto xfr_err;
+	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
+		((cons_desc_cnt + prod_desc_cnt) > 0)) {
+		if (dd->read_buf && (prod_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
+							prod_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+			prod_desc_cnt--;
 		}
 
-		/* enqueue read buffer in BAM */
-		if (dd->read_buf) {
-			ret = sps_transfer_one(dd->bam.prod.handle,
-				dd->cur_transfer->rx_dma + bytes_sent,
-				bytes_to_send, dd, prod_flags);
-			if (ret < 0) {
-				dev_err(dd->dev,
-				"%s: Failed to queue producer BAM transfer",
-				__func__);
+		if (dd->write_buf && (cons_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
+							cons_desc_cnt);
+			if (ret < 0)
 				goto xfr_err;
-			}
+			cons_desc_cnt--;
 		}
-
-		/* enqueue write buffer in BAM */
-		if (dd->write_buf) {
-			ret = sps_transfer_one(dd->bam.cons.handle,
-				dd->cur_transfer->tx_dma + bytes_sent,
-				bytes_to_send, dd, cons_flags);
-			if (ret < 0) {
-				dev_err(dd->dev,
-				"%s: Failed to queue consumer BAM transfer",
-				__func__);
-				goto xfr_err;
-			}
-		}
-
-		dd->tx_bytes_remaining -= bytes_to_send;
-
-		/* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
-		if (dd->tx_bytes_remaining > 0) {
-			if (!wait_for_completion_timeout(
-				&dd->transfer_complete, timeout)) {
-				dev_err(dd->dev,
-					"%s: SPI transaction timeout",
-					__func__);
-				dd->cur_msg->status = -EIO;
-				ret = -EIO;
-				goto xfr_err;
-			}
-			ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
-			if (ret < 0) {
-				dev_err(dd->dev,
-					"%s: Failed to set QUP state to reset",
-					__func__);
-				goto xfr_err;
-			}
-			init_completion(&dd->transfer_complete);
-		}
+		byte_count += ret;
 	}
+
+	dd->tx_bytes_remaining -= min_t(u32, byte_count,
+						SPI_MAX_TRFR_BTWN_RESETS);
 	return 0;
-
 xfr_err:
 	return ret;
 }
 
+static int
+msm_spi_bam_next_transfer(struct msm_spi *dd)
+{
+	if (dd->mode != SPI_BAM_MODE)
+		return 0;
+
+	if (dd->tx_bytes_remaining > 0) {
+		init_completion(&dd->transfer_complete);
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+			return 0;
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
 static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
 {
 	dmov_box *box;
@@ -1098,6 +1205,16 @@
 	return 0;
 }
 
+static int msm_spi_dma_send_next(struct msm_spi *dd)
+{
+	int ret = 0;
+	if (dd->mode == SPI_DMOV_MODE)
+		ret = msm_spi_dm_send_next(dd);
+	if (dd->mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_next_transfer(dd);
+	return ret;
+}
+
 static inline void msm_spi_ack_transfer(struct msm_spi *dd)
 {
 	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
@@ -1297,14 +1414,14 @@
 }
 
 /**
- * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
+ * msm_spi_dmov_map_buffers: prepares buffer for DMA transfer
  * @return zero on success or negative error code
  *
  * calls dma_map_single() on the read/write buffers, effectively invalidating
  * their cash entries. for For WR-WR and WR-RD transfers, allocates temporary
  * buffer and copy the data to/from the client buffers
  */
-static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+static int msm_spi_dmov_map_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
 	struct spi_transfer *first_xfr;
@@ -1323,7 +1440,7 @@
 	 * For WR-WR and WR-RD transfers, we allocate our own temporary
 	 * buffer and copy the data to/from the client buffers.
 	 */
-	if (dd->multi_xfr) {
+	if (!dd->qup_ver && dd->multi_xfr) {
 		dd->temp_buf = kzalloc(dd->cur_msg_len,
 				       GFP_KERNEL | __GFP_DMA);
 		if (!dd->temp_buf)
@@ -1384,6 +1501,70 @@
 	return ret;
 }
 
+static int msm_spi_bam_map_buffers(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+	struct device *dev;
+	struct spi_transfer *first_xfr;
+	struct spi_transfer *nxt_xfr;
+	void *tx_buf, *rx_buf;
+	u32 tx_len, rx_len;
+	int num_xfrs_grped = dd->num_xfrs_grped;
+
+	dev = &dd->cur_msg->spi->dev;
+	first_xfr = dd->cur_transfer;
+
+	do {
+		tx_buf = (void *)first_xfr->tx_buf;
+		rx_buf = first_xfr->rx_buf;
+		tx_len = rx_len = first_xfr->len;
+		if (tx_buf != NULL) {
+			first_xfr->tx_dma = dma_map_single(dev, tx_buf,
+							tx_len, DMA_TO_DEVICE);
+			if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
+				ret = -ENOMEM;
+				goto error;
+			}
+		}
+
+		if (rx_buf != NULL) {
+			first_xfr->rx_dma = dma_map_single(dev, rx_buf,	rx_len,
+							DMA_FROM_DEVICE);
+			if (dma_mapping_error(NULL, first_xfr->rx_dma)) {
+				if (tx_buf != NULL)
+					dma_unmap_single(NULL,
+							first_xfr->tx_dma,
+							tx_len, DMA_TO_DEVICE);
+				ret = -ENOMEM;
+				goto error;
+			}
+		}
+
+		nxt_xfr = list_entry(first_xfr->transfer_list.next,
+				struct spi_transfer, transfer_list);
+
+		if (nxt_xfr == NULL)
+			break;
+		num_xfrs_grped--;
+		first_xfr = nxt_xfr;
+	} while (num_xfrs_grped > 0);
+
+	return 0;
+error:
+	msm_spi_dma_unmap_buffers(dd);
+	return ret;
+}
+
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+{
+	int ret = 0;
+	if (dd->mode == SPI_DMOV_MODE)
+		ret = msm_spi_dmov_map_buffers(dd);
+	else if (dd->mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_map_buffers(dd);
+	return ret;
+}
+
 static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
@@ -1455,21 +1636,39 @@
 static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
+	int num_xfrs_grped = dd->num_xfrs_grped;
+	struct spi_transfer *first_xfr;
+	struct spi_transfer *nxt_xfr;
+	void *tx_buf, *rx_buf;
+	u32  tx_len, rx_len;
+
+	dev = &dd->cur_msg->spi->dev;
+	first_xfr = dd->cur_transfer;
 
 	 /* mapped by client */
 	if (dd->cur_msg->is_dma_mapped)
 		return;
 
-	dev = &dd->cur_msg->spi->dev;
-	if (dd->cur_transfer->rx_buf)
-		dma_unmap_single(dev, dd->cur_transfer->rx_dma,
-				dd->cur_transfer->len,
-				DMA_FROM_DEVICE);
+	do {
+		tx_buf = (void *)first_xfr->tx_buf;
+		rx_buf = first_xfr->rx_buf;
+		tx_len = rx_len = first_xfr->len;
+		if (tx_buf != NULL)
+			dma_unmap_single(dev, first_xfr->tx_dma,
+					tx_len, DMA_TO_DEVICE);
 
-	if (dd->cur_transfer->tx_buf)
-		dma_unmap_single(dev, dd->cur_transfer->tx_dma,
-				dd->cur_transfer->len,
-				DMA_TO_DEVICE);
+		if (rx_buf != NULL)
+			dma_unmap_single(dev, first_xfr->rx_dma,
+					rx_len, DMA_FROM_DEVICE);
+
+		nxt_xfr = list_entry(first_xfr->transfer_list.next,
+				struct spi_transfer, transfer_list);
+
+		if (nxt_xfr == NULL)
+			break;
+		num_xfrs_grped--;
+		first_xfr = nxt_xfr;
+	} while (num_xfrs_grped > 0);
 }
 
 static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
@@ -1506,7 +1705,8 @@
 	if (dd->cur_msg_len < 3*dd->input_block_size)
 		return false;
 
-	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
+	if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
+		dd->multi_xfr && !dd->read_len && !dd->write_len)
 		return false;
 
 	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
@@ -1688,11 +1888,17 @@
 
 	msm_spi_set_transfer_mode(dd, bpw, read_count);
 	msm_spi_set_mx_counts(dd, read_count);
-	if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
+	if (dd->mode == SPI_DMOV_MODE) {
 		if (msm_spi_dma_map_buffers(dd) < 0) {
 			pr_err("Mapping DMA buffers\n");
 			return;
+			}
+	} else if (dd->mode == SPI_BAM_MODE) {
+			if (msm_spi_dma_map_buffers(dd) < 0) {
+				pr_err("Mapping DMA buffers\n");
+				return;
 		}
+	}
 	msm_spi_set_qup_io_modes(dd);
 	msm_spi_set_spi_config(dd, bpw);
 	msm_spi_set_qup_config(dd, bpw);
@@ -1712,7 +1918,7 @@
 			goto transfer_end;
 		msm_spi_start_write(dd, read_count);
 	} else if (dd->mode == SPI_BAM_MODE) {
-		if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
+		if ((msm_spi_bam_begin_transfer(dd)) < 0)
 			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
 				__func__);
 	}
@@ -1749,9 +1955,10 @@
 					msm_spi_bam_flush(dd);
 				break;
 		}
-	} while (msm_spi_dm_send_next(dd));
+	} while (msm_spi_dma_send_next(dd));
 
-	msm_spi_udelay(dd->cur_transfer->delay_usecs);
+	msm_spi_udelay(dd->xfrs_delay_usec);
+
 transfer_end:
 	msm_spi_dma_unmap_buffers(dd);
 	dd->mode = SPI_MODE_NONE;
@@ -1803,26 +2010,6 @@
 		dd->multi_xfr = 1;
 }
 
-static inline int combine_transfers(struct msm_spi *dd)
-{
-	struct spi_transfer *t = dd->cur_transfer;
-	struct spi_transfer *nxt;
-	int xfrs_grped = 1;
-
-	dd->cur_msg_len = dd->cur_transfer->len;
-	while (t->transfer_list.next != &dd->cur_msg->transfers) {
-		nxt = list_entry(t->transfer_list.next,
-				 struct spi_transfer,
-				 transfer_list);
-		if (t->cs_change != nxt->cs_change)
-			return xfrs_grped;
-		dd->cur_msg_len += nxt->len;
-		xfrs_grped++;
-		t = nxt;
-	}
-	return xfrs_grped;
-}
-
 static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
 {
 	u32 spi_ioc;
@@ -1839,122 +2026,124 @@
 		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
 }
 
+static inline int combine_transfers(struct msm_spi *dd)
+{
+	struct spi_transfer *t = dd->cur_transfer;
+	struct spi_transfer *nxt;
+	int xfrs_grped = 1;
+	dd->xfrs_delay_usec = 0;
+
+	dd->bam.bam_rx_len = dd->bam.bam_tx_len = 0;
+
+	dd->cur_msg_len = dd->cur_transfer->len;
+
+	if (dd->cur_transfer->tx_buf)
+		dd->bam.bam_tx_len += dd->cur_transfer->len;
+	if (dd->cur_transfer->rx_buf)
+		dd->bam.bam_rx_len += dd->cur_transfer->len;
+
+	while (t->transfer_list.next != &dd->cur_msg->transfers) {
+		nxt = list_entry(t->transfer_list.next,
+				 struct spi_transfer,
+				 transfer_list);
+		if (t->cs_change != nxt->cs_change)
+			return xfrs_grped;
+		if (t->delay_usecs) {
+			dd->xfrs_delay_usec = t->delay_usecs;
+			dev_info(dd->dev, "SPI slave requests delay per txn: %d usecs",
+					t->delay_usecs);
+			return xfrs_grped;
+		}
+		if (nxt->tx_buf)
+			dd->bam.bam_tx_len += nxt->len;
+		if (nxt->rx_buf)
+			dd->bam.bam_rx_len += nxt->len;
+
+		dd->cur_msg_len += nxt->len;
+		xfrs_grped++;
+		t = nxt;
+	}
+
+	if (1 == xfrs_grped)
+		dd->xfrs_delay_usec = dd->cur_transfer->delay_usecs;
+
+	return xfrs_grped;
+}
+
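combine_transfers() now stops grouping as soon as a transfer carries delay_usecs and records that delay in dd->xfrs_delay_usec, which msm_spi_process_transfer() applies once the group completes. As a hypothetical client-side illustration (the cmd/hdr/resp buffers and the spi device are placeholders, not part of this patch), the message below would be processed as two groups, t[0] alone and t[1] plus t[2] together:

	struct spi_transfer t[3] = {
		{ .tx_buf = cmd,  .len = 4,  .delay_usecs = 50 }, /* ends group 1 */
		{ .tx_buf = hdr,  .len = 8 },
		{ .rx_buf = resp, .len = 64 },                    /* grouped with t[1] */
	};
	struct spi_message m;
	int i;

	spi_message_init(&m);
	for (i = 0; i < 3; i++)
		spi_message_add_tail(&t[i], &m);
	spi_sync(spi, &m);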
 static void msm_spi_process_message(struct msm_spi *dd)
 {
 	int xfrs_grped = 0;
-	int cs_num;
 	int rc;
-	bool xfer_delay = false;
-	struct spi_transfer *tr;
 
+	dd->num_xfrs_grped = 0;
+	dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
 	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
-	cs_num = dd->cur_msg->spi->chip_select;
-	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
-		(!(dd->cs_gpios[cs_num].valid)) &&
-		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
-		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
-				spi_cs_rsrcs[cs_num]);
-		if (rc) {
-			dev_err(dd->dev, "gpio_request for pin %d failed with "
-				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
-				rc);
-			return;
-		}
-		dd->cs_gpios[cs_num].valid = 1;
-	}
+	rc = msm_spi_request_cs_gpio(dd);
+	if (rc)
+		return;
 
-	list_for_each_entry(tr,
-				&dd->cur_msg->transfers,
-				transfer_list) {
-		if (tr->delay_usecs) {
-			dev_info(dd->dev, "SPI slave requests delay per txn :%d",
-					tr->delay_usecs);
-			xfer_delay = true;
-			break;
-		}
-	}
-
-	/* Don't combine xfers if delay is needed after every xfer */
-	if (dd->qup_ver || xfer_delay) {
-		if (dd->qup_ver)
-			write_force_cs(dd, 0);
-		list_for_each_entry(dd->cur_transfer,
-				&dd->cur_msg->transfers,
-				transfer_list) {
-			struct spi_transfer *t = dd->cur_transfer;
-			struct spi_transfer *nxt;
-
-			if (t->transfer_list.next != &dd->cur_msg->transfers) {
-				nxt = list_entry(t->transfer_list.next,
+	dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
 						struct spi_transfer,
 						transfer_list);
 
-				if (dd->qup_ver &&
-					t->cs_change == nxt->cs_change)
-					write_force_cs(dd, 1);
-				else if (dd->qup_ver)
-					write_force_cs(dd, 0);
-			}
+	get_transfer_length(dd);
+	if (dd->qup_ver || (dd->multi_xfr && !dd->read_len && !dd->write_len)) {
 
-			dd->cur_msg_len = dd->cur_transfer->len;
-			msm_spi_process_transfer(dd);
-		}
-	} else {
-		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
-						    struct spi_transfer,
-						    transfer_list);
-		get_transfer_length(dd);
-		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
-			/*
-			 * Handling of multi-transfers.
-			 * FIFO mode is used by default
-			 */
-			list_for_each_entry(dd->cur_transfer,
-					    &dd->cur_msg->transfers,
-					    transfer_list) {
-				if (!dd->cur_transfer->len)
-					goto error;
-				if (xfrs_grped) {
-					xfrs_grped--;
-					continue;
-				} else {
-					dd->read_len = dd->write_len = 0;
-					xfrs_grped = combine_transfers(dd);
-				}
+		if (dd->qup_ver)
+			write_force_cs(dd, 0);
 
-				dd->cur_tx_transfer = dd->cur_transfer;
-				dd->cur_rx_transfer = dd->cur_transfer;
-				msm_spi_process_transfer(dd);
+		/*
+		 * Handling of multi-transfers.
+		 * FIFO mode is used by default
+		 */
+		list_for_each_entry(dd->cur_transfer,
+					&dd->cur_msg->transfers,
+					transfer_list) {
+			if (!dd->cur_transfer->len)
+				goto error;
+			if (xfrs_grped) {
 				xfrs_grped--;
-			}
-		} else {
-			/* Handling of a single transfer or
-			 * WR-WR or WR-RD transfers
-			 */
-			if ((!dd->cur_msg->is_dma_mapped) &&
-			    (msm_spi_use_dma(dd, dd->cur_transfer,
-					dd->cur_transfer->bits_per_word))) {
-				/* Mapping of DMA buffers */
-				int ret = msm_spi_dma_map_buffers(dd);
-				if (ret < 0) {
-					dd->cur_msg->status = ret;
-					goto error;
-				}
+				continue;
+			} else {
+				dd->read_len = dd->write_len = 0;
+				xfrs_grped = combine_transfers(dd);
+				dd->num_xfrs_grped = xfrs_grped;
+				if (dd->qup_ver)
+					write_force_cs(dd, 1);
 			}
 
 			dd->cur_tx_transfer = dd->cur_transfer;
 			dd->cur_rx_transfer = dd->cur_transfer;
 			msm_spi_process_transfer(dd);
+			if (dd->qup_ver && !dd->xfrs_delay_usec)
+				write_force_cs(dd, 0);
+			xfrs_grped--;
 		}
+	} else {
+		/* Handling of a single transfer or
+		 * WR-WR or WR-RD transfers
+		 */
+		if ((!dd->cur_msg->is_dma_mapped) &&
+			(msm_spi_use_dma(dd, dd->cur_transfer,
+				dd->cur_transfer->bits_per_word))) {
+			/* Mapping of DMA buffers */
+			int ret = msm_spi_dma_map_buffers(dd);
+			if (ret < 0) {
+				dd->cur_msg->status = ret;
+				goto error;
+			}
+		}
+
+		dd->cur_tx_transfer = dd->cur_transfer;
+		dd->cur_rx_transfer = dd->cur_transfer;
+		dd->num_xfrs_grped = 1;
+		msm_spi_process_transfer(dd);
 	}
 
 	return;
 
 error:
-	if (dd->cs_gpios[cs_num].valid) {
-		gpio_free(dd->cs_gpios[cs_num].gpio_num);
-		dd->cs_gpios[cs_num].valid = 0;
-	}
+	msm_spi_free_cs_gpio(dd);
 }
 
 /**
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index d538076..a0c399b 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -291,6 +291,10 @@
 	struct msm_spi_bam_pipe  prod;
 	struct msm_spi_bam_pipe  cons;
 	bool                     deregister_required;
+	u32			 curr_rx_bytes_recvd;
+	u32			 curr_tx_bytes_sent;
+	u32			 bam_rx_len;
+	u32			 bam_tx_len;
 };
 
 struct msm_spi {
@@ -388,6 +392,8 @@
 	struct spi_cs_gpio       cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
 	enum msm_spi_qup_version qup_ver;
 	int			 max_trfr_len;
+	int			 num_xfrs_grped;
+	u16			 xfrs_delay_usec;
 };
 
 /* Forward declaration */
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 6e6457c..81b683d 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -30,6 +30,7 @@
 #include <linux/of.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
+#include <linux/io.h>
 #include <linux/android_alarm.h>
 #include <linux/thermal.h>
 #include <mach/rpm-regulator.h>
@@ -40,6 +41,9 @@
 #define MAX_CURRENT_UA 1000000
 #define MAX_RAILS 5
 #define MAX_THRESHOLD 2
+#define MONITOR_ALL_TSENS -1
+#define BYTES_PER_FUSE_ROW  8
+#define MAX_EFUSE_VALUE  16
 
 static struct msm_thermal_data msm_thermal_info;
 static struct delayed_work check_temp_work;
@@ -53,10 +57,13 @@
 static struct work_struct timer_work;
 static struct task_struct *hotplug_task;
 static struct task_struct *freq_mitigation_task;
+static struct task_struct *thermal_monitor_task;
 static struct completion hotplug_notify_complete;
 static struct completion freq_mitigation_complete;
+static struct completion thermal_monitor_complete;
 
 static int enabled;
+static int polling_enabled;
 static int rails_cnt;
 static int psm_rails_cnt;
 static int ocr_rail_cnt;
@@ -78,11 +85,17 @@
 static bool ocr_enabled;
 static bool ocr_nodes_called;
 static bool ocr_probed;
+static bool interrupt_mode_enable;
+static bool msm_thermal_probed;
 static int *tsens_id_map;
 static DEFINE_MUTEX(vdd_rstr_mutex);
 static DEFINE_MUTEX(psm_mutex);
 static DEFINE_MUTEX(ocr_mutex);
 static uint32_t min_freq_limit;
+static uint32_t default_cpu_temp_limit;
+static bool default_temp_limit_enabled;
+static bool default_temp_limit_probed;
+static bool default_temp_limit_nodes_called;
 
 enum thermal_threshold {
 	HOTPLUG_THRESHOLD_HIGH,
@@ -115,6 +128,21 @@
 	bool freq_thresh_clear;
 };
 
+struct threshold_info;
+struct therm_threshold {
+	int32_t sensor_id;
+	struct sensor_threshold threshold[MAX_THRESHOLD];
+	int32_t trip_triggered;
+	void (*notify)(struct therm_threshold *);
+	struct threshold_info *parent;
+};
+
+struct threshold_info {
+	uint32_t thresh_ct;
+	bool thresh_triggered;
+	struct therm_threshold *thresh_list;
+};
+
 struct rail {
 	const char *name;
 	uint32_t freq_req;
@@ -138,16 +166,31 @@
 	struct attribute_group attr_gp;
 };
 
+enum msm_thresh_list {
+	MSM_VDD_RESTRICTION,
+	MSM_LIST_MAX_NR,
+};
+
 static struct psm_rail *psm_rails;
 static struct psm_rail *ocr_rails;
 static struct rail *rails;
 static struct cpu_info cpus[NR_CPUS];
+static struct threshold_info *thresh;
 
 struct vdd_rstr_enable {
 	struct kobj_attribute ko_attr;
 	uint32_t enabled;
 };
 
+enum efuse_data {
+	EFUSE_ADDRESS = 0,
+	EFUSE_SIZE,
+	EFUSE_ROW,
+	EFUSE_START_BIT,
+	EFUSE_BIT_MASK,
+	EFUSE_DATA_MAX,
+};
+
 /* For SMPS only*/
 enum PMIC_SW_MODE {
 	PMIC_AUTO_MODE  = RPM_REGULATOR_MODE_AUTO,
@@ -367,6 +410,12 @@
 	return fail_cnt ? (-EFAULT) : ret;
 }
 
+static ssize_t default_cpu_temp_limit_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", default_cpu_temp_limit);
+}
+
 static int vdd_rstr_en_show(
 	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
@@ -943,7 +992,9 @@
 		return -EINVAL;
 
 	while (!kthread_should_stop()) {
-		wait_for_completion(&hotplug_notify_complete);
+		while (wait_for_completion_interruptible(
+			&hotplug_notify_complete) != 0)
+			;
 		INIT_COMPLETION(hotplug_notify_complete);
 		mask = 0;
 
@@ -1195,7 +1246,7 @@
 	do_freq_control(temp);
 
 reschedule:
-	if (enabled)
+	if (polling_enabled)
 		schedule_delayed_work(&check_temp_work,
 				msecs_to_jiffies(msm_thermal_info.poll_ms));
 }
@@ -1377,7 +1428,9 @@
 	uint32_t cpu = 0, max_freq_req = 0, min_freq_req = 0;
 
 	while (!kthread_should_stop()) {
-		wait_for_completion(&freq_mitigation_complete);
+		while (wait_for_completion_interruptible(
+			&freq_mitigation_complete) != 0)
+			;
 		INIT_COMPLETION(freq_mitigation_complete);
 
 		get_online_cpus();
@@ -1532,6 +1585,217 @@
 	return ret;
 }
 
+int therm_set_threshold(struct threshold_info *thresh_inp)
+{
+	int ret = 0, i = 0, err = 0;
+	struct therm_threshold *thresh_ptr;
+
+	if (!thresh_inp) {
+		pr_err("%s: %s: Invalid input\n",
+			KBUILD_MODNAME, __func__);
+		ret = -EINVAL;
+		goto therm_set_exit;
+	}
+
+	thresh_inp->thresh_triggered = false;
+	for (i = 0; i < thresh_inp->thresh_ct; i++) {
+		thresh_ptr = &thresh_inp->thresh_list[i];
+		thresh_ptr->trip_triggered = -1;
+		err = set_threshold(thresh_ptr->sensor_id,
+			thresh_ptr->threshold);
+		if (err) {
+			ret = err;
+			err = 0;
+		}
+	}
+
+therm_set_exit:
+	return ret;
+}
+
+static void vdd_restriction_notify(struct therm_threshold *trig_thresh)
+{
+	int ret = 0;
+	static uint32_t vdd_sens_status;
+
+	if (!vdd_rstr_enabled)
+		return;
+	if (!trig_thresh) {
+		pr_err("%s:%s Invalid input\n", KBUILD_MODNAME, __func__);
+		return;
+	}
+	if (trig_thresh->trip_triggered < 0)
+		goto set_and_exit;
+
+	mutex_lock(&vdd_rstr_mutex);
+	pr_debug("%s: sensor%d reached %d thresh for Vdd restriction\n",
+		KBUILD_MODNAME, trig_thresh->sensor_id,
+		trig_thresh->trip_triggered);
+	switch (trig_thresh->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		if (vdd_sens_status & BIT(trig_thresh->sensor_id))
+			vdd_sens_status ^= BIT(trig_thresh->sensor_id);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		vdd_sens_status |= BIT(trig_thresh->sensor_id);
+		break;
+	default:
+		pr_err("%s:%s: Unsupported trip type\n",
+			KBUILD_MODNAME, __func__);
+		goto unlock_and_exit;
+		break;
+	}
+
+	ret = vdd_restriction_apply_all((vdd_sens_status) ? 1 : 0);
+	if (ret) {
+		pr_err("%s vdd rstr voltage for all failed\n",
+			(vdd_sens_status) ?
+			"Enable" : "Disable");
+		goto unlock_and_exit;
+	}
+
+unlock_and_exit:
+	mutex_unlock(&vdd_rstr_mutex);
+set_and_exit:
+	set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
+	return;
+}
+
+static __ref int do_thermal_monitor(void *data)
+{
+	int ret = 0, i, j;
+	struct therm_threshold *sensor_list;
+
+	while (!kthread_should_stop()) {
+		while (wait_for_completion_interruptible(
+			&thermal_monitor_complete) != 0)
+			;
+		INIT_COMPLETION(thermal_monitor_complete);
+
+		for (i = 0; i < MSM_LIST_MAX_NR; i++) {
+			if (!thresh[i].thresh_triggered)
+				continue;
+			thresh[i].thresh_triggered = false;
+			for (j = 0; j < thresh[i].thresh_ct; j++) {
+				sensor_list = &thresh[i].thresh_list[j];
+				if (sensor_list->trip_triggered < 0)
+					continue;
+				sensor_list->notify(sensor_list);
+				sensor_list->trip_triggered = -1;
+			}
+		}
+	}
+	return ret;
+}
+
+static void thermal_monitor_init(void)
+{
+	if (thermal_monitor_task)
+		return;
+
+	init_completion(&thermal_monitor_complete);
+	thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
+		"msm_thermal:therm_monitor");
+	if (IS_ERR(thermal_monitor_task)) {
+		pr_err("%s: Failed to create thermal monitor thread\n",
+				KBUILD_MODNAME);
+		goto init_exit;
+	}
+
+	if (vdd_rstr_enabled)
+		therm_set_threshold(&thresh[MSM_VDD_RESTRICTION]);
+
+init_exit:
+	return;
+}
+
+static int msm_thermal_notify(enum thermal_trip_type type, int temp, void *data)
+{
+	struct therm_threshold *thresh_data = (struct therm_threshold *)data;
+
+	if (thermal_monitor_task) {
+		thresh_data->trip_triggered = type;
+		thresh_data->parent->thresh_triggered = true;
+		complete(&thermal_monitor_complete);
+	} else {
+		pr_err("%s: Thermal monitor task is not initialized\n",
+			KBUILD_MODNAME);
+	}
+	return 0;
+}
+
+static int init_threshold(enum msm_thresh_list index,
+	int sensor_id, int32_t hi_temp, int32_t low_temp,
+	void (*callback)(struct therm_threshold *))
+{
+	int ret = 0, i;
+	struct therm_threshold *thresh_ptr;
+
+	if (!callback || index >= MSM_LIST_MAX_NR || index < 0
+		|| sensor_id == -ENODEV) {
+		pr_err("%s: Invalid input to init_threshold\n",
+			KBUILD_MODNAME);
+		ret = -EINVAL;
+		goto init_thresh_exit;
+	}
+	if (thresh[index].thresh_list) {
+		pr_err("%s: threshold already initialized\n",
+			KBUILD_MODNAME);
+		ret = -EEXIST;
+		goto init_thresh_exit;
+	}
+
+	thresh[index].thresh_ct = (sensor_id == MONITOR_ALL_TSENS) ?
+						max_tsens_num : 1;
+	thresh[index].thresh_triggered = false;
+	thresh[index].thresh_list = kzalloc(sizeof(struct therm_threshold) *
+					thresh[index].thresh_ct, GFP_KERNEL);
+	if (!thresh[index].thresh_list) {
+		pr_err("%s: kzalloc failed\n", KBUILD_MODNAME);
+		ret = -ENOMEM;
+		goto init_thresh_exit;
+	}
+
+	thresh_ptr = thresh[index].thresh_list;
+	if (sensor_id == MONITOR_ALL_TSENS) {
+		for (i = 0; i < max_tsens_num; i++) {
+			thresh_ptr[i].sensor_id = tsens_id_map[i];
+			thresh_ptr[i].notify = callback;
+			thresh_ptr[i].trip_triggered = -1;
+			thresh_ptr[i].parent = &thresh[index];
+			thresh_ptr[i].threshold[0].temp = hi_temp;
+			thresh_ptr[i].threshold[0].trip =
+				THERMAL_TRIP_CONFIGURABLE_HI;
+			thresh_ptr[i].threshold[1].temp = low_temp;
+			thresh_ptr[i].threshold[1].trip =
+				THERMAL_TRIP_CONFIGURABLE_LOW;
+			thresh_ptr[i].threshold[0].notify =
+			thresh_ptr[i].threshold[1].notify = msm_thermal_notify;
+			thresh_ptr[i].threshold[0].data =
+			thresh_ptr[i].threshold[1].data =
+				(void *)&thresh_ptr[i];
+		}
+	} else {
+		thresh_ptr->sensor_id = sensor_id;
+		thresh_ptr->notify = callback;
+		thresh_ptr->trip_triggered = -1;
+		thresh_ptr->parent = &thresh[index];
+		thresh_ptr->threshold[0].temp = hi_temp;
+		thresh_ptr->threshold[0].trip =
+			THERMAL_TRIP_CONFIGURABLE_HI;
+		thresh_ptr->threshold[1].temp = low_temp;
+		thresh_ptr->threshold[1].trip =
+			THERMAL_TRIP_CONFIGURABLE_LOW;
+		thresh_ptr->threshold[0].notify =
+		thresh_ptr->threshold[1].notify = msm_thermal_notify;
+		thresh_ptr->threshold[0].data =
+		thresh_ptr->threshold[1].data = (void *)thresh_ptr;
+	}
+
+init_thresh_exit:
+	return ret;
+}
+
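The threshold machinery above is written to be generic: a client owns one slot in enum msm_thresh_list, arms it with init_threshold(), and supplies a notify callback that re-arms the sensor threshold when it is done (vdd_restriction_notify() is the only user in this patch). A sketch of what a hypothetical second client could look like (the enum value, trip temperatures and callback are illustrative only):

/* hypothetical extra entry in enum msm_thresh_list, before MSM_LIST_MAX_NR:
 *	MSM_EXAMPLE_MONITOR,
 */
static void example_monitor_notify(struct therm_threshold *trig_thresh)
{
	if (trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI)
		pr_info("sensor %d crossed its high trip\n",
			trig_thresh->sensor_id);
	/* always re-arm, as vdd_restriction_notify() does */
	set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
}

/* during probe, after msm_thermal_pre_init():
 *	init_threshold(MSM_EXAMPLE_MONITOR, MONITOR_ALL_TSENS, 95, 90,
 *		       example_monitor_notify);
 */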
 /*
  * We will reset the cpu frequencies limits here. The core online/offline
  * status will be carried over to the process stopping the msm_thermal, as
@@ -1542,8 +1806,7 @@
 	uint32_t cpu = 0;
 
 	/* make sure check_temp is no longer running */
-	cancel_delayed_work(&check_temp_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&check_temp_work);
 
 	get_online_cpus();
 	for_each_possible_cpu(cpu) {
@@ -1557,16 +1820,30 @@
 	put_online_cpus();
 }
 
+static void interrupt_mode_init(void)
+{
+	if (!msm_thermal_probed) {
+		interrupt_mode_enable = true;
+		return;
+	}
+	if (polling_enabled) {
+		pr_info("%s: Interrupt mode init\n", KBUILD_MODNAME);
+		polling_enabled = 0;
+		disable_msm_thermal();
+		hotplug_init();
+		freq_mitigation_init();
+		thermal_monitor_init();
+	}
+}
+
 static int __ref set_enabled(const char *val, const struct kernel_param *kp)
 {
 	int ret = 0;
 
 	ret = param_set_bool(val, kp);
-	if (!enabled) {
-		disable_msm_thermal();
-		hotplug_init();
-		freq_mitigation_init();
-	} else
+	if (!enabled)
+		interrupt_mode_init();
+	else
 		pr_info("%s: no action for enabled = %d\n",
 			KBUILD_MODNAME, enabled);
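With this change, clearing the enabled flag (typically by writing N to /sys/module/msm_thermal/parameters/enabled, assuming the usual module_param_cb hookup for set_enabled) no longer merely stops the polling work: interrupt_mode_init() cancels polling and hands mitigation over to the hotplug, frequency-mitigation and thermal-monitor kthreads, while re-enabling still takes no action.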
 
@@ -1642,7 +1919,7 @@
 		goto done_cc;
 	}
 
-	if (enabled) {
+	if (polling_enabled) {
 		pr_err("%s: Ignoring request; polling thread is enabled.\n",
 				KBUILD_MODNAME);
 		goto done_cc;
@@ -1795,7 +2072,34 @@
 	return ret;
 }
 
-int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
+int msm_thermal_pre_init(void)
+{
+	int ret = 0;
+
+	tsens_get_max_sensor_num(&max_tsens_num);
+	if (create_sensor_id_map()) {
+		ret = -EINVAL;
+		goto pre_init_exit;
+	}
+
+	if (!thresh) {
+		thresh = kzalloc(
+				sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
+				GFP_KERNEL);
+		if (!thresh) {
+			pr_err("%s:%s: kzalloc failed\n",
+				KBUILD_MODNAME, __func__);
+			ret = -ENOMEM;
+			goto pre_init_exit;
+		}
+		memset(thresh, 0, sizeof(struct threshold_info) *
+			MSM_LIST_MAX_NR);
+	}
+pre_init_exit:
+	return ret;
+}
+
+int msm_thermal_init(struct msm_thermal_data *pdata)
 {
 	int ret = 0;
 	uint32_t cpu;
@@ -1813,15 +2117,13 @@
 		cpus[cpu].freq_thresh_clear = false;
 	}
 	BUG_ON(!pdata);
-	tsens_get_max_sensor_num(&max_tsens_num);
 	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
 
-	if (create_sensor_id_map())
-		return -EINVAL;
 	if (check_sensor_id(msm_thermal_info.sensor_id))
 		return -EINVAL;
 
 	enabled = 1;
+	polling_enabled = 1;
 	ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
 			CPUFREQ_POLICY_NOTIFIER);
 	if (ret)
@@ -1959,6 +2261,38 @@
 	return ret;
 }
 
+static struct kobj_attribute default_cpu_temp_limit_attr =
+		__ATTR_RO(default_cpu_temp_limit);
+
+static int msm_thermal_add_default_temp_limit_nodes(void)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+
+	if (!default_temp_limit_probed) {
+		default_temp_limit_nodes_called = true;
+		return ret;
+	}
+	if (!default_temp_limit_enabled)
+		return ret;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("cannot find kobject\n");
+		return -ENOENT;
+	}
+
+	sysfs_attr_init(&default_cpu_temp_limit_attr.attr);
+	ret = sysfs_create_file(module_kobj, &default_cpu_temp_limit_attr.attr);
+	if (ret) {
+		pr_err(
+		"cannot create default_cpu_temp_limit attribute. err:%d\n",
+		ret);
+		return ret;
+	}
+	return ret;
+}
+
 static int msm_thermal_add_vdd_rstr_nodes(void)
 {
 	struct kobject *module_kobj = NULL;
@@ -2271,6 +2605,11 @@
 				__func__);
 			goto read_node_fail;
 		}
+		ret = init_threshold(MSM_VDD_RESTRICTION, MONITOR_ALL_TSENS,
+			data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
+			vdd_restriction_notify);
+		if (ret)
+			goto read_node_fail;
 		vdd_rstr_enabled = true;
 	}
 read_node_fail:
@@ -2287,6 +2626,169 @@
 	return ret;
 }
 
+static int get_efuse_temp_map(struct device_node *node,
+				int *efuse_values,
+				int *efuse_temp)
+{
+	uint32_t i, j, efuse_arr_cnt = 0;
+	int ret = 0, efuse_map_cnt = 0;
+	uint32_t data[2 * MAX_EFUSE_VALUE];
+
+	char *key = "qcom,efuse-temperature-map";
+	if (!of_get_property(node, key, &efuse_map_cnt)
+		|| efuse_map_cnt <= 0) {
+		pr_debug("Property %s not defined.\n", key);
+		return -ENODEV;
+	}
+
+	if (efuse_map_cnt % (sizeof(__be32) * 2)) {
+		pr_err("Invalid number(%d) of entry for %s\n",
+				efuse_map_cnt, key);
+		return -EINVAL;
+	}
+
+	efuse_arr_cnt = efuse_map_cnt / sizeof(__be32);
+
+	ret = of_property_read_u32_array(node, key, data, efuse_arr_cnt);
+	if (ret)
+		return -EINVAL;
+
+	efuse_map_cnt /= (sizeof(__be32) * 2);
+
+	j = 0;
+	for (i = 0; i < efuse_map_cnt; i++) {
+		efuse_values[i] = data[j++];
+		efuse_temp[i] = data[j++];
+	}
+
+	return efuse_map_cnt;
+}
+
+static int probe_thermal_efuse_read(struct device_node *node,
+			struct msm_thermal_data *data,
+			struct platform_device *pdev)
+{
+	u64 efuse_bits;
+	int ret = 0;
+	int i = 0;
+	int efuse_map_cnt = 0;
+	int efuse_data_cnt = 0;
+	char *key = NULL;
+	void __iomem *efuse_base = NULL;
+	uint32_t efuse_data[EFUSE_DATA_MAX] = {0};
+	uint32_t efuse_values[MAX_EFUSE_VALUE] = {0};
+	uint32_t efuse_temp[MAX_EFUSE_VALUE] = {0};
+	uint32_t default_temp = 0;
+	uint8_t thermal_efuse_data = 0;
+
+	if (default_temp_limit_probed)
+		goto read_efuse_exit;
+
+	key = "qcom,default-temp";
+	if (of_property_read_u32(node, key, &default_temp))
+		default_temp = 0;
+
+	default_cpu_temp_limit = default_temp;
+
+	key = "qcom,efuse-data";
+	if (!of_get_property(node, key, &efuse_data_cnt) ||
+		efuse_data_cnt <= 0) {
+		ret = -ENODEV;
+		goto read_efuse_fail;
+	}
+	efuse_data_cnt /= sizeof(__be32);
+
+	if (efuse_data_cnt != EFUSE_DATA_MAX) {
+		pr_err("Invalid number of efuse data. data cnt %d\n",
+			efuse_data_cnt);
+		ret = -EINVAL;
+		goto read_efuse_fail;
+	}
+
+	ret = of_property_read_u32_array(node, key, efuse_data,
+						efuse_data_cnt);
+	if (ret)
+		goto read_efuse_fail;
+
+	if (efuse_data[EFUSE_ADDRESS] == 0 ||
+		efuse_data[EFUSE_SIZE] == 0 ||
+		efuse_data[EFUSE_BIT_MASK] == 0) {
+		pr_err("Invalid efuse data: address:%x len:%d bitmask:%x\n",
+			efuse_data[EFUSE_ADDRESS], efuse_data[EFUSE_SIZE],
+			efuse_data[EFUSE_BIT_MASK]);
+		ret = -EINVAL;
+		goto read_efuse_fail;
+	}
+
+	efuse_map_cnt = get_efuse_temp_map(node, efuse_values,
+						efuse_temp);
+	if (efuse_map_cnt <= 0 ||
+		efuse_map_cnt > (efuse_data[EFUSE_BIT_MASK] + 1)) {
+		pr_err("Invalid efuse-temperature-map. cnt%d\n",
+			efuse_map_cnt);
+		ret = -EINVAL;
+		goto read_efuse_fail;
+	}
+
+	efuse_base = ioremap(efuse_data[EFUSE_ADDRESS], efuse_data[EFUSE_SIZE]);
+	if (!efuse_base) {
+		pr_err("Unable to map efuse_addr:%x with size:%d\n",
+			efuse_data[EFUSE_ADDRESS],
+			efuse_data[EFUSE_SIZE]);
+		ret = -EINVAL;
+		goto read_efuse_fail;
+	}
+
+	efuse_bits = readll_relaxed(efuse_base
+		+ efuse_data[EFUSE_ROW] * BYTES_PER_FUSE_ROW);
+
+	thermal_efuse_data = (efuse_bits >> efuse_data[EFUSE_START_BIT]) &
+						efuse_data[EFUSE_BIT_MASK];
+
+	/* Get cpu limit temp from efuse truth table */
+	for (; i < efuse_map_cnt; i++) {
+		if (efuse_values[i] == thermal_efuse_data) {
+			default_cpu_temp_limit = efuse_temp[i];
+			break;
+		}
+	}
+	if (i >= efuse_map_cnt) {
+		if (!default_temp) {
+			pr_err("No matching efuse value. value:%d\n",
+				thermal_efuse_data);
+			ret = -EINVAL;
+			goto read_efuse_fail;
+		}
+	}
+
+	pr_debug(
+	"Efuse address:0x%x [row:%d] = 0x%llx @%d:mask:0x%x = 0x%x temp:%d\n",
+		efuse_data[EFUSE_ADDRESS], efuse_data[EFUSE_ROW], efuse_bits,
+		efuse_data[EFUSE_START_BIT], efuse_data[EFUSE_BIT_MASK],
+		thermal_efuse_data, default_cpu_temp_limit);
+
+	default_temp_limit_enabled = true;
+
+read_efuse_fail:
+	if (efuse_base)
+		iounmap(efuse_base);
+	default_temp_limit_probed = true;
+	if (ret) {
+		if (!default_temp) {
+			dev_info(&pdev->dev,
+			"%s:Failed reading node=%s, key=%s. KTM continues\n",
+				__func__, node->full_name, key);
+		} else {
+			default_temp_limit_enabled = true;
+			pr_debug("Default cpu temp limit is %d\n",
+					default_cpu_temp_limit);
+			ret = 0;
+		}
+	}
+read_efuse_exit:
+	return ret;
+}
+
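To make the extraction above concrete with hypothetical numbers: if qcom,efuse-data gave a start bit of 8 and a bit mask of 0xF, and the selected fuse row read back as 0x0000000000000A00, the driver would compute (0xA00 >> 8) & 0xF = 0xA and look that value up in the qcom,efuse-temperature-map pairs; if no pair matches and no qcom,default-temp was provided, the feature is left disabled rather than guessing a limit.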
 static int probe_ocr(struct device_node *node, struct msm_thermal_data *data,
 		struct platform_device *pdev)
 {
@@ -2547,6 +3049,9 @@
 	struct msm_thermal_data data;
 
 	memset(&data, 0, sizeof(struct msm_thermal_data));
+	ret = msm_thermal_pre_init();
+	if (ret)
+		goto fail;
 
 	key = "qcom,sensor-id";
 	ret = of_property_read_u32(node, key, &data.sensor_id);
@@ -2596,6 +3101,7 @@
 	ret = probe_ocr(node, &data, pdev);
 	if (ret == -EPROBE_DEFER)
 		goto fail;
+	probe_thermal_efuse_read(node, &data, pdev);
 
 	/*
 	 * In case sysfs add nodes get called before probe function.
@@ -2613,8 +3119,18 @@
 		msm_thermal_add_ocr_nodes();
 		ocr_nodes_called = false;
 	}
+	if (default_temp_limit_nodes_called) {
+		msm_thermal_add_default_temp_limit_nodes();
+		default_temp_limit_nodes_called = false;
+	}
 	msm_thermal_ioctl_init();
 	ret = msm_thermal_init(&data);
+	msm_thermal_probed = true;
+
+	if (interrupt_mode_enable) {
+		interrupt_mode_init();
+		interrupt_mode_enable = false;
+	}
 
 	return ret;
 fail:
@@ -2628,6 +3144,12 @@
 static int msm_thermal_dev_exit(struct platform_device *inp_dev)
 {
 	msm_thermal_ioctl_cleanup();
+	if (thresh) {
+		if (vdd_rstr_enabled)
+			kfree(thresh[MSM_VDD_RESTRICTION].thresh_list);
+		kfree(thresh);
+		thresh = NULL;
+	}
 	return 0;
 }
 
@@ -2658,11 +3180,13 @@
 	msm_thermal_add_psm_nodes();
 	msm_thermal_add_vdd_rstr_nodes();
 	msm_thermal_add_ocr_nodes();
+	msm_thermal_add_default_temp_limit_nodes();
 	alarm_init(&thermal_rtc, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
 			thermal_rtc_callback);
 	INIT_WORK(&timer_work, timer_work_fn);
 	msm_thermal_add_timer_nodes();
 
+	interrupt_mode_init();
 	return 0;
 }
 late_initcall(msm_thermal_late_init);
diff --git a/drivers/usb/class/ccid_bridge.c b/drivers/usb/class/ccid_bridge.c
index 05483fd..a914902 100644
--- a/drivers/usb/class/ccid_bridge.c
+++ b/drivers/usb/class/ccid_bridge.c
@@ -36,6 +36,10 @@
 #define CCID_CONTROL_TIMEOUT 500 /* msec */
 #define CCID_BRIDGE_MSG_TIMEOUT 1000 /* msec */
 
+static unsigned ccid_bulk_msg_timeout = CCID_BRIDGE_MSG_TIMEOUT;
+module_param_named(bulk_msg_timeout, ccid_bulk_msg_timeout, uint, 0644);
+MODULE_PARM_DESC(bulk_msg_timeout, "Bulk message timeout (msecs)");
+
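Because the parameter is exported with 0644 permissions it can be changed at run time as well as at load time; assuming the driver is built as a module named ccid_bridge, the knob appears as /sys/module/ccid_bridge/parameters/bulk_msg_timeout, and ccid_bridge.bulk_msg_timeout=<msecs> works on the kernel command line when the driver is built in.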
 struct ccid_bridge {
 	struct usb_device *udev;
 	struct usb_interface *intf;
@@ -304,7 +308,7 @@
 
 	ret = wait_event_interruptible_timeout(ccid->write_wq,
 			ccid->write_result != 0,
-			msecs_to_jiffies(CCID_BRIDGE_MSG_TIMEOUT));
+			msecs_to_jiffies(ccid_bulk_msg_timeout));
 	if (!ret || ret == -ERESTARTSYS) { /* timedout or interrupted */
 		usb_kill_urb(ccid->writeurb);
 		if (!ret)
@@ -375,7 +379,7 @@
 
 	ret = wait_event_interruptible_timeout(ccid->read_wq,
 			ccid->read_result != 0,
-			msecs_to_jiffies(CCID_BRIDGE_MSG_TIMEOUT));
+			msecs_to_jiffies(ccid_bulk_msg_timeout));
 	if (!ret || ret == -ERESTARTSYS) { /* timedout or interrupted */
 		usb_kill_urb(ccid->readurb);
 		if (!ret)
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 6b01472..4de97f56 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -2723,6 +2723,8 @@
 	struct android_configuration	*conf;
 	int value = -EOPNOTSUPP;
 	unsigned long flags;
+	bool do_work = false;
+	bool prev_configured = false;
 
 	req->zero = 0;
 	req->complete = composite_setup_complete;
@@ -2741,6 +2743,12 @@
 			}
 		}
 
+	/*
+	 * Skip the work when a 2nd SET_CONFIGURATION arrives
+	 * from the host with the same config value.
+	 */
+	if (cdev->config)
+		prev_configured = true;
 	/* Special case the accessory function.
 	 * It needs to handle control requests before it is enabled.
 	 */
@@ -2753,13 +2761,15 @@
 	spin_lock_irqsave(&cdev->lock, flags);
 	if (!dev->connected) {
 		dev->connected = 1;
-		schedule_work(&dev->work);
+		do_work = true;
 	} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
 						cdev->config) {
-		schedule_work(&dev->work);
+		if (!prev_configured)
+			do_work = true;
 	}
 	spin_unlock_irqrestore(&cdev->lock, flags);
-
+	if (do_work)
+		schedule_work(&dev->work);
 	return value;
 }
 
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 6a92684..30a678b 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -841,9 +841,6 @@
 	/* ESS flushes only at end?!? */
 	hw_cwrite(CAP_ENDPTFLUSH,    ~0, ~0);   /* flush all EPs */
 
-	/* clear setup token semaphores */
-	hw_cwrite(CAP_ENDPTSETUPSTAT, 0,  0);   /* writes its content */
-
 	/* clear complete status */
 	hw_cwrite(CAP_ENDPTCOMPLETE,  0,  0);   /* writes its content */
 
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 7d63bf9..b13e8e5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -604,6 +604,16 @@
 	unsigned		power = gadget_is_otg(gadget) ? 8 : 100;
 	int			tmp;
 
+	/*
+	 * Ignore a repeated SET_CONFIGURATION from the host
+	 * that carries the same config value.
+	 */
+	if (cdev->config && (cdev->config->bConfigurationValue == number)) {
+		DBG(cdev, "already in the same config with value %d\n",
+				number);
+		return 0;
+	}
+
 	if (number) {
 		list_for_each_entry(c, &cdev->configs, list) {
 			if (c->bConfigurationValue == number) {
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 486d99d..ea724d6 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -2438,6 +2438,11 @@
 		 * Notify the charger type to power supply
 		 * owner as soon as we determine the charger.
 		 */
+		if (motg->chg_type == USB_DCP_CHARGER &&
+			motg->ext_chg_opened) {
+				init_completion(&motg->ext_chg_wait);
+				motg->ext_chg_active = DEFAULT;
+		}
 		msm_otg_notify_chg_type(motg);
 		msm_chg_block_off(motg);
 		msm_chg_enable_aca_det(motg);
@@ -2541,14 +2546,17 @@
 	 * detection is completed.
 	 */
 
-	if (motg->ext_chg_active) {
+	if (motg->ext_chg_active == ACTIVE) {
 
+do_wait:
 		pr_debug("before msm_otg ext chg wait\n");
 
 		t = wait_for_completion_timeout(&motg->ext_chg_wait,
 				msecs_to_jiffies(3000));
 		if (!t)
 			pr_err("msm_otg ext chg wait timeout\n");
+		else if (motg->ext_chg_active == ACTIVE)
+			goto do_wait;
 		else
 			pr_debug("msm_otg ext chg wait done\n");
 	}
@@ -2624,11 +2632,6 @@
 				case USB_DCP_CHARGER:
 					/* Enable VDP_SRC */
 					ulpi_write(otg->phy, 0x2, 0x85);
-					if (motg->ext_chg_opened) {
-						init_completion(
-							&motg->ext_chg_wait);
-						motg->ext_chg_active = true;
-					}
 					/* fall through */
 				case USB_PROPRIETARY_CHARGER:
 					msm_otg_notify_charger(motg,
@@ -2698,6 +2701,8 @@
 			motg->chg_type = USB_INVALID_CHARGER;
 			msm_otg_notify_charger(motg, 0);
 			if (dcp) {
+				if (motg->ext_chg_active == DEFAULT)
+					motg->ext_chg_active = INACTIVE;
 				msm_otg_wait_for_ext_chg_done(motg);
 				/* Turn off VDP_SRC */
 				ulpi_write(otg->phy, 0x2, 0x86);
@@ -4051,6 +4056,7 @@
 		pr_debug("%s: LPM block request %d\n", __func__, val);
 		if (val) { /* block LPM */
 			if (motg->chg_type == USB_DCP_CHARGER) {
+				motg->ext_chg_active = ACTIVE;
 				/*
 				 * If device is already suspended, resume it.
 				 * The PM usage counter is incremented in
@@ -4063,14 +4069,23 @@
 				else
 					pm_runtime_get_sync(motg->phy.dev);
 			} else {
-				motg->ext_chg_active = false;
+				motg->ext_chg_active = INACTIVE;
 				complete(&motg->ext_chg_wait);
 				ret = -ENODEV;
 			}
 		} else {
-			motg->ext_chg_active = false;
+			motg->ext_chg_active = INACTIVE;
 			complete(&motg->ext_chg_wait);
-			pm_runtime_put(motg->phy.dev);
+			/*
+			 * If the USB cable is disconnected and userspace then
+			 * calls the ioctl to unblock low power mode, make sure
+			 * the OTG sm_work for the disconnect is processed first
+			 * and only then decrement the PM usage counters.
+			 */
+			flush_work(&motg->sm_work);
+			pm_runtime_put_noidle(motg->phy.dev);
+			motg->pm_done = 1;
+			pm_runtime_suspend(motg->phy.dev);
 		}
 		break;
 	case MSM_USB_EXT_CHG_VOLTAGE_INFO:
@@ -4897,7 +4912,7 @@
 	if (phy->state == OTG_STATE_UNDEFINED)
 		return -EAGAIN;
 
-	if (motg->ext_chg_active) {
+	if (motg->ext_chg_active == DEFAULT) {
 		dev_dbg(dev, "Deferring LPM\n");
 		/*
 		 * Charger detection may happen in user space.
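The msm_otg.c hunks above replace the boolean ext_chg_active with a three-state value so that LPM, charger detection, and the userspace external-charger handshake can be sequenced explicitly. A hedged sketch of the states as this patch uses them; the real enum is presumably defined in the msm_otg header, which this diff does not touch:

enum ext_chg_state {		/* hypothetical definition, for illustration only */
	DEFAULT,	/* DCP detected while the ext-chg node is open: LPM is
			 * deferred until userspace decides whether to take over */
	ACTIVE,		/* userspace blocked LPM via the ioctl and owns charger
			 * negotiation: msm_otg_wait_for_ext_chg_done() keeps waiting */
	INACTIVE,	/* block released, cable removed, or never claimed:
			 * LPM and the normal disconnect path may proceed */
};

With this split, the DCP branch no longer marks the handshake active at detection time; it only arms the completion and sets DEFAULT, and the state becomes ACTIVE only when userspace actually takes the LPM block through the ioctl.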
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 6025a78..ae13322 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -1,8 +1,8 @@
-mdss-mdp3-objs = mdp3.o mdp3_dma.o mdp3_ctrl.o
+mdss-mdp3-objs = mdp3.o mdp3_dma.o mdp3_ctrl.o dsi_status_v2.o
 mdss-mdp3-objs += mdp3_ppp.o mdp3_ppp_hwio.o mdp3_ppp_data.o
 obj-$(CONFIG_FB_MSM_MDSS_MDP3) += mdss-mdp3.o
 
-mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o
+mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o dsi_status_6g.o
 mdss-mdp-objs += mdss_mdp_pp.o
 mdss-mdp-objs += mdss_mdp_intf_video.o
 mdss-mdp-objs += mdss_mdp_intf_cmd.o
@@ -43,8 +43,4 @@
 
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
 
-ifeq ($(CONFIG_FB_MSM_MDSS_MDP3),y)
-obj-$(CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS) += dsi_status_v2.o
-else
 obj-$(CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS) += mdss_dsi_status.o
-endif
diff --git a/drivers/video/msm/mdss/dsi_status_6g.c b/drivers/video/msm/mdss/dsi_status_6g.c
new file mode 100644
index 0000000..4f7e4da
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_status_6g.c
@@ -0,0 +1,118 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "mdss_dsi.h"
+#include "mdss_mdp.h"
+
+/*
+ * mdss_check_dsi_ctrl_status() - Check MDP5 DSI controller status periodically.
+ * @work     : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls the check_status API on the DSI controller to send a
+ * BTA command. If the controller fails to acknowledge the BTA, it sends the
+ * PANEL_ALIVE=0 status to the HAL layer.
+ */
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval)
+{
+	struct dsi_status_data *pstatus_data = NULL;
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct mdss_mdp_ctl *ctl = NULL;
+	int ret = 0;
+
+	pstatus_data = container_of(to_delayed_work(work),
+		struct dsi_status_data, check_status);
+	if (!pstatus_data || !(pstatus_data->mfd)) {
+		pr_err("%s: mfd not available\n", __func__);
+		return;
+	}
+
+	pdata = dev_get_platdata(&pstatus_data->mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("%s: Panel data not available\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+							panel_data);
+	if (!ctrl_pdata || !ctrl_pdata->check_status) {
+		pr_err("%s: DSI ctrl or status_check callback not available\n",
+								__func__);
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(pstatus_data->mfd);
+	ctl = mfd_to_ctl(pstatus_data->mfd);
+
+	if (!ctl) {
+		pr_err("%s: Display is off\n", __func__);
+		return;
+	}
+
+	if (ctl->shared_lock)
+		mutex_lock(ctl->shared_lock);
+	mutex_lock(&mdp5_data->ov_lock);
+
+	if (pstatus_data->mfd->shutdown_pending) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		pr_err("%s: DSI turning off, avoiding BTA status check\n",
+							__func__);
+		return;
+	}
+
+	/*
+	 * For command mode panels, we return from the pan display
+	 * IOCTL on the vsync interrupt. So, after the vsync interrupt,
+	 * while DMA_P is still in progress, if the panel stops responding
+	 * and we trigger BTA before DMA_P finishes, then the DSI
+	 * FIFO will not be cleared since the DSI data bus control
+	 * doesn't come back to the host after BTA. This may leave the
+	 * display reset incomplete. Hence, wait for DMA_P to finish
+	 * for command mode panels before triggering BTA.
+	 */
+	if (ctl->wait_pingpong)
+		ctl->wait_pingpong(ctl, NULL);
+
+	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	ret = ctrl_pdata->check_status(ctrl_pdata);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	mutex_unlock(&mdp5_data->ov_lock);
+	if (ctl->shared_lock)
+		mutex_unlock(ctl->shared_lock);
+
+	if ((pstatus_data->mfd->panel_power_on)) {
+		if (ret > 0) {
+			schedule_delayed_work(&pstatus_data->check_status,
+				msecs_to_jiffies(interval));
+		} else {
+			char *envp[2] = {"PANEL_ALIVE=0", NULL};
+			pdata->panel_info.panel_dead = true;
+			ret = kobject_uevent_env(
+				&pstatus_data->mfd->fbi->dev->kobj,
+							KOBJ_CHANGE, envp);
+			pr_err("%s: Panel has gone bad, sending uevent - %s\n",
+							__func__, envp[0]);
+		}
+	}
+}
diff --git a/drivers/video/msm/mdss/dsi_status_v2.c b/drivers/video/msm/mdss/dsi_status_v2.c
index 565401d..f0966cc 100644
--- a/drivers/video/msm/mdss/dsi_status_v2.c
+++ b/drivers/video/msm/mdss/dsi_status_v2.c
@@ -10,49 +10,26 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/fb.h>
-#include <linux/notifier.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/iopoll.h>
 #include <linux/kobject.h>
 #include <linux/string.h>
 #include <linux/sysfs.h>
 
-#include "mdss_fb.h"
 #include "mdss_dsi.h"
-#include "mdss_panel.h"
 #include "mdp3_ctrl.h"
 
-#define STATUS_CHECK_INTERVAL 5000
-
-/**
- * dsi_status_data - Stores all the data necessary for this module
- * @fb_notif: Used to egister for the fb events
- * @live_status: Delayed worker structure, used to associate the
- * delayed worker function
- * @mfd: Used to store the msm_fb_data_type received when the notifier
- * call back happens
- * @root: Stores the dir created by debuugfs
- * @debugfs_reset_panel: The debugfs variable used to inject errors
+/*
+ * mdp3_check_dsi_ctrl_status() - Check MDP3 DSI controller status periodically.
+ * @work     : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls the check_status API on the DSI controller to send a
+ * BTA command. If the controller fails to acknowledge the BTA, it sends the
+ * PANEL_ALIVE=0 status to the HAL layer.
  */
-
-struct dsi_status_data {
-	struct notifier_block fb_notifier;
-	struct delayed_work check_status;
-	struct msm_fb_data_type *mfd;
-	uint32_t check_interval;
-};
-struct dsi_status_data *pstatus_data;
-static uint32_t interval = STATUS_CHECK_INTERVAL;
-
-void check_dsi_ctrl_status(struct work_struct *work)
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+				uint32_t interval)
 {
 	struct dsi_status_data *pdsi_status = NULL;
 	struct mdss_panel_data *pdata = NULL;
@@ -61,9 +38,10 @@
 	int ret = 0;
 
 	pdsi_status = container_of(to_delayed_work(work),
-		struct dsi_status_data, check_status);
-	if (!pdsi_status) {
-		pr_err("%s: DSI status data not available\n", __func__);
+	struct dsi_status_data, check_status);
+
+	if (!pdsi_status || !(pdsi_status->mfd)) {
+		pr_err("%s: mfd not available\n", __func__);
 		return;
 	}
 
@@ -72,16 +50,23 @@
 		pr_err("%s: Panel data not available\n", __func__);
 		return;
 	}
+
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 							panel_data);
+
 	if (!ctrl_pdata || !ctrl_pdata->check_status) {
-		pr_err("%s: DSI ctrl or status_check callback not avilable\n",
+		pr_err("%s: DSI ctrl or status_check callback not available\n",
 								__func__);
 		return;
 	}
-	mdp3_session = pdsi_status->mfd->mdp.private1;
-	mutex_lock(&mdp3_session->lock);
 
+	mdp3_session = pdsi_status->mfd->mdp.private1;
+	if (!mdp3_session) {
+		pr_err("%s: Display is off\n", __func__);
+		return;
+	}
+
+	mutex_lock(&mdp3_session->lock);
 	if (!mdp3_session->status) {
 		pr_info("display off already\n");
 		mutex_unlock(&mdp3_session->lock);
@@ -94,107 +79,22 @@
 	if (!ret)
 		ret = ctrl_pdata->check_status(ctrl_pdata);
 	else
-		pr_err("wait_for_dma_done error\n");
-
+		pr_err("%s: wait_for_dma_done error\n", __func__);
 	mutex_unlock(&mdp3_session->lock);
 
 	if ((pdsi_status->mfd->panel_power_on)) {
 		if (ret > 0) {
 			schedule_delayed_work(&pdsi_status->check_status,
-				msecs_to_jiffies(pdsi_status->check_interval));
+						msecs_to_jiffies(interval));
 		} else {
 			char *envp[2] = {"PANEL_ALIVE=0", NULL};
 			pdata->panel_info.panel_dead = true;
 			ret = kobject_uevent_env(
-				&pdsi_status->mfd->fbi->dev->kobj,
-							KOBJ_CHANGE, envp);
+					&pdsi_status->mfd->fbi->dev->kobj,
+					KOBJ_CHANGE, envp);
 			pr_err("%s: Panel has gone bad, sending uevent - %s\n",
 							__func__, envp[0]);
 		}
 	}
 }
 
-/**
- * fb_notifier_callback() - Call back function for the fb_register_client()
- * notifying events
- * @self  : notifier block
- * @event : The event that was triggered
- * @data  : Of type struct fb_event
- *
- * - This function listens for FB_BLANK_UNBLANK and FB_BLANK_POWERDOWN events
- * - Based on the event the delayed work is either scheduled again after
- * PANEL_STATUS_CHECK_INTERVAL or cancelled
- */
-static int fb_event_callback(struct notifier_block *self,
-				unsigned long event, void *data)
-{
-	struct fb_event *evdata = (struct fb_event *)data;
-	struct dsi_status_data *pdata = container_of(self,
-				struct dsi_status_data, fb_notifier);
-	pdata->mfd = (struct msm_fb_data_type *)evdata->info->par;
-
-	if (event == FB_EVENT_BLANK && evdata) {
-		int *blank = evdata->data;
-		switch (*blank) {
-		case FB_BLANK_UNBLANK:
-			schedule_delayed_work(&pdata->check_status,
-			msecs_to_jiffies(STATUS_CHECK_INTERVAL));
-			break;
-		case FB_BLANK_POWERDOWN:
-			cancel_delayed_work(&pdata->check_status);
-			break;
-		}
-	}
-	return 0;
-}
-
-int __init mdss_dsi_status_init(void)
-{
-	int rc;
-
-	pstatus_data = kzalloc(sizeof(struct dsi_status_data),	GFP_KERNEL);
-	if (!pstatus_data) {
-		pr_err("%s: can't alloc mem\n", __func__);
-		rc = -ENOMEM;
-		return rc;
-	}
-
-	memset(pstatus_data, 0, sizeof(struct dsi_status_data));
-
-	pstatus_data->fb_notifier.notifier_call = fb_event_callback;
-
-	rc = fb_register_client(&pstatus_data->fb_notifier);
-	if (rc < 0) {
-		pr_err("%s: fb_register_client failed, returned with rc=%d\n",
-								__func__, rc);
-		kfree(pstatus_data);
-		return -EPERM;
-	}
-
-	pstatus_data->check_interval = interval;
-	pr_info("%s: DSI status check interval:%d\n", __func__, interval);
-
-	INIT_DELAYED_WORK(&pstatus_data->check_status, check_dsi_ctrl_status);
-
-	pr_debug("%s: DSI ctrl status thread initialized\n", __func__);
-
-	return rc;
-}
-
-void __exit mdss_dsi_status_exit(void)
-{
-	fb_unregister_client(&pstatus_data->fb_notifier);
-	cancel_delayed_work_sync(&pstatus_data->check_status);
-	kfree(pstatus_data);
-	pr_debug("%s: DSI ctrl status thread removed\n", __func__);
-}
-
-module_param(interval, uint, 0);
-MODULE_PARM_DESC(interval,
-	"Duration in milliseconds to send BTA command for checking"
-	"DSI status periodically");
-
-module_init(mdss_dsi_status_init);
-module_exit(mdss_dsi_status_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index ad7cb81..37134b4 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -738,9 +738,11 @@
 	if (context >= MDP3_IOMMU_CTX_MAX)
 		return -EINVAL;
 
+	mutex_lock(&mdp3_res->iommu_lock);
 	context_map = mdp3_res->iommu_contexts + context;
 	if (context_map->attached) {
 		pr_warn("mdp iommu already attached\n");
+		mutex_unlock(&mdp3_res->iommu_lock);
 		return 0;
 	}
 
@@ -749,6 +751,7 @@
 	iommu_attach_device(domain_map->domain, context_map->ctx);
 
 	context_map->attached = true;
+	mutex_unlock(&mdp3_res->iommu_lock);
 	return 0;
 }
 
@@ -761,9 +764,11 @@
 		context >= MDP3_IOMMU_CTX_MAX)
 		return -EINVAL;
 
+	mutex_lock(&mdp3_res->iommu_lock);
 	context_map = mdp3_res->iommu_contexts + context;
 	if (!context_map->attached) {
 		pr_warn("mdp iommu not attached\n");
+		mutex_unlock(&mdp3_res->iommu_lock);
 		return 0;
 	}
 
@@ -771,6 +776,7 @@
 	iommu_detach_device(domain_map->domain, context_map->ctx);
 	context_map->attached = false;
 
+	mutex_unlock(&mdp3_res->iommu_lock);
 	return 0;
 }
 
@@ -2266,6 +2272,7 @@
 	.panel_register_done = mdp3_panel_register_done,
 	.fb_stride = mdp3_fb_stride,
 	.fb_mem_alloc_fnc = mdp3_alloc,
+	.check_dsi_status = mdp3_check_dsi_ctrl_status,
 	};
 
 	struct mdp3_intr_cb underrun_cb = {
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
index 15aab59..137a1b8 100644
--- a/drivers/video/msm/mdss/mdp3.h
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -209,6 +209,8 @@
 int mdp3_misr_set(struct mdp_misr *misr_req);
 int mdp3_misr_get(struct mdp_misr *misr_resp);
 void mdp3_enable_regulator(int enable);
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+				uint32_t interval);
 
 #define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
 #define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
diff --git a/drivers/video/msm/mdss/mdp3_ppp.c b/drivers/video/msm/mdss/mdp3_ppp.c
index afa0b7c..462f1d8 100644
--- a/drivers/video/msm/mdss/mdp3_ppp.c
+++ b/drivers/video/msm/mdss/mdp3_ppp.c
@@ -863,24 +863,6 @@
 	if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
 		return 0;
 
-	if (req->flags & MDP_ROT_90) {
-		if (((req->dst_rect.h == 1) && ((req->src_rect.w != 1) ||
-			(req->dst_rect.w == req->src_rect.h))) ||
-			((req->dst_rect.w == 1) && ((req->src_rect.h != 1) ||
-			(req->dst_rect.h == req->src_rect.w)))) {
-			pr_err("mdp_ppp: error scaling when size is 1!\n");
-			return -EINVAL;
-		}
-	} else {
-		if (((req->dst_rect.w == 1) && ((req->src_rect.w != 1) ||
-			(req->dst_rect.h == req->src_rect.h))) ||
-			((req->dst_rect.h == 1) && ((req->src_rect.h != 1) ||
-			(req->dst_rect.w == req->src_rect.w)))) {
-			pr_err("mdp_ppp: error scaling when size is 1!\n");
-			return -EINVAL;
-		}
-	}
-
 	/* MDP width split workaround */
 	remainder = (req->dst_rect.w) % 16;
 	ret = ppp_get_bpp(req->dst.format, mfd->fb_imgType);
diff --git a/drivers/video/msm/mdss/mdp3_ppp.h b/drivers/video/msm/mdss/mdp3_ppp.h
index 9753e94..a0ad3c3 100644
--- a/drivers/video/msm/mdss/mdp3_ppp.h
+++ b/drivers/video/msm/mdss/mdp3_ppp.h
@@ -391,7 +391,7 @@
 uint32_t ppp_bpp(uint32_t type);
 uint32_t ppp_src_config(uint32_t type);
 uint32_t ppp_out_config(uint32_t type);
-uint32_t ppp_pack_pattern(uint32_t type);
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb);
 uint32_t ppp_dst_op_reg(uint32_t type);
 uint32_t ppp_src_op_reg(uint32_t type);
 bool ppp_per_p_alpha(uint32_t type);
diff --git a/drivers/video/msm/mdss/mdp3_ppp_data.c b/drivers/video/msm/mdss/mdp3_ppp_data.c
index e562ad3..5748842 100644
--- a/drivers/video/msm/mdss/mdp3_ppp_data.c
+++ b/drivers/video/msm/mdss/mdp3_ppp_data.c
@@ -88,6 +88,35 @@
 		CLR_G, CLR_R, 8),
 };
 
+const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+	[MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+		CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+		CLR_CB, 8),
+	[MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
+		CLR_CB, CLR_Y, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+};
+
 const uint32_t dst_op_reg[MDP_IMGTYPE_LIMIT] = {
 	[MDP_Y_CRCB_H2V2] = PPP_OP_DST_CHROMA_420,
 	[MDP_Y_CBCR_H2V2] = PPP_OP_DST_CHROMA_420,
@@ -1530,10 +1559,13 @@
 	return out_cfg_lut[type];
 }
 
-uint32_t ppp_pack_pattern(uint32_t type)
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb)
 {
 	if (MDP_IS_IMGTYPE_BAD(type))
 		return 0;
+	if (yuv2rgb)
+		return swapped_pack_patt_lut[type];
+
 	return pack_patt_lut[type];
 }
 
diff --git a/drivers/video/msm/mdss/mdp3_ppp_hwio.c b/drivers/video/msm/mdss/mdp3_ppp_hwio.c
index eb01d00..a25c2c7 100644
--- a/drivers/video/msm/mdss/mdp3_ppp_hwio.c
+++ b/drivers/video/msm/mdss/mdp3_ppp_hwio.c
@@ -486,7 +486,7 @@
 	return load_secondary_matrix(csc);
 }
 
-int config_ppp_src(struct ppp_img_desc *src)
+int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb)
 {
 	uint32_t val;
 
@@ -510,12 +510,12 @@
 	val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
 	val |= (src->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
 	PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT);
-	PPP_WRITEL(ppp_pack_pattern(src->color_fmt),
+	PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb),
 		MDP3_PPP_SRC_UNPACK_PATTERN1);
 	return 0;
 }
 
-int config_ppp_out(struct ppp_img_desc *dst)
+int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
 {
 	uint32_t val;
 	bool pseudoplanr_output = false;
@@ -534,7 +534,7 @@
 	if (pseudoplanr_output)
 		val |= PPP_DST_PLANE_PSEUDOPLN;
 	PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT);
-	PPP_WRITEL(ppp_pack_pattern(dst->color_fmt),
+	PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb),
 		MDP3_PPP_OUT_PACK_PATTERN1);
 
 	val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
@@ -573,7 +573,7 @@
 
 	PPP_WRITEL(ppp_src_config(bg->color_fmt),
 		MDP3_PPP_BG_FORMAT);
-	PPP_WRITEL(ppp_pack_pattern(bg->color_fmt),
+	PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, 0),
 		MDP3_PPP_BG_UNPACK_PATTERN1);
 	return 0;
 }
@@ -1108,6 +1108,7 @@
 
 int config_ppp_op_mode(struct ppp_blit_op *blit_op)
 {
+	uint32_t yuv2rgb;
 	uint32_t ppp_operation_reg = 0;
 	int sv_slice, sh_slice;
 	int dv_slice, dh_slice;
@@ -1153,6 +1154,7 @@
 
 	config_ppp_csc(blit_op->src.color_fmt,
 		blit_op->dst.color_fmt, &ppp_operation_reg);
+	yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB;
 
 	if (blit_op->mdp_op & MDPOP_DITHER)
 		ppp_operation_reg |= PPP_OP_DITHER_EN;
@@ -1197,8 +1199,8 @@
 
 	config_ppp_blend(blit_op, &ppp_operation_reg);
 
-	config_ppp_src(&blit_op->src);
-	config_ppp_out(&blit_op->dst);
+	config_ppp_src(&blit_op->src, yuv2rgb);
+	config_ppp_out(&blit_op->dst, yuv2rgb);
 	PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
 	mb();
 	return 0;
diff --git a/drivers/video/msm/mdss/mdss_debug.h b/drivers/video/msm/mdss/mdss_debug.h
index e2c9edd..684bd45 100644
--- a/drivers/video/msm/mdss/mdss_debug.h
+++ b/drivers/video/msm/mdss/mdss_debug.h
@@ -16,6 +16,7 @@
 
 #include <stdarg.h>
 #include "mdss.h"
+#include "mdss_mdp_trace.h"
 
 #define MISR_POLL_SLEEP		2000
 #define MISR_POLL_TIMEOUT	32000
@@ -30,6 +31,13 @@
 #define MDSS_XLOG_TOUT_HANDLER(...)	\
 	mdss_xlog_tout_handler(__func__, ##__VA_ARGS__, XLOG_TOUT_DATA_LIMITER)
 
+#define ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define ATRACE_FUNC() ATRACE_BEGIN(__func__)
+
+#define ATRACE_INT(name, value) \
+	trace_mdp_trace_counter(current->tgid, name, value)
+
 #ifdef CONFIG_DEBUG_FS
 struct mdss_debug_base {
 	struct mdss_debug_data *mdd;
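The new ATRACE_* macros wrap the trace points declared in mdss_mdp_trace.h so MDSS code can emit systrace-style slices and counter samples. A minimal usage sketch (illustrative only; it assumes mdss_debug.h is included and mirrors the call sites added to mdss_mdp_ctl.c later in this patch):

static void example_perf_update(u32 clk_rate)
{
	ATRACE_BEGIN(__func__);			/* open a named slice for this tgid */
	ATRACE_INT("mdp_clk", clk_rate);	/* emit a counter sample */
	/* ... clock and bus programming ... */
	ATRACE_END(__func__);			/* close the slice */
}

Begin/end markers have to stay paired on every path, which is why the later hunks place ATRACE_BEGIN() after the early-return guards of each function.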
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index dd8eec5..e724d68 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -1455,6 +1455,8 @@
 		pinfo->new_fps = pinfo->mipi.frame_rate;
 	}
 
+	pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo);
+	pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
 	ctrl_pdata->disp_en_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
 		"qcom,platform-enable-gpio", 0);
 
@@ -1518,6 +1520,8 @@
 		if (!gpio_is_valid(ctrl_pdata->mode_gpio))
 			pr_info("%s:%d, mode gpio not specified\n",
 							__func__, __LINE__);
+	} else {
+		ctrl_pdata->mode_gpio = -EINVAL;
 	}
 
 	if (mdss_dsi_clk_init(ctrl_pdev, ctrl_pdata)) {
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index f6f6cd4..f79391a 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -233,6 +233,7 @@
 
 #define DSI_EV_PLL_UNLOCKED		0x0001
 #define DSI_EV_MDP_FIFO_UNDERFLOW	0x0002
+#define DSI_EV_DSI_FIFO_EMPTY		0x0003
 #define DSI_EV_MDP_BUSY_RELEASE		0x80000000
 
 struct mdss_dsi_ctrl_pdata {
@@ -302,6 +303,12 @@
 	struct dsi_buf rx_buf;
 };
 
+struct dsi_status_data {
+	struct notifier_block fb_notifier;
+	struct delayed_work check_status;
+	struct msm_fb_data_type *mfd;
+};
+
 int dsi_panel_device_register(struct device_node *pan_node,
 				struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 7abc705..74d2739 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -252,6 +252,8 @@
 
 	if (pinfo->mode == DSI_VIDEO_MODE) {
 		data = 0;
+		if (pinfo->last_line_interleave_en)
+			data |= BIT(31);
 		if (pinfo->pulse_mode_hsa_he)
 			data |= BIT(28);
 		if (pinfo->hfp_power_stop)
@@ -878,7 +880,7 @@
 
 	ctrl_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
 
-	if (rlen == 0) {
+	if (rlen <= 2) {
 		short_response = 1;
 		rx_byte = 4;
 	} else {
@@ -1384,6 +1386,9 @@
 			}
 		}
 
+		if (todo & DSI_EV_DSI_FIFO_EMPTY)
+			mdss_dsi_sw_reset_restore(ctrl);
+
 		if (todo & DSI_EV_MDP_BUSY_RELEASE) {
 			spin_lock_irqsave(&ctrl->mdp_lock, flag);
 			ctrl->mdp_busy = false;
@@ -1456,7 +1461,7 @@
 
 	status = MIPI_INP(base + 0x000c);/* DSI_FIFO_STATUS */
 
-	/* fifo underflow, overflow */
+	/* fifo underflow, overflow and empty */
 	if (status & 0xcccc4489) {
 		MIPI_OUTP(base + 0x000c, status);
 		pr_err("%s: status=%x\n", __func__, status);
@@ -1464,6 +1469,8 @@
 			dsi_send_events(ctrl, DSI_EV_MDP_FIFO_UNDERFLOW);
 			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0", "dsi1",
 						"edp", "hdmi", "panic");
+		if (status & 0x11110000) /* DLN_FIFO_EMPTY */
+			dsi_send_events(ctrl, DSI_EV_DSI_FIFO_EMPTY);
 	}
 }
 
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index f58e2a1..7575fe3 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -923,6 +923,8 @@
 		"qcom,mdss-dsi-hsa-power-mode");
 	pinfo->mipi.hbp_power_stop = of_property_read_bool(np,
 		"qcom,mdss-dsi-hbp-power-mode");
+	pinfo->mipi.last_line_interleave_en = of_property_read_bool(np,
+		"qcom,mdss-dsi-last-line-interleave");
 	pinfo->mipi.bllp_power_stop = of_property_read_bool(np,
 		"qcom,mdss-dsi-bllp-power-mode");
 	pinfo->mipi.eof_bllp_power_stop = of_property_read_bool(
diff --git a/drivers/video/msm/mdss/mdss_dsi_status.c b/drivers/video/msm/mdss/mdss_dsi_status.c
index 1dca08e..c68155a 100644
--- a/drivers/video/msm/mdss/mdss_dsi_status.c
+++ b/drivers/video/msm/mdss/mdss_dsi_status.c
@@ -30,36 +30,26 @@
 #include "mdss_panel.h"
 #include "mdss_mdp.h"
 
-#define STATUS_CHECK_INTERVAL 5000
+#define STATUS_CHECK_INTERVAL_MS 5000
+#define STATUS_CHECK_INTERVAL_MIN_MS 200
+#define DSI_STATUS_CHECK_DISABLE 1
 
-struct dsi_status_data {
-	struct notifier_block fb_notifier;
-	struct delayed_work check_status;
-	struct msm_fb_data_type *mfd;
-	uint32_t check_interval;
-};
+static uint32_t interval = STATUS_CHECK_INTERVAL_MS;
+static uint32_t dsi_status_disable = DSI_STATUS_CHECK_DISABLE;
 struct dsi_status_data *pstatus_data;
-static uint32_t interval = STATUS_CHECK_INTERVAL;
 
 /*
- * check_dsi_ctrl_status() - Check DSI controller status periodically.
+ * check_dsi_ctrl_status() - Reads the MFD structure and calls the
+ * platform-specific DSI controller status check function.
  * @work  : dsi controller status data
- *
- * This function calls check_status API on DSI controller to send the BTA
- * command. If DSI controller fails to acknowledge the BTA command, it sends
- * the PANEL_ALIVE=0 status to HAL layer.
  */
 static void check_dsi_ctrl_status(struct work_struct *work)
 {
 	struct dsi_status_data *pdsi_status = NULL;
-	struct mdss_panel_data *pdata = NULL;
-	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
-	struct mdss_overlay_private *mdp5_data = NULL;
-	struct mdss_mdp_ctl *ctl = NULL;
-	int ret = 0;
 
 	pdsi_status = container_of(to_delayed_work(work),
 		struct dsi_status_data, check_status);
+
 	if (!pdsi_status) {
 		pr_err("%s: DSI status data not available\n", __func__);
 		return;
@@ -70,73 +60,7 @@
 		return;
 	}
 
-	pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
-	if (!pdata) {
-		pr_err("%s: Panel data not available\n", __func__);
-		return;
-	}
-
-	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
-							panel_data);
-	if (!ctrl_pdata || !ctrl_pdata->check_status) {
-		pr_err("%s: DSI ctrl or status_check callback not available\n",
-								__func__);
-		return;
-	}
-
-	mdp5_data = mfd_to_mdp5_data(pdsi_status->mfd);
-	ctl = mfd_to_ctl(pdsi_status->mfd);
-
-	if (ctl->shared_lock)
-		mutex_lock(ctl->shared_lock);
-	mutex_lock(&mdp5_data->ov_lock);
-
-	if (pdsi_status->mfd->shutdown_pending) {
-		mutex_unlock(&mdp5_data->ov_lock);
-		if (ctl->shared_lock)
-			mutex_unlock(ctl->shared_lock);
-		pr_err("%s: DSI turning off, avoiding BTA status check\n",
-							__func__);
-		return;
-	}
-
-	/*
-	 * For the command mode panels, we return pan display
-	 * IOCTL on vsync interrupt. So, after vsync interrupt comes
-	 * and when DMA_P is in progress, if the panel stops responding
-	 * and if we trigger BTA before DMA_P finishes, then the DSI
-	 * FIFO will not be cleared since the DSI data bus control
-	 * doesn't come back to the host after BTA. This may cause the
-	 * display reset not to be proper. Hence, wait for DMA_P done
-	 * for command mode panels before triggering BTA.
-	 */
-	if (ctl->wait_pingpong)
-		ctl->wait_pingpong(ctl, NULL);
-
-	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
-
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	ret = ctrl_pdata->check_status(ctrl_pdata);
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-
-	mutex_unlock(&mdp5_data->ov_lock);
-	if (ctl->shared_lock)
-		mutex_unlock(ctl->shared_lock);
-
-	if ((pdsi_status->mfd->panel_power_on)) {
-		if (ret > 0) {
-			schedule_delayed_work(&pdsi_status->check_status,
-				msecs_to_jiffies(pdsi_status->check_interval));
-		} else {
-			char *envp[2] = {"PANEL_ALIVE=0", NULL};
-			pdata->panel_info.panel_dead = true;
-			ret = kobject_uevent_env(
-				&pdsi_status->mfd->fbi->dev->kobj,
-							KOBJ_CHANGE, envp);
-			pr_err("%s: Panel has gone bad, sending uevent - %s\n",
-							__func__, envp[0]);
-		}
-	}
+	pdsi_status->mfd->mdp.check_dsi_status(work, interval);
 }
 
 /*
@@ -156,6 +80,19 @@
 	struct fb_event *evdata = data;
 	struct dsi_status_data *pdata = container_of(self,
 				struct dsi_status_data, fb_notifier);
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pdata->mfd = evdata->info->par;
+	ctrl_pdata = container_of(dev_get_platdata(&pdata->mfd->pdev->dev),
+				struct mdss_dsi_ctrl_pdata, panel_data);
+	if (!ctrl_pdata) {
+		pr_err("%s: DSI ctrl not available\n", __func__);
+		return NOTIFY_BAD;
+	}
+	if (dsi_status_disable) {
+		pr_debug("%s: DSI status disabled\n", __func__);
+		return NOTIFY_DONE;
+	}
 
 	if (event == FB_EVENT_BLANK && evdata) {
 		int *blank = evdata->data;
@@ -166,16 +103,57 @@
 		switch (*blank) {
 		case FB_BLANK_UNBLANK:
 			schedule_delayed_work(&pdata->check_status,
-				msecs_to_jiffies(pdata->check_interval));
+				msecs_to_jiffies(interval));
 			break;
 		case FB_BLANK_POWERDOWN:
+		case FB_BLANK_HSYNC_SUSPEND:
+		case FB_BLANK_VSYNC_SUSPEND:
+		case FB_BLANK_NORMAL:
 			cancel_delayed_work(&pdata->check_status);
 			break;
+		default:
+			pr_err("Unknown case in FB_EVENT_BLANK event\n");
+			break;
 		}
 	}
 	return 0;
 }
 
+static int param_dsi_status_disable(const char *val, struct kernel_param *kp)
+{
+	int ret = 0;
+	int int_val;
+
+	ret = kstrtos32(val, 0, &int_val);
+	if (ret)
+		return ret;
+
+	pr_info("%s: Set DSI status disable to %d\n",
+			__func__, int_val);
+	*((int *)kp->arg) = int_val;
+	return ret;
+}
+
+static int param_set_interval(const char *val, struct kernel_param *kp)
+{
+	int ret = 0;
+	int int_val;
+
+	ret = kstrtos32(val, 0, &int_val);
+	if (ret)
+		return ret;
+	if (int_val < STATUS_CHECK_INTERVAL_MIN_MS) {
+		pr_err("%s: Invalid value %d used, ignoring\n",
+						__func__, int_val);
+		ret = -EINVAL;
+	} else {
+		pr_info("%s: Set check interval to %d msecs\n",
+						__func__, int_val);
+		*((int *)kp->arg) = int_val;
+	}
+	return ret;
+}
+
 int __init mdss_dsi_status_init(void)
 {
 	int rc = 0;
@@ -196,7 +174,6 @@
 		return -EPERM;
 	}
 
-	pstatus_data->check_interval = interval;
 	pr_info("%s: DSI status check interval:%d\n", __func__,	interval);
 
 	INIT_DELAYED_WORK(&pstatus_data->check_status, check_dsi_ctrl_status);
@@ -214,11 +191,17 @@
 	pr_debug("%s: DSI ctrl status work queue removed\n", __func__);
 }
 
-module_param(interval, uint, 0);
+module_param_call(interval, param_set_interval, param_get_uint,
+						&interval, 0644);
 MODULE_PARM_DESC(interval,
 		"Duration in milliseconds to send BTA command for checking"
 		"DSI status periodically");
 
+module_param_call(dsi_status_disable, param_dsi_status_disable, param_get_uint,
+						&dsi_status_disable, 0644);
+MODULE_PARM_DESC(dsi_status_disable,
+		"Disable DSI status check");
+
 module_init(mdss_dsi_status_init);
 module_exit(mdss_dsi_status_exit);
 
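After this refactor mdss_dsi_status.c keeps only the fb notifier, the delayed work, and the two module parameters; the generation-specific BTA check is reached through the new check_dsi_status hook, registered from mdp3.c above and mdss_mdp.c below. A condensed sketch of the resulting call path, using only names that appear in this patch:

/*
 * FB_BLANK_UNBLANK
 *   -> schedule_delayed_work(&pstatus_data->check_status, interval)
 *   -> check_dsi_ctrl_status(work)                     (mdss_dsi_status.c)
 *   -> pdsi_status->mfd->mdp.check_dsi_status(work, interval)
 *        = mdss_check_dsi_ctrl_status()  for MDP5      (dsi_status_6g.c)
 *        = mdp3_check_dsi_ctrl_status()  for MDP3      (dsi_status_v2.c)
 *
 * Each backend issues BTA via ctrl_pdata->check_status() and either
 * reschedules itself after `interval` ms or marks the panel dead and
 * raises the PANEL_ALIVE=0 uevent.
 */

Both parameters are now registered through module_param_call() with mode 0644, so writes can be validated at runtime instead of being accepted only at load time.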
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 08ecf1d..c14f936 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -267,22 +267,30 @@
 
 static void mdss_fb_parse_dt(struct msm_fb_data_type *mfd)
 {
-	u32 data[2];
+	u32 data[2] = {0};
+	u32 panel_xres;
 	struct platform_device *pdev = mfd->pdev;
 
-	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,mdss-fb-split",
-				       data, 2))
-		return;
-	if (data[0] && data[1] &&
-	    (mfd->panel_info->xres == (data[0] + data[1]))) {
-		mfd->split_fb_left = data[0];
-		mfd->split_fb_right = data[1];
-		pr_info("split framebuffer left=%d right=%d\n",
-			mfd->split_fb_left, mfd->split_fb_right);
+	of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,mdss-fb-split", data, 2);
+
+	panel_xres = mfd->panel_info->xres;
+	if (data[0] && data[1]) {
+		if (mfd->split_display)
+			panel_xres *= 2;
+
+		if (panel_xres == data[0] + data[1]) {
+			mfd->split_fb_left = data[0];
+			mfd->split_fb_right = data[1];
+		}
 	} else {
-		mfd->split_fb_left = 0;
-		mfd->split_fb_right = 0;
+		if (mfd->split_display)
+			mfd->split_fb_left = mfd->split_fb_right = panel_xres;
+		else
+			mfd->split_fb_left = mfd->split_fb_right = 0;
 	}
+	pr_info("split framebuffer left=%d right=%d\n",
+		mfd->split_fb_left, mfd->split_fb_right);
 }
 
 static ssize_t mdss_fb_get_split(struct device *dev,
@@ -807,17 +815,17 @@
 {
 	struct mdss_panel_data *pdata;
 
+	mutex_lock(&mfd->bl_lock);
 	if (mfd->unset_bl_level && !mfd->bl_updated) {
 		pdata = dev_get_platdata(&mfd->pdev->dev);
 		if ((pdata) && (pdata->set_backlight)) {
-			mutex_lock(&mfd->bl_lock);
 			mfd->bl_level = mfd->unset_bl_level;
 			pdata->set_backlight(pdata, mfd->bl_level);
 			mfd->bl_level_old = mfd->unset_bl_level;
-			mutex_unlock(&mfd->bl_lock);
 			mfd->bl_updated = 1;
 		}
 	}
+	mutex_unlock(&mfd->bl_lock);
 }
 
 static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
@@ -872,7 +880,9 @@
 			mfd->op_enable = false;
 			curr_pwr_state = mfd->panel_power_on;
 			mfd->panel_power_on = false;
+			mutex_lock(&mfd->bl_lock);
 			mfd->bl_updated = 0;
+			mutex_unlock(&mfd->bl_lock);
 
 			ret = mfd->mdp.off_fnc(mfd);
 			if (ret)
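The reworked mdss_fb_parse_dt() above also handles split (dual-pipe) displays; the doubling of panel_xres suggests that panel_info->xres holds the per-controller width in that case. A hedged worked example of the resulting split values, under that assumption:

/*
 * split_display, xres = 1080, qcom,mdss-fb-split = <1080 1080>:
 *     1080 + 1080 == 2 * 1080  ->  split_fb_left = split_fb_right = 1080
 * split_display, property absent (data stays {0, 0}):
 *     split_fb_left = split_fb_right = xres       (default halves)
 * non-split display, property absent:
 *     split_fb_left = split_fb_right = 0          (no split framebuffer)
 */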
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 78cee7a..3416b9e 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -128,6 +128,7 @@
 	int (*splash_init_fnc)(struct msm_fb_data_type *mfd);
 	struct msm_sync_pt_data *(*get_sync_fnc)(struct msm_fb_data_type *mfd,
 				const struct mdp_buf_sync *buf_sync);
+	void (*check_dsi_status)(struct work_struct *work, uint32_t interval);
 	void *private1;
 };
 
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 7f44e36..bf151f4 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -56,6 +56,9 @@
 #include "mdss_panel.h"
 #include "mdss_debug.h"
 
+#define CREATE_TRACE_POINTS
+#include "mdss_mdp_trace.h"
+
 struct mdss_data_type *mdss_res;
 
 static int mdss_fb_mem_get_iommu_domain(void)
@@ -68,6 +71,7 @@
 	.fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
 	.panel_register_done = mdss_panel_register_done,
 	.fb_stride = mdss_mdp_fb_stride,
+	.check_dsi_status = mdss_check_dsi_ctrl_status,
 };
 
 #define DEFAULT_TOTAL_RGB_PIPES 3
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 69821d3..8e83f51 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -19,6 +19,7 @@
 #include <linux/msm_mdp.h>
 #include <linux/platform_device.h>
 #include <linux/notifier.h>
+#include <linux/kref.h>
 
 #include "mdss.h"
 #include "mdss_mdp_hwio.h"
@@ -297,6 +298,7 @@
 	u32 hist_cnt_time;
 	u32 frame_cnt;
 	struct completion comp;
+	struct completion first_kick;
 	u32 data[HIST_V_SIZE];
 	struct mutex hist_mutex;
 	spinlock_t hist_lock;
@@ -376,7 +378,8 @@
 	struct mdss_mdp_shared_reg_ctrl clk_ctrl;
 	struct mdss_mdp_shared_reg_ctrl clk_status;
 
-	atomic_t ref_cnt;
+	struct kref kref;
+
 	u32 play_cnt;
 	int pid;
 	bool is_handed_off;
@@ -437,6 +440,7 @@
 
 	struct mdss_data_type *mdata;
 	struct mutex ov_lock;
+	struct mutex dfps_lock;
 	struct mdss_mdp_ctl *ctl;
 	struct mdss_mdp_wb *wb;
 	struct list_head overlay_list;
@@ -686,7 +690,7 @@
 int mdss_mdp_data_check(struct mdss_mdp_data *data,
 			struct mdss_mdp_plane_sizes *ps);
 int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
-			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode);
+	     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation);
 int mdss_mdp_get_rau_strides(u32 w, u32 h, struct mdss_mdp_format_params *fmt,
 			       struct mdss_mdp_plane_sizes *ps);
 void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
@@ -710,6 +714,7 @@
 
 int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
 u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval);
 
 int mdss_panel_register_done(struct mdss_panel_data *pdata);
 int mdss_mdp_limited_lut_igc_config(struct mdss_mdp_ctl *ctl);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 9cf745e..4b7f89d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -23,6 +23,7 @@
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 #include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
 
 static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl);
 static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
@@ -338,8 +340,13 @@
 		struct mdss_panel_info *pinfo;
 
 		pinfo = &mixer->ctl->panel_data->panel_info;
-		fps = mdss_panel_get_framerate(pinfo);
-		v_total = mdss_panel_get_vtotal(pinfo);
+		if (pinfo->type == MIPI_VIDEO_PANEL) {
+			fps = pinfo->panel_max_fps;
+			v_total = pinfo->panel_max_vtotal;
+		} else {
+			fps = mdss_panel_get_framerate(pinfo);
+			v_total = mdss_panel_get_vtotal(pinfo);
+		}
 		xres = pinfo->xres;
 		is_fbc = pinfo->fbc.enabled;
 	} else {
@@ -459,8 +466,13 @@
 	if (!mixer->rotator_mode) {
 		if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
 			pinfo = &mixer->ctl->panel_data->panel_info;
-			fps = mdss_panel_get_framerate(pinfo);
-			v_total = mdss_panel_get_vtotal(pinfo);
+			if (pinfo->type == MIPI_VIDEO_PANEL) {
+				fps = pinfo->panel_max_fps;
+				v_total = pinfo->panel_max_vtotal;
+			} else {
+				fps = mdss_panel_get_framerate(pinfo);
+				v_total = mdss_panel_get_vtotal(pinfo);
+			}
 
 			if (pinfo->type == WRITEBACK_PANEL)
 				pinfo = NULL;
@@ -819,7 +831,7 @@
 
 	if (!ctl || !ctl->mdata)
 		return;
-
+	ATRACE_BEGIN(__func__);
 	mdata = ctl->mdata;
 	for (i = 0; i < mdata->nctl; i++) {
 		struct mdss_mdp_ctl *ctl;
@@ -833,8 +845,11 @@
 	bus_ib_quota = bw_sum_of_intfs;
 	bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs,
 		&mdss_res->ab_factor);
+	trace_mdp_perf_update_bus(bus_ab_quota, bus_ib_quota);
+	ATRACE_INT("bus_quota", bus_ib_quota);
 	mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
 	pr_debug("ab=%llu ib=%llu\n", bus_ab_quota, bus_ib_quota);
+	ATRACE_END(__func__);
 }
 
 /**
@@ -874,6 +889,7 @@
 
 	/*Release the bandwidth only if there are no transactions pending*/
 	if (!transaction_status) {
+		trace_mdp_cmd_release_bw(ctl->num);
 		ctl->cur_perf.bw_ctl = 0;
 		ctl->new_perf.bw_ctl = 0;
 		pr_debug("Release BW ctl=%d\n", ctl->num);
@@ -916,7 +932,7 @@
 
 	if (!ctl || !ctl->mdata)
 		return;
-
+	ATRACE_BEGIN(__func__);
 	mutex_lock(&mdss_mdp_ctl_lock);
 
 	mdata = ctl->mdata;
@@ -975,11 +991,13 @@
 		}
 
 		clk_rate  = mdss_mdp_select_clk_lvl(ctl, clk_rate);
+		ATRACE_INT("mdp_clk", clk_rate);
 		mdss_mdp_set_clk_rate(clk_rate);
 		pr_debug("update clk rate = %d HZ\n", clk_rate);
 	}
 
 	mutex_unlock(&mdss_mdp_ctl_lock);
+	ATRACE_END(__func__);
 }
 
 static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
@@ -1966,6 +1984,7 @@
 	if (!mixer)
 		return -ENODEV;
 
+	trace_mdp_mixer_update(mixer->num);
 	pr_debug("setup mixer=%d\n", mixer->num);
 
 	outsize = (mixer->roi.h << 16) | mixer->roi.w;
@@ -2071,6 +2090,8 @@
 
 		mixercfg |= stage << (3 * pipe->num);
 
+		trace_mdp_sspp_change(pipe);
+
 		pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
 					blend_op, fg_alpha, bg_alpha);
 		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
@@ -2483,8 +2504,12 @@
 		return 0;
 	}
 
+	ATRACE_BEGIN("wait_fnc");
 	if (ctl->wait_fnc)
 		ret = ctl->wait_fnc(ctl, NULL);
+	ATRACE_END("wait_fnc");
+
+	trace_mdp_commit(ctl);
 
 	mdss_mdp_ctl_perf_update(ctl, 0);
 
@@ -2551,13 +2576,16 @@
 
 	if (is_bw_released || mixer1_changed || mixer2_changed
 			|| ctl->force_screen_state) {
+		ATRACE_BEGIN("prepare_fnc");
 		if (ctl->prepare_fnc)
 			ret = ctl->prepare_fnc(ctl, arg);
+		ATRACE_END("prepare_fnc");
 		if (ret) {
 			pr_err("error preparing display\n");
 			goto done;
 		}
 
+		ATRACE_BEGIN("mixer_programming");
 		mdss_mdp_ctl_perf_update(ctl, 1);
 
 		if (mixer1_changed)
@@ -2573,20 +2601,29 @@
 					sctl->opmode);
 			sctl->flush_bits |= BIT(17);
 		}
+		ATRACE_END("mixer_programming");
 	}
 
-	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+	ATRACE_BEGIN("frame_ready");
+	if (!ctl->shared_lock)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+	ATRACE_END("frame_ready");
 
+	ATRACE_BEGIN("wait_pingpong");
 	if (ctl->wait_pingpong)
 		ctl->wait_pingpong(ctl, NULL);
+	ATRACE_END("wait_pingpong");
 
 	ctl->roi_bkup.w = ctl->roi.w;
 	ctl->roi_bkup.h = ctl->roi.h;
 
+	ATRACE_BEGIN("postproc_programming");
 	if (ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER)
 		/* postprocessing setup, including dspp */
 		mdss_mdp_pp_setup_locked(ctl);
+	ATRACE_END("postproc_programming");
 
+	ATRACE_BEGIN("flush_kickoff");
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl->flush_bits);
 	if (sctl) {
 		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
@@ -2603,6 +2640,7 @@
 		pr_warn("error displaying frame\n");
 
 	ctl->play_cnt++;
+	ATRACE_END("flush_kickoff");
 
 done:
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index c11b438..ea41159 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -519,7 +519,7 @@
 #define MDSS_MDP_REG_INTF_FRAME_COUNT			0x0AC
 #define MDSS_MDP_REG_INTF_LINE_COUNT			0x0B0
 #define MDSS_MDP_PANEL_FORMAT_RGB888			0x213F
-#define MDSS_MDP_PANEL_FORMAT_RGB666			0x212A
+#define MDSS_MDP_PANEL_FORMAT_RGB666			0x21AA
 
 enum mdss_mdp_pingpong_index {
 	MDSS_MDP_PINGPONG0,
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index e2a8d56..991eb06 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -16,6 +16,7 @@
 #include "mdss_mdp.h"
 #include "mdss_panel.h"
 #include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
 
 #define VSYNC_EXPIRE_TICK 4
 
@@ -24,7 +25,7 @@
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
 #define KOFF_TIMEOUT msecs_to_jiffies(84)
 
-#define STOP_TIMEOUT msecs_to_jiffies(16 * (VSYNC_EXPIRE_TICK + 2))
+#define STOP_TIMEOUT(hz) msecs_to_jiffies((1000 / hz) * (VSYNC_EXPIRE_TICK + 2))
 #define ULPS_ENTER_TIME msecs_to_jiffies(100)
 
 struct mdss_mdp_cmd_ctx {
@@ -354,6 +355,7 @@
 	} else
 		pr_err("%s: should not have pingpong interrupt!\n", __func__);
 
+	trace_mdp_cmd_pingpong_done(ctl, ctx->pp_num, ctx->koff_cnt);
 	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d kcnt=%d\n", __func__,
 		ctl->num, ctl->intf_num, ctx->pp_num, ctx->koff_cnt);
 
@@ -531,6 +533,8 @@
 		rc = wait_for_completion_timeout(
 				&ctx->pp_comp, KOFF_TIMEOUT);
 
+		trace_mdp_cmd_wait_pingpong(ctl->num, ctx->koff_cnt);
+
 		if (rc <= 0) {
 			WARN(1, "cmd kickoff timed out (%d) ctl=%d\n",
 						rc, ctl->num);
@@ -596,6 +600,7 @@
 	spin_lock_irqsave(&ctx->clk_lock, flags);
 	ctx->koff_cnt++;
 	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+	trace_mdp_cmd_kickoff(ctl->num, ctx->koff_cnt);
 
 	mdss_mdp_cmd_clk_on(ctx);
 
@@ -626,6 +631,7 @@
 	struct mdss_mdp_vsync_handler *tmp, *handle;
 	int need_wait = 0;
 	int ret = 0;
+	int hz;
 
 	ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
 	if (!ctx) {
@@ -645,8 +651,11 @@
 	}
 	spin_unlock_irqrestore(&ctx->clk_lock, flags);
 
+	hz = mdss_panel_get_framerate(&ctl->panel_data->panel_info);
+
 	if (need_wait)
-		if (wait_for_completion_timeout(&ctx->stop_comp, STOP_TIMEOUT)
+		if (wait_for_completion_timeout(&ctx->stop_comp,
+					STOP_TIMEOUT(hz))
 		    <= 0) {
 			WARN(1, "stop cmd time out\n");
 
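STOP_TIMEOUT is now derived from the panel frame rate instead of hard-coding a 60 Hz frame period. A worked example with the macro as defined above (VSYNC_EXPIRE_TICK is 4, so the multiplier is 6 frame periods; integer division):

/*
 * 60 Hz panel: (1000 / 60) * 6 = 16 * 6 =  96 ms   (matches the old constant)
 * 24 Hz panel: (1000 / 24) * 6 = 41 * 6 = 246 ms   (the old 96 ms was too short)
 */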
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index b07aab4..eff708c 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -22,6 +22,7 @@
 #include "mdss_mdp.h"
 #include "mdss_panel.h"
 #include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
 
 /* wait for at least 2 vsyncs for lowest refresh rate (24hz) */
 #define VSYNC_TIMEOUT_US 100000
@@ -493,6 +494,7 @@
 	ctl->underrun_cnt++;
 	MDSS_XLOG(ctl->num, ctl->underrun_cnt);
 	MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0", "dsi1", "edp", "hdmi", "panic");
+	trace_mdp_video_underrun_done(ctl->num, ctl->underrun_cnt);
 	pr_debug("display underrun detected for ctl=%d count=%d\n", ctl->num,
 			ctl->underrun_cnt);
 
@@ -632,9 +634,19 @@
 				usecs_to_jiffies(VSYNC_TIMEOUT_US));
 			WARN(rc <= 0, "timeout (%d) vsync interrupt on ctl=%d\n",
 				rc, ctl->num);
-			rc = 0;
-			video_vsync_irq_disable(ctl);
 
+			video_vsync_irq_disable(ctl);
+			/* Do not configure fps on vsync timeout */
+			if (rc <= 0)
+				return rc;
+
+			if (mdss_mdp_video_line_count(ctl) >=
+					pdata->panel_info.yres/2) {
+				pr_err("Too few lines left line_cnt = %d y_res/2 = %d\n",
+					mdss_mdp_video_line_count(ctl),
+					pdata->panel_info.yres/2);
+				return -EPERM;
+			}
 			rc = mdss_mdp_video_vfp_fps_update(ctl, new_fps);
 			if (rc < 0) {
 				pr_err("%s: Error during DFPS\n", __func__);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 27a7707..02e7b75 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -131,13 +131,17 @@
 	struct mdss_mdp_format_params *fmt;
 	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
 	u32 opmode = ctx->opmode;
+	bool rotation = false;
 	struct mdss_data_type *mdata;
 
 	pr_debug("wb_num=%d format=%d\n", ctx->wb_num, format);
 
+	if (ctx->rot90)
+		rotation = true;
+
 	mdss_mdp_get_plane_sizes(format, ctx->width, ctx->height,
 				 &ctx->dst_planes,
-				 ctx->opmode & MDSS_MDP_OP_BWC_EN);
+				 ctx->opmode & MDSS_MDP_OP_BWC_EN, rotation);
 
 	fmt = mdss_mdp_get_format_params(format);
 	if (!fmt) {
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 18e9f4a..52c220f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -277,7 +277,8 @@
 		 * mdp clock requirement
 		 */
 		if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
-			&& !pipe->bwc_mode && !pipe->src_fmt->tile)
+			&& !pipe->bwc_mode && !pipe->src_fmt->tile &&
+			!pipe->scale.enable_pxl_ext)
 			pipe->vert_deci++;
 		else
 			return -EPERM;
@@ -409,17 +410,33 @@
 	}
 
 	if (req->id == MSMFB_NEW_REQUEST) {
-		if (req->flags & MDP_OV_PIPE_FORCE_DMA)
-			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
-		else if (fmt->is_yuv || (req->flags & MDP_OV_PIPE_SHARE))
-			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
-		else
-			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+		switch (req->pipe_type) {
+		case PIPE_TYPE_VIG:
+			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			break;
+		case PIPE_TYPE_RGB:
+			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			break;
+		case PIPE_TYPE_DMA:
+			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+			break;
+		case PIPE_TYPE_AUTO:
+		default:
+			if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+				pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+			else if (fmt->is_yuv ||
+					(req->flags & MDP_OV_PIPE_SHARE))
+				pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			else
+				pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			break;
+		}
 
 		pipe = mdss_mdp_pipe_alloc(mixer, pipe_type);
 
 		/* VIG pipes can also support RGB format */
-		if (!pipe && pipe_type == MDSS_MDP_PIPE_TYPE_RGB) {
+		if ((req->pipe_type == PIPE_TYPE_AUTO) && !pipe &&
+			(pipe_type == MDSS_MDP_PIPE_TYPE_RGB)) {
 			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
 			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type);
 		}
@@ -573,14 +590,21 @@
 		}
 	}
 
-	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scale.enable_pxl_ext) {
+	/*
+	 * When pixel-extension scaling is enabled, the source crop and image
+	 * dimensions are already adjusted by the caller; skip halving them.
+	 */
+	if ((pipe->flags & MDP_DEINTERLACE)) {
 		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
 			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
 			pipe->src.x &= ~1;
-			pipe->src.w /= 2;
-			pipe->img_width /= 2;
+			if (!pipe->scale.enable_pxl_ext) {
+				pipe->src.w /= 2;
+				pipe->img_width /= 2;
+			}
 		} else {
-			pipe->src.h /= 2;
+			if (!pipe->scale.enable_pxl_ext)
+				pipe->src.h /= 2;
 			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
 			pipe->src.y &= ~1;
 		}
@@ -1060,7 +1084,9 @@
 	mdss_mdp_display_commit(ctl, NULL);
 	mdss_mdp_display_wait4comp(ctl);
 
+	ATRACE_BEGIN("sspp_programming");
 	__overlay_queue_pipes(mfd);
+	ATRACE_END("sspp_programming");
 
 	mdss_mdp_display_commit(ctl, NULL);
 	mdss_mdp_display_wait4comp(ctl);
@@ -1074,9 +1100,14 @@
 	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
 	int ret = 0;
 	int sd_in_pipe = 0;
+	bool need_cleanup = false;
 
-	if (ctl->shared_lock)
+	ATRACE_BEGIN(__func__);
+	if (ctl->shared_lock) {
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
 		mutex_lock(ctl->shared_lock);
+	}
 
 	mutex_lock(&mdp5_data->ov_lock);
 	mutex_lock(&mfd->lock);
@@ -1100,7 +1131,9 @@
 			mdp5_data->sd_enabled = 0;
 	}
 
-	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+	if (!ctl->shared_lock)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 	if (data)
@@ -1113,17 +1146,27 @@
 	list_for_each_entry(pipe, &mdp5_data->pipes_cleanup, cleanup_list) {
 		mdss_mdp_pipe_queue_data(pipe, NULL);
 		mdss_mdp_mixer_pipe_unstage(pipe);
+		need_cleanup = true;
 	}
 
+	ATRACE_BEGIN("sspp_programming");
 	ret = __overlay_queue_pipes(mfd);
+	ATRACE_END("sspp_programming");
 
-	if (mfd->panel.type == WRITEBACK_PANEL)
+	if (mfd->panel.type == WRITEBACK_PANEL) {
+		ATRACE_BEGIN("wb_kickoff");
 		ret = mdss_mdp_wb_kickoff(mfd);
-	else
+		ATRACE_END("wb_kickoff");
+	} else {
+		ATRACE_BEGIN("display_commit");
 		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL);
+		ATRACE_END("display_commit");
+	}
 
-	atomic_set(&mfd->kickoff_pending, 0);
-	wake_up_all(&mfd->kickoff_wait_q);
+	if (!need_cleanup) {
+		atomic_set(&mfd->kickoff_pending, 0);
+		wake_up_all(&mfd->kickoff_wait_q);
+	}
 	mutex_unlock(&mfd->lock);
 
 	if (IS_ERR_VALUE(ret))
@@ -1132,7 +1175,9 @@
 	mutex_unlock(&mdp5_data->ov_lock);
 	mdss_mdp_overlay_update_pm(mdp5_data);
 
+	ATRACE_BEGIN("display_wait4comp");
 	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
+	ATRACE_END("display_wait4comp");
 	mutex_lock(&mdp5_data->ov_lock);
 
 	if (ret == 0) {
@@ -1147,14 +1192,19 @@
 
 	mdss_fb_update_notify_update(mfd);
 commit_fail:
+	ATRACE_BEGIN("overlay_cleanup");
 	mdss_mdp_overlay_cleanup(mfd);
+	ATRACE_END("overlay_cleanup");
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
-
+	if (need_cleanup) {
+		atomic_set(&mfd->kickoff_pending, 0);
+		wake_up_all(&mfd->kickoff_wait_q);
+	}
 	mutex_unlock(&mdp5_data->ov_lock);
 	if (ctl->shared_lock)
 		mutex_unlock(ctl->shared_lock);
-
+	ATRACE_END(__func__);
 	return ret;
 }
 
@@ -1266,8 +1316,8 @@
 		}
 	}
 
-	if (cnt == 0 && !list_empty(&mdp5_data->pipes_cleanup)) {
-		pr_debug("overlay release on fb%d called without commit!",
+	if (!mfd->ref_cnt && !list_empty(&mdp5_data->pipes_cleanup)) {
+		pr_debug("fb%d:: free pipes present in cleanup list",
 			mfd->index);
 		cnt++;
 	}
@@ -1389,8 +1439,13 @@
 
 	for (i = 0; i < mdata->ndma_pipes; i++) {
 		pipe = mdata->dma_pipes + i;
-		if (atomic_read(&pipe->ref_cnt) && pipe->mfd)
-			mdss_mdp_overlay_force_cleanup(pipe->mfd);
+
+		if (!mdss_mdp_pipe_map(pipe)) {
+			struct msm_fb_data_type *mfd = pipe->mfd;
+			mdss_mdp_pipe_unmap(pipe);
+			if (mfd)
+				mdss_mdp_overlay_force_cleanup(mfd);
+		}
 	}
 }
 
@@ -1735,10 +1790,12 @@
 		return -ENODEV;
 	}
 
+	mutex_lock(&mdp5_data->dfps_lock);
 	ret = snprintf(buf, PAGE_SIZE, "%d\n",
 		       pdata->panel_info.mipi.frame_rate);
 	pr_debug("%s: '%d'\n", __func__,
 		pdata->panel_info.mipi.frame_rate);
+	mutex_unlock(&mdp5_data->dfps_lock);
 
 	return ret;
 } /* dynamic_fps_sysfs_rda_dfps */
@@ -1773,6 +1830,7 @@
 		return count;
 	}
 
+	mutex_lock(&mdp5_data->dfps_lock);
 	if (dfps < 30) {
 		pr_err("Unsupported FPS. Configuring to min_fps = 30\n");
 		dfps = 30;
@@ -1789,9 +1847,11 @@
 	} else {
 		pr_err("Failed to configure '%d' FPS. rc = %d\n",
 							dfps, rc);
+		mutex_unlock(&mdp5_data->dfps_lock);
 		return rc;
 	}
 	pdata->panel_info.new_fps = dfps;
+	mutex_unlock(&mdp5_data->dfps_lock);
 	return count;
 } /* dynamic_fps_sysfs_wta_dfps */
 
@@ -2674,8 +2734,10 @@
 		(mfd->panel_info->type != DTV_PANEL)) {
 		rc = mdss_mdp_overlay_start(mfd);
 		if (!IS_ERR_VALUE(rc) &&
-			(mfd->panel_info->type != WRITEBACK_PANEL))
+			(mfd->panel_info->type != WRITEBACK_PANEL)) {
+			atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
 			rc = mdss_mdp_overlay_kickoff(mfd, NULL);
+		}
 	} else {
 		rc = mdss_mdp_ctl_setup(mdp5_data->ctl);
 		if (rc)
@@ -3022,6 +3084,7 @@
 	INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
 	INIT_LIST_HEAD(&mdp5_data->rot_proc_list);
 	mutex_init(&mdp5_data->ov_lock);
+	mutex_init(&mdp5_data->dfps_lock);
 	mdp5_data->hw_refresh = true;
 	mdp5_data->overlay_play_enable = true;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 50bee17..32b8cbf 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -37,7 +37,7 @@
 static DEFINE_MUTEX(mdss_mdp_sspp_lock);
 static DEFINE_MUTEX(mdss_mdp_smp_lock);
 
-static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe);
+static void mdss_mdp_pipe_free(struct kref *kref);
 static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp);
 static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write);
 static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
@@ -270,7 +270,7 @@
 			}
 		}
 		rc = mdss_mdp_get_plane_sizes(format, width, pipe->src.h,
-			&ps, 0);
+			&ps, 0, 0);
 		if (rc)
 			return rc;
 
@@ -481,21 +481,17 @@
 
 void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe)
 {
-	int tmp;
-
-	tmp = atomic_dec_return(&pipe->ref_cnt);
-
-	WARN(tmp < 0, "Invalid unmap with ref_cnt=%d", tmp);
-	if (tmp == 0)
-		mdss_mdp_pipe_free(pipe);
+	if (kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		WARN(1, "Unexpected free pipe during unmap");
+		mutex_unlock(&mdss_mdp_sspp_lock);
+	}
 }
 
 int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe)
 {
-	if (!atomic_inc_not_zero(&pipe->ref_cnt)) {
-		pr_err("attempting to map unallocated pipe (%d)", pipe->num);
+	if (!kref_get_unless_zero(&pipe->kref))
 		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -541,7 +537,7 @@
 
 	for (i = off; i < npipes; i++) {
 		pipe = pipe_pool + i;
-		if (atomic_cmpxchg(&pipe->ref_cnt, 0, 1) == 0) {
+		if (atomic_read(&pipe->kref.refcount) == 0) {
 			pipe->mixer = mixer;
 			break;
 		}
@@ -551,7 +547,6 @@
 	if (pipe && mdss_mdp_pipe_fetch_halt(pipe)) {
 		pr_err("%d failed because pipe is in bad state\n",
 			pipe->num);
-		atomic_dec(&pipe->ref_cnt);
 		return NULL;
 	}
 
@@ -559,13 +554,14 @@
 		pr_debug("type=%x   pnum=%d\n", pipe->type, pipe->num);
 		mutex_init(&pipe->pp_res.hist.hist_mutex);
 		spin_lock_init(&pipe->pp_res.hist.hist_lock);
+		kref_init(&pipe->kref);
 	} else if (pipe_share) {
 		/*
 		 * when there is no dedicated wfd blk, DMA pipe can be
 		 * shared as long as its attached to a writeback mixer
 		 */
 		pipe = mdata->dma_pipes + mixer->num;
-		atomic_inc(&pipe->ref_cnt);
+		kref_get(&pipe->kref);
 		pr_debug("pipe sharing for pipe=%d\n", pipe->num);
 	} else {
 		pr_err("no %d type pipes available\n", type);
@@ -587,7 +583,7 @@
 	} else if (pipe != &mdata->dma_pipes[mixer->num]) {
 		pr_err("Requested DMA pnum=%d not available\n",
 			mdata->dma_pipes[mixer->num].num);
-		mdss_mdp_pipe_unmap(pipe);
+		kref_put(&pipe->kref, mdss_mdp_pipe_free);
 		pipe = NULL;
 	} else {
 		pipe->mixer = mixer;
@@ -674,10 +670,13 @@
 	return NULL;
 }
 
-static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe)
+static void mdss_mdp_pipe_free(struct kref *kref)
 {
-	pr_debug("ndx=%x pnum=%d ref_cnt=%d\n", pipe->ndx, pipe->num,
-			atomic_read(&pipe->ref_cnt));
+	struct mdss_mdp_pipe *pipe;
+
+	pipe = container_of(kref, struct mdss_mdp_pipe, kref);
+
+	pr_debug("ndx=%x pnum=%d\n", pipe->ndx, pipe->num);
 
 	if (pipe->play_cnt) {
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
@@ -693,8 +692,6 @@
 	pipe->bwc_mode = 0;
 	pipe->mfd = NULL;
 	memset(&pipe->scale, 0, sizeof(struct mdp_scale_data));
-
-	return 0;
 }
 
 static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe,
@@ -802,19 +799,16 @@
 
 int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe)
 {
-	int tmp;
-
-	tmp = atomic_dec_return(&pipe->ref_cnt);
-
-	if (tmp != 0) {
-		pr_err("unable to free pipe %d while still in use (%d)\n",
-				pipe->num, tmp);
+	if (!kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		pr_err("unable to free pipe %d while still in use\n",
+				pipe->num);
 		return -EBUSY;
 	}
-	mdss_mdp_pipe_free(pipe);
+
+	mutex_unlock(&mdss_mdp_sspp_lock);
 
 	return 0;
-
 }
 
 /**
@@ -873,7 +867,7 @@
 
 	pipe->is_handed_off = true;
 	pipe->play_cnt = 1;
-	atomic_inc(&pipe->ref_cnt);
+	kref_init(&pipe->kref);
 
 error:
 	return rc;
@@ -889,6 +883,7 @@
 	u32 decimation;
 	struct mdss_mdp_img_rect sci, dst, src;
 	int ret = 0;
+	bool rotation = false;
 
 	pr_debug("pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
 			pipe->num, pipe->img_width, pipe->img_height,
@@ -897,8 +892,12 @@
 
 	width = pipe->img_width;
 	height = pipe->img_height;
+
+	if (pipe->flags & MDP_SOURCE_ROTATED_90)
+		rotation = true;
+
 	mdss_mdp_get_plane_sizes(pipe->src_fmt->format, width, height,
-			&pipe->src_planes, pipe->bwc_mode);
+			&pipe->src_planes, pipe->bwc_mode, rotation);
 
 	if (data != NULL) {
 		ret = mdss_mdp_data_check(data, &pipe->src_planes);
@@ -1136,6 +1135,13 @@
 		pipe->bg_color);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, 0);
+
+	if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA) {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_CONFIG, 0);
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+			mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, 0);
+	}
 
 	return 0;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 6b497bb..7159a0a 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -81,6 +81,8 @@
 #define MDSS_BLOCK_DISP_NUM	(MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
 
 #define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
+#define HIST_KICKOFF_WAIT_FRACTION 4
+
 /* hist collect state */
 enum {
 	HIST_UNKNOWN,
@@ -1092,39 +1094,51 @@
 		}
 	}
 
-	if (pipe->scale.enable_pxl_ext &&
-		pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+	if (pipe->scale.enable_pxl_ext) {
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			/*program x,y initial phase and phase step*/
+			writel_relaxed(pipe->scale.init_phase_x[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
+			writel_relaxed(pipe->scale.phase_step_x[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
+			writel_relaxed(pipe->scale.init_phase_x[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+			writel_relaxed(pipe->scale.phase_step_x[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
 
-		/*program x,y initial phase and phase step*/
-		writel_relaxed(pipe->scale.init_phase_x[0],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
-		writel_relaxed(pipe->scale.phase_step_x[0],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
-		writel_relaxed(pipe->scale.init_phase_x[1],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
-		writel_relaxed(pipe->scale.phase_step_x[1],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+			writel_relaxed(pipe->scale.init_phase_y[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
+			writel_relaxed(pipe->scale.phase_step_y[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
+			writel_relaxed(pipe->scale.init_phase_y[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+			writel_relaxed(pipe->scale.phase_step_y[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+		} else {
 
-		writel_relaxed(pipe->scale.init_phase_y[0],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
-		writel_relaxed(pipe->scale.phase_step_y[0],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
-		writel_relaxed(pipe->scale.init_phase_y[1],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
-		writel_relaxed(pipe->scale.phase_step_y[1],
-			pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
-
+			writel_relaxed(pipe->scale.phase_step_x[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+			writel_relaxed(pipe->scale.phase_step_y[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+			writel_relaxed(pipe->scale.init_phase_x[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_INIT_PHASE_X);
+			writel_relaxed(pipe->scale.init_phase_y[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
+		}
 		/*program pixel extn values for the SSPP*/
 		mdss_mdp_pipe_program_pixel_extn(pipe);
-	} else if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
-		writel_relaxed(phasex_step, pipe->base +
-		   MDSS_MDP_REG_SCALE_PHASE_STEP_X);
-		writel_relaxed(phasey_step, pipe->base +
-		   MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
-		writel_relaxed(init_phasex, pipe->base +
-			MDSS_MDP_REG_SCALE_INIT_PHASE_X);
-		writel_relaxed(init_phasey, pipe->base +
-			MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
 	} else {
 		writel_relaxed(phasex_step, pipe->base +
 		   MDSS_MDP_REG_SCALE_PHASE_STEP_X);
@@ -1351,6 +1365,7 @@
 			/* Kick off collection */
 			writel_relaxed(1, base + kick_base);
 			hist_info->col_state = HIST_START;
+			complete(&hist_info->first_kick);
 		}
 	}
 	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
@@ -1609,6 +1624,7 @@
 	u32 disp_num;
 	int i;
 	bool valid_mixers = true;
+	bool valid_ad_panel = true;
 	if ((!ctl->mfd) || (!mdss_pp_res))
 		return -EINVAL;
 
@@ -1629,7 +1645,13 @@
 		if (mixer_id[i] >= mdata->nad_cfgs)
 			valid_mixers = false;
 	}
-	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw)) {
+	valid_ad_panel = (ctl->mfd->panel_info->type != DTV_PANEL) &&
+		(((mdata->mdp_rev < MDSS_MDP_HW_REV_103) &&
+			(ctl->mfd->panel_info->type == WRITEBACK_PANEL)) ||
+		(ctl->mfd->panel_info->type != WRITEBACK_PANEL));
+
+	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw) &&
+		valid_ad_panel) {
 		ret = mdss_mdp_ad_setup(ctl->mfd);
 		if (ret < 0)
 			pr_warn("ad_setup(disp%d) returns %d", disp_num, ret);
@@ -1788,6 +1810,8 @@
 					&mdss_pp_res->dspp_hist[i].hist_lock);
 				init_completion(
 					&mdss_pp_res->dspp_hist[i].comp);
+				init_completion(
+					&mdss_pp_res->dspp_hist[i].first_kick);
 			}
 		}
 	}
@@ -1797,6 +1821,7 @@
 			mutex_init(&vig[i].pp_res.hist.hist_mutex);
 			spin_lock_init(&vig[i].pp_res.hist.hist_lock);
 			init_completion(&vig[i].pp_res.hist.comp);
+			init_completion(&vig[i].pp_res.hist.first_kick);
 		}
 		if (!mdata->pp_bus_hdl) {
 			pp_bus_pdata = &mdp_pp_bus_scale_table;
@@ -3006,6 +3031,7 @@
 	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 	hist_info->frame_cnt = req->frame_cnt;
 	INIT_COMPLETION(hist_info->comp);
+	INIT_COMPLETION(hist_info->first_kick);
 	hist_info->hist_cnt_read = 0;
 	hist_info->hist_cnt_sent = 0;
 	hist_info->hist_cnt_time = 0;
@@ -3131,6 +3157,7 @@
 	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 	mdss_mdp_hist_intr_req(&mdata->hist_intr, done_bit, false);
 	complete_all(&hist_info->comp);
+	complete_all(&hist_info->first_kick);
 	writel_relaxed(BIT(1), ctl_base);/* cancel */
 	ret = 0;
 exit:
@@ -3363,7 +3390,7 @@
 				struct pp_hist_col_info *hist_info,
 				char __iomem *ctl_base, u32 expect_sum)
 {
-	int wait_ret, ret = 0;
+	int kick_ret, wait_ret, ret = 0;
 	u32 timeout, sum;
 	char __iomem *v_base;
 	unsigned long flag;
@@ -3389,12 +3416,26 @@
 			pipe = container_of(res, struct mdss_mdp_pipe, pp_res);
 			pipe->params_changed++;
 		}
-		wait_ret = wait_for_completion_killable_timeout(
+		kick_ret = wait_for_completion_killable_timeout(
+				&(hist_info->first_kick), timeout /
+					HIST_KICKOFF_WAIT_FRACTION);
+		if (kick_ret != 0)
+			wait_ret = wait_for_completion_killable_timeout(
 				&(hist_info->comp), timeout);
 
 		mutex_lock(&hist_info->hist_mutex);
 		spin_lock_irqsave(&hist_info->hist_lock, flag);
-		if (wait_ret == 0) {
+		if (kick_ret == 0) {
+			ret = -ENODATA;
+			pr_debug("histogram kickoff not done yet");
+			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+			goto hist_collect_exit;
+		} else if (kick_ret < 0) {
+			ret = -EINTR;
+			pr_debug("histogram first kickoff interrupted");
+			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+			goto hist_collect_exit;
+		} else if (wait_ret == 0) {
 			ret = -ETIMEDOUT;
 			pr_debug("bin collection timedout, state %d",
 					hist_info->col_state);
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 8d6d41d..6953469 100755
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -23,6 +23,7 @@
 #include "mdss_mdp.h"
 #include "mdss_mdp_rotator.h"
 #include "mdss_fb.h"
+#include "mdss_debug.h"
 
 #define MAX_ROTATOR_SESSIONS 8
 
@@ -281,8 +282,9 @@
 		pr_err("unable to queue rot data\n");
 		goto error;
 	}
-
+	ATRACE_BEGIN("rotator_kickoff");
 	ret = mdss_mdp_rotator_kickoff(rot_ctl, rot, dst_data);
+	ATRACE_END("rotator_kickoff");
 
 	return ret;
 error:
@@ -642,7 +644,7 @@
 
 	if (rot_pipe) {
 		struct mdss_mdp_mixer *mixer = rot_pipe->mixer;
-		mdss_mdp_pipe_unmap(rot_pipe);
+		mdss_mdp_pipe_destroy(rot_pipe);
 		tmp = mdss_mdp_ctl_mixer_switch(mixer->ctl,
 				MDSS_MDP_WB_CTL_TYPE_BLOCK);
 		if (!tmp)
diff --git a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
index 77f6554..838f58f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
+++ b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
@@ -504,8 +504,8 @@
 	}
 	unlock_fb_info(mfd->fbi);
 
-	mfd->bl_updated = true;
 	mutex_lock(&mfd->bl_lock);
+	mfd->bl_updated = true;
 	mdss_fb_set_backlight(mfd, mfd->panel_info->bl_max >> 1);
 	mutex_unlock(&mfd->bl_lock);
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_trace.h b/drivers/video/msm/mdss/mdss_mdp_trace.h
new file mode 100644
index 0000000..33fe3a4
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_trace.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define TRACE_MDSS_MDP_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdss
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mdss_mdp_trace
+
+#include <linux/tracepoint.h>
+#include "mdss_mdp.h"
+
+DECLARE_EVENT_CLASS(mdp_sspp_template,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe),
+	TP_STRUCT__entry(
+			__field(u32, num)
+			__field(u32, play_cnt)
+			__field(u32, mixer)
+			__field(u32, stage)
+			__field(u32, flags)
+			__field(u32, format)
+			__field(u16, img_w)
+			__field(u16, img_h)
+			__field(u16, src_x)
+			__field(u16, src_y)
+			__field(u16, src_w)
+			__field(u16, src_h)
+			__field(u16, dst_x)
+			__field(u16, dst_y)
+			__field(u16, dst_w)
+			__field(u16, dst_h)
+	),
+	TP_fast_assign(
+			__entry->num = pipe->num;
+			__entry->play_cnt = pipe->play_cnt;
+			__entry->mixer = pipe->mixer->num;
+			__entry->stage = pipe->mixer_stage;
+			__entry->flags = pipe->flags;
+			__entry->format = pipe->src_fmt ?
+					pipe->src_fmt->format : -1;
+			__entry->img_w = pipe->img_width;
+			__entry->img_h = pipe->img_height;
+			__entry->src_x = pipe->src.x;
+			__entry->src_y = pipe->src.y;
+			__entry->src_w = pipe->src.w;
+			__entry->src_h = pipe->src.h;
+			__entry->dst_x = pipe->dst.x;
+			__entry->dst_y = pipe->dst.y;
+			__entry->dst_w = pipe->dst.w;
+			__entry->dst_h = pipe->dst.h;
+	),
+
+	TP_printk("num=%d mixer=%d play_cnt=%d flags=0x%x stage=%d format=%d img=%dx%d src=[%d,%d,%d,%d] dst=[%d,%d,%d,%d]",
+			__entry->num, __entry->mixer, __entry->play_cnt,
+			__entry->flags, __entry->stage,
+			__entry->format, __entry->img_w, __entry->img_h,
+			__entry->src_x, __entry->src_y,
+			__entry->src_w, __entry->src_h,
+			__entry->dst_x, __entry->dst_y,
+			__entry->dst_w, __entry->dst_h)
+);
+
+DEFINE_EVENT(mdp_sspp_template, mdp_sspp_set,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe)
+);
+
+DEFINE_EVENT(mdp_sspp_template, mdp_sspp_change,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe)
+);
+
+TRACE_EVENT(mdp_mixer_update,
+	TP_PROTO(u32 mixer_num),
+	TP_ARGS(mixer_num),
+	TP_STRUCT__entry(
+			__field(u32, mixer_num)
+	),
+	TP_fast_assign(
+			__entry->mixer_num = mixer_num;
+	),
+	TP_printk("mixer=%d",
+			__entry->mixer_num)
+);
+
+TRACE_EVENT(mdp_commit,
+	TP_PROTO(struct mdss_mdp_ctl *ctl),
+	TP_ARGS(ctl),
+	TP_STRUCT__entry(
+			__field(u32, num)
+			__field(u32, play_cnt)
+			__field(u32, clk_rate)
+			__field(u64, bandwidth)
+	),
+	TP_fast_assign(
+			__entry->num = ctl->num;
+			__entry->play_cnt = ctl->play_cnt;
+			__entry->clk_rate = ctl->new_perf.mdp_clk_rate;
+			__entry->bandwidth = ctl->new_perf.bw_ctl;
+	),
+	TP_printk("num=%d play_cnt=%d bandwidth=%llu clk_rate=%u",
+			__entry->num,
+			__entry->play_cnt,
+			__entry->bandwidth,
+			__entry->clk_rate)
+);
+
+TRACE_EVENT(mdp_video_underrun_done,
+	TP_PROTO(u32 ctl_num, u32 underrun_cnt),
+	TP_ARGS(ctl_num, underrun_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(u32, underrun_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->underrun_cnt = underrun_cnt;
+	),
+	TP_printk("ctl=%d count=%d",
+			__entry->ctl_num, __entry->underrun_cnt)
+);
+
+TRACE_EVENT(mdp_perf_update_bus,
+	TP_PROTO(unsigned long long ab_quota, unsigned long long ib_quota),
+	TP_ARGS(ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("ab=%llu ib=%llu",
+			__entry->ab_quota,
+			__entry->ib_quota)
+);
+
+TRACE_EVENT(mdp_cmd_pingpong_done,
+	TP_PROTO(struct mdss_mdp_ctl *ctl, u32 pp_num, int koff_cnt),
+	TP_ARGS(ctl, pp_num, koff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(u32, intf_num)
+			__field(u32, pp_num)
+			__field(int, koff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl->num;
+			__entry->intf_num = ctl->intf_num;
+			__entry->pp_num = pp_num;
+			__entry->koff_cnt = koff_cnt;
+	),
+	TP_printk("ctl num:%d intf_num:%d ctx:%d kickoff:%d",
+			__entry->ctl_num, __entry->intf_num, __entry->pp_num,
+			__entry->koff_cnt)
+);
+
+TRACE_EVENT(mdp_cmd_release_bw,
+	TP_PROTO(u32 ctl_num),
+	TP_ARGS(ctl_num),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+	),
+	TP_printk("ctl num:%d", __entry->ctl_num)
+);
+
+TRACE_EVENT(mdp_cmd_kickoff,
+	TP_PROTO(u32 ctl_num, int kickoff_cnt),
+	TP_ARGS(ctl_num, kickoff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(int, kickoff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->kickoff_cnt = kickoff_cnt;
+	),
+	TP_printk("kickoff ctl=%d cnt=%d",
+			__entry->ctl_num,
+			__entry->kickoff_cnt)
+);
+
+TRACE_EVENT(mdp_cmd_wait_pingpong,
+	TP_PROTO(u32 ctl_num, int kickoff_cnt),
+	TP_ARGS(ctl_num, kickoff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(int, kickoff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->kickoff_cnt = kickoff_cnt;
+	),
+	TP_printk("pingpong ctl=%d cnt=%d",
+			__entry->ctl_num,
+			__entry->kickoff_cnt)
+);
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+);
+
+TRACE_EVENT(mdp_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+);
+
+#endif /* if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 69506d4..9336582 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -320,7 +320,7 @@
 }
 
 int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
-			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode)
+	struct mdss_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation)
 {
 	struct mdss_mdp_format_params *fmt;
 	int i, rc;
@@ -374,9 +374,19 @@
 			u8 hmap[] = { 1, 2, 1, 2 };
 			u8 vmap[] = { 1, 1, 2, 2 };
 			u8 horiz, vert, stride_align, height_align;
+			u32 chroma_samp;
 
-			horiz = hmap[fmt->chroma_sample];
-			vert = vmap[fmt->chroma_sample];
+			chroma_samp = fmt->chroma_sample;
+
+			if (rotation) {
+				if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
+					chroma_samp = MDSS_MDP_CHROMA_H1V2;
+				else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
+					chroma_samp = MDSS_MDP_CHROMA_H2V1;
+			}
+
+			horiz = hmap[chroma_samp];
+			vert = vmap[chroma_samp];
 
 			switch (format) {
 			case MDP_Y_CR_CB_GH2V2:
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index c5ac72e..52c3e71 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -208,6 +208,7 @@
 	char hbp_power_stop;
 	char hsa_power_stop;
 	char eof_bllp_power_stop;
+	char last_line_interleave_en;
 	char bllp_power_stop;
 	char traffic_mode;
 	char frame_rate;
@@ -320,7 +321,8 @@
 	bool ulps_feature_enabled;
 	char dfps_update;
 	int new_fps;
-
+	int panel_max_fps;
+	int panel_max_vtotal;
 	u32 cont_splash_enabled;
 	u32 partial_update_enabled;
 	struct ion_handle *splash_ihdl;
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 276a48f..0958e07 100755
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -899,6 +899,12 @@
 	vcd_meta_buffer->offset = meta_buffers->offset;
 	vcd_meta_buffer->pmem_fd_iommu = meta_buffers->pmem_fd_iommu;
 
+	if (meta_buffers->count > MAX_META_BUFFERS) {
+		ERR("meta buffers maximum count reached, count = %d",
+			meta_buffers->count);
+		return false;
+	}
+
 	if (!vcd_get_ion_status()) {
 		pr_err("PMEM Not available\n");
 		return false;
@@ -1104,6 +1110,12 @@
 	vcd_h264_mv_buffer->pmem_fd = mv_data->pmem_fd;
 	vcd_h264_mv_buffer->offset = mv_data->offset;
 
+	if (mv_data->count > MAX_MV_BUFFERS) {
+		ERR("MV buffers maximum count reached, count = %d",
+			mv_data->count);
+		return false;
+	}
+
 	if (!vcd_get_ion_status()) {
 		pr_err("PMEM not available\n");
 		return false;
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 18513e3..74f6714 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -17,6 +17,7 @@
 	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_STRONGLY_ORDERED,
+	DMA_ATTR_SKIP_ZEROING,
 	DMA_ATTR_MAX,
 };
 
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 78e57cd..9975eef 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -73,7 +73,8 @@
 void dma_contiguous_reserve(phys_addr_t addr_limit);
 
 int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
-				  phys_addr_t limit, const char *name);
+				  phys_addr_t limit, const char *name,
+				  bool in_system);
 
 int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
 
@@ -94,7 +95,19 @@
 					 phys_addr_t base, phys_addr_t limit)
 {
 	int ret;
-	ret = dma_contiguous_reserve_area(size, &base, limit, NULL);
+	ret = dma_contiguous_reserve_area(size, &base, limit, NULL, true);
+	if (ret == 0)
+		ret = dma_contiguous_add_device(dev, base);
+	return ret;
+}
+
+static inline int dma_declare_contiguous_reserved(struct device *dev,
+					 phys_addr_t size,
+					 phys_addr_t base,
+					 phys_addr_t limit)
+{
+	int ret;
+	ret = dma_contiguous_reserve_area(size, &base, limit, NULL, false);
 	if (ret == 0)
 		ret = dma_contiguous_add_device(dev, base);
 	return ret;
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 85be7c3..a8e792b 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -133,6 +133,7 @@
 	u16 grph;				/* slimbus group handle */
 	unsigned long ch_mask;
 	wait_queue_head_t dai_wait;
+	bool bus_down_in_recovery;
 };
 
 #define WCD9XXX_CH(xport, xshift) \
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 65a3fe1..9bdf508 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -113,39 +113,37 @@
  * be called twice.
  */
 #define SDHCI_QUIRK2_SLOW_INT_CLR			(1<<2)
-/* Ignore CMD CRC errors for tuning commands */
-#define SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING		(1<<3)
 /*
  * If the base clock can be scalable, then there should be no further
  * clock dividing as the input clock itself will be scaled down to
  * required frequency.
  */
-#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK		(1<<4)
+#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK		(1<<3)
 /*
  * Dont use the max_discard_to in sdhci driver so that the maximum discard
  * unit gets picked by the mmc queue. Otherwise, it takes a long time for
  * secure discard kind of operations to complete.
  */
-#define SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE		(1<<5)
+#define SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE		(1<<4)
 /*
  * Ignore data timeout error for R1B commands as there will be no
  * data associated and the busy timeout value for these commands
  * could be lager than the maximum timeout value that controller
  * can handle.
  */
-#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD		(1<<6)
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD		(1<<5)
 /*
  * The preset value registers are not properly initialized by
  * some hardware and hence preset value must not be enabled for
  * such controllers.
  */
-#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE		(1<<7)
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE		(1<<6)
 /*
  * Some controllers define the usage of 0xF in data timeout counter
  * register (0x2E) which is actually a reserved bit as per
  * specification.
  */
-#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT		(1<<8)
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT		(1<<7)
 /*
  * This is applicable for controllers that advertize timeout clock
  * value in capabilities register (bit 5-0) as just 50MHz whereas the
@@ -158,7 +156,7 @@
  * will be used in such cases to avoid controller mulplication when timeout is
  * calculated based on the base clock.
  */
-#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 9)
+#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 8)
 
 /*
  * Some SDHC controllers are unable to handle data-end bit error in
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 17986b5..bc80bda 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -692,7 +692,8 @@
  * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
  * @groupid: Performance counter group ID
  * @countable: Countable to select within the group
- * @offset: Return offset of the reserved counter
+ * @offset: Return offset of the reserved LO counter
+ * @offset_hi: Return offset of the reserved HI counter
  *
  * Get an available performance counter from a specified groupid.  The offset
  * of the performance counter will be returned after successfully assigning
@@ -707,8 +708,9 @@
 	unsigned int groupid;
 	unsigned int countable;
 	unsigned int offset;
+	unsigned int offset_hi;
 /* private: reserved for future use */
-	unsigned int __pad[2]; /* For future binary compatibility */
+	unsigned int __pad; /* For future binary compatibility */
 };
 
 #define IOCTL_KGSL_PERFCOUNTER_GET \
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 00eba66..fd2cc6c 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -555,6 +555,23 @@
 };
 
 /**
+ * enum mdp_overlay_pipe_type - Different pipe type set by userspace
+ *
+ * @PIPE_TYPE_AUTO:    Not specified, pipe will be selected according to flags.
+ * @PIPE_TYPE_VIG:     VIG pipe.
+ * @PIPE_TYPE_RGB:     RGB pipe.
+ * @PIPE_TYPE_DMA:     DMA pipe.
+ * @PIPE_TYPE_MAX:     Used to track the maximum number of pipe types.
+ */
+enum mdp_overlay_pipe_type {
+	PIPE_TYPE_AUTO = 0,
+	PIPE_TYPE_VIG,
+	PIPE_TYPE_RGB,
+	PIPE_TYPE_DMA,
+	PIPE_TYPE_MAX,
+};
+
+/**
  * struct mdp_overlay - overlay surface structure
  * @src:	Source image information (width, height, format).
  * @src_rect:	Source crop rectangle, portion of image that will be fetched.
@@ -576,6 +593,7 @@
  *		The color should be in same format as the source image format.
  * @flags:	This is used to customize operation of overlay. See MDP flags
  *		for more information.
+ * @pipe_type:  Used to specify the type of overlay pipe.
  * @user_data:	DEPRECATED* Used to store user application specific information.
  * @bg_color:	Solid color used to fill the overlay surface when no source
  *		buffer is provided.
@@ -608,6 +626,7 @@
 	uint32_t blend_op;
 	uint32_t transp_mask;
 	uint32_t flags;
+	uint32_t pipe_type;
 	uint32_t id;
 	uint32_t user_data[6];
 	uint32_t bg_color;
diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h
index 35279bf..b09fc2d 100644
--- a/include/linux/msm_vidc_dec.h
+++ b/include/linux/msm_vidc_dec.h
@@ -58,6 +58,8 @@
 #define VDEC_MSG_EVT_HW_ERROR	(VDEC_MSG_BASE + 14)
 #define VDEC_MSG_EVT_INFO_CONFIG_CHANGED	(VDEC_MSG_BASE + 15)
 #define VDEC_MSG_EVT_INFO_FIELD_DROPPED	(VDEC_MSG_BASE + 16)
+#define VDEC_MSG_EVT_HW_OVERLOAD	(VDEC_MSG_BASE + 17)
+#define VDEC_MSG_EVT_MAX_CLIENTS	(VDEC_MSG_BASE + 18)
 
 /*Buffer flags bits masks.*/
 #define VDEC_BUFFERFLAG_EOS	0x00000001
diff --git a/include/linux/msm_vidc_enc.h b/include/linux/msm_vidc_enc.h
index 4ce3db1..36625a7 100644
--- a/include/linux/msm_vidc_enc.h
+++ b/include/linux/msm_vidc_enc.h
@@ -45,7 +45,8 @@
 #define VEN_MSG_RESUME	9
 #define VEN_MSG_STOP_READING_MSG	10
 #define VEN_MSG_LTRUSE_FAILED	    11
-
+#define VEN_MSG_HW_OVERLOAD	12
+#define VEN_MSG_MAX_CLIENTS	13
 
 /*Buffer flags bits masks*/
 #define VEN_BUFFLAG_EOS	0x00000001
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 7ba4148..52195c1 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -840,6 +840,7 @@
 	enum qpnp_adc_meas_timer_2		meas_interval2;
 	enum qpnp_adc_tm_channel_select		tm_channel_select;
 	enum qpnp_state_request			state_request;
+	enum qpnp_adc_calib_type		calib_type;
 	struct qpnp_vadc_linear_graph	adc_graph[2];
 };
 
@@ -883,6 +884,7 @@
 	enum qpnp_adc_scale_fn_type		adc_scale_fn;
 	enum qpnp_adc_fast_avg_ctl		fast_avg_setup;
 	enum qpnp_adc_hw_settle_time		hw_settle_time;
+	enum qpnp_adc_calib_type		calib_type;
 };
 
 /**
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 4ecacc7..44fe134 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -191,6 +191,20 @@
 };
 
 /**
+ * Maintain state for hvdcp external charger status
+ * DEFAULT	This is used when DCP is detected
+ * ACTIVE	This is used when ioctl is called to block LPM
+ * INACTIVE	This is used when ioctl is called to unblock LPM
+ */
+
+enum usb_ext_chg_status {
+	DEFAULT = 1,
+	ACTIVE,
+	INACTIVE,
+};
+
+
+/**
  * struct msm_otg_platform_data - platform device data
  *              for msm_otg driver.
  * @phy_init_seq: PHY configuration sequence. val, reg pairs
@@ -463,7 +477,7 @@
 	struct class *ext_chg_class;
 	struct device *ext_chg_device;
 	bool ext_chg_opened;
-	bool ext_chg_active;
+	enum usb_ext_chg_status ext_chg_active;
 	struct completion ext_chg_wait;
 	int ui_enabled;
 	bool pm_done;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 1531aa4..f760672 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1824,29 +1824,30 @@
 #define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \
 		(V4L2_CID_MPEG_MSM_VIDC_BASE + 25)
 enum v4l2_mpeg_vidc_extradata {
-	V4L2_MPEG_VIDC_EXTRADATA_NONE,
-	V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION,
-	V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO,
-	V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP,
-	V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP,
-	V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP,
-	V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING,
-	V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE,
-	V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW,
-	V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI,
-	V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD,
-	V4L2_MPEG_VIDC_EXTRADATA_AFD_UD,
-	V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO,
-	V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB,
-	V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER,
-	V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP,
-	V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM,
-	V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
-	V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP,
-	V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP,
-	V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO,
-	V4L2_MPEG_VIDC_EXTRADATA_LTR,
-	V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI,
+	V4L2_MPEG_VIDC_EXTRADATA_NONE = 0,
+	V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION = 1,
+	V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO = 2,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP = 3,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP = 4,
+	V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP = 5,
+	V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING = 6,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE = 7,
+	V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW = 8,
+	V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI = 9,
+	V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD = 10,
+	V4L2_MPEG_VIDC_EXTRADATA_AFD_UD = 11,
+	V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO = 12,
+	V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB = 13,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER = 14,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP = 15,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM = 16,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO = 17,
+	V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP = 18,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP = 19,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO = 20,
+	V4L2_MPEG_VIDC_EXTRADATA_LTR = 21,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI = 22,
+	V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA = 23,
 };
 
 #define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
@@ -2512,6 +2513,8 @@
 		(V4L2_EVENT_MSM_VIDC_START + 6)
 #define V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER \
 		(V4L2_EVENT_MSM_VIDC_START + 7)
+#define V4L2_EVENT_MSM_VIDC_HW_OVERLOAD (V4L2_EVENT_MSM_VIDC_START + 8)
+#define V4L2_EVENT_MSM_VIDC_MAX_CLIENTS (V4L2_EVENT_MSM_VIDC_START + 9)
 
 /* Payload for V4L2_EVENT_VSYNC */
 struct v4l2_event_vsync {
diff --git a/include/media/msm/vidc_init.h b/include/media/msm/vidc_init.h
index bcc0370..9c9a270 100644
--- a/include/media/msm/vidc_init.h
+++ b/include/media/msm/vidc_init.h
@@ -20,6 +20,7 @@
 #define VIDC_MAX_NUM_CLIENTS 4
 #define MAX_VIDEO_NUM_OF_BUFF 100
 #define MAX_META_BUFFERS 32
+#define MAX_MV_BUFFERS 32
 
 enum buffer_dir {
 	BUFFER_TYPE_INPUT,
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index a164705..d583601 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -40,10 +40,14 @@
 #define MAX_ACTUATOR_REGION 5
 #define MAX_ACTUATOR_INIT_SET 12
 #define MAX_ACTUATOR_REG_TBL_SIZE 8
+#define MAX_ACTUATOR_AF_TOTAL_STEPS 1024
 
 #define MOVE_NEAR 0
 #define MOVE_FAR  1
 
+#define MSM_ACTUATOR_MOVE_SIGNED_FAR -1
+#define MSM_ACTUATOR_MOVE_SIGNED_NEAR  1
+
 #define MAX_EEPROM_NAME 32
 
 #define MAX_AF_ITERATIONS 3
@@ -480,8 +484,10 @@
 	CFG_GET_ACTUATOR_INFO,
 	CFG_SET_ACTUATOR_INFO,
 	CFG_SET_DEFAULT_FOCUS,
-	CFG_SET_POSITION,
 	CFG_MOVE_FOCUS,
+	CFG_SET_POSITION,
+	CFG_ACTUATOR_POWERDOWN,
+	CFG_ACTUATOR_POWERUP,
 };
 
 enum actuator_type {
diff --git a/include/media/msm_media_info.h b/include/media/msm_media_info.h
index ddf9c8e..d46b505 100644
--- a/include/media/msm_media_info.h
+++ b/include/media/msm_media_info.h
@@ -79,6 +79,18 @@
 	COLOR_FMT_NV21,
 };
 
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+	(void)height;
+	(void)width;
+
+	/*
+	 * In the future, calculate the size based on the w/h but just
+	 * hardcode it for now since 8K satisfies all current use cases.
+	 */
+	return 8 * 1024;
+}
+
 static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
 {
 	unsigned int alignment, stride = 0;
@@ -158,8 +170,8 @@
 static inline unsigned int VENUS_BUFFER_SIZE(
 	int color_fmt, int width, int height)
 {
-	unsigned int uv_alignment;
-	unsigned int size = 0;
+	const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+	unsigned int uv_alignment = 0, size = 0;
 	unsigned int y_plane, uv_plane, y_stride,
 		uv_stride, y_sclines, uv_sclines;
 	if (!width || !height)
@@ -175,7 +187,7 @@
 		uv_alignment = 4096;
 		y_plane = y_stride * y_sclines;
 		uv_plane = uv_stride * uv_sclines + uv_alignment;
-		size = y_plane + uv_plane;
+		size = y_plane + uv_plane + extra_size;
 		size = MSM_MEDIA_ALIGN(size, 4096);
 		break;
 	default:
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 9028b1a..bbde6ef 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -179,6 +179,11 @@
 	unsigned int header_bits;
 };
 
+struct msm_vidc_stream_userdata_payload {
+	unsigned int type;
+	unsigned int data[1];
+};
+
 enum msm_vidc_extradata_type {
 	EXTRADATA_NONE = 0x00000000,
 	EXTRADATA_MB_QUANTIZATION = 0x00000001,
@@ -191,6 +196,7 @@
 	EXTRADATA_PANSCAN_WINDOW = 0x00000008,
 	EXTRADATA_RECOVERY_POINT_SEI = 0x00000009,
 	EXTRADATA_MPEG2_SEQDISP = 0x0000000D,
+	EXTRADATA_STREAM_USERDATA = 0x0000000E,
 	EXTRADATA_FRAME_QP = 0x0000000F,
 	EXTRADATA_FRAME_BITS_INFO = 0x00000010,
 	EXTRADATA_MULTISLICE_INFO = 0x7F100000,
@@ -214,4 +220,9 @@
 	FRAME_RECONSTRUCTION_APPROXIMATELY_CORRECT = 0x02,
 };
 
+enum msm_vidc_userdata_type {
+	MSM_VIDC_USERDATA_TYPE_FRAME = 0x1,
+	MSM_VIDC_USERDATA_TYPE_TOP_FIELD = 0x2,
+	MSM_VIDC_USERDATA_TYPE_BOTTOM_FIELD = 0x3,
+};
 #endif
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 3828221..30e7d06 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -240,6 +240,8 @@
 	VFE_READ_DMI_64BIT,
 	GET_SOC_HW_VER,
 	GET_MAX_CLK_RATE,
+	VFE_HW_UPDATE_LOCK,
+	VFE_HW_UPDATE_UNLOCK,
 };
 
 struct msm_vfe_cfg_cmd2 {
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index df9f9e7..59dcca9 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -15,6 +15,7 @@
 #define MSM_CPP_MAX_NUM_PLANES 3
 #define MSM_CPP_MAX_FRAME_LENGTH 1024
 #define MSM_CPP_MAX_FW_NAME_LEN 32
+#define MAX_FREQ_TBL 10
 
 enum msm_cpp_frame_type {
 	MSM_CPP_OFFLINE_FRAME,
@@ -126,6 +127,8 @@
 struct cpp_hw_info {
 	uint32_t cpp_hw_version;
 	uint32_t cpp_hw_caps;
+	unsigned long freq_tbl[MAX_FREQ_TBL];
+	uint32_t freq_tbl_count;
 };
 
 struct msm_vpe_frame_strip_info {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7..cada6a7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -368,7 +368,7 @@
 
 static inline void free_leaf(struct leaf *l)
 {
-	call_rcu_bh(&l->rcu, __leaf_free_rcu);
+	call_rcu(&l->rcu, __leaf_free_rcu);
 }
 
 static inline void free_leaf_info(struct leaf_info *leaf)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a8d7ed0..87f05b2 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -879,10 +879,12 @@
 	/* Copy the address and add cmsg data. */
 	if (family == AF_INET) {
 		sin = (struct sockaddr_in *) msg->msg_name;
-		sin->sin_family = AF_INET;
-		sin->sin_port = 0 /* skb->h.uh->source */;
-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
-		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+		if (sin) {
+			sin->sin_family = AF_INET;
+			sin->sin_port = 0 /* skb->h.uh->source */;
+			sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+		}
 
 		if (isk->cmsg_flags)
 			ip_cmsg_recv(msg, skb);
@@ -892,17 +894,19 @@
 		struct ipv6_pinfo *np = inet6_sk(sk);
 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
 		sin6 = (struct sockaddr_in6 *) msg->msg_name;
-		sin6->sin6_family = AF_INET6;
-		sin6->sin6_port = 0;
-		sin6->sin6_addr = ip6->saddr;
+		if (sin6) {
+			sin6->sin6_family = AF_INET6;
+			sin6->sin6_port = 0;
+			sin6->sin6_addr = ip6->saddr;
 
-		sin6->sin6_flowinfo = 0;
-		if (np->sndflow)
-			sin6->sin6_flowinfo =
-				*(__be32 *)ip6 & IPV6_FLOWINFO_MASK;
+			sin6->sin6_flowinfo = 0;
+			if (np->sndflow)
+				sin6->sin6_flowinfo =
+					*(__be32 *)ip6 & IPV6_FLOWINFO_MASK;
 
-		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
-							  IP6CB(skb)->iif);
+			sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+								  IP6CB(skb)->iif);
+		}
 
 		if (inet6_sk(sk)->rxopt.all)
 			pingv6_ops.datagram_recv_ctl(sk, msg, skb);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bc4888d..76e2afe 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1971,7 +1971,8 @@
 restart:
 	read_lock_bh(&table->tb6_lock);
 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
 			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 			ip6_del_rt(rt);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2a89d01..61d82fb 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -374,7 +374,7 @@
 #endif
 }
 
-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
 {
 	struct unix_sock *u = unix_sk(sk);
 	struct path path;
@@ -443,8 +443,6 @@
 
 	if (unix_tot_inflight)
 		unix_gc();		/* Garbage collect fds */
-
-	return 0;
 }
 
 static void init_peercred(struct sock *sk)
@@ -694,9 +692,10 @@
 	if (!sk)
 		return 0;
 
+	unix_release_sock(sk, 0);
 	sock->sk = NULL;
 
-	return unix_release_sock(sk, 0);
+	return 0;
 }
 
 static int unix_autobind(struct socket *sock)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c3adef8..cf82dbd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6780,9 +6780,11 @@
 			data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
 			len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);
 		}
-
-		return rdev->wiphy.vendor_commands[i].doit(&rdev->wiphy, wdev,
+		rdev->cur_cmd_info = info;
+		err = rdev->wiphy.vendor_commands[i].doit(&rdev->wiphy, wdev,
 							   data, len);
+		rdev->cur_cmd_info = NULL;
+		return err;
 	}
 
 	return -EOPNOTSUPP;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 185f849..72b20b1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1229,6 +1229,10 @@
 	struct context context;
 	int rc = 0;
 
+	/* An empty security context is never valid. */
+	if (!scontext_len)
+		return -EINVAL;
+
 	if (!ss_initialized) {
 		int i;
 
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index a5e0bb3..bbe1ac7 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -2834,7 +2834,7 @@
 		rc = snd_soc_dapm_force_enable_pin(&codec->dapm,
 			DAPM_MICBIAS_EXTERNAL_STANDALONE);
 	else {
-		if (msm8x10_wcd->micb_en_count > 0) {
+		if (msm8x10_wcd->micb_en_count > 1) {
 			msm8x10_wcd->micb_en_count--;
 			pr_debug("%s micb_en_count : %d", __func__,
 					msm8x10_wcd->micb_en_count);
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index 9cf37b4..5223070 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -84,7 +84,7 @@
 		struct snd_pcm_substream *substream,
 		struct snd_soc_dai *dai)
 {
-	int rv;
+	int rv = 0;
 	struct msm_hdmi_audio_codec_rx_data *codec_data =
 			dev_get_drvdata(dai->codec->dev);
 
@@ -92,7 +92,13 @@
 		codec_data->hdmi_core_pdev, 1);
 	if (IS_ERR_VALUE(rv)) {
 		dev_err(dai->dev,
-			"%s() HDMI core is not ready\n", __func__);
+			"%s() HDMI core is not ready (rv = %d)\n",
+			__func__, rv);
+	} else if (!rv) {
+		dev_err(dai->dev,
+			"%s() HDMI cable is not connected (rv = %d)\n",
+			__func__, rv);
+		rv = -EAGAIN;
 	}
 
 	return rv;
@@ -116,8 +122,14 @@
 		codec_data->hdmi_core_pdev, 1);
 	if (IS_ERR_VALUE(rv)) {
 		dev_err(dai->dev,
-			"%s() HDMI core is not ready\n", __func__);
+			"%s() HDMI core is not ready (rv = %d)\n",
+			__func__, rv);
 		return rv;
+	} else if (!rv) {
+		dev_err(dai->dev,
+			"%s() HDMI cable is not connected (rv = %d)\n",
+			__func__, rv);
+		return -EAGAIN;
 	}
 
 	switch (num_channels) {
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 61a0682..38d7901 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -4154,6 +4154,7 @@
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
 		(void) tapan_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
 					      dai->rate, dai->bit_width,
@@ -4162,7 +4163,8 @@
 	case SND_SOC_DAPM_POST_PMD:
 		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
 						dai->grph);
-		ret = tapan_codec_enable_slim_chmask(dai, false);
+		if (!dai->bus_down_in_recovery)
+			ret = tapan_codec_enable_slim_chmask(dai, false);
 		if (ret < 0) {
 			ret = wcd9xxx_disconnect_port(core,
 						      &dai->wcd9xxx_ch_list,
@@ -4177,6 +4179,7 @@
 			pm_runtime_put(core->dev->parent);
 			dev_dbg(codec->dev, "%s: unvote requested", __func__);
 		}
+		dai->bus_down_in_recovery = false;
 		break;
 	}
 	return ret;
@@ -4208,6 +4211,7 @@
 	dai = &tapan_p->dai[w->shift];
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
 		(void) tapan_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 					      dai->rate, dai->bit_width,
@@ -4216,7 +4220,8 @@
 	case SND_SOC_DAPM_POST_PMD:
 		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 						dai->grph);
-		ret = tapan_codec_enable_slim_chmask(dai, false);
+		if (!dai->bus_down_in_recovery)
+			ret = tapan_codec_enable_slim_chmask(dai, false);
 		if (ret < 0) {
 			ret = wcd9xxx_disconnect_port(core,
 						      &dai->wcd9xxx_ch_list,
@@ -4231,6 +4236,7 @@
 			pm_runtime_put(core->dev->parent);
 			dev_dbg(codec->dev, "%s: unvote requested", __func__);
 		}
+		dai->bus_down_in_recovery = false;
 		break;
 	}
 	return ret;
@@ -5732,6 +5738,7 @@
 	int rco_clk_rate;
 	struct snd_soc_codec *codec;
 	struct tapan_priv *tapan;
+	int count;
 
 	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
 	tapan = snd_soc_codec_get_drvdata(codec);
@@ -5788,6 +5795,9 @@
 
 	tapan->machine_codec_event_cb(codec, WCD9XXX_CODEC_EVENT_CODEC_UP);
 
+	for (count = 0; count < NUM_CODEC_DAIS; count++)
+		tapan->dai[count].bus_down_in_recovery = true;
+
 	mutex_unlock(&codec->mutex);
 	return ret;
 }
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 2b0c9d3..4c5d327 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -5144,6 +5144,7 @@
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
+		dai->bus_down_in_recovery = false;
 		taiko_codec_enable_int_port(dai, codec);
 		(void) taiko_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
@@ -5153,7 +5154,9 @@
 	case SND_SOC_DAPM_POST_PMD:
 		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
 						dai->grph);
-		ret = taiko_codec_enable_slim_chmask(dai, false);
+		if (!dai->bus_down_in_recovery)
+			ret = taiko_codec_enable_slim_chmask(dai, false);
+
 		if (ret < 0) {
 			ret = wcd9xxx_disconnect_port(core,
 						      &dai->wcd9xxx_ch_list,
@@ -5161,6 +5164,7 @@
 			pr_debug("%s: Disconnect RX port, ret = %d\n",
 				 __func__, ret);
 		}
+		dai->bus_down_in_recovery = false;
 		break;
 	}
 	return ret;
@@ -5210,6 +5214,7 @@
 		snd_soc_update_bits(codec,
 		TAIKO_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0xC, 0xC);
 		taiko_codec_enable_int_port(dai, codec);
+		dai->bus_down_in_recovery = false;
 		(void) taiko_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 					dai->rate, dai->bit_width,
@@ -5226,6 +5231,8 @@
 		/*Disable V&I sensing*/
 		snd_soc_update_bits(codec, TAIKO_A_SPKR_PROT_EN,
 				0x88, 0x00);
+
+		dai->bus_down_in_recovery = false;
 		break;
 	}
 out_vi:
@@ -5255,9 +5262,11 @@
 		__func__, w->name, event, w->shift);
 
 	dai = &taiko_p->dai[w->shift];
+
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
 		taiko_codec_enable_int_port(dai, codec);
+		dai->bus_down_in_recovery = false;
 		(void) taiko_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 					      dai->rate, dai->bit_width,
@@ -5266,7 +5275,9 @@
 	case SND_SOC_DAPM_POST_PMD:
 		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 						dai->grph);
-		ret = taiko_codec_enable_slim_chmask(dai, false);
+		if (!dai->bus_down_in_recovery)
+			ret = taiko_codec_enable_slim_chmask(dai, false);
+
 		if (ret < 0) {
 			ret = wcd9xxx_disconnect_port(core,
 						      &dai->wcd9xxx_ch_list,
@@ -5274,6 +5285,8 @@
 			pr_debug("%s: Disconnect RX port, ret = %d\n",
 				 __func__, ret);
 		}
+
+		dai->bus_down_in_recovery = false;
 		break;
 	}
 	return ret;
@@ -6485,6 +6498,9 @@
 
 	/* set MAD input MIC to DMIC1 */
 	{TAIKO_A_CDC_CONN_MAD, 0x0F, 0x08},
+
+	/* set DMIC CLK drive strength to 4mA */
+	{TAIKO_A_HDRIVE_OVERRIDE, 0x07, 0x01},
 };
 
 static void taiko_codec_init_reg(struct snd_soc_codec *codec)
@@ -6807,6 +6823,7 @@
 	struct snd_soc_codec *codec;
 	struct taiko_priv *taiko;
 	int rco_clk_rate;
+	int count;
 
 	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
 	taiko = snd_soc_codec_get_drvdata(codec);
@@ -6863,6 +6880,9 @@
 	if (ret)
 		pr_err("%s: Failed to setup irq: %d\n", __func__, ret);
 
+	for (count = 0; count < NUM_CODEC_DAIS; count++)
+		taiko->dai[count].bus_down_in_recovery = true;
+
 	mutex_unlock(&codec->mutex);
 	return ret;
 }
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 34cb21a..9bd1652 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -884,7 +884,8 @@
 					    0, WCD9XXX_JACK_MASK);
 			mbhc->hph_status &= ~(SND_JACK_HEADSET |
 						SND_JACK_LINEOUT |
-						SND_JACK_ANC_HEADPHONE);
+						SND_JACK_ANC_HEADPHONE |
+						SND_JACK_UNSUPPORTED);
 		}
 
 		/* Report insertion */
@@ -1648,9 +1649,9 @@
 		}
 	}
 
-	if (type == PLUG_TYPE_HEADSET && dvddio) {
-		if ((dvddio->_vdces > hs_max) ||
-		    (dvddio->_vdces > minv + WCD9XXX_THRESHOLD_MIC_THRESHOLD)) {
+	if (type == PLUG_TYPE_HEADSET) {
+		if (dvddio && ((dvddio->_vdces > hs_max) ||
+		   (dvddio->_vdces > minv + WCD9XXX_THRESHOLD_MIC_THRESHOLD))) {
 			pr_debug("%s: Headset with threshold on MIC detected\n",
 				 __func__);
 			if (mbhc->mbhc_cfg->micbias_enable_flags &
@@ -3050,11 +3051,14 @@
 			if (!mbhc->mbhc_cfg->detect_extn_cable &&
 			    retry == NUM_ATTEMPTS_TO_REPORT &&
 			    mbhc->current_plug == PLUG_TYPE_NONE) {
+				WCD9XXX_BCL_LOCK(mbhc->resmgr);
 				wcd9xxx_report_plug(mbhc, 1,
 						    SND_JACK_HEADPHONE);
+				WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
 			}
 		} else if (plug_type == PLUG_TYPE_HEADPHONE) {
 			pr_debug("Good headphone detected, continue polling\n");
+			WCD9XXX_BCL_LOCK(mbhc->resmgr);
 			if (mbhc->mbhc_cfg->detect_extn_cable) {
 				if (mbhc->current_plug != plug_type)
 					wcd9xxx_report_plug(mbhc, 1,
@@ -3063,9 +3067,11 @@
 				wcd9xxx_report_plug(mbhc, 1,
 						    SND_JACK_HEADPHONE);
 			}
+			WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
 		} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
 			pr_debug("%s: High HPH detected, continue polling\n",
 				  __func__);
+			WCD9XXX_BCL_LOCK(mbhc->resmgr);
 			if (mbhc->mbhc_cfg->detect_extn_cable) {
 				if (mbhc->current_plug != plug_type)
 					wcd9xxx_report_plug(mbhc, 1,
@@ -3074,6 +3080,7 @@
 				wcd9xxx_report_plug(mbhc, 1,
 						    SND_JACK_HEADPHONE);
 			}
+			WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
 		} else {
 			if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
 				pt_gnd_mic_swap_cnt++;
@@ -3499,6 +3506,7 @@
 	pr_debug("%s: enter\n", __func__);
 
 	WCD9XXX_BCL_LOCK(mbhc->resmgr);
+	mutex_lock(&mbhc->mbhc_lock);
 	mbhc_status = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_STATUS) & 0x3E;
 
 	if (mbhc->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) {
@@ -3671,6 +3679,7 @@
 
  done:
 	pr_debug("%s: leave\n", __func__);
+	mutex_unlock(&mbhc->mbhc_lock);
 	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
 	return IRQ_HANDLED;
 }
@@ -4518,6 +4527,7 @@
 	pr_debug("%s: enter event %s(%d)\n", __func__,
 		 wcd9xxx_get_event_string(event), event);
 
+	mutex_lock(&mbhc->mbhc_lock);
 	switch (event) {
 	/* MICBIAS usage change */
 	case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
@@ -4711,6 +4721,7 @@
 		WARN(1, "Unknown event %d\n", event);
 		ret = -EINVAL;
 	}
+	mutex_unlock(&mbhc->mbhc_lock);
 
 	pr_debug("%s: leave\n", __func__);
 
@@ -4920,17 +4931,19 @@
 				  wcd9xxx_mbhc_insert_work);
 	}
 
+	mutex_init(&mbhc->mbhc_lock);
+
 	/* Register event notifier */
 	mbhc->nblock.notifier_call = wcd9xxx_event_notify;
 	ret = wcd9xxx_resmgr_register_notifier(mbhc->resmgr, &mbhc->nblock);
 	if (ret) {
 		pr_err("%s: Failed to register notifier %d\n", __func__, ret);
+		mutex_destroy(&mbhc->mbhc_lock);
 		return ret;
 	}
 
 	wcd9xxx_init_debugfs(mbhc);
 
-
 	/* Disable Impedance detection by default for certain codec types */
 	if (mbhc->mbhc_cb &&
 	    mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_HELICON)
@@ -5015,6 +5028,8 @@
 err_insert_irq:
 	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
 
+	mutex_destroy(&mbhc->mbhc_lock);
+
 	pr_debug("%s: leave ret %d\n", __func__, ret);
 	return ret;
 }
@@ -5036,6 +5051,7 @@
 	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
 	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_right_ocp, mbhc);
 
+	mutex_destroy(&mbhc->mbhc_lock);
 	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
 	wcd9xxx_cleanup_debugfs(mbhc);
 }
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
index cf25798..91edaca 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ b/sound/soc/codecs/wcd9xxx-mbhc.h
@@ -368,6 +368,8 @@
 	struct dentry *debugfs_poke;
 	struct dentry *debugfs_mbhc;
 #endif
+
+	struct mutex mbhc_lock;
 };
 
 #define WCD9XXX_MBHC_CAL_SIZE(buttons, rload) ( \
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 9c01a08..23ed60a 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -46,6 +46,7 @@
 #define BTSCO_RATE_16KHZ 16000
 
 static int slim0_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int slim0_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
 static int hdmi_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
 
 #define SAMPLING_RATE_48KHZ 48000
@@ -1318,7 +1319,7 @@
 
 	pr_debug("%s()\n", __func__);
 	param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
-				   slim0_rx_bit_format);
+				   slim0_tx_bit_format);
 	rate->min = rate->max = 48000;
 	channels->min = channels->max = msm_slim_0_tx_ch;
 
@@ -2412,6 +2413,21 @@
 		.ignore_pmdown_time = 1,
 		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
 	},
+	{
+		.name = "VoWLAN",
+		.stream_name = "VoWLAN",
+		.cpu_dai_name   = "VoWLAN",
+		.platform_name  = "msm-pcm-voice",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_VOWLAN,
+	},
 	/* Backend BT/FM DAI Links */
 	{
 		.name = LPASS_BE_INT_BT_SCO_RX,
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index cdfd042..fce1940 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -177,8 +177,31 @@
 			pr_debug("%s: call q6asm_set_lrgain\n", __func__);
 			rc = q6asm_set_lrgain(prtd->audio_client,
 						volume_l, volume_r);
+			if (rc < 0) {
+				pr_err("%s: set lrgain command failed rc=%d\n",
+				__func__, rc);
+				return rc;
+			}
+			/*
+			 * set master gain to unity so that only lr gain
+			 * is effective
+			 */
+			rc = q6asm_set_volume(prtd->audio_client,
+						COMPRESSED_LR_VOL_MAX_STEPS);
 		} else {
 			pr_debug("%s: call q6asm_set_volume\n", __func__);
+			/*
+			 * set left and right channel gain to unity so that
+			 * only master gain is effective
+			 */
+			rc = q6asm_set_lrgain(prtd->audio_client,
+						COMPRESSED_LR_VOL_MAX_STEPS,
+						COMPRESSED_LR_VOL_MAX_STEPS);
+			if (rc < 0) {
+				pr_err("%s: set lrgain command failed rc=%d\n",
+				__func__, rc);
+				return rc;
+			}
 			rc = q6asm_set_volume(prtd->audio_client, volume_l);
 		}
 		if (rc < 0) {
@@ -545,12 +568,6 @@
 	int dir = IN, ret = 0;
 	struct audio_client *ac = prtd->audio_client;
 	uint32_t stream_index;
-	struct asm_softpause_params softpause = {
-		.enable = SOFT_PAUSE_ENABLE,
-		.period = SOFT_PAUSE_PERIOD,
-		.step = SOFT_PAUSE_STEP,
-		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
-	};
 	struct asm_softvolume_params softvol = {
 		.period = SOFT_VOLUME_PERIOD,
 		.step = SOFT_VOLUME_STEP,
@@ -580,15 +597,6 @@
 				prtd->session_id,
 				SNDRV_PCM_STREAM_PLAYBACK);
 
-	ret = msm_compr_set_volume(cstream, 0, 0);
-	if (ret < 0)
-		pr_err("%s : Set Volume failed : %d", __func__, ret);
-
-	ret = q6asm_set_softpause(ac, &softpause);
-	if (ret < 0)
-		pr_err("%s: Send SoftPause Param failed ret=%d\n",
-			__func__, ret);
-
 	ret = q6asm_set_softvolume(ac, &softvol);
 	if (ret < 0)
 		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 08448fe..b49ce46 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -31,6 +31,7 @@
 #include <linux/msm_audio_ion.h>
 
 #include <linux/of_device.h>
+#include <sound/tlv.h>
 #include <sound/pcm_params.h>
 
 #include "msm-pcm-q6-v2.h"
@@ -38,6 +39,10 @@
 
 static struct audio_locks the_locks;
 
+#define PCM_MASTER_VOL_MAX_STEPS	0x2000
+static const DECLARE_TLV_DB_LINEAR(msm_pcm_vol_gain, 0,
+				PCM_MASTER_VOL_MAX_STEPS);
+
 struct snd_msm {
 	struct snd_card *card;
 	struct snd_pcm *pcm;
@@ -819,6 +824,94 @@
 	.mmap		= msm_pcm_mmap,
 };
 
+static int msm_pcm_set_volume(struct msm_audio *prtd, uint32_t volume)
+{
+	int rc = 0;
+
+	if (prtd && prtd->audio_client) {
+		pr_debug("%s: channels %d volume 0x%x\n", __func__,
+				prtd->channel_mode, volume);
+		rc = q6asm_set_volume(prtd->audio_client, volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed rc=%d\n",
+					__func__, rc);
+		}
+	}
+	return rc;
+}
+
+static int msm_pcm_volume_ctl_get(struct snd_kcontrol *kcontrol,
+		      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_audio *prtd;
+
+	pr_debug("%s\n", __func__);
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd)
+		ucontrol->value.integer.value[0] = prtd->volume;
+	return 0;
+}
+
+static int msm_pcm_volume_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int rc = 0;
+	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream =
+		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	struct msm_audio *prtd;
+	int volume = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: volume : 0x%x\n", __func__, volume);
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		return 0;
+	}
+	prtd = substream->runtime->private_data;
+	if (prtd) {
+		rc = msm_pcm_set_volume(prtd, volume);
+		prtd->volume = volume;
+	}
+	return rc;
+}
+
+static int msm_pcm_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_pcm_volume *volume_info;
+	struct snd_kcontrol *kctl;
+
+	dev_dbg(rtd->dev, "%s: add volume control\n", __func__);
+	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+			NULL, 1, rtd->dai_link->be_id,
+			&volume_info);
+	if (ret < 0) {
+		pr_err("%s volume control failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	kctl = volume_info->kctl;
+	kctl->put = msm_pcm_volume_ctl_put;
+	kctl->get = msm_pcm_volume_ctl_get;
+	kctl->tlv.p = msm_pcm_vol_gain;
+	return 0;
+}
+
 static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -905,6 +998,10 @@
 		__func__, kctl->id.name);
 	kctl->put = msm_pcm_chmap_ctl_put;
 	kctl->get = msm_pcm_chmap_ctl_get;
+	ret = msm_pcm_add_volume_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm Volume Control %d\n",
+			__func__, ret);
 	return ret;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index a5c8f8d..4893990 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1419,34 +1419,45 @@
 				struct snd_ctl_elem_value *ucontrol)
 {
 	int ec_ref_port_id;
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	int mux = ucontrol->value.enumerated.item[0];
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
 	mutex_lock(&routing_lock);
 	switch (ucontrol->value.integer.value[0]) {
 	case 0:
 		msm_route_ec_ref_rx = 0;
-		ec_ref_port_id = SLIMBUS_0_RX;
+		ec_ref_port_id = AFE_PORT_INVALID;
 		break;
 	case 1:
 		msm_route_ec_ref_rx = 1;
-		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
+		ec_ref_port_id = SLIMBUS_0_RX;
 		break;
 	case 2:
 		msm_route_ec_ref_rx = 2;
-		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
+		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
 		break;
 	case 3:
 		msm_route_ec_ref_rx = 3;
-		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
 		break;
 	case 4:
 		msm_route_ec_ref_rx = 4;
-		ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
 		break;
 	case 5:
 		msm_route_ec_ref_rx = 5;
+		ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+		break;
+	case 6:
+		msm_route_ec_ref_rx = 6;
 		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
 		break;
 	default:
-		msm_route_ec_ref_rx = 6; /* NONE */
+		msm_route_ec_ref_rx = 0; /* NONE */
+		pr_err("%s EC ref rx %ld not valid\n",
+			__func__, ucontrol->value.integer.value[0]);
 		ec_ref_port_id = AFE_PORT_INVALID;
 		break;
 	}
@@ -1454,11 +1465,13 @@
 	pr_debug("%s: msm_route_ec_ref_rx = %d\n",
 	    __func__, msm_route_ec_ref_rx);
 	mutex_unlock(&routing_lock);
+	snd_soc_dapm_mux_update_power(widget, kcontrol, 1, mux, e);
 	return 0;
 }
 
-static const char *const ec_ref_rx[] = { "SLIM_RX", "I2S_RX", "PRI_MI2S_TX",
-	"SEC_MI2S_TX", "TERT_MI2S_TX", "QUAT_MI2S_TX", "PROXY_RX", "NONE"};
+static const char *const ec_ref_rx[] = { "None", "SLIM_RX", "I2S_RX",
+	"PRI_MI2S_TX", "SEC_MI2S_TX", "TERT_MI2S_TX", "QUAT_MI2S_TX",
+	"PROXY_RX"};
 static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
 	SOC_ENUM_SINGLE_EXT(8, ec_ref_rx),
 };