Merge "msm: krait-regulator: fix unnecessary calls to switch to LDO"
diff --git a/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt b/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
index 1b881f0..aa24dc6 100644
--- a/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
+++ b/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
@@ -203,21 +203,6 @@
 - qcom,cpr-uplift-speed-bin:	The speed bin value corresponding to one type of processor which needs to apply the
 				pvs voltage uplift workaround.
 				This is required if cpr-fuse-uplift-disable-sel is present.
-- qcom,cpr-quot-adjust-table:	Array of triples in which each triple indicates the speed bin of the CPU, the virtual
-				corner to use and the quotient adjustment.
-				The 3 elements in one triple are:
-				[0]: => the speed bin of the CPU.
-				[1]: => the virtual voltage corner to use.
-				[2]: => the quotient adjustment for the corresponding virtual corner.
-				If the speed bin in a triple is equal to the speed bin of the CPU, the adjustment would
-				be subtracted from the quotient value of the voltage corner when the CPU is running at
-				that virtual corner. Each virtual corner value must be in the range 1 to the number of
-				elements in qcom,cpr-corner-map.
-- qcom,cpr-corner-map:		Array of elements of fuse corner value for each virtual corner.
-				The location or 1-based index of an element in the list corresponds to
-				the virtual corner value. For example, the first element in the list is the fuse corner
-				value that virtual corner 1 maps to.
-				This is required if qcom,cpr-quot-adjust-table is present.
 - qcom,cpr-quotient-adjustment:	Array of three elements of CPR quotient adjustments for each corner.
 				The 3 quotient adjustments with index[0..2] are:
 				[0] => amount to add to the SVS quotient
@@ -230,6 +215,42 @@
 				Not Present: No such regulator.
 - vdd-apc-optional-sec-supply:	Present: Regulator of second highest priority to supply VDD APC power.
 				Not Present: No such regulator.
+- qcom,cpr-speed-bin-max-corners: Array of quintuples in which each quintuple maps a CPU speed bin and PVS version to
+				the maximum virtual voltage corner corresponding to the SVS, NORMAL and TURBO corners.
+				The 5 elements in one quintuple are:
+				[0]: => the speed bin of the CPU.
+				[1]: => the PVS version of the CPU.
+				[2]: => the max virtual voltage corner value corresponding to SVS corner for this speed bin.
+				[3]: => the max virtual voltage corner value corresponding to NORMAL corner for this speed bin.
+				[4]: => the max virtual voltage corner value corresponding to TURBO corner for this speed bin.
+				No CPR target quotient scaling is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the quintuples in this property. If the property is
+				specified, then quotient scaling is enabled for the TURBO corner. If this property is
+				not specified, then no quotient scaling can take place.
+- qcom,cpr-corner-map:		Array of elements of fuse corner value for each virtual corner.
+				The location or 1-based index of an element in the list corresponds to
+				the virtual corner value. For example, the first element in the list is the fuse corner
+				value that virtual corner 1 maps to.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-corner-frequency-map: Array of tuples in which a tuple describes a corner to application processor frequency
+				mapping.
+				The 2 elements in one tuple are:
+				[0]: => a virtual voltage corner.
+				[1]: => the application processor frequency in Hz corresponding to the virtual corner.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,pvs-version-fuse-sel:	Array of 4 elements to indicate where to read the pvs version of the processor,
+				and the fuse reading method.
+				The 4 elements with index[0..3] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => the number of bits;
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-quot-adjust-scaling-factor-max: The maximum allowed CPR target quotient scaling factor to use when
+				calculating the quotient adjustment for a given virtual voltage corner. It
+				corresponds to 'scaling' in this equation:
+				quot_adjust = (freq_turbo - freq_corner) * scaling / 1000.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
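+				As a purely illustrative, hypothetical example
+				(assuming the frequencies in the above equation are
+				expressed in MHz): with scaling = 650, freq_turbo =
+				1593.6 MHz and freq_corner = 998.4 MHz, the result is
+				quot_adjust = (1593.6 - 998.4) * 650 / 1000 = 386
+				(truncated to an integer).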
 
 Example:
 	apc_vreg_corner: regulator@f9018000 {
@@ -319,9 +340,25 @@
 		qcom,cpr-uplift-speed-bin = <1>;
 		qcom,speed-bin-fuse-sel = <22 0 3 0>;
 		qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3>;
-		qcom,cpr-quot-adjust-table = <1 1 0>, <1 2 0>, <1 3 0>,
-						<1 4 0>, <1 5 450>, <1 6 375>,
-						<1 7 300>, <1 8 225>, <1 9 187>,
-						<1 10 150>, <1 11 75>, <1 12 0>;
+		qcom,cpr-corner-frequency-map =
+				<1 300000000>,
+				<2 384000000>,
+				<3 600000000>,
+				<4 787200000>,
+				<5 998400000>,
+				<6 1094400000>,
+				<7 1190400000>,
+				<8 1305600000>,
+				<9 1344000000>,
+				<10 1401600000>,
+				<11 1497600000>,
+				<12 1593600000>;
+		qcom,pvs-version-fuse-sel = <22 4 2 0>;
+		qcom,cpr-speed-bin-max-corners =
+				<0 1 2 4 7>,
+				<1 1 2 4 12>,
+				<2 1 2 4 10>,
+				<5 1 2 4 14>;
+		qcom,cpr-quot-adjust-scaling-factor-max = <650>;
 	};
 
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
index 9f74225..e6e54bf 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -100,8 +100,8 @@
 			(Can be Fixed/Limiter/Bypass/Regulator)
 qcom,bimc,bw:		Bandwidth limit for a BIMC master using dual modes.
 			This bandwidth is used to calculate Grant count and
-			other parameters used in Limiter and Regular mode
-			for static BKE configuration. It is defined in KBps.
+			other parameters used in Limiter and Regular mode
+			for static BKE configuration. It is defined in KBytes/s.
 qcom,bimc,gp:		Grant Period for configuring a master in limiter
 			mode. This is an integer value in nano-seconds.
 qcom,bimc,thmp:		Medium threshold percentage for BIMC masters.
@@ -111,11 +111,12 @@
 			1 and 100.
 qcom,thresh:		Beyond this threshold frequency, the mode usage is
 			switched from mode specified by property qcom,mode
-			to the one specified by qcom,mode-thresh. In case the
-			requested IB value falls below this threshold, the mode
-			is switched back to qcom,mode. Frequency is specified in
-			KBps.
-
+			to the one specified by qcom,mode-thresh. These thresholds
+			can be set up in increasing order, so the requested IB is
+			evaluated at each threshold level before the decision is
+			made to switch QoS modes and apply the corresponding
+			qcom,bimc,bw limiting bandwidth as needed.
+			This is specified in KBytes/s.
 
 
 
diff --git a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
index 795af3b..2fbe4ca 100644
--- a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
+++ b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
@@ -12,6 +12,7 @@
 The required properties for PM-8x60 are:
 
 - compatible: "qcom,pm-8x60"
+- qcom,lpm-levels: phandle for associated lpm_levels device.
 
 The optional properties are:
 
@@ -39,4 +40,5 @@
 		reg = <0xfe800664 0x40>;
 		qcom,pc-mode = "tz_l2_int";
 		qcom,use-sync-timer;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
index cda437a..b88c3ce 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
@@ -6,7 +6,12 @@
 Required properties:
 - compatible:				Must be "qcom,mdss-dsi-ctrl"
 - cell-index:				Specifies the controller used among the two controllers.
-- reg:					offset and length of the register set for the device.
+- reg: 					Base address and length of the different register
+					region(s) required for DSI device functionality.
+- reg-names: 				A list of strings that map in order to the list of regs.
+					"dsi_ctrl" - MDSS DSI controller register region
+					"dsi_phy" - MDSS DSI PHY register region
+					"mmss_misc_phys" - Register region for MMSS DSI clamps
 - vdd-supply:				Phandle for vdd regulator device node.
 - vddio-supply:				Phandle for vdd-io regulator device node.
 - vdda-supply:				Phandle for vreg regulator device node.
@@ -52,7 +57,10 @@
 		compatible = "qcom,mdss-dsi-ctrl";
 		label = "MDSS DSI CTRL->0";
 		cell-index = <0>;
-		reg = <0xfd922800 0x600>;
+		reg = 	<0xfd922800 0x1f8>,
+			<0xfd922b00 0x2b0>,
+			<0xfd828000 0x108>;
+		reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
 		vdd-supply = <&pm8226_l15>;
 		vddio-supply = <&pm8226_l8>;
 		vdda-supply = <&pm8226_l4>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 4bc67ff..02d6df9 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -256,6 +256,7 @@
 					delay required by panel to reach functional.
 - qcom,mdss-dsi-rx-eot-ignore:		Boolean used to enable ignoring end of transmission packets.
 - qcom,mdss-dsi-tx-eot-append:		Boolean used to enable appending end of transmission packets.
+- qcom,ulps-enabled:			Boolean to enable support for Ultra Low Power State (ULPS) mode.
 
 Note, if a given optional qcom,* binding is not present, then the driver will configure
 the default values specified.
@@ -352,5 +353,6 @@
 		qcom,mdss-dsi-init-delay-us = <100>;
 		mdss-dsi-rx-eot-ignore;
 		mdss-dsi-tx-eot-append;
+		qcom,ulps-enabled;
 	};
 };
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index c2b963f..cfcc48b 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -27,6 +27,14 @@
 				to the respective VIG pipes. Number of xin ids
 				defined should match the number of offsets
 				defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-vig-clk-ctrl-offsets: Array of tuples describing the clock
+				control offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-vig-off
 - qcom,mdss-pipe-rgb-off:	Array of offsets for MDP source surface pipes of
 				type RGB, the offsets are calculated from
 				register "mdp_phys" defined in reg property.
@@ -42,6 +50,14 @@
 				to the respective RGB pipes. Number of xin ids
 				defined should match the number of offsets
 				defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-rgb-clk-ctrl-offsets: Array of tuples describing the clock
+				control offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-rgb-off
 - qcom,mdss-pipe-dma-off:	Array of offsets for MDP source surface pipes of
 				type DMA, the offsets are calculated from
 				register "mdp_phys" defined in reg property.
@@ -57,6 +73,14 @@
 				to the respective DMA pipes. Number of xin ids
 				defined should match the number of offsets
 				defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-dma-clk-ctrl-offsets: Array of tuples describing the clock
+				control offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-dma-off
 - qcom,mdss-smp-data:		Array of shared memory pool data. There should
 				be only two values in this property. The first
 				value corresponds to the number of smp blocks
@@ -364,6 +388,19 @@
 		qcom,mdss-has-decimation;
 		qcom,mdss-has-wfd-blk;
 
+		qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>,
+						      <0x3B4 0 0>,
+						      <0x3BC 0 0>,
+						      <0x3C4 0 0>;
+
+		qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>,
+						      <0x3B4 4 8>,
+						      <0x3BC 4 8>,
+						      <0x3C4 4 8>;
+
+		qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>,
+						      <0x3B4 8 12>;
+
 		qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
 				     0x00000900 0x0000A00>;
 		qcom,mdss-mixer-intf-off = <0x00003200 0x00003600
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 656f3a4..ec5cfa5 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -71,6 +71,13 @@
 			   This is used to override faulty hardware readings.
 - qcom,strtstp-sleepwake:  Boolean. Enables use of GPU SLUMBER instead of SLEEP for power savings
 
+- qcom,pm-qos-latency:		Every time the GPU wakes up from sleep, the driver
+				votes for an acceptable maximum latency with the
+				pm-qos driver. This vote prevents the CPU from
+				entering a power save state if the latency to bring
+				the CPU back to its normal state exceeds this value.
+				Value is in microseconds.
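+				A minimal sketch of such a latency vote using the
+				generic kernel PM QoS API (illustrative only, not
+				the actual GPU driver code):
+
+				    #include <linux/pm_qos.h>
+
+				    static struct pm_qos_request gpu_wake_qos;
+
+				    /* at probe: register with no constraint */
+				    pm_qos_add_request(&gpu_wake_qos,
+						       PM_QOS_CPU_DMA_LATENCY,
+						       PM_QOS_DEFAULT_VALUE);
+
+				    /* on GPU wakeup: cap CPU wakeup latency (us) */
+				    pm_qos_update_request(&gpu_wake_qos, 701);
+
+				    /* on GPU sleep: remove the constraint */
+				    pm_qos_update_request(&gpu_wake_qos,
+							  PM_QOS_DEFAULT_VALUE);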
+
 The following properties are optional as collecting data via coresight might
 not be supported for every chipset. The documentation for coresight
 properties can be found in:
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
index d0cad52..9acf54a 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -33,6 +33,9 @@
 				  apply the default RSENSE if conditions are met.
 			      1 : Select this type to read the IADC, SMBB trim register and
 				  manufacturer type and apply the default RSENSE if conditions are met.
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+		    PMIC type and revision for applying the appropriate temperature
+		    compensation parameters.
 
 Channel node
 NOTE: Atleast one Channel node is required.
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index dd0c440..83403ba 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -22,6 +22,9 @@
 
 Optional properties:
 - qcom,vadc-poll-eoc: Use polling instead of interrupts for End of Conversion completion.
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+		    PMIC type and revision for applying the appropriate temperature
+		    compensation parameters.
 
 Client required property:
 - qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
index e98ee05..32f6a24 100644
--- a/Documentation/devicetree/bindings/memory.txt
+++ b/Documentation/devicetree/bindings/memory.txt
@@ -36,6 +36,7 @@
 	reg = <(baseaddr) (size)>;
 	(linux,contiguous-region);
 	(linux,default-contiguous-region);
+	(linux,memory-limit);
         label = (unique_name);
 };
 
@@ -48,6 +49,11 @@
 linux,default-contiguous-region: property indicating that the region
 		is the default region for all contiguous memory
 		allocations, Linux specific (optional)
+linux,memory-limit: property specifying an upper bound on the physical address
+		of the region if the region is placed dynamically. If no limit
+		is specified, the region may be placed anywhere in the physical
+		address space. 0 may be used to specify lowmem (i.e. the region
+		will be placed in the direct mapped lowmem region)
 label:		an internal name used for automatically associating the
 		cma region with a given device. The label is optional;
 		if the label is not given the client is responsible for
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index d502f78..8c41926 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -55,6 +55,9 @@
 				   no default value that the driver assumes if this property
 				   is not specified. So if this property is not specified,
 				   then SDHC driver will not vote for PM QOS.
+        - qcom,dat1-mpm-int:    specifies the MPM interrupt number (see the sdhc_2 node below)
+				corresponding to the DAT1 line of SDHC (used only if the slot has
+				a dedicated DAT1 MSM pin, not a GPIO)
 
 In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
 	- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
@@ -165,4 +168,5 @@
 				<81 512 106496 212992>, /* 208 MB/s */
 				<81 512 2147483647 4294967295>; /* Max. bandwidth */
 		qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
+		qcom,dat1-mpm-int = <44>;
 	};
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
index 8cef7f0..447c8c1 100644
--- a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -13,11 +13,12 @@
 Each partition is represented as a sub-node of the qcom,mtd-partitions device.
 Each node's name represents the name of the corresponding partition.
 
-Required properties:
-- reg : The partition offset and size
-- label : The label / name for this partition.
+This is now completely optional because the partition information is available
+from the bootloader.
 
 Optional properties:
+- reg : The partition offset and size
+- label : The label / name for this partition.
 - read-only: This parameter, if present, indicates that this partition
   should only be mounted read-only.
 
diff --git a/Documentation/devicetree/bindings/nfc/nfc-nci.txt b/Documentation/devicetree/bindings/nfc/nfc-nci.txt
index 2c06599..7af847c 100644
--- a/Documentation/devicetree/bindings/nfc/nfc-nci.txt
+++ b/Documentation/devicetree/bindings/nfc/nfc-nci.txt
@@ -12,13 +12,12 @@
 - qcom,clk-src-gpio: msm gpio clock,used ony if clock source is msm gpio
 - qcom,clk-req-gpio: clk-req input gpio for MSM based clocks.
                      not used for pmic implementation
-- vlogic-supply: LDO for power supply
 - interrupt-parent: Should be phandle for the interrupt controller
                     that services interrupts for this device.
 - interrupts: Nfc read interrupt,gpio-clk-req interrupt
 - qcom,clk-gpio: pmic or msm gpio on which bbclk2 signal is coming.
 
-LDO example:
+Example:
 
 	i2c@f9925000 { /* BLSP-1 QUP-3 */
 		nfc-nci@e {
@@ -31,7 +30,6 @@
 			interrupt-parent = <&msmgpio>;
 			interrupts = <77 0>;
 			qcom,clk-gpio = <&msmgpio 75 0x00>;
-			vlogic-supply = <&pm8110_l14>;
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/sound/voice-svc.txt b/Documentation/devicetree/bindings/sound/voice-svc.txt
new file mode 100644
index 0000000..deca7f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/voice-svc.txt
@@ -0,0 +1,11 @@
+* Voice Service binding
+
+Required properties:
+- compatible : "qcom,msm-voice-svc"
+
+Example:
+
+	qcom,msm-voice-svc {
+		compatible = "qcom,msm-voice-svc";
+	};
+
diff --git a/Documentation/devicetree/bindings/usb/ice40-hcd.txt b/Documentation/devicetree/bindings/usb/ice40-hcd.txt
new file mode 100644
index 0000000..43d24dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ice40-hcd.txt
@@ -0,0 +1,45 @@
+ICE40 FPGA based SPI-USB bridge
+
+Documentation/devicetree/bindings/spi/spi-bus.txt provides the details
+of the required and optional properties of a SPI slave device node.
+
+The purpose of this document is to provide the additional properties
+that are required to use the ICE40 FPGA based SPI slave device as a
+USB host controller.
+
+Required properties:
+- compatible : should be "lattice,ice40-spi-usb"
+- <supply-name>-supply: handle to the regulator device tree node
+  Required "supply-name" values are "core-vcc" and "spi-vcc"
+- lattice,reset-gpio: gpio used to assert the bridge chip reset
+- lattice,slave-select-gpio: gpio used to select the slave during configuration
+  loading
+- lattice,config-done-gpio: gpio used to indicate the configuration status
+- lattice,vcc-en-gpio: gpio used to enable the chip power supply
+
+Optional properties:
+- interrupts: IRQ lines used by this controller
+- lattice,clk-en-gpio: gpio used to enable the 19.2 MHz clock to the bridge
+  chip. If it is not present, assume that the clock is available on
+  the bridge chip board.
+- <supply-name>-supply: handle to the regulator device tree node
+  Optional "supply-name" is "gpio" used to power up the gpio bank
+  used by this device
+
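+Example:
+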
+	spi@f9923000 {
+		lattice,spi-usb@3 {
+			compatible = "lattice,ice40-spi-usb";
+			reg = <3>;
+			spi-max-frequency = <50000000>;
+			spi-cpol = <1>;
+			spi-cpha = <1>;
+			interrupt-parent = <&msmgpio>;
+			interrupts = <121 0x8>;
+			core-vcc-supply = <&pm8226_l2>;
+			spi-vcc-supply = <&pm8226_l5>;
+			lattice,reset-gpio = <&msmgpio 114 0>;
+			lattice,slave-select-gpio = <&msmgpio 118 0>;
+			lattice,config-done-gpio = <&msmgpio 115 0>;
+			lattice,vcc-en-gpio = <&msmgpio 117 0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 7d3d435..fcafe11 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -159,6 +159,7 @@
   and no peripheral connected over dock during low power mode, fourth value represents
   minimum value to vote when USB is operational, fifth item represents maximum value
   to vote for USB is operational.
+- qcom,usb2-enable-uicc: If present, usb2 port will be used for uicc card connection.
 
 Example MSM HSUSB EHCI controller device node :
 	ehci: qcom,ehci-host@f9a55000 {
@@ -172,6 +173,7 @@
 		qcom,usb2-enable-hsphy2;
 		qcom,usb2-power-budget = <500>;
 		qcom,vdd-voltage-level = <1 2 3 5 7>;
+		qcom,usb2-enable-uicc;
 	};
 
 ANDROID USB:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index fd826d9..440dac1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@
 apm	Applied Micro Circuits Corporation (APM)
 arm	ARM Ltd.
 atmel	Atmel Corporation
+avago	Avago Technologies
 bosch	Bosch Sensortec GmbH
 capella	Capella Microsystems, Inc.
 cavium	Cavium, Inc.
@@ -28,6 +29,7 @@
 idt	Integrated Device Technologies, Inc.
 intercontrol	Inter Control Group
 invn	InvenSense Inc.
+lattice	Lattice Semiconductor.
 linux	Linux-specific binding
 kionix	Kionix Inc.
 marvell	Marvell Technology Group Ltd.
diff --git a/Documentation/usb/ice40-hcd.txt b/Documentation/usb/ice40-hcd.txt
new file mode 100644
index 0000000..54f845e
--- /dev/null
+++ b/Documentation/usb/ice40-hcd.txt
@@ -0,0 +1,247 @@
+Introduction
+============
+
+USB UICC connectivity is required for MSM8x12. This SoC has only 1 USB
+controller which is used for peripheral mode and charging. Hence an external
+USB host controller over SPI is used to connect a USB UICC card. An ICE40 FPGA
+based SPI to IC-USB (Inter-Chip USB) bridge chip is used.
+
+The ICE40 Host controller driver (ice40-hcd) is registered as a SPI protocol
+driver; it interacts with the SPI subsystem on one side and with the USB core
+on the other side.
+
+Hardware description
+====================
+
+The ICE40 devices are SRAM-based FPGAs. The SRAM memory cells are volatile,
+meaning that once power is removed from the device, its configuration is lost
+and must be reloaded on the next power-up. An on-chip non-volatile configuration
+memory or an external SPI flash is not used to store the configuration data due
+to increased power consumption.  Instead, the software loads the configuration
+data through SPI interface after powering up the bridge chip. Once the
+configuration data is programmed successfully, the bridge chip will be ready for
+the USB host controller operations.
+
+The ICE40 device has an interrupt signal apart from the standard SPI signals
+CSn, SCLK, MOSI and MISO. It has support for 25 to 50 MHz frequencies. The
+maximum operating frequency during configuration loading is 25 MHz.
+
+The bridge chip requires two power supplies, SPI_VCC (1.8v - 3.3v) and VCC_CORE
+(1.2v). The SPI_VCC manages the SPI slave portion and VCC_CORE manages the USB
+serial engine (SIE) portion.  It requires a 19.2 MHz reference clock and a
+32 MHz clock is required for remote wakeup detection during suspend.
+
+The configuration loading sequence:
+
+- Assert the RSTn pin. This keeps bridge chip in reset state after downloading
+the configuration data.
+- The bridge chip samples the SPI interface chip select pin during power-up and
+enters SPI slave mode if it is low. Drive the chip select pin low before
+powering up the bridge chip.
+- Power-up the bridge chip by enabling SPI_VCC and VCC_CORE
+- De-assert the chip select pin after 50 usec.
+- Transfer the configuration data over SPI. Note that the bridge chip requires
+49 dummy clock cycles after sending the data.
+- The bridge chip indicates the status of the configuration loading via config
+done pin. It may take 50 usec to assert this pin.
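+
+A minimal sketch of this sequence (illustrative only; the gpio numbers,
+regulator handles and local names below are hypothetical, not the driver's
+actual symbols):
+
+	/* assert RSTn (active low) and force chip select low before power-up */
+	gpio_set_value(reset_gpio, 0);
+	gpio_set_value(slave_select_gpio, 0);
+
+	/* power up the bridge chip */
+	regulator_enable(spi_vcc);
+	regulator_enable(core_vcc);
+
+	/* de-assert chip select after 50 usec to latch SPI slave mode */
+	udelay(50);
+	gpio_set_value(slave_select_gpio, 1);
+
+	/* send the configuration data followed by >= 49 dummy clock cycles */
+	spi_write(spi, config_data, config_len);
+	spi_write(spi, dummy_bytes, 7);	/* 7 bytes = 56 clock cycles */
+
+	/* CONFIG_DONE may take up to 50 usec to be asserted */
+	udelay(50);
+	if (!gpio_get_value(config_done_gpio))
+		return -EIO;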
+
+The 19.2 MHz clock should be supplied before de-asserting the RSTn pin. A PLL
+generates a 48 MHz clock signal, which a divider then uses to create a 12 MHz
+clock signal. When the PLLOK bit is set in the USB Transfer Result register, it
+indicates that the PLL output is locked to the input reference clock. When it
+is 0, it indicates that the PLL is out of lock. It is recommended to assert the
+RSTn pin to re-synchronize the PLL to the reference clock when the PLL loses
+lock. The chip will be ready for the USB host controller operations after it is
+brought out of reset and PLL is synchronized to the reference clock.
+
+The software is responsible for initiating all the USB host transfers by writing
+the associated registers. The SIE in the bridge chip performs the USB host
+operations via the IC-USB bus based on the registers set by the software. The
+USB transfer results as well as the bus status like the peripheral connection,
+disconnection, resume, etc. are notified to software through the interrupt and
+the internal registers.
+
+The bridge chip provides the DP & DM pull-down resistor control to the software.
+The pull-down resistors are enabled automatically after the power up to force
+the SE0 condition on the bus. The software is required to disable these
+resistors before driving the reset on the bus. Control, Bulk and Interrupt
+transfers are supported. The data toggling states are not maintained in the
+hardware and should be serviced by the software. The bridge chip returns
+one of the following values for a USB transaction (SETUP/IN/OUT) via Transfer
+result register.
+
+xSUCCESS: Successful transfer.
+xBUSY: The SIE is busy with a USB transfer.
+xPKTERR: Packet Error (stuff, EOP).
+xPIDERR: PID check bits are incorrect.
+xNAK: Device returned NAK. This is not an error condition for IN/OUT. But it
+is an error condition for SETUP.
+xSTALL: Device returned STALL.
+xWRONGPID: Wrong PID is received. For example, an IN transaction is attempted
+on an OUT endpoint.
+xCRCERR: CRC error.
+xTOGERR: Toggle bit error. The SIE returns ACK when the toggle mismatch happens
+for an IN transaction and returns this error code. Software should discard the
+data as it was received already in the previous transaction.
+xBADLEN: Too big packet size received.
+xTIMEOUT: Device failed to respond in time.
+
+Software description
+====================
+
+This driver is compiled as a module and is loaded by the userspace after
+getting the UICC card insertion event from the modem processor. The module is
+unloaded upon the UICC card removal.
+
+This driver registers as a SPI protocol driver. The SPI controller driver
+manages the chip select pin. This pin needs to be driven low before powering
+up the bridge chip. Hence this pin's settings are overridden temporarily during
+the bridge chip power-up sequence. The original settings are restored before
+sending the configuration data to the bridge chip, which acts as a SPI slave.
+Both the pinctrl and gpiomux frameworks allow this type of use case.
+
+The configuration data file is stored on the eMMC card. Firmware class API
+request_firmware() is used to read the configuration data file. The
+configuration data is then sent to the bridge chip via SPI interface. The
+bridge chip asserts the config done pin once the configuration is completed.
+
+The driver registers as a Full Speed (USB 1.1) HCD. The following methods
+are implemented that are part of hc_drive struct:
+
+reset: It is called one time by the core during HCD registration. The
+default address 0 is programmed and the line state is sampled to check if any
+device is connected. If any device is connected, the port flags are updated
+accordingly. As the module is loaded after the UICC card is inserted, the
+device would be present at this time.
+
+start: This method is called one time by the core during HCD registration.
+The bridge chip is programmed to transmit the SOFs.
+
+stop: The method is called one time by the core during HCD deregistration.
+The bridge chip is programmed to stop transmitting the SOFs.
+
+hub_control: This method is called by the core to manage the Root HUB. The
+hardware does not maintain port state.  The software maintain the port
+state and provide the information to the core when required.  The following
+HUB class requests are supported.
+
+- GetHubDescriptor: The HUB descriptor is sent to the core. Only 1 port
+is present. Over current protection and port power control are not supported.
+- SetPortFeature: The device reset and suspend are supported. The DP & DM
+pull-down resistors are disabled before driving the reset as per the IC-USB
+spec. The reset signaling is stopped when the core queries the port status.
+- GetPortStatus: The device connection status is sent to the core.  If a reset
+is in progress, it is stopped before returning the port status.
+- ClearPortFeature: The device resume (clear suspend) is supported.
+
+urb_enqueue: This method is called by the core to initiate a USB Control/Bulk
+transfer.  If the endpoint private context is not present, it will be created to
+hold the endpoint number, host endpoint structure, transaction error count, halt
+state and unlink state. The URB is attached to the endpoint URB list. If the
+endpoint is not active, it is attached to the asynchronous schedule list and the
+work is scheduled to traverse this list. The traversal algorithm is explained
+later in this document.
+
+urb_dequeue: This method is called by the core when an URB is unlinked.  If the
+endpoint is not active, the URB is unlinked immediately.  Otherwise the endpoint
+is marked for unlink and URB is unlinked from the asynchronous schedule work.
+
+bus_suspend: This method is called by the core during root hub suspend. The SOFs
+are already stopped during the port suspend which happens before root hub
+suspend. Assert the RSTn pin to put the bridge chip in reset state and stop XO
+(19.2 MHz) clock.
+
+bus_resume: This method is called by the core during root hub resume. Turn on
+the XO clock and de-assert the RSTn signal to bring the chip out of reset.
+
+endpoint_disable: This method is called by the core during the device
+disconnect. All the URB are unlinked by this time, so free the endpoint private
+structure.
+
+Asynchronous scheduling:
+
+All the active endpoints are queued to the asynchronous schedule list. A worker
+thread iterates over this circular list and process the URBs. Processing an URB
+involves initiating multiple SETUP/IN/OUT transactions and checking the result.
+After receiving the DATA/ACK, the toggle bit is inverted.
+
+A URB is finished when any of the following events occur:
+
+- The entire data is transferred for an OUT endpoint or a short packet is
+received for an IN endpoint.
+- The endpoint is stalled by the device. -EPIPE is returned.
+- A transaction error occurs 3 times consecutively. -EPROTO is returned.
+- A NAK is received for a SETUP transaction.
+- The URB is unlinked.
+
+The next transaction is issued on the next endpoint (if available) irrespective
+of the result of the current transaction.  But the IN/OUT transaction of data
+or status phase is attempted immediately after the SETUP transaction for a
+control endpoint. If a NAK is received for this transaction, the control
+transfer is resumed next time when the control endpoint is encountered in the
+asynchronous schedule list. This is to give the control transfers priority
+over the bulk transfers.
+
+The endpoint is marked as halted when a URB is finished due to transaction
+errors or stall condition. The halted endpoint is removed from the asynchronous
+schedule list.  It will be added again next time when a URB is enqueued on this
+endpoint.
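+
+A simplified sketch of the traversal (illustrative pseudo-code only; the
+structure members and helper names are hypothetical):
+
+	/* worker: walk the circular list of active endpoints */
+	list_for_each_entry_safe(ep, tmp, &async_list, ep_list) {
+		urb = first_urb_on_endpoint(ep);
+		ret = issue_next_transaction(ep, urb);	/* SETUP/IN/OUT */
+		if (ret == xSUCCESS)
+			ep->toggle ^= 1;	/* invert toggle after DATA/ACK */
+		if (urb_is_finished(urb, ret))
+			finish_urb(ep, urb);	/* may halt or unlink the ep */
+	}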
+
+This driver provides debugfs interface and exports a file called "command" under
+<debugfs root>/ice40 directory.  The following strings can be echoed to this
+file.
+
+"poll": If the device is connected after the module is loaded, it will not be
+detected automatically. The bus is sampled when this string is echoed. If a
+device is connected, port flags are updated and core is notified about the
+device connect event.
+
+"rwtest": Function Address register is written and read back to validate the
+contents. This should NOT be used while the USB device is connected. This is
+strictly for debugging purposes.
+
+"dump": Dumps all the register values to the kernel log buffer.
+
+Design Goals:
+=============
+
+- Handle errors gracefully. Implement a retry mechanism for transaction errors
+and memory failures. Mark the HCD as dead for serious errors such as SPI
+transaction errors to avoid further interactions with the attached USB device.
+- Keep the asynchronous schedule algorithm simple and efficient. Take advantage
+of the static configuration of the USB device. UICC cards have only CCID and Mass
+Storage interfaces. These interface protocols allow only 1 active transfer on
+either the IN or the OUT endpoint.
+- Add trace points to capture USB transactions.
+
+Driver parameters
+=================
+
+The driver is compiled as a module and it accepts the configuration data file
+name as a module param called "firmware". The default configuration file name
+is "ice40.bin".
+
+Config options
+==============
+
+Set CONFIG_USB_SL811_HCD to m to compile this driver as a module.  The driver
+should not be compiled statically, because the configuration data is not
+available during kernel boot.
+
+To do
+=====
+
+- The bridge chip has 2 IN FIFOs and 2 OUT FIFOs.  Implement double buffering.
+- The bridge chip has an interrupt to indicate the transaction (IN/OUT)
+completion. The current implementation uses polling for simplicity and to avoid
+interrupt latencies.  Evaluate interrupt approach.
+- The bridge chip can be completely power collapsed during suspend to avoid
+leakage currents. As the bridge chip does not have any non-volatile memory,
+the configuration data needs to be loaded during resume. This method has higher
+power savings with higher resume latencies. Evaluate this approach.
+- Implement Interrupt transfers if required.
+- The request_firmware() API copies the configuration data file to the kernel
+virtual memory. This memory can't be used for DMA. The current implementation
+copies this data into contiguous physical memory which is allocated via
+kmalloc. If this memory allocation fails, try to allocate multiple pages
+and submit the SPI message with multiple transfers.
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index b392e49..8e54981 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -72,6 +72,13 @@
 pages_unshared   - how many pages unique but repeatedly checked for merging
 pages_volatile   - how many pages changing too fast to be placed in a tree
 full_scans       - how many times all mergeable areas have been scanned
+deferred_timer   - whether to use deferred timers or not
+                 e.g. "echo 1 > /sys/kernel/mm/ksm/deferred_timer"
+                 Default: 0 (deferred timers are not used. Users might want
+                 to set the deferred_timer option if they do not want the
+                 ksm thread to wake up the CPU to carry out ksm activities,
+                 thus gaining on battery while compromising slightly on the
+                 memory that could have been saved.)
 
 A high ratio of pages_sharing to pages_shared indicates good sharing, but
 a high ratio of pages_unshared to pages_sharing indicates wasted effort.
diff --git a/arch/arm/boot/dts/apq8084-mdss.dtsi b/arch/arm/boot/dts/apq8084-mdss.dtsi
index 329ab32..665eedc 100644
--- a/arch/arm/boot/dts/apq8084-mdss.dtsi
+++ b/arch/arm/boot/dts/apq8084-mdss.dtsi
@@ -54,6 +54,17 @@
 		qcom,mdss-pipe-rgb-xin-id = <1 5 9 13>;
 		qcom,mdss-pipe-dma-xin-id = <2 10>;
 
+		qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>,
+						      <0x3B4 0 0>,
+						      <0x3BC 0 0>,
+						      <0x3C4 0 0>;
+		qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>,
+						      <0x3B4 4 8>,
+						      <0x3BC 4 8>,
+						      <0x3C4 4 8>;
+		qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>,
+						      <0x3B4 8 12>;
+
 		qcom,mdss-smp-data = <44 8192>;
 
 		qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
diff --git a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
index 8d28996..9a18a31 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,7 +30,7 @@
 		qcom,mdss-dsi-h-back-porch = <164>;
 		qcom,mdss-dsi-h-pulse-width = <8>;
 		qcom,mdss-dsi-h-sync-skew = <0>;
-		qcom,mdss-dsi-v-back-porch = <1>;
+		qcom,mdss-dsi-v-back-porch = <11>;
 		qcom,mdss-dsi-v-front-porch = <6>;
 		qcom,mdss-dsi-v-pulse-width = <1>;
 		qcom,mdss-dsi-h-left-border = <0>;
@@ -504,7 +504,9 @@
 					29 01 00 00 00 00 02 6A 60
 					29 01 00 00 00 00 02 FF 00
 					29 01 00 00 78 00 02 29 00
-					29 01 00 00 78 00 02 53 2C];
+					29 01 00 00 78 00 02 53 2C
+					29 01 00 00 00 00 02 FF 00
+					29 01 00 00 00 00 06 3B 03 06 03 02 02];
 		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
 					05 01 00 00 78 00 02 10 00];
 		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index 9adbf81..7a60861 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -22,7 +22,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		qcom,revid@100 {
+		pm8110_revid: qcom,revid@100 {
 			compatible = "qcom,qpnp-revid";
 			reg = <0x100 0x100>;
 		};
@@ -219,6 +219,7 @@
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
 			qcom,vadc-poll-eoc;
+			qcom,pmic-revid = <&pm8110_revid>;
 
 			chan@8 {
 				label = "die_temp";
@@ -268,6 +269,7 @@
 			qcom,iadc-vadc = <&pm8110_vadc>;
 			qcom,iadc-poll-eoc;
 			qcom,use-default-rds-trim = <1>;
+			qcom,pmic-revid = <&pm8110_revid>;
 
 			chan@0 {
 				label = "internal_rsense";
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index 41897da..08d3d05 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -364,6 +364,7 @@
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
 			qcom,vadc-poll-eoc;
+			qcom,pmic-revid = <&pm8226_revid>;
 
 			chan@8 {
 				label = "die_temp";
@@ -424,6 +425,7 @@
 			qcom,iadc-vadc = <&pm8226_vadc>;
 			qcom,iadc-poll-eoc;
 			qcom,use-default-rds-trim = <0>;
+			qcom,pmic-revid = <&pm8226_revid>;
 
 			chan@0 {
 				label = "internal_rsense";
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 94a4e83..a0e02f7 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -27,7 +27,7 @@
 		reg = <0x900 0x100>;
 	};
 
-	qcom,revid@100 {
+	pm8941_revid: qcom,revid@100 {
 		compatible = "qcom,qpnp-revid";
 		reg = <0x100 0x100>;
 	};
@@ -577,6 +577,7 @@
 		qcom,adc-bit-resolution = <15>;
 		qcom,adc-vdd-reference = <1800>;
 		qcom,vadc-poll-eoc;
+		qcom,pmic-revid = <&pm8941_revid>;
 
 		chan@0 {
 			label = "usb_in";
@@ -824,6 +825,7 @@
 		qcom,iadc-vadc = <&pm8941_vadc>;
 		qcom,iadc-poll-eoc;
 		qcom,use-default-rds-trim = <0>;
+		qcom,pmic-revid = <&pm8941_revid>;
 
 		chan@0 {
 			label = "internal_rsense";
diff --git a/arch/arm/boot/dts/msm8226-bus.dtsi b/arch/arm/boot/dts/msm8226-bus.dtsi
index 74b4a30..a2f91cf 100644
--- a/arch/arm/boot/dts/msm8226-bus.dtsi
+++ b/arch/arm/boot/dts/msm8226-bus.dtsi
@@ -998,7 +998,7 @@
 		qcom,fabclk-dual = "mem_clk";
 		qcom,fabclk-active = "mem_a_clk";
 		qcom,ntieredslaves = <0>;
-		qcom,qos-freq = <4800>;
+		qcom,qos-freq = <19200>;
 		qcom,hw-sel = "BIMC";
 		qcom,rpm-en;
 
@@ -1008,12 +1008,18 @@
 			qcom,masterp = <0>;
 			qcom,tier = <2>;
 			qcom,hw-sel = "BIMC";
-			qcom,mode = "Fixed";
+			qcom,mode = "Limiter";
 			qcom,qport = <0>;
 			qcom,ws = <10000>;
 			qcom,mas-hw-id = <0>;
 			qcom,prio-rd = <0>;
 			qcom,prio-wr = <0>;
+			qcom,mode-thresh = "Fixed";
+			qcom,thresh = <1800000>;
+			qcom,dual-conf;
+			qcom,bimc,bw = <450000>;
+			qcom,bimc,gp = <5000>;
+			qcom,bimc,thmp = <50>;
 		};
 
 		mas-mss-proc {
diff --git a/arch/arm/boot/dts/msm8226-gpu.dtsi b/arch/arm/boot/dts/msm8226-gpu.dtsi
index fd20d8c..d1c3264 100644
--- a/arch/arm/boot/dts/msm8226-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8226-gpu.dtsi
@@ -47,6 +47,9 @@
 		/* IOMMU Data */
 		iommu = <&kgsl_iommu>;
 
+		/* CPU latency parameter */
+		qcom,pm-qos-latency = <701>;
+
 		/* Power levels */
 		qcom,gpu-pwrlevels {
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/msm8226-mdss.dtsi b/arch/arm/boot/dts/msm8226-mdss.dtsi
index 375c5df..5edc43b 100644
--- a/arch/arm/boot/dts/msm8226-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss.dtsi
@@ -19,7 +19,7 @@
 		interrupts = <0 72 0>;
 		vdd-supply = <&gdsc_mdss>;
 
-		qcom,max-bandwidth-low-kbps = <1660000>;
+		qcom,max-bandwidth-low-kbps = <1100000>;
 		qcom,max-bandwidth-high-kbps = <1660000>;
 
 		/* Bus Scale Settings */
@@ -32,8 +32,8 @@
 			<22 512 0 6400000>;
 
 		/* Fudge factors */
-		qcom,mdss-ab-factor = <2 1>;		/* 2 times    */
-		qcom,mdss-ib-factor = <6 5>;		/* 1.2 times  */
+		qcom,mdss-ab-factor = <1 1>;		/* 1 times    */
+		qcom,mdss-ib-factor = <2 1>;		/* 2 times  */
 		qcom,mdss-clk-factor = <5 4>;		/* 1.25 times */
 
 		qcom,max-clk-rate = <200000000>;
@@ -48,6 +48,10 @@
 		qcom,mdss-pipe-rgb-xin-id = <1>;
 		qcom,mdss-pipe-dma-xin-id = <2>;
 
+		qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>;
+		qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>;
+		qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>;
+
 		qcom,mdss-smp-data = <7 4096>;
 
 		qcom,mdss-ctl-off = <0x00000600 0x00000700>;
@@ -95,7 +99,10 @@
 		compatible = "qcom,mdss-dsi-ctrl";
 		label = "MDSS DSI CTRL->0";
 		cell-index = <0>;
-		reg = <0xfd922800 0x600>;
+		reg = 	<0xfd922800 0x1f8>,
+			<0xfd922b00 0x2b0>,
+			<0xfd828000 0x108>;
+		reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
 		qcom,mdss-fb-map = <&mdss_fb0>;
 		qcom,mdss-mdp = <&mdss_mdp>;
 		vdd-supply = <&pm8226_l15>;
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 5e890d3..78e1a63 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -78,7 +78,7 @@
 		vdd-apc-supply = <&pm8226_s2>;
 
 		vdd-mx-supply = <&pm8226_l3_ao>;
-		qcom,vdd-mx-vmax = <1350000>;
+		qcom,vdd-mx-vmax = <1337500>;
 		qcom,vdd-mx-vmin-method = <1>;
 
 		qcom,cpr-ref-clk = <19200>;
@@ -109,7 +109,7 @@
 		qcom,cpr-fuse-uplift-sel = <22 53 1 0 0>;
 		qcom,cpr-uplift-voltage = <50000>;
 		qcom,cpr-uplift-quotient = <0 0 120>;
-		qcom,cpr-uplift-max-volt = <1350000>;
+		qcom,cpr-uplift-max-volt = <1330000>;
 		qcom,cpr-uplift-speed-bin = <1>;
 		qcom,speed-bin-fuse-sel = <22 0 3 0>;
 	};
diff --git a/arch/arm/boot/dts/msm8226-v1-pm.dtsi b/arch/arm/boot/dts/msm8226-v1-pm.dtsi
index d59fab3..10aff70 100644
--- a/arch/arm/boot/dts/msm8226-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-v1-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -105,7 +105,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,default-l2-state = "l2_cache_active";
 		#address-cells = <1>;
@@ -300,6 +300,7 @@
 		qcom,pc-resets-timer;
 		qcom,cpus-as-clocks;
 		qcom,synced-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
 
 	qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8226-v2-pm.dtsi b/arch/arm/boot/dts/msm8226-v2-pm.dtsi
index bc8fe5d..7af2c7f 100644
--- a/arch/arm/boot/dts/msm8226-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -107,7 +107,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,default-l2-state = "l2_cache_active";
 		#address-cells = <1>;
@@ -312,6 +312,7 @@
 		qcom,pc-resets-timer;
 		qcom,cpus-as-clocks;
 		qcom,synced-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
 
 	qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index 6215740..14fe237 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -37,14 +37,28 @@
 	qcom,cpr-up-threshold = <0>;
 	qcom,cpr-down-threshold = <5>;
 	qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3 3 3>;
-	qcom,cpr-quot-adjust-table =
-				<1 5 450>,
-				<1 6 375>,
-				<1 7 300>,
-				<1 8 225>,
-				<1 9 187>,
-				<1 10 150>,
-				<1 11 75>;
+	qcom,pvs-version-fuse-sel = <22 4 2 0>;
+	qcom,cpr-corner-frequency-map =
+			<1 300000000>,
+			<2 384000000>,
+			<3 600000000>,
+			<4 787200000>,
+			<5 998400000>,
+			<6 1094400000>,
+			<7 1190400000>,
+			<8 1305600000>,
+			<9 1344000000>,
+			<10 1401600000>,
+			<11 1497600000>,
+			<12 1593600000>,
+			<13 1689600000>,
+			<14 1785600000>;
+	qcom,cpr-speed-bin-max-corners =
+			<0 2 2 4 7>,
+			<1 2 2 4 12>,
+			<2 2 2 4 10>,
+			<5 2 2 4 14>;
+	qcom,cpr-quot-adjust-scaling-factor-max = <650>;
 };
 
 &msm_gpu {
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index ff97564..4117d9d 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -499,6 +499,10 @@
 		compatible = "qti,msm-pcm-loopback";
 	};
 
+	qcom,msm-voice-svc {
+		compatible = "qcom,msm-voice-svc";
+	};
+
 	qcom,msm-dai-q6 {
 		compatible = "qcom,msm-dai-q6";
 		qcom,msm-dai-q6-sb-0-rx {
@@ -1264,8 +1268,8 @@
                 qcom,msm-bus,num-cases = <2>;
                 qcom,msm-bus,num-paths = <1>;
                 qcom,msm-bus,vectors-KBps =
-                                <1 618 0 0>,
-                                <1 618 0 800>;
+                                <54 618 0 0>,
+                                <54 618 0 800>;
 	};
 
 	qcom,tz-log@fe805720 {
diff --git a/arch/arm/boot/dts/msm8610-gpu.dtsi b/arch/arm/boot/dts/msm8610-gpu.dtsi
index de480df..480ec11 100644
--- a/arch/arm/boot/dts/msm8610-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8610-gpu.dtsi
@@ -46,6 +46,9 @@
 		/* IOMMU Data */
 		iommu = <&gfx_iommu>;
 
+		/* CPU latency parameter */
+		qcom,pm-qos-latency = <701>;
+
 		/* Power levels */
 		qcom,gpu-pwrlevels {
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/msm8610-v1-pm.dtsi b/arch/arm/boot/dts/msm8610-v1-pm.dtsi
index dc1dc8b..adc66d7 100644
--- a/arch/arm/boot/dts/msm8610-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-v1-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -105,7 +105,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,default-l2-state = "l2_cache_active";
 		#address-cells = <1>;
@@ -296,6 +296,7 @@
 		qcom,pc-resets-timer;
 		qcom,cpus-as-clocks;
 		qcom,synced-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
 
 	qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8610-v2-pm.dtsi b/arch/arm/boot/dts/msm8610-v2-pm.dtsi
index 2859744..b69b061 100644
--- a/arch/arm/boot/dts/msm8610-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-v2-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -107,7 +107,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,default-l2-state = "l2_cache_active";
 		#address-cells = <1>;
@@ -308,6 +308,7 @@
 		qcom,pc-resets-timer;
 		qcom,cpus-as-clocks;
 		qcom,synced-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
 
 	qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index f152ceb..43cd7c6 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -606,6 +606,7 @@
 		qcom,gpio-miso = <&msmgpio 87 0>;
 		qcom,gpio-clk  = <&msmgpio 89 0>;
 		qcom,gpio-cs0  = <&msmgpio 88 0>;
+		qcom,gpio-cs2  = <&msmgpio 85 0>;
 
 		qcom,infinite-mode = <0>;
 		qcom,use-bam;
@@ -613,6 +614,21 @@
 		qcom,bam-consumer-pipe-index = <18>;
 		qcom,bam-producer-pipe-index = <19>;
 		qcom,master-id = <86>;
+
+		lattice,spi-usb@2 {
+			compatible = "lattice,ice40-spi-usb";
+			reg = <2>;
+			spi-max-frequency = <50000000>;
+			spi-cpol = <1>;
+			spi-cpha = <1>;
+			core-vcc-supply = <&pm8110_l2>;
+			spi-vcc-supply = <&pm8110_l6>;
+			gpio-supply = <&pm8110_l22>;
+			lattice,reset-gpio = <&msmgpio 95 0>;
+			lattice,slave-select-gpio = <&msmgpio 85 0>;
+			lattice,config-done-gpio = <&msmgpio 94 0>;
+			lattice,vcc-en-gpio = <&msmgpio 96 0>;
+		};
 	};
 
 	qcom,pronto@fb21b000 {
@@ -765,6 +781,11 @@
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <32773>;
 		};
+
+		qcom,msm-dai-q6-incall-music-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32770>;
+		};
 	};
 
 	qcom,msm-pcm-hostless {
diff --git a/arch/arm/boot/dts/msm8926.dtsi b/arch/arm/boot/dts/msm8926.dtsi
index 8a0e5c4..394f4a9 100644
--- a/arch/arm/boot/dts/msm8926.dtsi
+++ b/arch/arm/boot/dts/msm8926.dtsi
@@ -22,6 +22,11 @@
 / {
 	model = "Qualcomm MSM 8926";
 	compatible = "qcom,msm8926";
+
+};
+
+&qsecom_mem {
+	linux,memory-limit = <0x0>;
 };
 
 &soc {
@@ -138,15 +143,29 @@
 	regulator-min-microvolt = <1>;
 	regulator-max-microvolt = <14>;
 	qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3 3 3>;
-	qcom,cpr-quot-adjust-table =
-				<1 5 450>,
-				<1 6 375>,
-				<1 7 300>,
-				<1 8 225>,
-				<1 9 187>,
-				<1 10 150>,
-				<1 11 75>;
 	qcom,cpr-quotient-adjustment = <0 72 72>;
+	qcom,pvs-version-fuse-sel = <22 4 2 0>;
+	qcom,cpr-corner-frequency-map =
+			<1 300000000>,
+			<2 384000000>,
+			<3 600000000>,
+			<4 787200000>,
+			<5 998400000>,
+			<6 1094400000>,
+			<7 1190400000>,
+			<8 1305600000>,
+			<9 1344000000>,
+			<10 1401600000>,
+			<11 1497600000>,
+			<12 1593600000>,
+			<13 1689600000>,
+			<14 1785600000>;
+	qcom,cpr-speed-bin-max-corners =
+			<0 1 2 4 7>,
+			<1 1 2 4 12>,
+			<2 1 2 4 10>,
+			<5 1 2 4 14>;
+	qcom,cpr-quot-adjust-scaling-factor-max = <650>;
 };
 
 &tsens {
diff --git a/arch/arm/boot/dts/msm8974-bus.dtsi b/arch/arm/boot/dts/msm8974-bus.dtsi
index b33b2b5..ba8b27e 100644
--- a/arch/arm/boot/dts/msm8974-bus.dtsi
+++ b/arch/arm/boot/dts/msm8974-bus.dtsi
@@ -1175,9 +1175,9 @@
 			qcom,prio-rd = <0>;
 			qcom,prio-wr = <0>;
 			qcom,mode-thresh = "Fixed";
-			qcom,thresh = <2000000>;
+			qcom,thresh = <2000000 2456000>;
 			qcom,dual-conf;
-			qcom,bimc,bw = <300000>;
+			qcom,bimc,bw = <300000 450000>;
 			qcom,bimc,gp = <5000>;
 			qcom,bimc,thmp = <50>;
 		};
@@ -1195,9 +1195,9 @@
 			qcom,prio-rd = <0>;
 			qcom,prio-wr = <0>;
 			qcom,mode-thresh = "Fixed";
-			qcom,thresh = <2000000>;
+			qcom,thresh = <2000000 2456000>;
 			qcom,dual-conf;
-			qcom,bimc,bw = <300000>;
+			qcom,bimc,bw = <300000 450000>;
 			qcom,bimc,gp = <5000>;
 			qcom,bimc,thmp = <50>;
 		};
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index 7f63234..d83a235 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -50,6 +50,15 @@
 		qcom,mdss-pipe-rgb-xin-id = <1 5 9>;
 		qcom,mdss-pipe-dma-xin-id = <2 10>;
 
+		qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>,
+						      <0x3B4 0 0>,
+						      <0x3BC 0 0>;
+		qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>,
+						      <0x3B4 4 8>,
+						      <0x3BC 4 8>;
+		qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>,
+						      <0x3B4 8 12>;
+
 		qcom,mdss-smp-data = <22 4096>;
 
 		qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
@@ -113,7 +122,10 @@
 		compatible = "qcom,mdss-dsi-ctrl";
 		label = "MDSS DSI CTRL->0";
 		cell-index = <0>;
-		reg = <0xfd922800 0x600>;
+		reg = 	<0xfd922800 0x1f8>,
+			<0xfd922b00 0x2b0>,
+			<0xfdf30000 0x108>;
+		reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
 		vdd-supply = <&pm8941_l22>;
 		vddio-supply = <&pm8941_l12>;
 		vdda-supply = <&pm8941_l2>;
@@ -169,7 +181,10 @@
 		compatible = "qcom,mdss-dsi-ctrl";
 		label = "MDSS DSI CTRL->1";
 		cell-index = <1>;
-		reg = <0xfd922e00 0x600>;
+		reg = 	<0xfd922e00 0x1f8>,
+			<0xfd923100 0x2b0>,
+			<0xfdf30000 0x108>;
+		reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
 		vdd-supply = <&pm8941_l22>;
 		vddio-supply = <&pm8941_l12>;
 		vdda-supply = <&pm8941_l2>;
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
index 886177d..516d068 100644
--- a/arch/arm/boot/dts/msm8974-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -130,7 +130,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,default-l2-state = "l2_cache_retention";
 		#address-cells = <1>;
@@ -311,6 +311,7 @@
 		qcom,pc-mode = "tz_l2_int";
 		qcom,use-sync-timer;
 		qcom,cpus-as-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
 
 	qcom,cpu-sleep-status@f9088008 {
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
index 84a8c2d..cde5e5a9 100644
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -126,7 +126,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,default-l2-state = "l2_cache_retention";
 		#address-cells = <1>;
@@ -324,6 +324,7 @@
 		qcom,pc-mode = "tz_l2_int";
 		qcom,use-sync-timer;
 		qcom,cpus-as-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 
 		qcom,pm-snoc-client {
 			compatible = "qcom,pm-snoc-client";
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 23ddc8c..a3b3c87 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -509,6 +509,7 @@
 				<78 512 800000 1600000>, /* 200 MB/s */
 				<78 512 2048000 4096000>; /* Max. bandwidth */
 		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
+		qcom,dat1-mpm-int = <42>;
 		status = "disable";
 	};
 
@@ -535,6 +536,7 @@
 				<81 512 800000 1600000>, /* 200 MB/s */
 				<81 512 2048000 4096000>; /* Max. bandwidth */
 		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
+		qcom,dat1-mpm-int = <44>;
 		status = "disable";
 	};
 
@@ -542,8 +544,17 @@
 		compatible = "qcom,sdhci-msm";
 		reg = <0xf9864900 0x11c>, <0xf9864000 0x800>;
 		reg-names = "hc_mem", "core_mem";
-		interrupts = <0 127 0>, <0 224 0>;
-		interrupt-names = "hc_irq", "pwr_irq";
+
+		#address-cells = <0>;
+		interrupt-parent = <&sdhc_3>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 127 0
+				1 &intc 0 224 0
+				2 &msmgpio 37 0x8>;
+		interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
 		gpios = <&msmgpio 40 0>, /* CLK */
 			<&msmgpio 39 0>, /* CMD */
 			<&msmgpio 38 0>, /* DATA0 */
@@ -575,8 +586,17 @@
 		compatible = "qcom,sdhci-msm";
 		reg = <0xf98e4900 0x11c>, <0xf98e4000 0x800>;
 		reg-names = "hc_mem", "core_mem";
-		interrupts = <0 129 0>, <0 227 0>;
-		interrupt-names = "hc_irq", "pwr_irq";
+
+		#address-cells = <0>;
+		interrupt-parent = <&sdhc_4>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 129 0
+				1 &intc 0 227 0
+				2 &msmgpio 95 0x8>;
+		interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
 		gpios = <&msmgpio 93 0>, /* CLK */
 			<&msmgpio 91 0>, /* CMD */
 			<&msmgpio 96 0>, /* DATA0 */
@@ -1520,7 +1540,9 @@
 		compatible = "qcom,cpubw";
 		qcom,cpu-mem-ports = <1 512>, <2 512>;
 		qcom,bw-tbl =
+			<  381 /*  50 MHz */ >,
 			<  572 /*  75 MHz */ >,
+			<  762 /* 100 MHz */ >,
 			< 1144 /* 150 MHz */ >,
 			< 1525 /* 200 MHz */ >,
 			< 2342 /* 307 MHz */ >,
diff --git a/arch/arm/boot/dts/msm8974pro-pm.dtsi b/arch/arm/boot/dts/msm8974pro-pm.dtsi
index aca8f20..0307e2a 100644
--- a/arch/arm/boot/dts/msm8974pro-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -126,7 +126,7 @@
 		qcom,L2-spm-is-apcs-master;
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,allow-synced-levels;
 		qcom,default-l2-state = "l2_cache_retention";
@@ -242,6 +242,7 @@
 			<0xff 109>,  /* ocmem_dm_nonsec_irq */
 			<0xff 126>,  /* bam_irq[0] */
 			<0xff 140>,  /* uart_dm_intr */
+			<0xff 146>,  /* uart_dm_intr: blsp2_uart_2_irq */
 			<0xff 155>,  /* sdcc_irq[0] */
 			<0xff 157>,  /* sdcc_irq[0] */
 			<0xff 159>,  /* sdcc_irq[0] */
@@ -333,6 +334,7 @@
 		reg = <0xfe805664 0x40>;
 		qcom,pc-mode = "tz_l2_int";
 		qcom,cpus-as-clocks;
+		qcom,lpm-levels = <&lpm_levels>;
 
 		qcom,pm-snoc-client {
 			compatible = "qcom,pm-snoc-client";
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index a72ebb2..ae0547f 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -1578,13 +1578,13 @@
 		/* Off */
 		<26 512 0 0>, <89 604 0 0>,
 		/* Sub-SVS / SVS */
-		<26 512 0 1600000>, <89 604 0 3200000>,
+		<26 512 1200000 2456000>, <89 604 0 3200000>,
 		/* SVS */
-		<26 512 0 2456000>, <89 604 0 3200000>,
+		<26 512 1200000 2456000>, <89 604 0 3200000>,
 		/* low Nominal / SVS */
 		<26 512 0 3680000>, <89 604 0 3200000>,
 		/* SVS / low Nominal */
-		<26 512 0 2456000>, <89 604 0 5280000>,
+		<26 512 1200000 2456000>, <89 604 0 5280000>,
 		/* low Nominal / low Nominal */
 		<26 512 0 3680000>, <89 604 0 5280000>,
 		/* Nominal / low Nominal */
@@ -1657,6 +1657,7 @@
 };
 
 &mdss_mdp {
+	qcom,max-bandwidth-low-kbps = <2750000>;
 	qcom,vbif-settings = <0x0004 0x00000001>;
 
 	qcom,mdss-wb-off = <0x00011100 0x00011500
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index 1e6cdf2..ec62cd4 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,7 @@
 		3e 0f];
 	};
 
-	qcom,lpm-levels {
+	lpm_levels: qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		qcom,no-l2-saw;
 		#address-cells = <1>;
@@ -167,6 +167,7 @@
 		reg = <0xfe805664 0x40>;
 		qcom,pc-mode = "tz_l2_ext";
 		qcom,use-sync-timer;
+		qcom,lpm-levels = <&lpm_levels>;
 	};
 
 	qcom,rpm-log@fc19dc00 {
diff --git a/arch/arm/configs/apq8084_defconfig b/arch/arm/configs/apq8084_defconfig
index 823e4f8..2f5e776 100644
--- a/arch/arm/configs/apq8084_defconfig
+++ b/arch/arm/configs/apq8084_defconfig
@@ -365,6 +365,7 @@
 CONFIG_QPNP_CLKDIV=y
 CONFIG_MSM_IOMMU_V1=y
 CONFIG_IOMMU_PGTABLES_L2=y
+CONFIG_MSM_IOMMU_VBIF_CHECK=y
 CONFIG_IOMMU_NON_SECURE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index 3dd9c55..b0f52b2 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -272,8 +272,8 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=m
-CONFIG_SERIAL_MSM_HSL=y
-CONFIG_SERIAL_MSM_HSL_CONSOLE=y
+#CONFIG_SERIAL_MSM_HSL is not set
+#CONFIG_SERIAL_MSM_HSL_CONSOLE is not set
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM=y
@@ -427,6 +427,7 @@
 CONFIG_QPNP_VIBRATOR=y
 CONFIG_QPNP_REVID=y
 CONFIG_MSM_IOMMU_V1=y
+CONFIG_MSM_IOMMU_VBIF_CHECK=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index 38171e6..c7af9f9 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -451,6 +451,7 @@
 CONFIG_QPNP_VIBRATOR=y
 CONFIG_QPNP_REVID=y
 CONFIG_MSM_IOMMU_V1=y
+CONFIG_MSM_IOMMU_VBIF_CHECK=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_FUSE=y
 CONFIG_CORESIGHT_TMC=y
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index c7abf42..e5b386c 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -212,6 +212,13 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
@@ -244,8 +251,8 @@
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=m
 # CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_SERIAL_MSM_HSL=y
-CONFIG_SERIAL_MSM_HSL_CONSOLE=y
+#CONFIG_SERIAL_MSM_HSL is not set
+#CONFIG_SERIAL_MSM_HSL_CONSOLE is not set
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM=y
@@ -335,6 +342,12 @@
 CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_ELECOM=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_ICE40_HCD=m
+CONFIG_USB_CCID_BRIDGE=y
+CONFIG_USB_STORAGE=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -412,6 +425,8 @@
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_ARC4=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_MOBICORE_SUPPORT=m
+CONFIG_MOBICORE_API=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=y
 CONFIG_PPP=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index fe84f96..4f60013 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -213,6 +213,13 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
@@ -357,6 +364,12 @@
 CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_ELECOM=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_ICE40_HCD=m
+CONFIG_USB_CCID_BRIDGE=y
+CONFIG_USB_STORAGE=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -485,3 +498,5 @@
 CONFIG_SENSORS_MMA8X5X=y
 CONFIG_SENSORS_CAPELLA_CM36283=y
 CONFIG_MSM_RDBG=m
+CONFIG_MOBICORE_SUPPORT=m
+CONFIG_MOBICORE_API=m
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 915b1c3..0421131 100755
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -463,6 +463,7 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_MSM_IOMMU_V1=y
 CONFIG_IOMMU_PGTABLES_L2=y
+CONFIG_MSM_IOMMU_VBIF_CHECK=y
 CONFIG_MOBICORE_SUPPORT=m
 CONFIG_MOBICORE_API=m
 CONFIG_BIF=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index e72e5ff..3c3de4a 100755
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -124,6 +124,7 @@
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
@@ -132,6 +133,7 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
@@ -222,6 +224,9 @@
 CONFIG_IP6_NF_RAW=y
 CONFIG_BRIDGE_NF_EBTABLES=y
 CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
@@ -283,6 +288,18 @@
 CONFIG_KS8851=m
 # CONFIG_MSM_RMNET is not set
 CONFIG_MSM_RMNET_BAM=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
 CONFIG_SLIP=y
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_MODE_SLIP6=y
@@ -471,6 +488,7 @@
 CONFIG_MSM_IOMMU_V1=y
 CONFIG_MSM_IOMMU_PMON=y
 CONFIG_IOMMU_PGTABLES_L2=y
+CONFIG_MSM_IOMMU_VBIF_CHECK=y
 CONFIG_MOBICORE_SUPPORT=m
 CONFIG_MOBICORE_API=m
 CONFIG_CORESIGHT=y
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 71f4827..0827df7 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -24,6 +24,7 @@
 extern void disable_hlt(void);
 extern void enable_hlt(void);
 extern int get_hlt(void);
+extern char* (*arch_read_hardware_id)(void);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 8c0a923..5a1ed75 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -493,8 +493,8 @@
 	clockevents_config_and_register(clk, arch_timer_rate,
 					0xf, 0x7fffffff);
 
-	err = request_irq(arch_timer_spi, arch_timer_handler_mem, 0,
-		"arch_timer", clk);
+	err = request_irq(arch_timer_spi, arch_timer_handler_mem,
+			IRQF_TIMER, "arch_timer", clk);
 
 	return err;
 }
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7298f9a..c110f0f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -110,6 +110,9 @@
 unsigned int cold_boot;
 EXPORT_SYMBOL(cold_boot);
 
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
+
 #ifdef MULTI_CPU
 struct processor processor __read_mostly;
 #endif
@@ -1108,7 +1111,10 @@
 
 	seq_puts(m, "\n");
 
-	seq_printf(m, "Hardware\t: %s\n", machine_name);
+	if (!arch_read_hardware_id)
+		seq_printf(m, "Hardware\t: %s\n", machine_name);
+	else
+		seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
 	seq_printf(m, "Revision\t: %04x\n", system_rev);
 	seq_printf(m, "Serial\t\t: %08x%08x\n",
 		   system_serial_high, system_serial_low);
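
The setup.c hunk above exposes an arch_read_hardware_id() function-pointer hook so a platform can override the "Hardware:" line in /proc/cpuinfo. A minimal sketch of how a board file might use it; the function and machine names below are only illustrative, not part of this patch:

	static char *example_read_hardware_id(void)
	{
		/* String returned verbatim on the /proc/cpuinfo "Hardware:" line */
		return "Qualcomm MSM8974 (example board)";
	}

	static void __init example_init_machine(void)
	{
		/* Install the override before userspace reads /proc/cpuinfo */
		arch_read_hardware_id = example_read_hardware_id;
	}
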
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index 5882ebc..08566bb 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <mach/board.h>
@@ -18,6 +19,12 @@
 #include <mach/gpiomux.h>
 #include <mach/socinfo.h>
 
+#define WLAN_CLK	44
+#define WLAN_SET	43
+#define WLAN_DATA0	42
+#define WLAN_DATA1	41
+#define WLAN_DATA2	40
+
 #ifdef CONFIG_USB_EHCI_MSM_HSIC
 static struct gpiomux_setting hsic_sus_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
@@ -50,6 +57,42 @@
 };
 #endif
 
+static struct gpiomux_setting smsc_hub_act_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting smsc_hub_susp_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config smsc_hub_configs[] = {
+	{
+		.gpio = 114, /* reset_n */
+		.settings = {
+			[GPIOMUX_ACTIVE] = &smsc_hub_act_cfg,
+			[GPIOMUX_SUSPENDED] = &smsc_hub_susp_cfg,
+		},
+	},
+	{
+		.gpio = 8, /* clk_en */
+		.settings = {
+			[GPIOMUX_ACTIVE] = &smsc_hub_act_cfg,
+			[GPIOMUX_SUSPENDED] = &smsc_hub_susp_cfg,
+		},
+	},
+	{
+		.gpio = 9, /* int_n */
+		.settings = {
+			[GPIOMUX_ACTIVE] = &smsc_hub_act_cfg,
+			[GPIOMUX_SUSPENDED] = &smsc_hub_susp_cfg,
+		},
+	},
+};
+
 #define KS8851_IRQ_GPIO 115
 
 #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
@@ -140,6 +183,18 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct gpiomux_setting wcnss_5gpio_suspend_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5gpio_active_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
 static struct gpiomux_setting gpio_i2c_config = {
 	.func = GPIOMUX_FUNC_3,
 	.drv = GPIOMUX_DRV_2MA,
@@ -542,6 +597,44 @@
 	},
 };
 
+static struct msm_gpiomux_config wcnss_5gpio_interface[] = {
+	{
+		.gpio = 40,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 41,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 42,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 43,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 44,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+};
+
 static struct gpiomux_setting gpio_suspend_config[] = {
 	{
 		.func = GPIOMUX_FUNC_GPIO,  /* IN-NP */
@@ -883,4 +976,113 @@
 	}
 	msm_gpiomux_install(msm_hsic_configs, ARRAY_SIZE(msm_hsic_configs));
 #endif
+	if (machine_is_msm8926() && of_board_is_mtp())
+		msm_gpiomux_install(smsc_hub_configs,
+			ARRAY_SIZE(smsc_hub_configs));
+}
+
+static void wcnss_switch_to_gpio(void)
+{
+	/* Switch MUX to GPIO */
+	msm_gpiomux_install(wcnss_5gpio_interface,
+			ARRAY_SIZE(wcnss_5gpio_interface));
+
+	/* Ensure GPIO config */
+	gpio_direction_input(WLAN_DATA2);
+	gpio_direction_input(WLAN_DATA1);
+	gpio_direction_input(WLAN_DATA0);
+	gpio_direction_output(WLAN_SET, 0);
+	gpio_direction_output(WLAN_CLK, 0);
+}
+
+static void wcnss_switch_to_5wire(void)
+{
+	msm_gpiomux_install(wcnss_5wire_interface,
+			ARRAY_SIZE(wcnss_5wire_interface));
+}
+
+u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+	int count = 0;
+	u32 rf_cmd_and_addr = 0;
+	u32 rf_data_received = 0;
+	u32 rf_bit = 0;
+
+	wcnss_switch_to_gpio();
+
+	/* Reset the signal if it is already being used. */
+	gpio_set_value(WLAN_SET, 0);
+	gpio_set_value(WLAN_CLK, 0);
+
+	/* We start with cmd_set high WLAN_SET = 1. */
+	gpio_set_value(WLAN_SET, 1);
+
+	gpio_direction_output(WLAN_DATA0, 1);
+	gpio_direction_output(WLAN_DATA1, 1);
+	gpio_direction_output(WLAN_DATA2, 1);
+
+	gpio_set_value(WLAN_DATA0, 0);
+	gpio_set_value(WLAN_DATA1, 0);
+	gpio_set_value(WLAN_DATA2, 0);
+
+	/* Prepare the command and RF register address to be sent out.
+	 * Make sure that we send only 14 bits from LSB.
+	 */
+	rf_cmd_and_addr  = (((WLAN_RF_READ_REG_CMD) |
+		(rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+		WLAN_RF_READ_CMD_MASK);
+
+	for (count = 0; count < 5; count++) {
+		gpio_set_value(WLAN_CLK, 0);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA0, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA1, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA2, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		/* Send the data out WLAN_CLK = 1 */
+		gpio_set_value(WLAN_CLK, 1);
+	}
+
+	/* Pull down the clock signal */
+	gpio_set_value(WLAN_CLK, 0);
+
+	/* Configure data pins to input IO pins */
+	gpio_direction_input(WLAN_DATA0);
+	gpio_direction_input(WLAN_DATA1);
+	gpio_direction_input(WLAN_DATA2);
+
+	for (count = 0; count < 2; count++) {
+		gpio_set_value(WLAN_CLK, 1);
+		gpio_set_value(WLAN_CLK, 0);
+	}
+
+	rf_bit = 0;
+	for (count = 0; count < 6; count++) {
+		gpio_set_value(WLAN_CLK, 1);
+		gpio_set_value(WLAN_CLK, 0);
+
+		rf_bit = gpio_get_value(WLAN_DATA0);
+		rf_data_received |= (rf_bit << (count * 3 + 0));
+
+		if (count != 5) {
+			rf_bit = gpio_get_value(WLAN_DATA1);
+			rf_data_received |= (rf_bit << (count * 3 + 1));
+
+			rf_bit = gpio_get_value(WLAN_DATA2);
+			rf_data_received |= (rf_bit << (count * 3 + 2));
+		}
+	}
+
+	gpio_set_value(WLAN_SET, 0);
+	wcnss_switch_to_5wire();
+
+	return rf_data_received;
 }
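
wcnss_rf_read_reg() above bit-bangs an RF register read over the WCNSS 5-wire pins: the pins are re-muxed to GPIO, a 14-bit command word is shifted out LSB first (three bits per clock on DATA0..DATA2 for five clocks), and after two dummy clocks a 16-bit result is sampled back (three bits per clock for five clocks, one bit on the sixth). A standalone sketch of the command-word encoding, using the WLAN_RF_* constants added to mach/board.h later in this patch; the sample register address is only an illustration:

	#define WLAN_RF_READ_REG_CMD            0x3
	#define WLAN_RF_REG_ADDR_START_OFFSET   0x3
	#define WLAN_RF_READ_CMD_MASK           0x3fff

	static u32 build_rf_read_cmd(u32 rf_reg_addr)
	{
		/* command in bits [2:0], register address from bit 3 up,
		 * truncated to the 14 bits that are actually shifted out */
		return (WLAN_RF_READ_REG_CMD |
			(rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
			WLAN_RF_READ_CMD_MASK;
	}

	/* e.g. build_rf_read_cmd(0x21) = (0x3 | 0x108) & 0x3fff = 0x10b */
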
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 1c1fbe3..43646cd 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -167,7 +167,7 @@
 	NULL
 };
 
-DT_MACHINE_START(MSM8226_DT, "Qualcomm MSM 8226 (Flattened Device Tree)")
+DT_MACHINE_START(MSM8226_DT, "Qualcomm MSM 8x26 / MSM 8x28 (Flattened Device Tree)")
 	.map_io = msm_map_msm8226_io,
 	.init_irq = msm_dt_init_irq,
 	.init_machine = msm8226_init,
diff --git a/arch/arm/mach-msm/board-8610-gpiomux.c b/arch/arm/mach-msm/board-8610-gpiomux.c
index 2e12fc2..c91deb2 100644
--- a/arch/arm/mach-msm/board-8610-gpiomux.c
+++ b/arch/arm/mach-msm/board-8610-gpiomux.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <mach/board.h>
@@ -18,6 +19,12 @@
 #include <mach/gpiomux.h>
 #include <mach/socinfo.h>
 
+#define WLAN_CLK	27
+#define WLAN_SET	26
+#define WLAN_DATA0	25
+#define WLAN_DATA1	24
+#define WLAN_DATA2	23
+
 static struct gpiomux_setting gpio_spi_config = {
 	.func = GPIOMUX_FUNC_1,
 	.drv = GPIOMUX_DRV_6MA,
@@ -112,6 +119,18 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct gpiomux_setting wcnss_5gpio_suspend_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5gpio_active_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
 static struct gpiomux_setting lcd_en_act_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
@@ -405,6 +424,44 @@
 	},
 };
 
+static struct msm_gpiomux_config wcnss_5gpio_interface[] = {
+	{
+		.gpio = 23,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 24,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 25,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 26,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 27,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+};
+
 static struct gpiomux_setting gpio_suspend_config[] = {
 	{
 		.func = GPIOMUX_FUNC_GPIO,  /* IN-NP */
@@ -673,6 +730,61 @@
 	},
 };
 
+static struct gpiomux_setting ice40_spi_cs_act_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting ice40_spi_cs_susp_config = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting ice40_act_config = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting ice40_susp_config = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config ice40_spi_usb_configs[] __initdata = {
+	{
+		.gpio = 85,
+		.settings = {
+			[GPIOMUX_ACTIVE] = &ice40_spi_cs_act_config,
+			[GPIOMUX_SUSPENDED] = &ice40_spi_cs_susp_config,
+		},
+	},
+	{
+		.gpio = 94,
+		.settings = {
+			[GPIOMUX_ACTIVE] = &ice40_act_config,
+			[GPIOMUX_SUSPENDED] = &ice40_susp_config,
+		},
+	},
+	{
+		.gpio = 95,
+		.settings = {
+			[GPIOMUX_ACTIVE] = &ice40_act_config,
+			[GPIOMUX_SUSPENDED] = &ice40_susp_config,
+		},
+	},
+	{
+		.gpio = 96,
+		.settings = {
+			[GPIOMUX_ACTIVE] = &ice40_act_config,
+			[GPIOMUX_SUSPENDED] = &ice40_susp_config,
+		},
+	},
+};
+
 void __init msm8610_init_gpiomux(void)
 {
 	int rc;
@@ -713,4 +825,114 @@
 	if (of_board_is_cdp())
 		msm_gpiomux_install(msm_cdc_dmic_configs,
 			ARRAY_SIZE(msm_cdc_dmic_configs));
+
+	if (of_board_is_cdp())
+		msm_gpiomux_install(ice40_spi_usb_configs,
+			ARRAY_SIZE(ice40_spi_usb_configs));
+}
+
+static void wcnss_switch_to_gpio(void)
+{
+	/* Switch MUX to GPIO */
+	msm_gpiomux_install(wcnss_5gpio_interface,
+			ARRAY_SIZE(wcnss_5gpio_interface));
+
+	/* Ensure GPIO config */
+	gpio_direction_input(WLAN_DATA2);
+	gpio_direction_input(WLAN_DATA1);
+	gpio_direction_input(WLAN_DATA0);
+	gpio_direction_output(WLAN_SET, 0);
+	gpio_direction_output(WLAN_CLK, 0);
+}
+
+static void wcnss_switch_to_5wire(void)
+{
+	msm_gpiomux_install(wcnss_5wire_interface,
+			ARRAY_SIZE(wcnss_5wire_interface));
+}
+
+u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+	int count = 0;
+	u32 rf_cmd_and_addr = 0;
+	u32 rf_data_received = 0;
+	u32 rf_bit = 0;
+
+	wcnss_switch_to_gpio();
+
+	/* Reset the signal if it is already being used. */
+	gpio_set_value(WLAN_SET, 0);
+	gpio_set_value(WLAN_CLK, 0);
+
+	/* We start with cmd_set high WLAN_SET = 1. */
+	gpio_set_value(WLAN_SET, 1);
+
+	gpio_direction_output(WLAN_DATA0, 1);
+	gpio_direction_output(WLAN_DATA1, 1);
+	gpio_direction_output(WLAN_DATA2, 1);
+
+	gpio_set_value(WLAN_DATA0, 0);
+	gpio_set_value(WLAN_DATA1, 0);
+	gpio_set_value(WLAN_DATA2, 0);
+
+	/* Prepare the command and RF register address to be sent out.
+	 * Make sure that we send only 14 bits from LSB.
+	 */
+	rf_cmd_and_addr  = (((WLAN_RF_READ_REG_CMD) |
+		(rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+		WLAN_RF_READ_CMD_MASK);
+
+	for (count = 0; count < 5; count++) {
+		gpio_set_value(WLAN_CLK, 0);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA0, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA1, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA2, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		/* Send the data out WLAN_CLK = 1 */
+		gpio_set_value(WLAN_CLK, 1);
+	}
+
+	/* Pull down the clock signal */
+	gpio_set_value(WLAN_CLK, 0);
+
+	/* Configure data pins to input IO pins */
+	gpio_direction_input(WLAN_DATA0);
+	gpio_direction_input(WLAN_DATA1);
+	gpio_direction_input(WLAN_DATA2);
+
+	for (count = 0; count < 2; count++) {
+		gpio_set_value(WLAN_CLK, 1);
+		gpio_set_value(WLAN_CLK, 0);
+	}
+
+	rf_bit = 0;
+	for (count = 0; count < 6; count++) {
+		gpio_set_value(WLAN_CLK, 1);
+		gpio_set_value(WLAN_CLK, 0);
+
+		rf_bit = gpio_get_value(WLAN_DATA0);
+		rf_data_received |= (rf_bit << (count * 3 + 0));
+
+		if (count != 5) {
+			rf_bit = gpio_get_value(WLAN_DATA1);
+			rf_data_received |= (rf_bit << (count * 3 + 1));
+
+			rf_bit = gpio_get_value(WLAN_DATA2);
+			rf_data_received |= (rf_bit << (count * 3 + 2));
+		}
+	}
+
+	gpio_set_value(WLAN_SET, 0);
+	wcnss_switch_to_5wire();
+
+	return rf_data_received;
 }
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index d175bb4..cd9b82e 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -136,7 +136,7 @@
 	NULL
 };
 
-DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8610 (Flattened Device Tree)")
+DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8x10 / MSM 8x12 (Flattened Device Tree)")
 	.map_io = msm_map_msm8610_io,
 	.init_irq = msm_dt_init_irq,
 	.init_machine = msm8610_init,
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index cec1a8f..5d4d379 100755
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <mach/board.h>
@@ -20,6 +21,12 @@
 
 #define KS8851_IRQ_GPIO 94
 
+#define WLAN_CLK	40
+#define WLAN_SET	39
+#define WLAN_DATA0	38
+#define WLAN_DATA1	37
+#define WLAN_DATA2	36
+
 static struct gpiomux_setting ap2mdm_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_2MA,
@@ -208,6 +215,18 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct gpiomux_setting wcnss_5gpio_suspend_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5gpio_active_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv  = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
 static struct gpiomux_setting ath_gpio_active_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_2MA,
@@ -1158,6 +1177,43 @@
 	},
 };
 
+static struct msm_gpiomux_config wcnss_5gpio_interface[] = {
+	{
+		.gpio = 36,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 37,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 38,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 39,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+	{
+		.gpio = 40,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &wcnss_5gpio_active_cfg,
+			[GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+		},
+	},
+};
 
 static struct msm_gpiomux_config ath_gpio_configs[] = {
 	{
@@ -1467,3 +1523,109 @@
 		msm_gpiomux_install(apq8074_dragonboard_ts_config,
 			ARRAY_SIZE(apq8074_dragonboard_ts_config));
 }
+
+static void wcnss_switch_to_gpio(void)
+{
+	/* Switch MUX to GPIO */
+	msm_gpiomux_install(wcnss_5gpio_interface,
+			ARRAY_SIZE(wcnss_5gpio_interface));
+
+	/* Ensure GPIO config */
+	gpio_direction_input(WLAN_DATA2);
+	gpio_direction_input(WLAN_DATA1);
+	gpio_direction_input(WLAN_DATA0);
+	gpio_direction_output(WLAN_SET, 0);
+	gpio_direction_output(WLAN_CLK, 0);
+}
+
+static void wcnss_switch_to_5wire(void)
+{
+	msm_gpiomux_install(wcnss_5wire_interface,
+			ARRAY_SIZE(wcnss_5wire_interface));
+}
+
+u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+	int count = 0;
+	u32 rf_cmd_and_addr = 0;
+	u32 rf_data_received = 0;
+	u32 rf_bit = 0;
+
+	wcnss_switch_to_gpio();
+
+	/* Reset the signal if it is already being used. */
+	gpio_set_value(WLAN_SET, 0);
+	gpio_set_value(WLAN_CLK, 0);
+
+	/* We start with cmd_set high WLAN_SET = 1. */
+	gpio_set_value(WLAN_SET, 1);
+
+	gpio_direction_output(WLAN_DATA0, 1);
+	gpio_direction_output(WLAN_DATA1, 1);
+	gpio_direction_output(WLAN_DATA2, 1);
+
+	gpio_set_value(WLAN_DATA0, 0);
+	gpio_set_value(WLAN_DATA1, 0);
+	gpio_set_value(WLAN_DATA2, 0);
+
+	/* Prepare the command and RF register address to be sent out.
+	 * Make sure that we send only 14 bits from LSB.
+	 */
+	rf_cmd_and_addr  = (((WLAN_RF_READ_REG_CMD) |
+		(rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+		WLAN_RF_READ_CMD_MASK);
+
+	for (count = 0; count < 5; count++) {
+		gpio_set_value(WLAN_CLK, 0);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA0, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA1, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(WLAN_DATA2, rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		/* Send the data out WLAN_CLK = 1 */
+		gpio_set_value(WLAN_CLK, 1);
+	}
+
+	/* Pull down the clock signal */
+	gpio_set_value(WLAN_CLK, 0);
+
+	/* Configure data pins to input IO pins */
+	gpio_direction_input(WLAN_DATA0);
+	gpio_direction_input(WLAN_DATA1);
+	gpio_direction_input(WLAN_DATA2);
+
+	for (count = 0; count < 2; count++) {
+		gpio_set_value(WLAN_CLK, 1);
+		gpio_set_value(WLAN_CLK, 0);
+	}
+
+	rf_bit = 0;
+	for (count = 0; count < 6; count++) {
+		gpio_set_value(WLAN_CLK, 1);
+		gpio_set_value(WLAN_CLK, 0);
+
+		rf_bit = gpio_get_value(WLAN_DATA0);
+		rf_data_received |= (rf_bit << (count * 3 + 0));
+
+		if (count != 5) {
+			rf_bit = gpio_get_value(WLAN_DATA1);
+			rf_data_received |= (rf_bit << (count * 3 + 1));
+
+			rf_bit = gpio_get_value(WLAN_DATA2);
+			rf_data_received |= (rf_bit << (count * 3 + 2));
+		}
+	}
+
+	gpio_set_value(WLAN_SET, 0);
+	wcnss_switch_to_5wire();
+
+	return rf_data_received;
+}
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index d16d0f1..7a7c008 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -3146,8 +3146,11 @@
 	CLK_LOOKUP("bus_clk", gcc_mss_q6_bimc_axi_clk.c, "fc880000.qcom,mss"),
 	CLK_LOOKUP("iface_clk",   gcc_mss_cfg_ahb_clk.c, "fc880000.qcom,mss"),
 	CLK_LOOKUP("mem_clk",    gcc_boot_rom_ahb_clk.c, "fc880000.qcom,mss"),
+
 	/* NFC */
-	CLK_LOOKUP("ref_clk",            cxo_d1_a_pin.c, "2-000e"),
+	CLK_LOOKUP("ref_clk",            cxo_d1_a_pin.c, ""),
+	CLK_LOOKUP("ref_clk",            cxo_d1_pin.c, "2-000e"),
+
 	/* PIL-PRONTO */
 	CLK_LOOKUP("xo", cxo_pil_pronto_clk.c, "fb21b000.qcom,pronto"),
 
@@ -3383,6 +3386,8 @@
 	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd922800.qcom,mdss_dsi"),
 	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "fd922800.qcom,mdss_dsi"),
 	CLK_LOOKUP("mdp_core_clk", mdss_mdp_clk.c, "fd922800.qcom,mdss_dsi"),
+	CLK_LOOKUP("core_mmss_clk", mmss_misc_ahb_clk.c,
+		"fd922800.qcom,mdss_dsi"),
 
 	CLK_LOOKUP("core_clk", mdss_mdp_clk.c, "fd900000.qcom,mdss_mdp"),
 	CLK_LOOKUP("lut_clk", mdss_mdp_lut_clk.c, "fd900000.qcom,mdss_mdp"),
@@ -3405,13 +3410,13 @@
 
 	/* MM sensor clocks */
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6f.qcom,camera"),
-	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "90.qcom,camera"),
+	CLK_LOOKUP("cam_src_clk", mclk1_clk_src.c, "90.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6d.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6a.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6c.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "20.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6f.qcom,camera"),
-	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "90.qcom,camera"),
+	CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, "90.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6d.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6a.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6c.qcom,camera"),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index fc5a78a..1771090 100755
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -2197,6 +2197,7 @@
 	.en_mask = BIT(5),
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &ce1_clk_src.c,
 		.dbg_name = "gcc_ce1_clk",
 		.ops = &clk_ops_vote,
 		CLK_INIT(gcc_ce1_clk.c),
@@ -2233,6 +2234,7 @@
 	.en_mask = BIT(2),
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &ce2_clk_src.c,
 		.dbg_name = "gcc_ce2_clk",
 		.ops = &clk_ops_vote,
 		CLK_INIT(gcc_ce2_clk.c),
@@ -5076,6 +5078,10 @@
 	CLK_LOOKUP("pixel_clk", mdss_pclk1_clk.c, "fd922e00.qcom,mdss_dsi"),
 	CLK_LOOKUP("mdp_core_clk", mdss_mdp_clk.c, "fd922800.qcom,mdss_dsi"),
 	CLK_LOOKUP("mdp_core_clk", mdss_mdp_clk.c, "fd922e00.qcom,mdss_dsi"),
+	CLK_LOOKUP("core_mmss_clk", mmss_misc_ahb_clk.c,
+		"fd922800.qcom,mdss_dsi"),
+	CLK_LOOKUP("core_mmss_clk", mmss_misc_ahb_clk.c,
+		"fd922e00.qcom,mdss_dsi"),
 	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd922100.qcom,hdmi_tx"),
 	CLK_LOOKUP("alt_iface_clk", mdss_hdmi_ahb_clk.c,
 		"fd922100.qcom,hdmi_tx"),
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index 1fc7f1d..b63008f 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -1877,7 +1877,7 @@
 	if (vco_rate == 810000000) {
 		DSS_REG_W(mdss_edp_base, 0x0c, 0x18);
 		/* UNIPHY_PLL_LKDET_CFG2 */
-		DSS_REG_W(mdss_edp_base, 0x64, 0x05);
+		DSS_REG_W(mdss_edp_base, 0x64, 0x0d);
 		/* UNIPHY_PLL_REFCLK_CFG */
 		DSS_REG_W(mdss_edp_base, 0x00, 0x00);
 		/* UNIPHY_PLL_SDM_CFG0 */
@@ -1899,7 +1899,7 @@
 		/* UNIPHY_PLL_SSC_CFG3 */
 		DSS_REG_W(mdss_edp_base, 0x58, 0x00);
 		/* UNIPHY_PLL_CAL_CFG0 */
-		DSS_REG_W(mdss_edp_base, 0x6c, 0x0a);
+		DSS_REG_W(mdss_edp_base, 0x6c, 0x12);
 		/* UNIPHY_PLL_CAL_CFG2 */
 		DSS_REG_W(mdss_edp_base, 0x74, 0x01);
 		/* UNIPHY_PLL_CAL_CFG6 */
@@ -1924,7 +1924,7 @@
 		DSS_REG_W(mdss_edp_base, 0x28, 0x00);
 	} else if (vco_rate == 1350000000) {
 		/* UNIPHY_PLL_LKDET_CFG2 */
-		DSS_REG_W(mdss_edp_base, 0x64, 0x05);
+		DSS_REG_W(mdss_edp_base, 0x64, 0x0d);
 		/* UNIPHY_PLL_REFCLK_CFG */
 		DSS_REG_W(mdss_edp_base, 0x00, 0x01);
 		/* UNIPHY_PLL_SDM_CFG0 */
@@ -1946,7 +1946,7 @@
 		/* UNIPHY_PLL_SSC_CFG3 */
 		DSS_REG_W(mdss_edp_base, 0x58, 0x00);
 		/* UNIPHY_PLL_CAL_CFG0 */
-		DSS_REG_W(mdss_edp_base, 0x6c, 0x0a);
+		DSS_REG_W(mdss_edp_base, 0x6c, 0x12);
 		/* UNIPHY_PLL_CAL_CFG2 */
 		DSS_REG_W(mdss_edp_base, 0x74, 0x01);
 		/* UNIPHY_PLL_CAL_CFG6 */
diff --git a/arch/arm/mach-msm/cpr-regulator.c b/arch/arm/mach-msm/cpr-regulator.c
index 039d2d7..d952f82 100644
--- a/arch/arm/mach-msm/cpr-regulator.c
+++ b/arch/arm/mach-msm/cpr-regulator.c
@@ -169,6 +169,8 @@
 	/* Process voltage variables */
 	u32		pvs_bin;
 	u32		speed_bin;
+	u32		pvs_version;
+
 	/* APC voltage regulator */
 	struct regulator	*vdd_apc;
 
@@ -1291,14 +1293,48 @@
 	return rc;
 }
 
-static int cpr_get_of_corner_mappings(struct cpr_regulator *cpr_vreg,
+static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
+				struct device_node *of_node)
+{
+	int rc;
+	u64 fuse_bits;
+	u32 fuse_sel[4];
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,pvs-version-fuse-sel", fuse_sel, 4);
+	if (!rc) {
+		fuse_bits = cpr_read_efuse_row(cpr_vreg,
+				fuse_sel[0], fuse_sel[3]);
+		cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
+			((1 << fuse_sel[2]) - 1);
+		pr_info("[row: %d]: 0x%llx, pvs_version = %d\n",
+				fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
+	} else {
+		cpr_vreg->pvs_version = UINT_MAX;
+	}
+}
+
+/*
+ * cpr_get_corner_quot_adjustment() -- get the quot_adjust for each corner.
+ *
+ * Get the corner to fuse corner (SVS/NORMAL/TURBO) mappings and corner to
+ * APC clock frequency mappings from device tree.
+ * Calculate the quotient adjustment scaling factor for those corners mapping
+ * to the TURBO fuse corner.
+ * Calculate the quotient adjustment for each corner that maps to the TURBO
+ * fuse corner.
+ */
+static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
 					struct device *dev)
 {
 	int rc = 0;
-	int i, size, stripe_size;
+	int i, size;
 	struct property *prop;
-	u32 *tmp;
 	bool corners_mapped;
+	u32 *tmp, *freq_mappings = NULL;
+	u32 scaling, max_factor;
+	u32 corner, turbo_corner = 0, normal_corner = 0, svs_corner = 0;
+	u32 freq_turbo, freq_normal, freq_corner;
 
 	prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
 
@@ -1313,81 +1349,182 @@
 	cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
 					GFP_KERNEL);
 	if (!cpr_vreg->corner_map) {
-		pr_err("Can't allocate cpr_vreg->corner_map memory\n");
+		pr_err("Can't allocate memory for cpr_vreg->corner_map\n");
 		return -ENOMEM;
 	}
 	cpr_vreg->num_corners = size;
 
+	cpr_vreg->quot_adjust = devm_kzalloc(dev,
+			sizeof(u32) * (cpr_vreg->num_corners + 1),
+			GFP_KERNEL);
+	if (!cpr_vreg->quot_adjust) {
+		pr_err("Can't allocate memory for cpr_vreg->quot_adjust\n");
+		return -ENOMEM;
+	}
+
 	if (!corners_mapped) {
 		for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++)
 			cpr_vreg->corner_map[i] = i;
+		return 0;
 	} else {
 		rc = of_property_read_u32_array(dev->of_node,
 			"qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
 
 		if (rc) {
-			pr_err("qcom,cpr-corner-map missing, rc = %d", rc);
+			pr_err("qcom,cpr-corner-map missing, rc = %d\n", rc);
 			return rc;
 		}
 	}
 
-	cpr_vreg->quot_adjust = devm_kzalloc(dev,
-			sizeof(int) * (cpr_vreg->num_corners + 1),
-			GFP_KERNEL);
-	if (!cpr_vreg->quot_adjust) {
-		pr_err("Can't allocate cpr_vreg->quot_adjust memory\n");
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-speed-bin-max-corners", NULL);
+	if (!prop) {
+		cpr_debug("qcom,cpr-speed-bin-max-corners missing\n");
+		return 0;
+	}
+
+	size = prop->length / sizeof(u32);
+	tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+	if (!tmp) {
+		pr_err("memory alloc failed\n");
 		return -ENOMEM;
 	}
-
-	prop = of_find_property(dev->of_node, "qcom,cpr-quot-adjust-table",
-				NULL);
-
-	if (prop) {
-		if (!corners_mapped) {
-			pr_err("qcom,cpr-corner-map missing\n");
-			return -EINVAL;
-		}
-
-		size = prop->length / sizeof(u32);
-		tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
-		if (!tmp)
-			return -ENOMEM;
-
-		rc = of_property_read_u32_array(dev->of_node,
-				"qcom,cpr-quot-adjust-table", tmp, size);
-		if (rc) {
-			pr_err("qcom,cpr-quot-adjust-table missing, rc = %d",
-				rc);
-			kfree(tmp);
-			return rc;
-		}
-
-		stripe_size = sizeof(struct quot_adjust_info) / sizeof(int);
-
-		if ((size % stripe_size) != 0) {
-			pr_err("qcom,cpr-quot-adjust-table data is not correct");
-			kfree(tmp);
-			return -EINVAL;
-		}
-
-		for (i = 0; i < size; i += stripe_size) {
-			if (tmp[i] == cpr_vreg->speed_bin) {
-				if (tmp[i + 1] >= 1 &&
-					tmp[i + 1] <=
-					cpr_vreg->num_corners) {
-					cpr_vreg->quot_adjust[tmp[i + 1]] =
-					tmp[i + 2];
-				} else {
-					pr_err("qcom,cpr-quot-adjust-table data is not correct");
-					kfree(tmp);
-					return -EINVAL;
-				}
-			}
-		}
-
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-speed-bin-max-corners", tmp, size);
+	if (rc < 0) {
 		kfree(tmp);
+		pr_err("get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
+		return rc;
 	}
 
+	cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
+
+	/*
+	 * Based on the speed_bin and pvs_version, find the maximum virtual
+	 * corner corresponding to each of the SVS/NORMAL/TURBO fuse corners.
+	 */
+	for (i = 0; i < size; i += 5) {
+		if (tmp[i] == cpr_vreg->speed_bin &&
+			tmp[i + 1] == cpr_vreg->pvs_version) {
+			svs_corner = tmp[i + 2];
+			normal_corner = tmp[i + 3];
+			turbo_corner = tmp[i + 4];
+			break;
+		}
+	}
+	kfree(tmp);
+	/*
+	 * Return success even if the virtual corner values read from
+	 * the qcom,cpr-speed-bin-max-corners property are incorrect;
+	 * this makes sure the driver can continue to run without
+	 * error.
+	 */
+	if (turbo_corner <= normal_corner ||
+			turbo_corner > cpr_vreg->num_corners) {
+		cpr_debug("turbo:%d should be larger than normal:%d\n",
+				turbo_corner, normal_corner);
+		return 0;
+	}
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-corner-frequency-map", NULL);
+	if (!prop) {
+		cpr_debug("qcom,cpr-corner-frequency-map missing\n");
+		return 0;
+	}
+
+	size = prop->length / sizeof(u32);
+	tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+	if (!tmp) {
+		pr_err("memory alloc failed\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-corner-frequency-map", tmp, size);
+	if (rc < 0) {
+		pr_err("get cpr-corner-frequency-map failed, rc = %d\n", rc);
+		kfree(tmp);
+		return rc;
+	}
+	freq_mappings = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
+			GFP_KERNEL);
+	if (!freq_mappings) {
+		pr_err("memory alloc for freq_mappings failed!\n");
+		kfree(tmp);
+		return -ENOMEM;
+	}
+	for (i = 0; i < size; i += 2) {
+		corner = tmp[i];
+		if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
+			pr_err("corner should be in 1~%d range: %d\n",
+					cpr_vreg->num_corners, corner);
+			continue;
+		}
+		freq_mappings[corner] = tmp[i + 1];
+		cpr_debug("Frequency at virtual corner %d is %d Hz.\n",
+				corner, freq_mappings[corner]);
+	}
+	kfree(tmp);
+
+	rc = of_property_read_u32(dev->of_node,
+		"qcom,cpr-quot-adjust-scaling-factor-max",
+		&max_factor);
+	if (rc < 0) {
+		cpr_debug("get cpr-quot-adjust-scaling-factor-max failed\n");
+		kfree(freq_mappings);
+		return 0;
+	}
+
+	/*
+	 * Get the quot adjust scaling factor, according to:
+	 * scaling =
+	 * min(1000 * (QUOT(fused @turbo) - QUOT(fused @normal)) /
+	 * (freq_turbo - freq_normal), max_factor)
+	 *
+	 * @QUOT(fused @turbo): quotient read from fuse for TURBO fuse corner;
+	 * @QUOT(fused @normal): quotient read from fuse for NORMAL fuse corner;
+	 * @freq_turbo: MHz, max frequency running at TURBO fuse corner;
+	 * @freq_normal: MHz, max frequency running at NORMAL fuse corner.
+	 */
+
+	freq_turbo = freq_mappings[turbo_corner];
+	freq_normal = freq_mappings[normal_corner];
+	if (freq_normal == 0 || freq_turbo <= freq_normal) {
+		pr_err("freq_turbo: %d should be larger than freq_normal: %d\n",
+				freq_turbo, freq_normal);
+		kfree(freq_mappings);
+		return -EINVAL;
+	}
+	freq_turbo /= 1000000;	/* MHz */
+	freq_normal /= 1000000;
+	scaling = 1000 *
+		(cpr_vreg->cpr_fuse_target_quot[CPR_FUSE_CORNER_TURBO] -
+		cpr_vreg->cpr_fuse_target_quot[CPR_FUSE_CORNER_NORMAL]) /
+		(freq_turbo - freq_normal);
+	scaling = min(scaling, max_factor);
+	pr_info("quotient adjustment scaling factor: %d.%03d\n",
+			scaling / 1000, scaling % 1000);
+
+	/*
+	 * Walk through the corners mapped to the TURBO fuse corner and
+	 * calculate the quotient adjustment for each one using the following
+	 * formula:
+	 * quot_adjust = (freq_turbo - freq_corner) * scaling / 1000
+	 *
+	 * @freq_turbo: MHz, max frequency running at TURBO fuse corner;
+	 * @freq_corner: MHz, frequency running at a corner.
+	 */
+	for (i = turbo_corner; i > normal_corner; i--) {
+		freq_corner = freq_mappings[i] / 1000000; /* MHz */
+		if (freq_corner > 0) {
+			cpr_vreg->quot_adjust[i] =
+				scaling * (freq_turbo - freq_corner) / 1000;
+		}
+		pr_info("adjusted quotient[%d] = %d\n", i,
+			(cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+				- cpr_vreg->quot_adjust[i]));
+	}
+	kfree(freq_mappings);
 	return 0;
 }
 
@@ -1531,7 +1668,7 @@
 		}
 	}
 
-	rc = cpr_get_of_corner_mappings(cpr_vreg, &pdev->dev);
+	rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
 	if (rc)
 		return rc;
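
cpr_get_corner_quot_adjustment() above only scales target quotients for the virtual corners that map to the TURBO fuse corner, using the two formulas quoted in its comments. A worked example with made-up numbers; the fused quotients and corner frequencies below are assumptions, not values from this patch, and 650 is the qcom,cpr-quot-adjust-scaling-factor-max cap set in the dtsi hunk earlier in this change:

	/*
	 * Assume QUOT(fused @turbo) = 1400, QUOT(fused @normal) = 1100,
	 * freq_turbo = 2265 MHz and freq_normal = 1190 MHz:
	 *
	 *   scaling = min(1000 * (1400 - 1100) / (2265 - 1190), 650)
	 *           = min(279, 650) = 279
	 *
	 * For a virtual corner above the NORMAL maximum that runs at
	 * 1728 MHz:
	 *
	 *   quot_adjust = (2265 - 1728) * 279 / 1000 = 149
	 *   adjusted quotient = 1400 - 149 = 1251
	 */
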
 
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 89e3b51..c7f8b74 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -27,6 +27,12 @@
 #include <linux/msm_ssbi.h>
 #include <mach/msm_bus.h>
 
+#define WLAN_RF_REG_ADDR_START_OFFSET   0x3
+#define WLAN_RF_REG_DATA_START_OFFSET   0xf
+#define WLAN_RF_READ_REG_CMD            0x3
+#define WLAN_RF_WRITE_REG_CMD           0x2
+#define WLAN_RF_READ_CMD_MASK           0x3fff
+
 struct msm_camera_io_ext {
 	uint32_t mdcphy;
 	uint32_t mdcsz;
@@ -677,4 +683,5 @@
 extern phys_addr_t msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
 
 
+u32 wcnss_rf_read_reg(u32 rf_reg_addr);
 #endif
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index c4db727..0f69a7b 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -86,6 +86,7 @@
  * struct msm_iommu_drvdata - A single IOMMU hardware instance
  * @base:	IOMMU config port base address (VA)
  * @glb_base:	IOMMU config port base address for global register space (VA)
+ * @phys_base:  IOMMU physical base address.
  * @ncb		The number of contexts on this IOMMU
  * @irq:	Interrupt number
  * @clk:	The bus clock for this IOMMU hardware instance
@@ -108,6 +109,7 @@
  */
 struct msm_iommu_drvdata {
 	void __iomem *base;
+	phys_addr_t phys_base;
 	void __iomem *glb_base;
 	int ncb;
 	int ttbr_split;
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v1.h b/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
index 04cd441..4509092 100644
--- a/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,17 +16,19 @@
 #define CTX_SHIFT  12
 #define CTX_OFFSET 0x8000
 
-#define GET_GLOBAL_REG(reg, base) (readl_relaxed((base) + (reg)))
-#define GET_CTX_REG(reg, base, ctx) \
-	(readl_relaxed((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
-#define GET_CTX_REG_L(reg, base, ctx) \
-	(readll_relaxed((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
+#define CTX_REG(reg, base, ctx) \
+	((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT))
+#define GLB_REG(reg, base) \
+	((base) + (reg))
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base)))
+#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx)))
+#define GET_CTX_REG_L(reg, base, ctx) (readll_relaxed(CTX_REG(reg, base, ctx)))
 
 #define SET_GLOBAL_REG(reg, base, val)	writel_relaxed((val), ((base) + (reg)))
 
 #define SET_CTX_REG(reg, base, ctx, val) \
-	writel_relaxed((val), \
-		((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
+	writel_relaxed((val), (CTX_REG(reg, base, ctx)))
 
 /* Wrappers for numbered registers */
 #define SET_GLOBAL_REG_N(b, n, r, v) SET_GLOBAL_REG((b), ((r) + (n << 2)), (v))
@@ -150,6 +152,11 @@
 				SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
 #define GET_MICRO_MMU_CTRL_IDLE(b) \
 				GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \
+				SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v)
+
+#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT)
+
 #define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
 #define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
 #define SET_S1L1BFBLP0(b, v)     SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
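
The accessor rework above factors the context-register address arithmetic into a single CTX_REG() macro so the read and write paths cannot drift apart. A small expansion, purely illustrative:

	/*
	 * For context bank 2 and register offset 0x20:
	 *   CTX_REG(0x20, base, 2)
	 *     = base + CTX_OFFSET + 0x20 + (2 << CTX_SHIFT)
	 *     = base + 0x8000 + 0x20 + 0x2000
	 *     = base + 0xa020
	 * GET_CTX_REG(), GET_CTX_REG_L() and SET_CTX_REG() all resolve to
	 * readl_relaxed()/readll_relaxed()/writel_relaxed() on this address.
	 */
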
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index f398652..edfe6b4 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -91,6 +91,7 @@
 	struct coresight_device *csdev;
 	struct coresight_platform_data *coresight_pdata;
 	unsigned int chipid;
+	unsigned int pm_qos_latency;
 };
 
 #endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 32d58d4..00aedb6 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -226,7 +226,6 @@
 int process_dump(int, struct ocmem_handle *, unsigned long);
 int ocmem_rdm_transfer(int, struct ocmem_map_list *,
 				unsigned long, int);
-int ocmem_clear(unsigned long, unsigned long);
 unsigned long process_quota(int);
 int ocmem_memory_off(int, unsigned long, unsigned long);
 int ocmem_memory_on(int, unsigned long, unsigned long);
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index aeb32f8..24b5181 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -138,6 +138,11 @@
 	MSM_CPU_SAMARIUM,
 };
 
+struct msm_soc_info {
+	enum msm_cpu generic_soc_type;
+	char *soc_id_string;
+};
+
 enum pmic_model {
 	PMIC_MODEL_PM8058	= 13,
 	PMIC_MODEL_PM8028	= 14,
diff --git a/arch/arm/mach-msm/include/mach/subsystem_notif.h b/arch/arm/mach-msm/include/mach/subsystem_notif.h
index 5865eff..59e212f 100644
--- a/arch/arm/mach-msm/include/mach/subsystem_notif.h
+++ b/arch/arm/mach-msm/include/mach/subsystem_notif.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
 	SUBSYS_BEFORE_POWERUP,
 	SUBSYS_AFTER_POWERUP,
 	SUBSYS_RAMDUMP_NOTIFICATION,
+	SUBSYS_POWERUP_FAILURE,
 	SUBSYS_NOTIF_TYPE_COUNT
 };
 
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 7128017..bd28131 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -82,6 +82,7 @@
 
 static struct lpm_system_state sys_state;
 static bool suspend_in_progress;
+static int64_t suspend_time;
 
 struct lpm_lookup_table {
 	uint32_t modes;
@@ -526,7 +527,7 @@
 		if (latency_us < pwr->latency_us)
 			continue;
 
-		if (next_event_us)
+		if (next_event_us) {
 			if (next_event_us < pwr->latency_us)
 				continue;
 
@@ -535,6 +536,7 @@
 				next_wakeup_us = next_event_us
 					- pwr->latency_us;
 			}
+		}
 
 		if (next_wakeup_us <= pwr->time_overhead_us)
 			continue;
@@ -544,11 +546,11 @@
 			if (!dev->cpu && msm_rpm_waiting_for_ack())
 					break;
 
-		if ((next_wakeup_us >> 10) > pwr->latency_us) {
+		if ((next_wakeup_us >> 10) > pwr->time_overhead_us) {
 			power = pwr->ss_power;
 		} else {
 			power = pwr->ss_power;
-			power -= (pwr->latency_us * pwr->ss_power)
+			power -= (pwr->time_overhead_us * pwr->ss_power)
 					/ next_wakeup_us;
 			power += pwr->energy_overhead / next_wakeup_us;
 		}
@@ -778,6 +780,11 @@
 
 static int lpm_suspend_prepare(void)
 {
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+	suspend_time = timespec_to_ns(&ts);
+
 	suspend_in_progress = true;
 	msm_mpm_suspend_prepare();
 	return 0;
@@ -785,6 +792,12 @@
 
 static void lpm_suspend_wake(void)
 {
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+	suspend_time = timespec_to_ns(&ts) - suspend_time;
+	msm_pm_add_stat(MSM_PM_STAT_SUSPEND, suspend_time);
+
 	msm_mpm_suspend_wake();
 	suspend_in_progress = false;
 }
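
The level-selection fix above makes both the break-even check and the steady-state power estimate use time_overhead_us (the cost of entering and leaving the state) rather than latency_us. A small numeric illustration with assumed values; the units follow whatever the platform power tables use and are not taken from this patch:

	/*
	 * Assume ss_power = 100, time_overhead_us = 1500,
	 * energy_overhead = 300000 and next_wakeup_us = 10000:
	 *
	 *   10000 >> 10 = 9, which is not greater than 1500, so the
	 *   overhead-adjusted estimate is used:
	 *
	 *   power = 100 - (1500 * 100) / 10000 + 300000 / 10000
	 *         = 100 - 15 + 30 = 115
	 *
	 * Long sleeps (next_wakeup_us >> 10 above the overhead) skip the
	 * adjustment and are charged ss_power directly.
	 */
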
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index 1fbd077..f1bf64f 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -531,6 +531,7 @@
 void msm_mpm_exit_sleep(bool from_idle)
 {
 	unsigned long pending;
+	uint32_t *enabled_intr;
 	int i;
 	int k;
 
@@ -539,12 +540,16 @@
 		return;
 	}
 
+	enabled_intr = from_idle ? msm_mpm_enabled_irq :
+						msm_mpm_wake_irq;
+
 	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
 		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);
+		pending &= enabled_intr[i];
 
 		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
-			pr_info("%s: pending.%d: 0x%08lx", __func__,
-					i, pending);
+			pr_info("%s: enabled_intr pending.%d: 0x%08x 0x%08lx\n",
+				__func__, i, enabled_intr[i], pending);
 
 		k = find_first_bit(&pending, 32);
 		while (k < 32) {
@@ -678,7 +683,8 @@
 		return -ENXIO;
 	}
 	ret = devm_request_irq(&pdev->dev, dev->mpm_ipc_irq, msm_mpm_irq,
-			IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, pdev->name,
+			msm_mpm_irq);
 
 	if (ret) {
 		pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
diff --git a/arch/arm/mach-msm/msm-pm.c b/arch/arm/mach-msm/msm-pm.c
index f9a9343..cb65a70 100644
--- a/arch/arm/mach-msm/msm-pm.c
+++ b/arch/arm/mach-msm/msm-pm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,10 +25,12 @@
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
 #include <linux/cpu_pm.h>
+#include <linux/remote_spinlock.h>
 #include <asm/uaccess.h>
 #include <asm/suspend.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
+#include <mach/remote_spinlock.h>
 #include <mach/scm.h>
 #include <mach/msm_bus.h>
 #include <mach/jtag.h>
@@ -117,6 +119,12 @@
 DEFINE_PER_CPU(struct clk *, cpu_clks);
 static struct clk *l2_clk;
 
+static int cpu_count;
+static DEFINE_SPINLOCK(cpu_cnt_lock);
+#define SCM_HANDOFF_LOCK_ID "S:7"
+static bool need_scm_handoff_lock;
+static remote_spinlock_t scm_handoff_lock;
+
 static void (*msm_pm_disable_l2_fn)(void);
 static void (*msm_pm_enable_l2_fn)(void);
 static void (*msm_pm_flush_l2_fn)(void);
@@ -478,8 +486,30 @@
 static int msm_pm_collapse(unsigned long unused)
 {
 	uint32_t cpu = smp_processor_id();
+	enum msm_pm_l2_scm_flag flag = MSM_SCM_L2_ON;
 
-	if (msm_pm_get_l2_flush_flag() == MSM_SCM_L2_OFF) {
+	spin_lock(&cpu_cnt_lock);
+	cpu_count++;
+	if (cpu_count == num_online_cpus())
+		flag = msm_pm_get_l2_flush_flag();
+
+	pr_debug("cpu:%d cores_in_pc:%d L2 flag: %d\n",
+			cpu, cpu_count, flag);
+
+	/*
+	 * The scm_handoff_lock will be release by the secure monitor.
+	 * It is used to serialize power-collapses from this point on,
+	 * so that both Linux and the secure context have a consistent
+	 * view regarding the number of running cpus (cpu_count).
+	 *
+	 * It must be acquired before releasing cpu_cnt_lock.
+	 */
+	if (need_scm_handoff_lock)
+		remote_spin_lock_rlock_id(&scm_handoff_lock,
+					  REMOTE_SPINLOCK_TID_START + cpu);
+	spin_unlock(&cpu_cnt_lock);
+
+	if (flag == MSM_SCM_L2_OFF) {
 		flush_cache_all();
 		if (msm_pm_flush_l2_fn)
 			msm_pm_flush_l2_fn();
@@ -491,8 +521,7 @@
 
 	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
 
-	scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
-				msm_pm_get_l2_flush_flag());
+	scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
 
 	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
 
@@ -534,6 +563,12 @@
 	collapsed = save_cpu_regs ?
 		!cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
 
+	if (save_cpu_regs) {
+		spin_lock(&cpu_cnt_lock);
+		cpu_count--;
+		BUG_ON(cpu_count > num_online_cpus());
+		spin_unlock(&cpu_cnt_lock);
+	}
 	msm_jtag_restore_state();
 
 	if (collapsed) {
@@ -764,17 +799,19 @@
 		pr_info("CPU%u: %s mode:%d\n",
 			smp_processor_id(), __func__, mode);
 
-	time = sched_clock();
+	if (from_idle)
+		time = sched_clock();
+
 	if (execute[mode])
 		exit_stat = execute[mode](from_idle);
-	time = sched_clock() - time;
-	if (from_idle)
+
+	if (from_idle) {
+		time = sched_clock() - time;
 		msm_pm_ftrace_lpm_exit(smp_processor_id(), mode, collapsed);
-	else
-		exit_stat = MSM_PM_STAT_SUSPEND;
-	if (exit_stat >= 0)
-		msm_pm_add_stat(exit_stat, time);
-	do_div(time, 1000);
+		if (exit_stat >= 0)
+			msm_pm_add_stat(exit_stat, time);
+	}
+
 	return collapsed;
 }
 
@@ -1166,6 +1203,7 @@
 	struct resource *res = NULL;
 	int i;
 	struct msm_pm_init_data_type pdata_local;
+	struct device_node *lpm_node;
 	int ret = 0;
 
 	memset(&pdata_local, 0, sizeof(struct msm_pm_init_data_type));
@@ -1192,6 +1230,23 @@
 		msm_pc_debug_counters_phys = 0;
 	}
 
+	lpm_node = of_parse_phandle(pdev->dev.of_node, "qcom,lpm-levels", 0);
+	if (!lpm_node) {
+		pr_warn("Could not get qcom,lpm-levels handle\n");
+		return -EINVAL;
+	}
+	need_scm_handoff_lock = of_property_read_bool(lpm_node,
+						      "qcom,allow-synced-levels");
+	if (need_scm_handoff_lock) {
+		ret = remote_spin_lock_init(&scm_handoff_lock,
+					    SCM_HANDOFF_LOCK_ID);
+		if (ret) {
+			pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
 	if (pdev->dev.of_node) {
 		enum msm_pm_pc_mode_type pc_mode;
 
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 5747f79..8cb3cf3 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -1828,6 +1828,53 @@
 		set_qos_mode(baddr, mas_index, 0, 0, 0);
 }
 
+static void bimc_set_static_qos_bw(struct msm_bus_bimc_info *binfo,
+	int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+	int32_t bw_mbps, thh = 0, thm, thl, gc;
+	int32_t gp;
+	u64 temp;
+
+	if (binfo->qos_freq == 0) {
+		MSM_BUS_DBG("Zero QoS Frequency\n");
+		return;
+	}
+
+	if (!(qbw->bw && qbw->ws)) {
+		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+		return;
+	}
+
+	/* Convert bandwidth to MBPS */
+	temp = qbw->bw;
+	bimc_div(&temp, 1000000);
+	bw_mbps = temp;
+
+	/* Grant period in clock cycles
+	 * Grant period from bandwidth structure
+	 * is in nano seconds, QoS freq is in KHz.
+	 * Divide by 1000 to get clock cycles */
+	gp = (binfo->qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+	/* Grant count = BW in MBps * Grant period
+	 * in micro seconds */
+	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+
+	/* Medium threshold = -((Medium Threshold percentage *
+	 * Grant count) / 100) */
+	thm = -((qbw->thmp * gc) / 100);
+	qbw->thm = thm;
+
+	/* Low threshold = -(Grant count) */
+	thl = -gc;
+	qbw->thl = thl;
+
+	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+			__func__, gp, gc, thm, thl, thh);
+
+	set_qos_bw_regs(binfo->base, mport, thh, thm, thl, gp, gc);
+}
+
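The grant-period and grant-count comments above compress several unit conversions; a worked example with illustrative numbers (not taken from any real target) may help:

/*
 * qos_freq = 19200 KHz, gp = 5000 ns, bw = 2,000,000,000 bytes/s, thmp = 50%:
 *
 *   bw_mbps = 2,000,000,000 / 1,000,000        = 2000 MBps
 *   gp      = (19200 * 5000) / (1000 * 1000)   = 96 QoS clock cycles
 *   gc      = 2000 * (5000 / 1000)             = 10000 grants per period
 *   thm     = -((50 * 10000) / 100)            = -5000
 *   thl     = -10000, thh = 0
 *
 * These five values (thh, thm, thl, gp, gc) are what reach set_qos_bw_regs().
 */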
 static void msm_bus_bimc_config_master(
 	struct msm_bus_fabric_registration *fab_pdata,
 	struct msm_bus_inode_info *info,
@@ -1835,6 +1882,7 @@
 {
 	int mode, i, ports;
 	struct msm_bus_bimc_info *binfo;
+	uint64_t bw = 0;
 
 	binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
 	ports = info->node_info->num_mports;
@@ -1843,11 +1891,17 @@
 	 * Here check the details of dual configuration.
 	 * Take actions based on different modes.
 	 * Check for threshold if limiter mode, etc.
-	 */
-	if (req_clk > info->node_info->th)
-		mode = info->node_info->mode_thresh;
-	else
+	*/
+
+	if (req_clk <= info->node_info->th[0]) {
 		mode = info->node_info->mode;
+		bw = info->node_info->bimc_bw[0];
+	} else if ((info->node_info->num_thresh > 1) &&
+			(req_clk <= info->node_info->th[1])) {
+		mode = info->node_info->mode;
+		bw = info->node_info->bimc_bw[1];
+	} else {
+		mode = info->node_info->mode_thresh;
+	}
 
 	switch (mode) {
 	case BIMC_QOS_MODE_BYPASS:
@@ -1858,9 +1912,24 @@
 		break;
 	case BIMC_QOS_MODE_REGULATOR:
 	case BIMC_QOS_MODE_LIMITER:
-		for (i = 0; i < ports; i++)
+		for (i = 0; i < ports; i++) {
+			/* If not in fixed mode, update bandwidth */
+			if ((info->node_info->cur_lim_bw != bw)
+					&& (mode != BIMC_QOS_MODE_FIXED)) {
+				struct msm_bus_bimc_qos_bw qbw;
+				qbw.ws = info->node_info->ws;
+				qbw.bw = bw;
+				qbw.gp = info->node_info->bimc_gp;
+				qbw.thmp = info->node_info->bimc_thmp;
+				bimc_set_static_qos_bw(binfo,
+					info->node_info->qport[i], &qbw);
+				info->node_info->cur_lim_bw = bw;
+				MSM_BUS_DBG("%s: Qos is %d reqclk %llu bw %llu",
+						__func__, mode, req_clk, bw);
+			}
 			bke_switch(binfo->base, info->node_info->qport[i],
 				BKE_ON, mode);
+		}
 		break;
 	default:
 		break;
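A distilled view of the two-level selection added above in msm_bus_bimc_config_master() (a hypothetical helper, not part of this patch), showing how req_clk picks a static-bandwidth level or falls back to the threshold mode:

static int bimc_pick_lim_level(u64 req_clk, const u64 *th, int num_thresh)
{
	/* Returns the index into bimc_bw[] to program, or -1 when the
	 * request exceeds every threshold and mode_thresh applies.
	 */
	if (req_clk <= th[0])
		return 0;
	if (num_thresh > 1 && req_clk <= th[1])
		return 1;
	return -1;
}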
@@ -1964,52 +2033,6 @@
 	return 0;
 }
 
-static void bimc_set_static_qos_bw(struct msm_bus_bimc_info *binfo,
-	int mport, struct msm_bus_bimc_qos_bw *qbw)
-{
-	int32_t bw_mbps, thh = 0, thm, thl, gc;
-	int32_t gp;
-	u64 temp;
-
-	if (binfo->qos_freq == 0) {
-		MSM_BUS_DBG("Zero QoS Frequency\n");
-		return;
-	}
-
-	if (!(qbw->bw && qbw->ws)) {
-		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
-		return;
-	}
-
-	/* Convert bandwidth to MBPS */
-	temp = qbw->bw;
-	bimc_div(&temp, 1000000);
-	bw_mbps = temp;
-
-	/* Grant period in clock cycles
-	 * Grant period from bandwidth structure
-	 * is in nano seconds, QoS freq is in KHz.
-	 * Divide by 1000 to get clock cycles */
-	gp = (binfo->qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
-
-	/* Grant count = BW in MBps * Grant period
-	 * in micro seconds */
-	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
-
-	/* Medium threshold = -((Medium Threshold percentage *
-	 * Grant count) / 100) */
-	thm = -((qbw->thmp * gc) / 100);
-	qbw->thm = thm;
-
-	/* Low threshold = -(Grant count) */
-	thl = -gc;
-	qbw->thl = thl;
-
-	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
-			__func__, gp, gc, thm, thl, thh);
-
-	set_qos_bw_regs(binfo->base, mport, thh, thm, thl, gp, gc);
-}
 
 static void bimc_init_mas_reg(struct msm_bus_bimc_info *binfo,
 	struct msm_bus_inode_info *info,
@@ -2048,7 +2071,7 @@
 			if (mode != BIMC_QOS_MODE_FIXED) {
 				struct msm_bus_bimc_qos_bw qbw;
 				qbw.ws = info->node_info->ws;
-				qbw.bw = info->node_info->bimc_bw;
+				qbw.bw = info->node_info->bimc_bw[0];
 				qbw.gp = info->node_info->bimc_gp;
 				qbw.thmp = info->node_info->bimc_thmp;
 				bimc_set_static_qos_bw(binfo,
@@ -2081,9 +2104,11 @@
 	 * If the master supports dual configuration,
 	 * configure registers for both modes
 	 */
-	if (info->node_info->dual_conf)
+	if (info->node_info->dual_conf) {
 		bimc_init_mas_reg(binfo, info, qmode,
 			info->node_info->mode_thresh);
+		info->node_info->cur_lim_bw = 0;
+	}
 
 	bimc_init_mas_reg(binfo, info, qmode, info->node_info->mode);
 	return 0;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index 7e4a513..557bcca 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -82,10 +82,12 @@
 	unsigned int prio_wr;
 	unsigned int prio1;
 	unsigned int prio0;
-	u64 th;
+	unsigned int num_thresh;
+	u64 *th;
+	u64 cur_lim_bw;
 	unsigned int mode_thresh;
 	bool dual_conf;
-	u64 bimc_bw;
+	u64 *bimc_bw;
 	u32 bimc_gp;
 	u32 bimc_thmp;
 	const char *name;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 626c5e8..0ffc194 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -364,7 +364,7 @@
 {
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
 	void *sel_cdata;
-	long rounded_rate;
+	long rounded_rate, cur_rate;
 
 	sel_cdata = fabric->cdata[ctx];
 
@@ -379,16 +379,20 @@
 	}
 
 	/* Enable clocks before accessing QoS registers */
-	if (fabric->info.nodeclk[DUAL_CTX].clk)
+	if (fabric->info.nodeclk[DUAL_CTX].clk) {
 		if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
-			rounded_rate = clk_round_rate(fabric->
-				info.nodeclk[DUAL_CTX].clk, 1);
+			cur_rate = clk_get_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk);
+			rounded_rate = clk_round_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk,
+					cur_rate ? cur_rate : 1);
 		if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
 				rounded_rate))
 			MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
 				fabric->fabdev.id, rounded_rate);
 
 		clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+		}
 	}
 
 	if (info->iface_clk.clk)
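The rounding change above keeps an already-running fabric clock at its current rate while QoS registers are programmed; a small sketch of the fallback rule (hypothetical helper name):

static long qos_enable_rate(struct clk *clk)
{
	unsigned long cur = clk_get_rate(clk);

	/* A stopped clock reports 0, so round from 1 Hz as the old code did;
	 * otherwise round from the rate the clock is already running at.
	 */
	return clk_round_rate(clk, cur ? cur : 1);
}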
@@ -514,22 +518,26 @@
 	struct msm_bus_inode_info *info, uint64_t req_clk, uint64_t req_bw)
 {
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
-	long rounded_rate;
+	long rounded_rate, cur_rate;
 
 	if (fabdev->hw_algo.config_master == NULL)
 		return;
 
 	/* Enable clocks before accessing QoS registers */
-	if (fabric->info.nodeclk[DUAL_CTX].clk)
+	if (fabric->info.nodeclk[DUAL_CTX].clk) {
 		if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
-			rounded_rate = clk_round_rate(fabric->
-				info.nodeclk[DUAL_CTX].clk, 1);
+			cur_rate = clk_get_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk);
+			rounded_rate = clk_round_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk,
+					cur_rate ? cur_rate : 1);
 		if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
 				rounded_rate))
 			MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
 				fabric->fabdev.id, rounded_rate);
 
 		clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+		}
 	}
 
 	if (info->iface_clk.clk)
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_of.c b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
index 52195c7..f857920 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_of.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
@@ -271,6 +271,54 @@
 	return NULL;
 }
 
+static u64 *get_th_params(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	u64 *ret_arr = NULL;
+	int *arr = NULL;
+	int i;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
+							GFP_KERNEL);
+	arr = kzalloc(size, GFP_KERNEL);
+	if ((size > 0) && (ZERO_OR_NULL_PTR(arr)
+				|| ZERO_OR_NULL_PTR(ret_arr))) {
+		pr_err("Error: Failed to alloc mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	for (i = 0; i < *nports; i++)
+		ret_arr[i] = (uint64_t)KBTOB(arr[i]);
+
+	MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
+
+	for (i = 0; i < *nports; i++)
+		MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
+
+	kfree(arr);
+	return ret_arr;
+err:
+	kfree(arr);
+	devm_kfree(&pdev->dev, ret_arr);
+	return NULL;
+}
+
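get_th_params() feeds the dual-configuration support added to msm_bus_bimc.c; a sketch of how the two parsed arrays are expected to pair up (the property values are made-up examples):

/*
 *   qcom,thresh  = <200000 400000>;    ->  th[0..1]       (u32 KB -> u64 bytes)
 *   qcom,bimc,bw = <100000 300000>;    ->  bimc_bw[0..1]  (u32 KB -> u64 bytes)
 */
int num_thresh = 0, num_bw = 0;
u64 *th = get_th_params(pdev, child_node, "qcom,thresh", &num_thresh);
u64 *bw = get_th_params(pdev, child_node, "qcom,bimc,bw", &num_bw);

/* th[i] is the highest request clock served with static bandwidth bw[i];
 * the two counts must match or dual configuration is rejected.
 */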
 static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
 	struct platform_device *pdev,
 	struct msm_bus_fabric_registration *pdata)
@@ -278,7 +326,7 @@
 	struct msm_bus_node_info *info;
 	struct device_node *child_node = NULL;
 	int i = 0, ret;
-	u32 temp;
+	int num_bw = 0;
 
 	for_each_child_of_node(of_node, child_node) {
 		i++;
@@ -353,36 +401,29 @@
 		of_property_read_u32(child_node, "qcom,buswidth",
 			&info[i].buswidth);
 		of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
-		ret = of_property_read_u32(child_node, "qcom,thresh",
-			&temp);
-		if (!ret)
-			info[i].th = (uint64_t)KBTOB(temp);
 
-		ret = of_property_read_u32(child_node, "qcom,bimc,bw",
-			&temp);
-		if (!ret)
-			info[i].bimc_bw = (uint64_t)KBTOB(temp);
+		info[i].dual_conf =
+			of_property_read_bool(child_node, "qcom,dual-conf");
+
+		info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
+						&info[i].num_thresh);
+
+		info[i].bimc_bw = get_th_params(pdev, child_node,
+						"qcom,bimc,bw", &num_bw);
+
+		if (num_bw != info[i].num_thresh) {
+			pr_err("%s: num_bw %d must equal num_thresh %d\n",
+				__func__, num_bw, info[i].num_thresh);
+			pr_err("%s: Err setting up dual conf for %s\n",
+				__func__, info[i].name);
+			goto err;
+		}
 
 		of_property_read_u32(child_node, "qcom,bimc,gp",
 			&info[i].bimc_gp);
 		of_property_read_u32(child_node, "qcom,bimc,thmp",
 			&info[i].bimc_thmp);
-		ret = of_property_read_string(child_node, "qcom,mode",
-			&sel_str);
-		if (ret)
-			info[i].mode = 0;
-		else {
-			ret = get_num(mode_sel_name, sel_str);
-			if (ret < 0) {
-				pr_err("Unknown mode :%s\n", sel_str);
-				goto err;
-			}
-
-			info[i].mode = ret;
-		}
-
-		info[i].dual_conf =
-			of_property_read_bool(child_node, "qcom,dual-conf");
 
 		ret = of_property_read_string(child_node, "qcom,mode-thresh",
 			&sel_str);
@@ -397,7 +438,21 @@
 
 			info[i].mode_thresh = ret;
 			MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
-				info[i].mode_thresh);
+					info[i].mode_thresh);
+		}
+
+		ret = of_property_read_string(child_node, "qcom,mode",
+				&sel_str);
+		if (ret)
+			info[i].mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode = ret;
 		}
 
 		ret = of_property_read_string(child_node, "qcom,perm-mode",
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
index c186a5e..f753391 100644
--- a/arch/arm/mach-msm/ocmem_core.c
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -51,6 +51,7 @@
 static struct ocmem_hw_region *region_ctrl;
 static struct mutex region_ctrl_lock;
 static void *ocmem_base;
+static void *ocmem_vbase;
 
 #define OCMEM_V1_MACROS 8
 #define OCMEM_V1_MACRO_SZ (SZ_64K)
@@ -562,6 +563,13 @@
 	ocmem_write(0x0, ocmem_base + OC_GFX_MPU_END);
 }
 
+int ocmem_clear(unsigned long start, unsigned long size)
+{
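+	/*
+	 * Note: memset() takes its fill value as a single byte, so this
+	 * fills the region with 0x4F ('O') bytes rather than the full
+	 * 32-bit 0x4D4D434F pattern the DM engine used to program.
+	 */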
+	memset((ocmem_vbase + start), 0x4D4D434F, size);
+	mb();
+	return 0;
+}
+
 static int do_lock(enum ocmem_client id, unsigned long offset,
 			unsigned long len, enum region_mode mode)
 {
@@ -1144,6 +1152,7 @@
 
 	pdata = platform_get_drvdata(pdev);
 	ocmem_base = pdata->reg_base;
+	ocmem_vbase = pdata->vbase;
 
 	rc = ocmem_enable_core_clock();
 
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
index 4ff7212..9eac050 100644
--- a/arch/arm/mach-msm/ocmem_rdm.c
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -149,38 +149,6 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_MSM_OCMEM_NONSECURE
-int ocmem_clear(unsigned long start, unsigned long size)
-{
-	INIT_COMPLETION(dm_clear_event);
-	/* Clear DM Mask */
-	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
-	/* Clear DM Interrupts */
-	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
-	/* DM CLR offset */
-	ocmem_write(start, dm_base + DM_CLR_OFFSET);
-	/* DM CLR size */
-	ocmem_write(size, dm_base + DM_CLR_SIZE);
-	/* Wipe out memory as "OCMM" */
-	ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
-	/* The offset, size and pattern for clearing must be set
-	 * before triggering the clearing engine
-	 */
-	mb();
-	/* Trigger Data Clear */
-	ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);
-
-	wait_for_completion(&dm_clear_event);
-
-	return 0;
-}
-#else
-int ocmem_clear(unsigned long start, unsigned long size)
-{
-	return 0;
-}
-#endif
-
 /* Lock during transfers */
 int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
 			unsigned long start, int direction)
diff --git a/arch/arm/mach-msm/qdsp6v2/Makefile b/arch/arm/mach-msm/qdsp6v2/Makefile
index 6bd3efb..3d7638d 100644
--- a/arch/arm/mach-msm/qdsp6v2/Makefile
+++ b/arch/arm/mach-msm/qdsp6v2/Makefile
@@ -12,7 +12,7 @@
 obj-$(CONFIG_FB_MSM_HDMI_MSM_PANEL) += lpa_if_hdmi.o
 endif
 obj-$(CONFIG_MSM_QDSP6_APR) += apr.o apr_v1.o apr_tal.o q6core.o dsp_debug.o
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o dsp_debug.o
+obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o dsp_debug.o voice_svc.o
 ifdef CONFIG_ARCH_MSM9615
 obj-y += audio_acdb.o
 obj-y += rtac.o
diff --git a/arch/arm/mach-msm/qdsp6v2/apr.c b/arch/arm/mach-msm/qdsp6v2/apr.c
index 8d9ad29..937eeda 100644
--- a/arch/arm/mach-msm/qdsp6v2/apr.c
+++ b/arch/arm/mach-msm/qdsp6v2/apr.c
@@ -436,7 +436,7 @@
 	if (data.payload_size > 0)
 		data.payload = (char *)hdr + hdr_size;
 
-	temp_port = ((data.src_port >> 8) * 8) + (data.src_port & 0xFF);
+	temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
 	pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
 	if (c_svc->port_cnt && c_svc->port_fn[temp_port])
 		c_svc->port_fn[temp_port](&data,  c_svc->port_priv[temp_port]);
diff --git a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
index 399e073..df7760a 100644
--- a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
+++ b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
@@ -344,7 +344,7 @@
 		pr_err("%s: ion import dma buffer failed\n",
 			__func__);
 		rc = -EINVAL;
-		goto err_destroy_client;
+		goto err;
 	}
 
 	if (ionflag != NULL) {
@@ -380,10 +380,6 @@
 
 err_ion_handle:
 	ion_free(client, *handle);
-err_destroy_client:
-	msm_audio_ion_client_destroy(client);
-	client = NULL;
-	*handle = NULL;
 err:
 	return rc;
 }
diff --git a/arch/arm/mach-msm/qdsp6v2/voice_svc.c b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
new file mode 100644
index 0000000..92b3003
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
@@ -0,0 +1,593 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <sound/voice_svc.h>
+#include <mach/qdsp6v2/apr_tal.h>
+#include <mach/qdsp6v2/apr.h>
+
+#define DRIVER_NAME "voice_svc"
+#define MINOR_NUMBER 1
+#define APR_MAX_RESPONSE 10
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+
+struct voice_svc_device {
+	struct cdev *cdev;
+	struct device *dev;
+	int major;
+};
+
+struct voice_svc_prvt {
+	void* apr_q6_mvm;
+	void* apr_q6_cvs;
+	uint16_t response_count;
+	struct list_head response_queue;
+	wait_queue_head_t response_wait;
+	spinlock_t response_lock;
+};
+
+struct apr_data {
+	struct apr_hdr hdr;
+	__u8 payload[0];
+} __packed;
+
+struct apr_response_list {
+	struct list_head list;
+	struct voice_svc_cmd_response resp;
+};
+
+static struct voice_svc_device *voice_svc_dev;
+static struct class *voice_svc_class;
+dev_t device_num;
+
+static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
+{
+	struct voice_svc_prvt *prtd;
+	struct apr_response_list *response_list;
+	unsigned long spin_flags;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	prtd = (struct voice_svc_prvt*)priv;
+
+	pr_debug("%s: data->opcode %x\n", __func__,
+		 data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		if (data->reset_proc == APR_DEST_QDSP6) {
+			pr_debug("%s: Received reset event\n", __func__);
+
+			if (prtd->apr_q6_mvm != NULL) {
+				apr_reset(prtd->apr_q6_mvm);
+				prtd->apr_q6_mvm = NULL;
+			}
+
+			if (prtd->apr_q6_cvs != NULL) {
+				apr_reset(prtd->apr_q6_cvs);
+				prtd->apr_q6_cvs = NULL;
+			}
+		} else if (data->reset_proc == APR_DEST_MODEM) {
+			pr_debug("%s: Received Modem reset event\n", __func__);
+		}
+	}
+
+	spin_lock_irqsave(&prtd->response_lock, spin_flags);
+
+	if (prtd->response_count < APR_MAX_RESPONSE) {
+		response_list = (struct apr_response_list *)kmalloc(
+			sizeof(struct apr_response_list) + data->payload_size,
+			GFP_ATOMIC);
+		if (response_list == NULL) {
+			pr_err("%s: kmalloc failed\n", __func__);
+			spin_unlock_irqrestore(&prtd->response_lock,
+					       spin_flags);
+			return -ENOMEM;
+		}
+
+		response_list->resp.src_port = data->src_port;
+		response_list->resp.dest_port = ((data->dest_port) >> 8);
+		response_list->resp.token = data->token;
+		response_list->resp.opcode = data->opcode;
+		response_list->resp.payload_size = data->payload_size;
+		if (data->payload != NULL && data->payload_size > 0) {
+			memcpy(response_list->resp.payload, data->payload,
+				data->payload_size);
+		}
+
+		list_add_tail(&response_list->list, &prtd->response_queue);
+		prtd->response_count++;
+
+		wake_up(&prtd->response_wait);
+	} else {
+		pr_err("%s: Response dropped since the queue is full\n", __func__);
+	}
+
+	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+
+	return 0;
+}
+
+static void voice_svc_update_hdr(struct voice_svc_cmd_request* apr_req_data,
+			    struct apr_data *aprdata,
+			    struct voice_svc_prvt *prtd)
+{
+
+	aprdata->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+				       APR_HDR_LEN(sizeof(struct apr_hdr)),\
+				       APR_PKT_VER);
+	aprdata->hdr.src_port = ((apr_req_data->src_port) << 8 | 0x0001);
+	aprdata->hdr.dest_port = apr_req_data->dest_port;
+	aprdata->hdr.token = apr_req_data->token;
+	aprdata->hdr.opcode = apr_req_data->opcode;
+	aprdata->hdr.pkt_size  = APR_PKT_SIZE(APR_HDR_SIZE,
+					apr_req_data->payload_size);
+	memcpy(aprdata->payload, apr_req_data->payload,
+	       apr_req_data->payload_size);
+}
+
+static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
+			      struct voice_svc_prvt *prtd)
+{
+	int ret = 0;
+	void *apr_handle = NULL;
+	struct apr_data *aprdata = NULL;
+	uint32_t user_payload_size = 0;
+
+	if (apr_request == NULL) {
+		pr_err("%s: apr_request is NULL\n", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	user_payload_size = apr_request->payload_size;
+
+	aprdata = kmalloc(sizeof(struct apr_data) + user_payload_size,
+			  GFP_KERNEL);
+
+	if (aprdata == NULL) {
+		pr_err("%s: aprdata kmalloc failed.", __func__);
+
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	voice_svc_update_hdr(apr_request, aprdata, prtd);
+
+	if (!strncmp(apr_request->svc_name, VOICE_SVC_CVS_STR,
+	    MAX(sizeof(apr_request->svc_name), sizeof(VOICE_SVC_CVS_STR)))) {
+		apr_handle = prtd->apr_q6_cvs;
+	} else if (!strncmp(apr_request->svc_name, VOICE_SVC_MVM_STR,
+	    MAX(sizeof(apr_request->svc_name), sizeof(VOICE_SVC_MVM_STR)))) {
+		apr_handle = prtd->apr_q6_mvm;
+	} else {
+		pr_err("%s: Invalid service %s\n", __func__,
+			apr_request->svc_name);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = apr_send_pkt(apr_handle, (uint32_t *)aprdata);
+
+	if (ret < 0) {
+		pr_err("%s: Fail in sending SNDRV_VOICE_SVC_REQUEST\n",
+			__func__);
+		ret = -EINVAL;
+	} else {
+		pr_debug("%s: apr packet sent successfully %d\n",
+				__func__, ret);
+		ret = 0;
+	}
+
+done:
+	if (aprdata != NULL)
+		kfree(aprdata);
+
+	return ret;
+}
+
+static int voice_svc_reg(char *svc, uint32_t src_port,
+			 struct voice_svc_prvt *prtd, void **handle)
+{
+	int ret = 0;
+
+	if (handle == NULL) {
+		pr_err("%s: handle is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (*handle != NULL) {
+		pr_err("%s: svc handle not NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	*handle = apr_register("ADSP",
+		svc, qdsp_apr_callback,
+		((src_port) << 8 | 0x0001),
+		prtd);
+
+	if (*handle == NULL) {
+		pr_err("%s: Unable to register %s\n",
+		__func__, svc);
+
+		ret = -EFAULT;
+		goto done;
+	}
+	pr_debug("%s: register %s successful\n",
+		__func__, svc);
+done:
+	return ret;
+}
+
+static int voice_svc_dereg(char *svc, void **handle)
+{
+	int ret = 0;
+	if (handle == NULL) {
+		pr_err("%s: handle is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	apr_deregister(*handle);
+	*handle = NULL;
+	pr_debug("%s: deregister %s successful\n",
+		__func__, svc);
+
+done:
+	return ret;
+}
+
+static int process_reg_cmd(struct voice_svc_register apr_reg_svc,
+			     struct voice_svc_prvt *prtd)
+{
+	int ret = 0;
+	char *svc = NULL;
+	void **handle = NULL;
+
+	if (!strncmp(apr_reg_svc.svc_name, VOICE_SVC_MVM_STR,
+	    MAX(sizeof(apr_reg_svc.svc_name), sizeof(VOICE_SVC_MVM_STR)))) {
+		svc = VOICE_SVC_MVM_STR;
+		handle = &prtd->apr_q6_mvm;
+	} else if (!strncmp(apr_reg_svc.svc_name, VOICE_SVC_CVS_STR,
+            MAX(sizeof(apr_reg_svc.svc_name), sizeof(VOICE_SVC_CVS_STR)))) {
+		svc = VOICE_SVC_CVS_STR;
+		handle = &prtd->apr_q6_cvs;
+	} else {
+		pr_err("%s: Invalid Service: %s\n", __func__,
+				apr_reg_svc.svc_name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (*handle == NULL &&
+	    apr_reg_svc.reg_flag) {
+		ret = voice_svc_reg(svc, apr_reg_svc.src_port, prtd,
+				    handle);
+	} else if (*handle != NULL &&
+		   !apr_reg_svc.reg_flag) {
+		ret = voice_svc_dereg(svc, handle);
+	}
+
+done:
+	return ret;
+}
+
+static long voice_svc_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long u_arg)
+{
+	int ret = 0;
+	struct voice_svc_prvt *prtd;
+	struct voice_svc_register apr_reg_svc;
+	struct voice_svc_cmd_request *apr_request = NULL;
+	struct voice_svc_cmd_response *apr_response = NULL;
+	struct apr_response_list *resp;
+	void __user *arg = (void __user *)u_arg;
+	uint32_t user_payload_size = 0;
+	unsigned long spin_flags;
+
+	pr_debug("%s: cmd: %u\n", __func__, cmd);
+
+	prtd = (struct voice_svc_prvt*)file->private_data;
+
+	switch (cmd) {
+	case SNDRV_VOICE_SVC_REGISTER_SVC:
+		pr_debug("%s: size of struct: %zu\n", __func__,
+				sizeof(apr_reg_svc));
+		if (copy_from_user(&apr_reg_svc, arg, sizeof(apr_reg_svc))) {
+			pr_err("%s: copy_from_user failed\n", __func__);
+
+			ret = -EFAULT;
+			goto done;
+		}
+
+		ret = process_reg_cmd(apr_reg_svc, prtd);
+
+		break;
+	case SNDRV_VOICE_SVC_CMD_REQUEST:
+		if (!access_ok(VERIFY_READ, arg,
+				sizeof(struct voice_svc_cmd_request))) {
+			pr_err("%s: Unable to read user data", __func__);
+
+			ret = -EFAULT;
+			goto done;
+		}
+
+		user_payload_size =
+			((struct voice_svc_cmd_request*)arg)->payload_size;
+
+		apr_request = kmalloc(sizeof(struct voice_svc_cmd_request) +
+				      user_payload_size, GFP_KERNEL);
+
+		if (apr_request == NULL) {
+			pr_err("%s: apr_request kmalloc failed.", __func__);
+
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(apr_request, arg,
+				sizeof(struct voice_svc_cmd_request) +
+				user_payload_size)) {
+			pr_err("%s: copy from user failed, size %zu\n", __func__,
+				sizeof(struct voice_svc_cmd_request) +
+				user_payload_size);
+
+			ret = -EFAULT;
+			goto done;
+		}
+
+		ret = voice_svc_send_req(apr_request, prtd);
+
+		break;
+
+	case SNDRV_VOICE_SVC_CMD_RESPONSE:
+		do {
+			if (!access_ok(VERIFY_READ, arg,
+				sizeof(struct voice_svc_cmd_response))) {
+				pr_err("%s: Unable to read user data",
+				       __func__);
+
+				ret = -EFAULT;
+				goto done;
+			}
+
+			user_payload_size =
+			    ((struct voice_svc_cmd_response*)arg)->payload_size;
+			pr_debug("%s: RESPONSE: user payload size %d",
+				 __func__, user_payload_size);
+
+			spin_lock_irqsave(&prtd->response_lock, spin_flags);
+			if (!list_empty(&prtd->response_queue)) {
+				resp = list_first_entry(&prtd->response_queue,
+						struct apr_response_list, list);
+
+				if (user_payload_size <
+					resp->resp.payload_size) {
+					pr_err("%s: Invalid payload size %d,%d",
+					       __func__, user_payload_size,
+					       resp->resp.payload_size);
+					ret = -ENOMEM;
+					spin_unlock_irqrestore(
+						&prtd->response_lock,
+						spin_flags);
+					goto done;
+				}
+
+				if (!access_ok(VERIFY_WRITE, arg,
+					sizeof(struct voice_svc_cmd_response) +
+					resp->resp.payload_size)) {
+					ret = -EFAULT;
+					spin_unlock_irqrestore(
+						&prtd->response_lock,
+						spin_flags);
+					goto done;
+				}
+
+				if (copy_to_user(arg, &resp->resp,
+					sizeof(struct voice_svc_cmd_response) +
+					resp->resp.payload_size)) {
+					pr_err("%s: copy to user failed, size %zu\n",
+						__func__,
+					sizeof(struct voice_svc_cmd_response) +
+						resp->resp.payload_size);
+
+					ret = -EFAULT;
+					spin_unlock_irqrestore(
+						&prtd->response_lock,
+						spin_flags);
+					goto done;
+				}
+
+				prtd->response_count--;
+
+				list_del(&resp->list);
+				kfree(resp);
+				spin_unlock_irqrestore(&prtd->response_lock,
+							spin_flags);
+				goto done;
+			} else {
+				spin_unlock_irqrestore(&prtd->response_lock,
+							spin_flags);
+				wait_event_interruptible(prtd->response_wait,
+					!list_empty(&prtd->response_queue));
+				pr_debug("%s: Interrupt received for response",
+					 __func__);
+			}
+		} while (!apr_response);
+		break;
+	default:
+		pr_debug("%s: cmd: %u\n", __func__, cmd);
+		ret = -EINVAL;
+	}
+
+done:
+	if (apr_request != NULL)
+		kfree(apr_request);
+
+	return ret;
+}
+
+static int voice_svc_open(struct inode *inode, struct file *file)
+{
+	struct voice_svc_prvt *prtd = NULL;
+
+	prtd = kmalloc(sizeof(struct voice_svc_prvt), GFP_KERNEL);
+
+	if (prtd == NULL) {
+		pr_err("%s: kmalloc failed", __func__);
+
+		return -ENOMEM;
+	}
+
+	memset(prtd, 0, sizeof(struct voice_svc_prvt));
+	prtd->apr_q6_cvs = NULL;
+	prtd->apr_q6_mvm = NULL;
+	prtd->response_count = 0;
+
+	INIT_LIST_HEAD(&prtd->response_queue);
+	init_waitqueue_head(&prtd->response_wait);
+	spin_lock_init(&prtd->response_lock);
+
+	file->private_data = (void*)prtd;
+
+	return 0;
+}
+
+static int voice_svc_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations voice_svc_fops = {
+	.owner =                THIS_MODULE,
+	.open =                 voice_svc_open,
+	.unlocked_ioctl =       voice_svc_ioctl,
+	.release =              voice_svc_release,
+};
+
+static int voice_svc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	voice_svc_dev = devm_kzalloc(&pdev->dev, sizeof(struct voice_svc_device),
+			GFP_KERNEL);
+	if (!voice_svc_dev) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = alloc_chrdev_region(&device_num, 0, MINOR_NUMBER, DRIVER_NAME);
+	if (ret) {
+		pr_err("%s: Failed to alloc chrdev\n", __func__);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	voice_svc_dev->major = MAJOR(device_num);
+	voice_svc_class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(voice_svc_class)) {
+		ret = PTR_ERR(voice_svc_class);
+		pr_err("%s: Failed to create class; err = %d\n", __func__,
+			ret);
+		goto class_err;
+	}
+
+	voice_svc_dev->dev = device_create(voice_svc_class, NULL, device_num,
+					   NULL, DRIVER_NAME);
+	if (IS_ERR(voice_svc_dev->dev)) {
+		ret = PTR_ERR(voice_svc_dev->dev);
+		pr_err("%s: Failed to create device; err = %d\n", __func__,
+			ret);
+		goto dev_err;
+	}
+
+	voice_svc_dev->cdev = cdev_alloc();
+	cdev_init(voice_svc_dev->cdev, &voice_svc_fops);
+	ret = cdev_add(voice_svc_dev->cdev, device_num, MINOR_NUMBER);
+	if (ret) {
+		pr_err("%s: Failed to register chrdev; err = %d\n", __func__,
+			ret);
+		goto add_err;
+	}
+	pr_debug("%s: Device created\n", __func__);
+	goto done;
+
+add_err:
+	cdev_del(voice_svc_dev->cdev);
+	device_destroy(voice_svc_class, device_num);
+dev_err:
+	class_destroy(voice_svc_class);
+class_err:
+	unregister_chrdev_region(device_num, MINOR_NUMBER);
+done:
+	return ret;
+}
+
+static int voice_svc_remove(struct platform_device *pdev)
+{
+	cdev_del(voice_svc_dev->cdev);
+	device_destroy(voice_svc_class, device_num);
+	class_destroy(voice_svc_class);
+	unregister_chrdev_region(device_num, MINOR_NUMBER);
+
+	return 0;
+}
+
+static struct of_device_id voice_svc_of_match[] = {
+	{.compatible = "qcom,msm-voice-svc"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, voice_svc_of_match);
+
+static struct platform_driver voice_svc_driver = {
+	.probe          = voice_svc_probe,
+	.remove         = voice_svc_remove,
+	.driver         = {
+		.name   = "msm-voice-svc",
+		.owner  = THIS_MODULE,
+		.of_match_table = voice_svc_of_match,
+	},
+};
+
+static int __init voice_svc_init(void)
+{
+	return platform_driver_register(&voice_svc_driver);
+}
+
+static void __exit voice_svc_exit(void)
+{
+	platform_driver_unregister(&voice_svc_driver);
+}
+
+module_init(voice_svc_init);
+module_exit(voice_svc_exit);
+
+MODULE_DESCRIPTION("Soc QDSP6v2 Audio APR driver");
+MODULE_LICENSE("GPL v2");
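For context, a sketch of how a userspace client might drive this character device. The ioctl names and struct fields are the ones the driver dereferences above; the device node path, header location and exact field types are assumptions that would need to be checked against sound/voice_svc.h:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sound/voice_svc.h>		/* assumed uapi header location */

static int voice_svc_register_mvm(void)
{
	struct voice_svc_register reg;
	int fd = open("/dev/voice_svc", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;

	memset(&reg, 0, sizeof(reg));
	strncpy(reg.svc_name, VOICE_SVC_MVM_STR, sizeof(reg.svc_name) - 1);
	reg.src_port = 1;	/* example source port */
	reg.reg_flag = 1;	/* non-zero registers, zero deregisters */

	if (ioctl(fd, SNDRV_VOICE_SVC_REGISTER_SVC, &reg) < 0)
		return -1;

	/*
	 * From here, SNDRV_VOICE_SVC_CMD_REQUEST sends a voice_svc_cmd_request
	 * (opcode, token, ports, payload) to the ADSP service, and
	 * SNDRV_VOICE_SVC_CMD_RESPONSE blocks until a queued
	 * voice_svc_cmd_response is available for this client.
	 */
	return fd;
}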
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 428d5b0..9cb26e1 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -551,7 +551,7 @@
 
 static void smd_tty_close(struct tty_struct *tty, struct file *f)
 {
-	struct smd_tty_info *info = tty->driver_data;
+	struct smd_tty_info *info = smd_tty + tty->index;
 
 	tty_port_close(&info->port, tty, f);
 }
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 9f50547..8e7adba 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -21,10 +21,12 @@
 #include <linux/sys_soc.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
+#include <linux/string.h>
 #include <linux/sysdev.h>
 #include <linux/types.h>
 
 #include <asm/mach-types.h>
+#include <asm/system_misc.h>
 
 #include <mach/socinfo.h>
 #include <mach/msm_smem.h>
@@ -185,246 +187,247 @@
 	struct socinfo_v8 v8;
 } *socinfo;
 
-static enum msm_cpu cpu_of_id[] = {
+static struct msm_soc_info cpu_of_id[] = {
 
 	/* 7x01 IDs */
-	[1]  = MSM_CPU_7X01,
-	[16] = MSM_CPU_7X01,
-	[17] = MSM_CPU_7X01,
-	[18] = MSM_CPU_7X01,
-	[19] = MSM_CPU_7X01,
-	[23] = MSM_CPU_7X01,
-	[25] = MSM_CPU_7X01,
-	[26] = MSM_CPU_7X01,
-	[32] = MSM_CPU_7X01,
-	[33] = MSM_CPU_7X01,
-	[34] = MSM_CPU_7X01,
-	[35] = MSM_CPU_7X01,
+	[0]  = {MSM_CPU_UNKNOWN, "Unknown CPU"},
+	[1]  = {MSM_CPU_7X01, "MSM7X01"},
+	[16] = {MSM_CPU_7X01, "MSM7X01"},
+	[17] = {MSM_CPU_7X01, "MSM7X01"},
+	[18] = {MSM_CPU_7X01, "MSM7X01"},
+	[19] = {MSM_CPU_7X01, "MSM7X01"},
+	[23] = {MSM_CPU_7X01, "MSM7X01"},
+	[25] = {MSM_CPU_7X01, "MSM7X01"},
+	[26] = {MSM_CPU_7X01, "MSM7X01"},
+	[32] = {MSM_CPU_7X01, "MSM7X01"},
+	[33] = {MSM_CPU_7X01, "MSM7X01"},
+	[34] = {MSM_CPU_7X01, "MSM7X01"},
+	[35] = {MSM_CPU_7X01, "MSM7X01"},
 
 	/* 7x25 IDs */
-	[20] = MSM_CPU_7X25,
-	[21] = MSM_CPU_7X25, /* 7225 */
-	[24] = MSM_CPU_7X25, /* 7525 */
-	[27] = MSM_CPU_7X25, /* 7625 */
-	[39] = MSM_CPU_7X25,
-	[40] = MSM_CPU_7X25,
-	[41] = MSM_CPU_7X25,
-	[42] = MSM_CPU_7X25,
-	[62] = MSM_CPU_7X25, /* 7625-1 */
-	[63] = MSM_CPU_7X25, /* 7225-1 */
-	[66] = MSM_CPU_7X25, /* 7225-2 */
+	[20] = {MSM_CPU_7X25, "MSM7X25"},
+	[21] = {MSM_CPU_7X25, "MSM7X25"},
+	[24] = {MSM_CPU_7X25, "MSM7X25"},
+	[27] = {MSM_CPU_7X25, "MSM7X25"},
+	[39] = {MSM_CPU_7X25, "MSM7X25"},
+	[40] = {MSM_CPU_7X25, "MSM7X25"},
+	[41] = {MSM_CPU_7X25, "MSM7X25"},
+	[42] = {MSM_CPU_7X25, "MSM7X25"},
+	[62] = {MSM_CPU_7X25, "MSM7X25"},
+	[63] = {MSM_CPU_7X25, "MSM7X25"},
+	[66] = {MSM_CPU_7X25, "MSM7X25"},
 
 
 	/* 7x27 IDs */
-	[43] = MSM_CPU_7X27,
-	[44] = MSM_CPU_7X27,
-	[61] = MSM_CPU_7X27,
-	[67] = MSM_CPU_7X27, /* 7227-1 */
-	[68] = MSM_CPU_7X27, /* 7627-1 */
-	[69] = MSM_CPU_7X27, /* 7627-2 */
+	[43] = {MSM_CPU_7X27, "MSM7X27"},
+	[44] = {MSM_CPU_7X27, "MSM7X27"},
+	[61] = {MSM_CPU_7X27, "MSM7X27"},
+	[67] = {MSM_CPU_7X27, "MSM7X27"},
+	[68] = {MSM_CPU_7X27, "MSM7X27"},
+	[69] = {MSM_CPU_7X27, "MSM7X27"},
 
 
 	/* 8x50 IDs */
-	[30] = MSM_CPU_8X50,
-	[36] = MSM_CPU_8X50,
-	[37] = MSM_CPU_8X50,
-	[38] = MSM_CPU_8X50,
+	[30] = {MSM_CPU_8X50, "MSM8X50"},
+	[36] = {MSM_CPU_8X50, "MSM8X50"},
+	[37] = {MSM_CPU_8X50, "MSM8X50"},
+	[38] = {MSM_CPU_8X50, "MSM8X50"},
 
 	/* 7x30 IDs */
-	[59] = MSM_CPU_7X30,
-	[60] = MSM_CPU_7X30,
+	[59] = {MSM_CPU_7X30, "MSM7X30"},
+	[60] = {MSM_CPU_7X30, "MSM7X30"},
 
 	/* 8x55 IDs */
-	[74] = MSM_CPU_8X55,
-	[75] = MSM_CPU_8X55,
-	[85] = MSM_CPU_8X55,
+	[74] = {MSM_CPU_8X55, "MSM8X55"},
+	[75] = {MSM_CPU_8X55, "MSM8X55"},
+	[85] = {MSM_CPU_8X55, "MSM8X55"},
 
 	/* 8x60 IDs */
-	[70] = MSM_CPU_8X60,
-	[71] = MSM_CPU_8X60,
-	[86] = MSM_CPU_8X60,
+	[70] = {MSM_CPU_8X60, "MSM8X60"},
+	[71] = {MSM_CPU_8X60, "MSM8X60"},
+	[86] = {MSM_CPU_8X60, "MSM8X60"},
 
 	/* 8960 IDs */
-	[87] = MSM_CPU_8960,
+	[87] = {MSM_CPU_8960, "MSM8960"},
 
 	/* 7x25A IDs */
-	[88] = MSM_CPU_7X25A,
-	[89] = MSM_CPU_7X25A,
-	[96] = MSM_CPU_7X25A,
+	[88] = {MSM_CPU_7X25A, "MSM7X25A"},
+	[89] = {MSM_CPU_7X25A, "MSM7X25A"},
+	[96] = {MSM_CPU_7X25A, "MSM7X25A"},
 
 	/* 7x27A IDs */
-	[90] = MSM_CPU_7X27A,
-	[91] = MSM_CPU_7X27A,
-	[92] = MSM_CPU_7X27A,
-	[97] = MSM_CPU_7X27A,
+	[90] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[91] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[92] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[97] = {MSM_CPU_7X27A, "MSM7X27A"},
 
 	/* FSM9xxx ID */
-	[94] = FSM_CPU_9XXX,
-	[95] = FSM_CPU_9XXX,
+	[94] = {FSM_CPU_9XXX, "FSM9XXX"},
+	[95] = {FSM_CPU_9XXX, "FSM9XXX"},
 
 	/*  7x25AA ID */
-	[98] = MSM_CPU_7X25AA,
-	[99] = MSM_CPU_7X25AA,
-	[100] = MSM_CPU_7X25AA,
+	[98] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+	[99] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+	[100] = {MSM_CPU_7X25AA, "MSM7X25AA"},
 
 	/*  7x27AA ID */
-	[101] = MSM_CPU_7X27AA,
-	[102] = MSM_CPU_7X27AA,
-	[103] = MSM_CPU_7X27AA,
-	[136] = MSM_CPU_7X27AA,
+	[101] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[102] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[103] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[136] = {MSM_CPU_7X27AA, "MSM7X27AA"},
 
 	/* 9x15 ID */
-	[104] = MSM_CPU_9615,
-	[105] = MSM_CPU_9615,
-	[106] = MSM_CPU_9615,
-	[107] = MSM_CPU_9615,
-	[171] = MSM_CPU_9615,
+	[104] = {MSM_CPU_9615, "MSM9615"},
+	[105] = {MSM_CPU_9615, "MSM9615"},
+	[106] = {MSM_CPU_9615, "MSM9615"},
+	[107] = {MSM_CPU_9615, "MSM9615"},
+	[171] = {MSM_CPU_9615, "MSM9615"},
 
 	/* 8064 IDs */
-	[109] = MSM_CPU_8064,
+	[109] = {MSM_CPU_8064, "APQ8064"},
 
 	/* 8930 IDs */
-	[116] = MSM_CPU_8930,
-	[117] = MSM_CPU_8930,
-	[118] = MSM_CPU_8930,
-	[119] = MSM_CPU_8930,
-	[179] = MSM_CPU_8930,
+	[116] = {MSM_CPU_8930, "MSM8930"},
+	[117] = {MSM_CPU_8930, "MSM8930"},
+	[118] = {MSM_CPU_8930, "MSM8930"},
+	[119] = {MSM_CPU_8930, "MSM8930"},
+	[179] = {MSM_CPU_8930, "MSM8930"},
 
 	/* 8627 IDs */
-	[120] = MSM_CPU_8627,
-	[121] = MSM_CPU_8627,
+	[120] = {MSM_CPU_8627, "MSM8627"},
+	[121] = {MSM_CPU_8627, "MSM8627"},
 
 	/* 8660A ID */
-	[122] = MSM_CPU_8960,
+	[122] = {MSM_CPU_8960, "MSM8960"},
 
 	/* 8260A ID */
-	[123] = MSM_CPU_8960,
+	[123] = {MSM_CPU_8960, "MSM8960"},
 
 	/* 8060A ID */
-	[124] = MSM_CPU_8960,
+	[124] = {MSM_CPU_8960, "MSM8960"},
 
 	/* 8974 IDs */
-	[126] = MSM_CPU_8974,
-	[184] = MSM_CPU_8974,
-	[185] = MSM_CPU_8974,
-	[186] = MSM_CPU_8974,
+	[126] = {MSM_CPU_8974, "MSM8974"},
+	[184] = {MSM_CPU_8974, "MSM8974"},
+	[185] = {MSM_CPU_8974, "MSM8974"},
+	[186] = {MSM_CPU_8974, "MSM8974"},
 
 	/* 8974AA IDs */
-	[208] = MSM_CPU_8974PRO_AA,
-	[211] = MSM_CPU_8974PRO_AA,
-	[214] = MSM_CPU_8974PRO_AA,
-	[217] = MSM_CPU_8974PRO_AA,
+	[208] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[211] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[214] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[217] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
 
 	/* 8974AB IDs */
-	[209] = MSM_CPU_8974PRO_AB,
-	[212] = MSM_CPU_8974PRO_AB,
-	[215] = MSM_CPU_8974PRO_AB,
-	[218] = MSM_CPU_8974PRO_AB,
+	[209] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[212] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[215] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[218] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
 
 	/* 8974AC IDs */
-	[194] = MSM_CPU_8974PRO_AC,
-	[210] = MSM_CPU_8974PRO_AC,
-	[213] = MSM_CPU_8974PRO_AC,
-	[216] = MSM_CPU_8974PRO_AC,
+	[194] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[210] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[213] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[216] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
 
 	/* 8625 IDs */
-	[127] = MSM_CPU_8625,
-	[128] = MSM_CPU_8625,
-	[129] = MSM_CPU_8625,
-	[137] = MSM_CPU_8625,
-	[167] = MSM_CPU_8625,
+	[127] = {MSM_CPU_8625, "MSM8625"},
+	[128] = {MSM_CPU_8625, "MSM8625"},
+	[129] = {MSM_CPU_8625, "MSM8625"},
+	[137] = {MSM_CPU_8625, "MSM8625"},
+	[167] = {MSM_CPU_8625, "MSM8625"},
 
 	/* 8064 MPQ ID */
-	[130] = MSM_CPU_8064,
+	[130] = {MSM_CPU_8064, "APQ8064"},
 
 	/* 7x25AB IDs */
-	[131] = MSM_CPU_7X25AB,
-	[132] = MSM_CPU_7X25AB,
-	[133] = MSM_CPU_7X25AB,
-	[135] = MSM_CPU_7X25AB,
+	[131] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[132] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[133] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[135] = {MSM_CPU_7X25AB, "MSM7X25AB"},
 
 	/* 9625 IDs */
-	[134] = MSM_CPU_9625,
-	[148] = MSM_CPU_9625,
-	[149] = MSM_CPU_9625,
-	[150] = MSM_CPU_9625,
-	[151] = MSM_CPU_9625,
-	[152] = MSM_CPU_9625,
-	[173] = MSM_CPU_9625,
-	[174] = MSM_CPU_9625,
-	[175] = MSM_CPU_9625,
+	[134] = {MSM_CPU_9625, "MSM9625"},
+	[148] = {MSM_CPU_9625, "MSM9625"},
+	[149] = {MSM_CPU_9625, "MSM9625"},
+	[150] = {MSM_CPU_9625, "MSM9625"},
+	[151] = {MSM_CPU_9625, "MSM9625"},
+	[152] = {MSM_CPU_9625, "MSM9625"},
+	[173] = {MSM_CPU_9625, "MSM9625"},
+	[174] = {MSM_CPU_9625, "MSM9625"},
+	[175] = {MSM_CPU_9625, "MSM9625"},
 
 	/* 8960AB IDs */
-	[138] = MSM_CPU_8960AB,
-	[139] = MSM_CPU_8960AB,
-	[140] = MSM_CPU_8960AB,
-	[141] = MSM_CPU_8960AB,
+	[138] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[139] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[140] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[141] = {MSM_CPU_8960AB, "MSM8960AB"},
 
 	/* 8930AA IDs */
-	[142] = MSM_CPU_8930AA,
-	[143] = MSM_CPU_8930AA,
-	[144] = MSM_CPU_8930AA,
-	[160] = MSM_CPU_8930AA,
-	[180] = MSM_CPU_8930AA,
+	[142] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[143] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[144] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[160] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[180] = {MSM_CPU_8930AA, "MSM8930AA"},
 
 	/* 8226 IDs */
-	[145] = MSM_CPU_8226,
-	[158] = MSM_CPU_8226,
-	[159] = MSM_CPU_8226,
-	[198] = MSM_CPU_8226,
-	[199] = MSM_CPU_8226,
-	[200] = MSM_CPU_8226,
-	[205] = MSM_CPU_8226,
-	[219] = MSM_CPU_8226,
-	[220] = MSM_CPU_8226,
-	[221] = MSM_CPU_8226,
-	[222] = MSM_CPU_8226,
-	[223] = MSM_CPU_8226,
-	[224] = MSM_CPU_8226,
+	[145] = {MSM_CPU_8226, "MSM8626"},
+	[158] = {MSM_CPU_8226, "MSM8226"},
+	[159] = {MSM_CPU_8226, "MSM8526"},
+	[198] = {MSM_CPU_8226, "MSM8126"},
+	[199] = {MSM_CPU_8226, "APQ8026"},
+	[200] = {MSM_CPU_8226, "MSM8926"},
+	[205] = {MSM_CPU_8226, "MSM8326"},
+	[219] = {MSM_CPU_8226, "APQ8028"},
+	[220] = {MSM_CPU_8226, "MSM8128"},
+	[221] = {MSM_CPU_8226, "MSM8228"},
+	[222] = {MSM_CPU_8226, "MSM8528"},
+	[223] = {MSM_CPU_8226, "MSM8628"},
+	[224] = {MSM_CPU_8226, "MSM8928"},
 
 	/* 8092 IDs */
-	[146] = MSM_CPU_8092,
+	[146] = {MSM_CPU_8092, "MSM8092"},
 
 	/* 8610 IDs */
-	[147] = MSM_CPU_8610,
-	[161] = MSM_CPU_8610,
-	[162] = MSM_CPU_8610,
-	[163] = MSM_CPU_8610,
-	[164] = MSM_CPU_8610,
-	[165] = MSM_CPU_8610,
-	[166] = MSM_CPU_8610,
-	[225] = MSM_CPU_8610,
-	[226] = MSM_CPU_8610,
+	[147] = {MSM_CPU_8610, "MSM8610"},
+	[161] = {MSM_CPU_8610, "MSM8110"},
+	[162] = {MSM_CPU_8610, "MSM8210"},
+	[163] = {MSM_CPU_8610, "MSM8810"},
+	[164] = {MSM_CPU_8610, "MSM8212"},
+	[165] = {MSM_CPU_8610, "MSM8612"},
+	[166] = {MSM_CPU_8610, "MSM8112"},
+	[225] = {MSM_CPU_8610, "MSM8510"},
+	[226] = {MSM_CPU_8610, "MSM8512"},
 
 	/* 8064AB IDs */
-	[153] = MSM_CPU_8064AB,
+	[153] = {MSM_CPU_8064AB, "APQ8064AB"},
 
 	/* 8930AB IDs */
-	[154] = MSM_CPU_8930AB,
-	[155] = MSM_CPU_8930AB,
-	[156] = MSM_CPU_8930AB,
-	[157] = MSM_CPU_8930AB,
-	[181] = MSM_CPU_8930AB,
+	[154] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[155] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[156] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[157] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[181] = {MSM_CPU_8930AB, "MSM8930AB"},
 
 	/* 8625Q IDs */
-	[168] = MSM_CPU_8625Q,
-	[169] = MSM_CPU_8625Q,
-	[170] = MSM_CPU_8625Q,
+	[168] = {MSM_CPU_8625Q, "MSM8225Q"},
+	[169] = {MSM_CPU_8625Q, "MSM8625Q"},
+	[170] = {MSM_CPU_8625Q, "MSM8125Q"},
 
 	/* 8064AA IDs */
-	[172] = MSM_CPU_8064AA,
+	[172] = {MSM_CPU_8064AA, "APQ8064AA"},
 
 	/* 8084 IDs */
-	[178] = MSM_CPU_8084,
+	[178] = {MSM_CPU_8084, "APQ8084"},
 
 	/* krypton IDs */
-	[187] = MSM_CPU_KRYPTON,
+	[187] = {MSM_CPU_KRYPTON, "MSMKRYPTON"},
 
 	/* FSM9900 ID */
-	[188] = FSM_CPU_9900,
+	[188] = {FSM_CPU_9900, "FSM9900"},
 
 	/* Samarium IDs */
-	[195] = MSM_CPU_SAMARIUM,
+	[195] = {MSM_CPU_SAMARIUM, "MSMSAMARIUM"},
 
 	/* Uninitialized IDs are not known to run Linux.
 	   MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
@@ -455,6 +458,25 @@
 	return (socinfo) ? socinfo->v1.build_id : NULL;
 }
 
+static char *msm_read_hardware_id(void)
+{
+	static char msm_soc_str[128] = "Qualcomm ";
+	static bool string_generated = false;
+
+	if (string_generated)
+		return msm_soc_str;
+	if (!socinfo)
+		goto err_path;
+	if (!cpu_of_id[socinfo->v1.id].soc_id_string)
+		goto err_path;
+
+	string_generated = true;
+	return strncat(msm_soc_str, cpu_of_id[socinfo->v1.id].soc_id_string,
+			sizeof(msm_soc_str) - strlen(msm_soc_str) - 1);
+err_path:
+	return "UNKNOWN SOC TYPE";
+}
+
 uint32_t socinfo_get_raw_id(void)
 {
 	return socinfo ?
@@ -678,6 +700,8 @@
 			char *buf)
 {
 	uint32_t hw_subtype;
+	WARN_ONCE(1, "Deprecated, use platform_subtype_id instead\n");
+
 	if (!socinfo) {
 		pr_err("%s: No socinfo found!\n", __func__);
 		return 0;
@@ -707,6 +731,18 @@
 }
 
 static ssize_t
+socinfo_show_platform_subtype_id(struct sys_device *dev,
+			struct sysdev_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype;
+
+	hw_subtype = socinfo_get_platform_subtype();
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		hw_subtype);
+}
+
+static ssize_t
 socinfo_show_pmic_model(struct sys_device *dev,
 			struct sysdev_attribute *attr,
 			char *buf)
@@ -829,6 +865,17 @@
 }
 
 static ssize_t
+msm_get_platform_subtype_id(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype;
+	hw_subtype = socinfo_get_platform_subtype();
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		hw_subtype);
+}
+
+static ssize_t
 msm_get_pmic_model(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -974,6 +1021,8 @@
 static struct sysdev_attribute socinfo_v6_files[] = {
 	_SYSDEV_ATTR(platform_subtype, 0444,
 			socinfo_show_platform_subtype, NULL),
+	_SYSDEV_ATTR(platform_subtype_id, 0444,
+			socinfo_show_platform_subtype_id, NULL),
 };
 
 static struct sysdev_attribute socinfo_v7_files[] = {
@@ -1059,6 +1108,13 @@
 	__ATTR(platform_subtype, S_IRUGO,
 			msm_get_platform_subtype, NULL);
 
+/* Platform Subtype String is being deprecated. Use Platform
+ * Subtype ID instead.
+ */
+static struct device_attribute msm_soc_attr_platform_subtype_id =
+	__ATTR(platform_subtype_id, S_IRUGO,
+			msm_get_platform_subtype_id, NULL);
+
 static struct device_attribute msm_soc_attr_pmic_model =
 	__ATTR(pmic_model, S_IRUGO,
 			msm_get_pmic_model, NULL);
@@ -1152,6 +1208,8 @@
 	case 6:
 		device_create_file(msm_soc_device,
 					&msm_soc_attr_platform_subtype);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_subtype_id);
 	case 5:
 		device_create_file(msm_soc_device,
 					&msm_soc_attr_accessory_chip);
@@ -1401,14 +1459,16 @@
 	}
 
 	WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
-	WARN(socinfo_get_id() >= ARRAY_SIZE(cpu_of_id),
-		"New IDs added! ID => CPU mapping might need an update.\n");
 
-	if (socinfo->v1.id < ARRAY_SIZE(cpu_of_id))
-		cur_cpu = cpu_of_id[socinfo->v1.id];
+	if (socinfo_get_id() >= ARRAY_SIZE(cpu_of_id))
+		panic("New IDs added! ID => CPU mapping might need an update.\n");
+	else
+		cur_cpu = cpu_of_id[socinfo->v1.id].generic_soc_type;
 
 	boot_stats_init();
 	socinfo_print();
+	arch_read_hardware_id = msm_read_hardware_id;
 
 	return 0;
 }
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index 01e0985..bc4eb76 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -466,13 +466,20 @@
 
 	pr_info("[%p]: Powering up %s\n", current, name);
 	init_completion(&dev->err_ready);
-	if (dev->desc->powerup(dev->desc) < 0)
+
+	if (dev->desc->powerup(dev->desc) < 0) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
 		panic("[%p]: Powerup error: %s!", current, name);
+	}
 
 	ret = wait_for_err_ready(dev);
-	if (ret)
+	if (ret) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
 		panic("[%p]: Timed out waiting for error ready: %s!",
 			current, name);
+	}
 	subsys_set_state(dev, SUBSYS_ONLINE);
 }
 
@@ -500,8 +507,11 @@
 
 	init_completion(&subsys->err_ready);
 	ret = subsys->desc->start(subsys->desc);
-	if (ret)
+	if (ret) {
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
 		return ret;
+	}
 
 	if (subsys->desc->is_not_loadable) {
 		subsys_set_state(subsys, SUBSYS_ONLINE);
@@ -509,12 +519,14 @@
 	}
 
 	ret = wait_for_err_ready(subsys);
-	if (ret)
+	if (ret) {
 		/* pil-boot succeeded but we need to shutdown
 		 * the device because error ready timed out.
 		 */
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
 		subsys->desc->stop(subsys->desc);
-	else
+	} else
 		subsys_set_state(subsys, SUBSYS_ONLINE);
 
 	return ret;
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 606383a..adac211 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -212,6 +212,7 @@
 	unsigned long len;
 	__be32 *prop;
 	char *name;
+	phys_addr_t limit = MEMBLOCK_ALLOC_ANYWHERE;
 
 	if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
 		return 0;
@@ -225,9 +226,13 @@
 
 	name = of_get_flat_dt_prop(node, "label", NULL);
 
-	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
-		(unsigned long)base, (unsigned long)size / SZ_1M);
-	dma_contiguous_reserve_area(size, &base, MEMBLOCK_ALLOC_ANYWHERE, name);
+	prop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
+	if (prop)
+		limit = be32_to_cpu(prop[0]);
+
+	pr_info("Found %s, memory base %lx, size %ld MiB, limit %pa\n", uname,
+		(unsigned long)base, (unsigned long)size / SZ_1M, &limit);
+	dma_contiguous_reserve_area(size, &base, limit, name);
 
 	return 0;
 }
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 606953d..99647a7 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -107,6 +107,10 @@
 #define DIAG_STM_WCNSS	0x04
 #define DIAG_STM_APPS	0x08
 
+#define DIAG_DIAG_STM		0x214
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
 /*
  * The status bit masks when received in a signal handler are to be
  * used in conjunction with the peripheral list bit mask to determine the
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 8cc7515..0bbb012 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -56,6 +56,8 @@
 #define STM_RSP_SMD_COMPLY_INDEX	9
 #define STM_RSP_NUM_BYTES		10
 
+#define STM_COMMAND_VALID 1
+
 #define SMD_DRAIN_BUF_SIZE 4096
 
 int diag_debug_buf_idx;
@@ -1136,20 +1138,44 @@
 	}
 }
 
-int diag_process_stm_cmd(unsigned char *buf)
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
 {
-	uint8_t version = *(buf+STM_CMD_VERSION_OFFSET);
-	uint8_t mask = *(buf+STM_CMD_MASK_OFFSET);
-	uint8_t cmd = *(buf+STM_CMD_DATA_OFFSET);
+	uint8_t version, mask, cmd;
 	uint8_t rsp_supported = 0;
 	uint8_t rsp_smd_comply = 0;
-	int valid_command = 1;
 	int i;
 
-	/* Check if command is valid */
-	if ((version != 1) || (mask == 0) || (0 != (mask >> 4)) ||
-			(cmd != ENABLE_STM && cmd != DISABLE_STM)) {
-		valid_command = 0;
+	if (!buf || !dest_buf) {
+		pr_err("diag: Invalid pointers buf: %p, dest_buf %p in %s\n",
+		       buf, dest_buf, __func__);
+		return -EIO;
+	}
+
+	version = *(buf + STM_CMD_VERSION_OFFSET);
+	mask = *(buf + STM_CMD_MASK_OFFSET);
+	cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+	/*
+	 * Check if command is valid. If the command is asking for
+	 * status, then the processor mask field is to be ignored.
+	 */
+	if ((version != 1) || (cmd > STATUS_STM) ||
+		((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+		/* Command is invalid. Send bad param message response */
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+			dest_buf[i+1] = *(buf + i);
+		return STM_CMD_NUM_BYTES+1;
+	} else if (cmd == STATUS_STM) {
+		/*
+		 * Only the status is being queried, so fill in whether diag
+		 * over stm is supported or not
+		 */
+		for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++)
+			if (driver->peripheral_supports_stm[i])
+				rsp_supported |= 1 << i;
+
+		rsp_supported |= DIAG_STM_APPS;
 	} else {
 		if (mask & DIAG_STM_MODEM)
 			diag_process_stm_mask(cmd, DIAG_STM_MODEM, MODEM_DATA,
@@ -1169,15 +1195,13 @@
 	}
 
 	for (i = 0; i < STM_CMD_NUM_BYTES; i++)
-		driver->apps_rsp_buf[i] = *(buf+i);
+		dest_buf[i] = *(buf + i);
 
-	driver->apps_rsp_buf[STM_RSP_VALID_INDEX] = valid_command;
-	driver->apps_rsp_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
-	driver->apps_rsp_buf[STM_RSP_SMD_COMPLY_INDEX] = rsp_smd_comply;
+	dest_buf[STM_RSP_VALID_INDEX] = STM_COMMAND_VALID;
+	dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+	dest_buf[STM_RSP_SMD_COMPLY_INDEX] = rsp_smd_comply;
 
-	encode_rsp_and_send(STM_RSP_NUM_BYTES-1);
-
-	return 0;
+	return STM_RSP_NUM_BYTES;
 }
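The validity check above folds three cases into one expression; a hypothetical helper spelling them out (using the STM command values from diagfwd_cntl.h and the processor bits from diagchar.h):

static int stm_cmd_is_valid(uint8_t version, uint8_t mask, uint8_t cmd)
{
	if (version != 1 || cmd > STATUS_STM)
		return 0;
	if (cmd == STATUS_STM)
		return 1;	/* status query: the mask byte is ignored */
	/* ENABLE_STM/DISABLE_STM need at least one bit in the low nibble
	 * and nothing in the reserved high nibble.
	 */
	return mask != 0 && (mask >> 4) == 0;
}

/* e.g. stm_cmd_is_valid(1, DIAG_STM_APPS, ENABLE_STM) -> valid
 *      stm_cmd_is_valid(1, 0x00, ENABLE_STM)          -> bad-param response
 *      stm_cmd_is_valid(1, 0x00, STATUS_STM)          -> valid
 */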
 
 int diag_apps_responds()
@@ -1273,8 +1297,13 @@
 		encode_rsp_and_send(7);
 		return 0;
 	} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
-		(*(uint16_t *)(buf+2) == 0x020E)) {
-		return diag_process_stm_cmd(buf);
+		(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+		len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+		if (len > 0) {
+			encode_rsp_and_send(len - 1);
+			return 0;
+		}
+		return len;
 	}
 	/* Check for Apps Only & get event mask request */
 	else if (diag_apps_responds() && *buf == 0x81) {
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index d79195c..7f5ea03 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -54,8 +54,9 @@
 #define ENABLE_SEPARATE_CMDRSP	1
 #define DISABLE_SEPARATE_CMDRSP	0
 
-#define ENABLE_STM	1
 #define DISABLE_STM	0
+#define ENABLE_STM	1
+#define STATUS_STM	2
 
 #define UPDATE_PERIPHERAL_STM_STATE	1
 #define CLEAR_PERIPHERAL_STM_STATE	2
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index f35ba53..0c7c9e0 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -177,6 +177,7 @@
 	bool			byte_cntr_read_active;
 	wait_queue_head_t	wq;
 	char			*byte_cntr_node;
+	uint32_t		mem_size;
 };
 
 static void tmc_wait_for_flush(struct tmc_drvdata *drvdata)
@@ -1310,6 +1311,32 @@
 static DEVICE_ATTR(byte_cntr_value, S_IRUGO | S_IWUSR,
 		   tmc_etr_show_byte_cntr_value, tmc_etr_store_byte_cntr_value);
 
+static ssize_t tmc_etr_show_mem_size(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val = drvdata->mem_size;
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tmc_etr_store_mem_size(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->mem_size = val;
+	return size;
+}
+static DEVICE_ATTR(mem_size, S_IRUGO | S_IWUSR,
+		   tmc_etr_show_mem_size, tmc_etr_store_mem_size);
+
 static struct attribute *tmc_attrs[] = {
 	&dev_attr_trigger_cntr.attr,
 	NULL,
@@ -1322,6 +1349,7 @@
 static struct attribute *tmc_etr_attrs[] = {
 	&dev_attr_out_mode.attr,
 	&dev_attr_byte_cntr_value.attr,
+	&dev_attr_mem_size.attr,
 	NULL,
 };
 
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 3aebaf0..48dc6ec 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -39,6 +39,7 @@
 #include <crypto/authenc.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
 
 #include <mach/scm.h>
 #include <linux/platform_data/qcom_crypto_device.h>
@@ -66,6 +67,8 @@
 	u32 aead_sha1_3des_dec;
 	u32 aead_ccm_aes_enc;
 	u32 aead_ccm_aes_dec;
+	u32 aead_rfc4309_ccm_aes_enc;
+	u32 aead_rfc4309_ccm_aes_dec;
 	u32 aead_op_success;
 	u32 aead_op_fail;
 	u32 aead_bad_msg;
@@ -231,6 +234,8 @@
 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 #define QCRYPTO_MAX_IV_LENGTH	16
 
+#define	QCRYPTO_CCM4309_NONCE_LEN	3
+
 struct qcrypto_cipher_ctx {
 	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
 	u8 iv[QCRYPTO_MAX_IV_LENGTH];
@@ -244,10 +249,12 @@
 	struct crypto_priv *cp;
 	unsigned int flags;
 	struct crypto_engine *pengine;  /* fixed engine assigned */
+	u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
 };
 
 struct qcrypto_cipher_req_ctx {
 	u8 *iv;
+	u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
 	unsigned int ivsize;
 	int  aead;
 	struct scatterlist asg;		/* Formatted associated data sg  */
@@ -736,7 +743,12 @@
 	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
 			"   AEAD CCM-AES decryption     : %d\n",
 					pstat->aead_ccm_aes_dec);
-
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES encryption     : %d\n",
+					pstat->aead_rfc4309_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES decryption     : %d\n",
+					pstat->aead_rfc4309_ccm_aes_dec);
 	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
 			"   AEAD operation success       : %d\n",
 					pstat->aead_op_success);
@@ -1272,6 +1284,12 @@
 	uint32_t bytes = 0;
 	uint32_t num_sg = 0;
 
+	if (alen == 0) {
+		qreq->assoc = NULL;
+		qreq->assoclen = 0;
+		return 0;
+	}
+
 	qreq->assoc = kzalloc((alen + 0x64), GFP_ATOMIC);
 	if (!qreq->assoc) {
 		pr_err("qcrypto Memory allocation of adata FAIL, error %ld\n",
@@ -1455,7 +1473,10 @@
 	qreq.authkey = cipher_ctx->auth_key;
 	qreq.authklen = cipher_ctx->auth_key_len;
 	qreq.authsize = crypto_aead_authsize(aead);
-	qreq.ivsize =  crypto_aead_ivsize(aead);
+	if (qreq.mode == QCE_MODE_CCM)
+		qreq.ivsize =  AES_BLOCK_SIZE;
+	else
+		qreq.ivsize =  crypto_aead_ivsize(aead);
 	qreq.flags = cipher_ctx->flags;
 
 	if (qreq.mode == QCE_MODE_CCM) {
@@ -1502,8 +1523,9 @@
 				kzfree(qreq.assoc);
 				return -ENOMEM;
 			}
-
-			memcpy((char *)rctx->data, qreq.assoc, qreq.assoclen);
+			if (qreq.assoclen)
+				memcpy((char *)rctx->data, qreq.assoc,
+						 qreq.assoclen);
 
 			num_sg = qcrypto_count_sg(req->src, req->cryptlen);
 			bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg,
@@ -1845,6 +1867,29 @@
 	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
 }
 
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L - 1 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
 static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
 {
 	struct qcrypto_cipher_req_ctx *rctx;
@@ -2136,6 +2181,27 @@
 	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
 }
 
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L - 1 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
 static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
 				unsigned int authsize)
 {
@@ -2166,6 +2232,24 @@
 	return 0;
 }
 
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+
 static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			unsigned int keylen)
 {
@@ -2231,6 +2315,21 @@
 	return 0;
 }
 
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+				 const u8 *key, unsigned int key_len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+		return -EINVAL;
+	key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+	memcpy(ctx->ccm4309_nonce, key + key_len,  QCRYPTO_CCM4309_NONCE_LEN);
+	ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+	return ret;
+};
+
 static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
 {
 	struct qcrypto_cipher_req_ctx *rctx;
@@ -3762,7 +3861,7 @@
 	.cra_u		= {
 		.aead = {
 			.ivsize         = AES_BLOCK_SIZE,
-			.maxauthsize    = SHA1_DIGEST_SIZE,
+			.maxauthsize    = AES_BLOCK_SIZE,
 			.setkey = _qcrypto_aead_ccm_setkey,
 			.setauthsize = _qcrypto_aead_ccm_setauthsize,
 			.encrypt = _qcrypto_aead_encrypt_aes_ccm,
@@ -3772,6 +3871,31 @@
 	}
 };
 
+static struct crypto_alg _qcrypto_aead_rfc4309_ccm_algo = {
+	.cra_name	= "rfc4309(ccm(aes))",
+	.cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize  = 1,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_nivaead_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_aead_init,
+	.cra_exit	= _qcrypto_cra_aead_exit,
+	.cra_u		= {
+		.aead = {
+			.ivsize         = 8,
+			.maxauthsize    = 16,
+			.setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+			.setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+			.encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+			.decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+			.geniv = "seqiv",
+		}
+	}
+};
+
 
 static int  _qcrypto_probe(struct platform_device *pdev)
 {
@@ -4078,6 +4202,36 @@
 			dev_info(&pdev->dev, "%s\n",
 					q_alg->cipher_alg.cra_driver_name);
 		}
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_aead_rfc4309_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
 	}
 
 	mutex_unlock(&cp->engine_lock);
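
The rfc4309(ccm(aes)) support added above keeps the last QCRYPTO_CCM4309_NONCE_LEN (3) bytes of the key as a salt at setkey time and rebuilds a full 16-byte CCM IV for every request: byte 0 is the flags value L - 1 = 3 (a 4-byte length field), bytes 1-3 carry the salt, bytes 4-11 carry the 8-byte request IV, and the rest stays zero for CCM to fill in. The standalone sketch below restates only that layout; build_rfc4309_iv() and the sample values are illustrative, not driver symbols.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CCM4309_NONCE_LEN	3	/* salt bytes taken from the end of the key */
#define CCM4309_IV_LEN		8	/* per-request IV supplied with the request */

/* Build the 16-byte CCM IV block used for rfc4309(ccm(aes)):
 *   byte  0      : L - 1 (4-byte length field, so the flags byte is 3)
 *   bytes 1..3   : 3-byte salt split off from the key at setkey time
 *   bytes 4..11  : 8-byte IV taken from the request
 *   bytes 12..15 : zero here; CCM fills in the message length itself
 */
static void build_rfc4309_iv(uint8_t out[16],
			     const uint8_t salt[CCM4309_NONCE_LEN],
			     const uint8_t req_iv[CCM4309_IV_LEN])
{
	memset(out, 0, 16);
	out[0] = 3;	/* L - 1 */
	memcpy(&out[1], salt, CCM4309_NONCE_LEN);
	memcpy(&out[4], req_iv, CCM4309_IV_LEN);
}

int main(void)
{
	uint8_t salt[CCM4309_NONCE_LEN] = { 0xde, 0xad, 0xbe };
	uint8_t req_iv[CCM4309_IV_LEN] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t iv[16];
	int i;

	build_rfc4309_iv(iv, salt, req_iv);
	for (i = 0; i < 16; i++)
		printf("%02x ", iv[i]);
	printf("\n");
	return 0;
}
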
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
index 8d934df..6176df9 100644
--- a/drivers/gpio/qpnp-pin.c
+++ b/drivers/gpio/qpnp-pin.c
@@ -182,6 +182,7 @@
 	struct device_node	*int_ctrl;
 	struct list_head	chip_list;
 	struct dentry		*dfs_dir;
+	bool			chip_registered;
 };
 
 static LIST_HEAD(qpnp_pin_chips);
@@ -912,7 +913,7 @@
 static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
 {
 	struct spmi_device *spmi = q_chip->spmi;
-	int rc, i;
+	int i, rc = 0;
 
 	if (q_chip->chip_gpios)
 		for (i = 0; i < spmi->num_dev_node; i++)
@@ -921,10 +922,12 @@
 	mutex_lock(&qpnp_pin_chips_lock);
 	list_del(&q_chip->chip_list);
 	mutex_unlock(&qpnp_pin_chips_lock);
-	rc = gpiochip_remove(&q_chip->gpio_chip);
-	if (rc)
-		dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
-				__func__);
+	if (q_chip->chip_registered) {
+		rc = gpiochip_remove(&q_chip->gpio_chip);
+		if (rc)
+			dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
+					__func__);
+	}
 	kfree(q_chip->chip_gpios);
 	kfree(q_chip->pmic_pins);
 	kfree(q_chip);
@@ -1342,6 +1345,7 @@
 		goto err_probe;
 	}
 
+	q_chip->chip_registered = true;
 	/* now configure gpio config defaults if they exist */
 	for (i = 0; i < spmi->num_dev_node; i++) {
 		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index d375c00..0aef596 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -446,6 +446,7 @@
 	if (ret) {
 		ret = ion_secure_cma_add_to_pool(sheap, len);
 		if (ret) {
+			mutex_unlock(&sheap->alloc_lock);
 			dev_err(sheap->dev, "Fail to allocate buffer\n");
 			goto err;
 		}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7717829..695095f 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1625,17 +1625,19 @@
 			goto err;
 		}
 
-		ret = of_property_read_u32_array(child, "reg", reg_val, 2);
-		if (ret) {
-			KGSL_CORE_ERR("Unable to read KGSL IOMMU 'reg'\n");
+		if (!strcmp("gfx3d_user", ctxs[ctx_index].iommu_ctx_name)) {
+			ctxs[ctx_index].ctx_id = 0;
+		} else if (!strcmp("gfx3d_priv",
+					ctxs[ctx_index].iommu_ctx_name)) {
+			ctxs[ctx_index].ctx_id = 1;
+		} else if (!strcmp("gfx3d_spare",
+					ctxs[ctx_index].iommu_ctx_name)) {
+			ctxs[ctx_index].ctx_id = 2;
+		} else {
+			KGSL_CORE_ERR("dt: IOMMU context %s is invalid\n",
+				ctxs[ctx_index].iommu_ctx_name);
 			goto err;
 		}
-		if (msm_soc_version_supports_iommu_v0())
-			ctxs[ctx_index].ctx_id = (reg_val[0] -
-				data->physstart) >> KGSL_IOMMU_CTX_SHIFT;
-		else
-			ctxs[ctx_index].ctx_id = ((reg_val[0] -
-				data->physstart) >> KGSL_IOMMU_CTX_SHIFT) - 8;
 
 		ctx_index++;
 	}
@@ -1690,6 +1692,12 @@
 	if (ret)
 		goto err;
 
+	/* get pm-qos-latency from target, set it to default if not found */
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,pm-qos-latency",
+		&pdata->pm_qos_latency))
+		pdata->pm_qos_latency = 501;
+
+
 	if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
 		&pdata->idle_timeout))
 		pdata->idle_timeout = HZ/12;
@@ -1952,6 +1960,10 @@
 	for (i = 6; i < FT_DETECT_REGS_COUNT; i++)
 		ft_detect_regs[i] = 0;
 
+	/* turn on hang interrupt for a330v2 by default */
+	if (adreno_is_a330v2(adreno_dev))
+		set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
+
 	ret = adreno_perfcounter_init(device);
 	if (ret)
 		goto done;
@@ -2415,9 +2427,12 @@
 
 	if (tmp != adreno_dev->fast_hang_detect) {
 		if (adreno_dev->fast_hang_detect) {
-			if (adreno_dev->gpudev->fault_detect_start)
+			if (adreno_dev->gpudev->fault_detect_start &&
+				!kgsl_active_count_get(&adreno_dev->dev)) {
 				adreno_dev->gpudev->fault_detect_start(
 					adreno_dev);
+				kgsl_active_count_put(&adreno_dev->dev);
+			}
 		} else {
 			if (adreno_dev->gpudev->fault_detect_stop)
 				adreno_dev->gpudev->fault_detect_stop(
@@ -2526,6 +2541,88 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", _wake_timeout);
 }
 
+/**
+ * _ft_hang_intr_status_store() - Routine to enable/disable h/w hang interrupt
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ */
+static ssize_t _ft_hang_intr_status_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	unsigned int new_setting, old_setting;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct adreno_device *adreno_dev;
+	int ret;
+	if (device == NULL)
+		return 0;
+	adreno_dev = ADRENO_DEVICE(device);
+
+	mutex_lock(&device->mutex);
+	ret = _ft_sysfs_store(buf, count, &new_setting);
+	if (ret != count)
+		goto done;
+	if (new_setting)
+		new_setting = 1;
+	old_setting =
+		(test_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv) ? 1 : 0);
+	if (new_setting != old_setting) {
+		if (new_setting)
+			set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
+		else
+			clear_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
+		/* Set the new setting based on device state */
+		switch (device->state) {
+		case KGSL_STATE_NAP:
+		case KGSL_STATE_SLEEP:
+			kgsl_pwrctrl_wake(device, 0);
+		case KGSL_STATE_ACTIVE:
+			adreno_dev->gpudev->irq_control(adreno_dev, 1);
+		/*
+		 * For the following states the setting will be picked up on
+		 * device start. They are still needed in the switch statement
+		 * to differentiate them from the default case.
+		 */
+		case KGSL_STATE_SLUMBER:
+		case KGSL_STATE_SUSPEND:
+			break;
+		default:
+			ret = -EACCES;
+			/* reset back to old setting on error */
+			if (new_setting)
+				clear_bit(ADRENO_DEVICE_HANG_INTR,
+					&adreno_dev->priv);
+			else
+				set_bit(ADRENO_DEVICE_HANG_INTR,
+					&adreno_dev->priv);
+			goto done;
+		}
+	}
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+/**
+ * _ft_hang_intr_status_show() - Routine to read hardware hang interrupt
+ * enablement
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ */
+static ssize_t _ft_hang_intr_status_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+	if (adreno_dev == NULL)
+		return 0;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		test_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv) ? 1 : 0);
+}
+
 #define FT_DEVICE_ATTR(name) \
 	DEVICE_ATTR(name, 0644,	_ ## name ## _show, _ ## name ## _store);
 
@@ -2533,6 +2630,7 @@
 FT_DEVICE_ATTR(ft_pagefault_policy);
 FT_DEVICE_ATTR(ft_fast_hang_detect);
 FT_DEVICE_ATTR(ft_long_ib_detect);
+FT_DEVICE_ATTR(ft_hang_intr_status);
 
 static DEVICE_INT_ATTR(wake_nice, 0644, _wake_nice);
 static FT_DEVICE_ATTR(wake_timeout);
@@ -2544,6 +2642,7 @@
 	&dev_attr_ft_long_ib_detect,
 	&dev_attr_wake_nice.attr,
 	&dev_attr_wake_timeout,
+	&dev_attr_ft_hang_intr_status,
 	NULL,
 };
 
@@ -2778,7 +2877,7 @@
  * Return true if the RBBM status register for the GPU type indicates that the
  * hardware is idle
  */
-static bool adreno_hw_isidle(struct kgsl_device *device)
+bool adreno_hw_isidle(struct kgsl_device *device)
 {
 	unsigned int reg_rbbm_status;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -2891,6 +2990,12 @@
 
 	rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
 
+	/*
+	 * wptr is updated when we add commands to the ringbuffer; add a barrier
+	 * to make sure the updated wptr is compared against rptr
+	 */
+	smp_mb();
+
 	if (rptr == adreno_dev->ringbuffer.wptr)
 		return adreno_hw_isidle(device);
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index e2ea262..976a355 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -217,6 +217,7 @@
 	ADRENO_DEVICE_PWRON_FIXUP = 1,
 	ADRENO_DEVICE_INITIALIZED = 2,
 	ADRENO_DEVICE_STARTED = 3,
+	ADRENO_DEVICE_HANG_INTR = 4,
 };
 
 #define PERFCOUNTER_FLAG_NONE 0x0
@@ -341,6 +342,7 @@
 	ADRENO_REG_TC_CNTL_STATUS,
 	ADRENO_REG_TP0_CHICKEN,
 	ADRENO_REG_RBBM_RBBM_CTL,
+	ADRENO_REG_UCHE_INVALIDATE0,
 	ADRENO_REG_REGISTER_MAX,
 };
 
@@ -413,7 +415,9 @@
 #define  KGSL_FT_SKIPFRAME                3
 #define  KGSL_FT_DISABLE                  4
 #define  KGSL_FT_TEMP_DISABLE             5
-#define  KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))
+#define  KGSL_FT_THROTTLE                 6
+#define  KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB) \
+				+ BIT(KGSL_FT_THROTTLE))
 
 /* This internal bit is used to skip the PM dump on replayed command batches */
 #define  KGSL_FT_SKIP_PMDUMP              31
@@ -431,7 +435,8 @@
 	{ BIT(KGSL_FT_SKIPIB), "skipib" }, \
 	{ BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
 	{ BIT(KGSL_FT_DISABLE), "disable" }, \
-	{ BIT(KGSL_FT_TEMP_DISABLE), "temp" }
+	{ BIT(KGSL_FT_TEMP_DISABLE), "temp" }, \
+	{ BIT(KGSL_FT_THROTTLE), "throttle"}
 
 extern struct adreno_gpudev adreno_a2xx_gpudev;
 extern struct adreno_gpudev adreno_a3xx_gpudev;
@@ -461,6 +466,7 @@
 void adreno_coresight_remove(struct platform_device *pdev);
 int adreno_coresight_init(struct platform_device *pdev);
 
+bool adreno_hw_isidle(struct kgsl_device *device);
 int adreno_idle(struct kgsl_device *device);
 bool adreno_isidle(struct kgsl_device *device);
 
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index eed11c3..2faa278 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -3039,6 +3039,25 @@
 	return 0;
 }
 
+static void a3xx_fatal_err_callback(struct adreno_device *adreno_dev, int bit)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	const char *err = "";
+
+	switch (bit) {
+	case A3XX_INT_MISC_HANG_DETECT:
+		err = "MISC: GPU hang detected\n";
+		break;
+	default:
+		return;
+	}
+	KGSL_DRV_CRIT(device, "%s\n", err);
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	/* Trigger a fault in the dispatcher - this will effect a restart */
+	adreno_dispatcher_irq_fault(device);
+}
+
 static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
@@ -3063,7 +3082,7 @@
 
 		/* Clear the error */
 		kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
-		goto done;
+		return;
 	}
 	case A3XX_INT_RBBM_REG_TIMEOUT:
 		err = "RBBM: AHB register timeout";
@@ -3100,14 +3119,10 @@
 			"CP | Protected mode error| %s | addr=%x\n",
 			reg & (1 << 24) ? "WRITE" : "READ",
 			(reg & 0x1FFFF) >> 2);
-		goto done;
 	}
 	case A3XX_INT_CP_AHB_ERROR_HALT:
 		err = "ringbuffer AHB error interrupt";
 		break;
-	case A3XX_INT_MISC_HANG_DETECT:
-		err = "MISC: GPU hang detected";
-		break;
 	case A3XX_INT_UCHE_OOB_ACCESS:
 		err = "UCHE:  Out of bounds access";
 		break;
@@ -3115,11 +3130,6 @@
 		return;
 	}
 	KGSL_DRV_CRIT(device, "%s\n", err);
-	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-
-done:
-	/* Trigger a fault in the dispatcher - this will effect a restart */
-	adreno_dispatcher_irq_fault(device);
 }
 
 static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
@@ -3561,7 +3571,7 @@
 	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 21 - CP_AHB_ERROR_FAULT */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 22 - Unused */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 23 - Unused */
-	A3XX_IRQ_CALLBACK(NULL),	       /* 24 - MISC_HANG_DETECT */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),/* 24 - MISC_HANG_DETECT */
 	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 25 - UCHE_OOB_ACCESS */
 	/* 26 to 31 - Unused */
 };
@@ -3602,7 +3612,9 @@
 	struct kgsl_device *device = &adreno_dev->dev;
 
 	if (state)
-		kgsl_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
+		kgsl_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK |
+			(test_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv) ?
+				(1 << A3XX_INT_MISC_HANG_DETECT) : 0));
 	else
 		kgsl_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
 }
@@ -4193,9 +4205,12 @@
 
 	/* Turn on hang detection - this spews a lot of useful information
 	 * into the RBBM registers on a hang */
-
-	kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
-			(1 << 16) | 0xFFF);
+	if (adreno_is_a330v2(adreno_dev))
+		kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
+				(1 << 31) | 0xFFFF);
+	else
+		kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
+				(1 << 16) | 0xFFF);
 
 	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0). */
 	kgsl_regwrite(device, A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
@@ -4577,6 +4592,8 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_TC_CNTL_STATUS, REG_TC_CNTL_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_TP0_CHICKEN, REG_TP0_CHICKEN),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_RBBM_CTL, A3XX_RBBM_RBBM_CTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0,
+			A3XX_UCHE_CACHE_INVALIDATE0_REG),
 };
 
 struct adreno_reg_offsets a3xx_reg_offsets = {
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 48d0210..7aae397 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -33,6 +33,15 @@
 /* Number of command batches sent at a time from a single context */
 static unsigned int _context_cmdbatch_burst = 5;
 
+/*
+ * GFT throttle parameters. If GFT recovered more than
+ * X times in Y ms, invalidate the context and do not attempt recovery.
+ * X -> _fault_throttle_burst
+ * Y -> _fault_throttle_time
+ */
+static unsigned int _fault_throttle_time = 3000;
+static unsigned int _fault_throttle_burst = 3;
+
 /* Number of command batches inflight in the ringbuffer at any time */
 static unsigned int _dispatcher_inflight = 15;
 
@@ -78,15 +87,24 @@
 static inline bool _isidle(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int ts;
+	unsigned int ts, i;
+
+	if (!kgsl_pwrctrl_isenabled(device))
+		goto ret;
 
 	ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
 
-	if (adreno_isidle(device) == true &&
-		(ts >= adreno_dev->ringbuffer.global_ts))
-		return true;
+	/* If GPU HW status is idle return true */
+	if (adreno_hw_isidle(device) ||
+			(ts == adreno_dev->ringbuffer.global_ts))
+		goto ret;
 
 	return false;
+
+ret:
+	for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
+		fault_detect_regs[i] = 0;
+	return true;
 }
 
 /**
@@ -929,6 +947,12 @@
 	if (dispatcher->inflight == 0) {
 		KGSL_DRV_WARN(device,
 		"dispatcher_do_fault with 0 inflight commands\n");
+		/*
+		 * For certain faults, such as a h/w fault, the interrupts are
+		 * turned off; re-enable them here
+		 */
+		if (kgsl_pwrctrl_isenabled(device))
+			kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 		return 0;
 	}
 
@@ -1015,6 +1039,35 @@
 	cmdbatch = replay[0];
 
 	/*
+	 * If GFT recovered more than X times in Y ms, invalidate the context
+	 * and do not attempt recovery.
+	 * Example: X==3 and Y==3000 ms, GPU hung at 500ms, 1700ms, 2500ms and
+	 * 3000ms for the same context; we will not try FT and will invalidate
+	 * the context @3000ms because the context triggered GFT more than 3
+	 * times in the last 3 seconds. If a context caused recoverable GPU
+	 * hangs where the 1st and 4th gpu hang are more than 3 seconds apart,
+	 * we won't disable GFT and invalidate the context.
+	 */
+	if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
+		if (time_after(jiffies, (cmdbatch->context->fault_time
+				+ msecs_to_jiffies(_fault_throttle_time)))) {
+			cmdbatch->context->fault_time = jiffies;
+			cmdbatch->context->fault_count = 1;
+		} else {
+			cmdbatch->context->fault_count++;
+			if (cmdbatch->context->fault_count >
+					_fault_throttle_burst) {
+				set_bit(KGSL_FT_DISABLE,
+						&cmdbatch->fault_policy);
+				pr_fault(device, cmdbatch,
+					 "gpu fault threshold exceeded %d faults in %d msecs\n",
+					 _fault_throttle_burst,
+					 _fault_throttle_time);
+			}
+		}
+	}
+
+	/*
 	 * If FT is disabled for this cmdbatch invalidate immediately
 	 */
 
@@ -1628,6 +1681,10 @@
 static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
 static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
 	_fault_timer_interval);
+static DISPATCHER_UINT_ATTR(fault_throttle_time, 0644, 0,
+	_fault_throttle_time);
+static DISPATCHER_UINT_ATTR(fault_throttle_burst, 0644, 0,
+	_fault_throttle_burst);
 
 static struct attribute *dispatcher_attrs[] = {
 	&dispatcher_attr_inflight.attr,
@@ -1636,6 +1693,8 @@
 	&dispatcher_attr_cmdbatch_timeout.attr,
 	&dispatcher_attr_context_queue_wait.attr,
 	&dispatcher_attr_fault_detect_interval.attr,
+	&dispatcher_attr_fault_throttle_time.attr,
+	&dispatcher_attr_fault_throttle_burst.attr,
 	NULL,
 };
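
The KGSL_FT_THROTTLE handling added above keeps a per-context fault window: a fault arriving more than _fault_throttle_time ms after the first fault of the window restarts the window, otherwise the fault count grows, and once it exceeds _fault_throttle_burst the command batch is marked KGSL_FT_DISABLE so the context is invalidated rather than recovered. A small userspace model of that decision follows; the names, the plain millisecond clock, and the sample hang times are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct ctx_throttle {
	unsigned long first_fault_ms;	/* start of the current window */
	unsigned int fault_count;	/* faults seen in the window */
};

/* Returns true when this fault should disable recovery for the context. */
static bool fault_should_disable_ft(struct ctx_throttle *t,
				    unsigned long now_ms,
				    unsigned long window_ms,
				    unsigned int burst)
{
	if (now_ms > t->first_fault_ms + window_ms) {
		t->first_fault_ms = now_ms;	/* window expired: restart it */
		t->fault_count = 1;
		return false;
	}
	return ++t->fault_count > burst;	/* too many faults in one window */
}

int main(void)
{
	/* Hang times mirror the 500/1700/2500/3000 ms example above,
	 * offset so the first hang opens a fresh window. */
	unsigned long hangs[] = { 10500, 11700, 12500, 13000 };
	struct ctx_throttle t = { 0, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(hangs) / sizeof(hangs[0]); i++)
		printf("hang @%lums -> disable=%d\n", hangs[i],
		       fault_should_disable_ft(&t, hangs[i], 3000, 3));
	return 0;
}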
 
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 4db045a..136456a 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -607,7 +607,7 @@
 				  struct adreno_context *context)
 {
 	struct kgsl_device *device;
-	unsigned int cmds[5];
+	unsigned int cmds[8];
 
 	if (adreno_dev == NULL || context == NULL)
 		return -EINVAL;
@@ -621,8 +621,14 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->base.id;
+	/* Flush the UCHE for new context */
+	cmds[5] = cp_type0_packet(
+		adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
+	cmds[6] = 0;
+	if (adreno_is_a3xx(adreno_dev))
+		cmds[7] = 0x90000000;
 	return adreno_ringbuffer_issuecmds(device, context,
-				KGSL_CMD_FLAGS_NONE, cmds, 5);
+				KGSL_CMD_FLAGS_NONE, cmds, 8);
 }
 
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 98fd731..fc4b77e 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -349,6 +349,8 @@
  * is set.
  * @flags: flags from userspace controlling the behavior of this context
  * @pwr_constraint: power constraint from userspace for this context
+ * @fault_count: number of gpu hangs in the last _context_throttle_time ms
+ * @fault_time: time of the first gpu hang in the last _context_throttle_time ms
  */
 struct kgsl_context {
 	struct kref refcount;
@@ -367,6 +369,8 @@
 	unsigned int pagefault_ts;
 	unsigned int flags;
 	struct kgsl_pwr_constraint pwr_constraint;
+	unsigned int fault_count;
+	unsigned long fault_time;
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index e21fd88..ccd13d5 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -210,7 +210,7 @@
 	kgsl_event_func func, void *priv, void *owner)
 {
 	struct kgsl_event *event;
-	unsigned int queued, cur_ts;
+	unsigned int queued = 0, cur_ts;
 	struct kgsl_context *context = NULL;
 
 	BUG_ON(!mutex_is_locked(&device->mutex));
@@ -224,11 +224,21 @@
 			return -EINVAL;
 	}
 
-	queued = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED);
+	/*
+	 * If the caller is creating their own timestamps, let them schedule
+	 * events in the future. Otherwise only allow timestamps that have been
+	 * queued.
+	 */
+	if (context == NULL ||
+		((context->flags & KGSL_CONTEXT_USER_GENERATED_TS) == 0)) {
 
-	if (timestamp_cmp(ts, queued) > 0) {
-		kgsl_context_put(context);
-		return -EINVAL;
+		queued = kgsl_readtimestamp(device, context,
+						KGSL_TIMESTAMP_QUEUED);
+
+		if (timestamp_cmp(ts, queued) > 0) {
+			kgsl_context_put(context);
+			return -EINVAL;
+		}
 	}
 
 	cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
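
The kgsl_events.c change above only enforces the "timestamp already queued" check when the context is NULL or does not carry KGSL_CONTEXT_USER_GENERATED_TS, so contexts that generate their own timestamps may register events for the future. The ordering test itself goes through timestamp_cmp(), which has to cope with 32-bit rollover; below is the usual wraparound-safe comparison such helpers are built on, shown only as a sketch and not claimed to match kgsl's exact window handling.

#include <stdint.h>
#include <stdio.h>

/* True when timestamp 'a' is newer than 'b', treating the 32-bit space as
 * circular so the comparison stays correct across rollover. */
static int ts_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", ts_after(10, 5));		/* 1: plainly newer */
	printf("%d\n", ts_after(5, 0xFFFFFFF0u));	/* 1: newer across the wrap */
	printf("%d\n", ts_after(0xFFFFFFF0u, 5));	/* 0: older across the wrap */
	return 0;
}
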
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 2af8d27..69b953f 100755
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -514,6 +514,9 @@
 	else
 		/* something went wrong with the event handling mechanism */
 		BUG_ON(1);
+
+	/* Free param; we are done using it */
+	kfree(param);
 }
 
 /*
@@ -638,16 +641,18 @@
 				phys_addr_t pt_base)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
-	phys_addr_t domain_ptbase = iommu_pt ?
-				iommu_get_pt_base_addr(iommu_pt->domain) : 0;
+	phys_addr_t domain_ptbase;
 
-	/* Only compare the valid address bits of the pt_base */
-	domain_ptbase &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+	if (iommu_pt == NULL)
+		return 0;
+
+	domain_ptbase = iommu_get_pt_base_addr(iommu_pt->domain)
+			& KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 
 	pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
 
-	return domain_ptbase && pt_base &&
-		(domain_ptbase == pt_base);
+	return (domain_ptbase == pt_base);
+
 }
 
 /*
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 6b04aad..d64d0d3 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -85,16 +85,8 @@
 	return status;
 }
 
-static void kgsl_destroy_pagetable(struct kref *kref)
+static void _kgsl_destroy_pagetable(struct kgsl_pagetable *pagetable)
 {
-	struct kgsl_pagetable *pagetable = container_of(kref,
-		struct kgsl_pagetable, refcount);
-	unsigned long flags;
-
-	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
-	list_del(&pagetable->list);
-	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
 	pagetable_remove_sysfs_objects(pagetable);
 
 	kgsl_cleanup_pt(pagetable);
@@ -109,6 +101,29 @@
 	kfree(pagetable);
 }
 
+static void kgsl_destroy_pagetable(struct kref *kref)
+{
+	struct kgsl_pagetable *pagetable = container_of(kref,
+		struct kgsl_pagetable, refcount);
+	unsigned long flags;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_del(&pagetable->list);
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+	_kgsl_destroy_pagetable(pagetable);
+}
+
+static void kgsl_destroy_pagetable_locked(struct kref *kref)
+{
+	struct kgsl_pagetable *pagetable = container_of(kref,
+		struct kgsl_pagetable, refcount);
+
+	list_del(&pagetable->list);
+
+	_kgsl_destroy_pagetable(pagetable);
+}
+
 static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
 {
 	if (pagetable)
@@ -128,7 +143,7 @@
 				ret = pt;
 				break;
 			}
-			kref_put(&pt->refcount, kgsl_destroy_pagetable);
+			kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
 		}
 	}
 
@@ -139,12 +154,12 @@
 static struct kgsl_pagetable *
 _get_pt_from_kobj(struct kobject *kobj)
 {
-	unsigned long ptname;
+	unsigned int ptname;
 
 	if (!kobj)
 		return NULL;
 
-	if (sscanf(kobj->name, "%ld", &ptname) != 1)
+	if (kstrtou32(kobj->name, 0, &ptname))
 		return NULL;
 
 	return kgsl_get_pagetable(ptname);
@@ -328,10 +343,11 @@
 		if (kref_get_unless_zero(&pt->refcount)) {
 			if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
 				ptid = (int) pt->name;
-				kref_put(&pt->refcount, kgsl_destroy_pagetable);
+				kref_put(&pt->refcount,
+					kgsl_destroy_pagetable_locked);
 				break;
 			}
-			kref_put(&pt->refcount, kgsl_destroy_pagetable);
+			kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
 		}
 	}
 	spin_unlock(&kgsl_driver.ptlock);
@@ -356,18 +372,18 @@
 				if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
 					ret = 1;
 					kref_put(&pt->refcount,
-						kgsl_destroy_pagetable);
+						kgsl_destroy_pagetable_locked);
 					break;
 				} else {
 					pt->fault_addr =
 						(addr & ~(PAGE_SIZE-1));
 					ret = 0;
 					kref_put(&pt->refcount,
-						kgsl_destroy_pagetable);
+						kgsl_destroy_pagetable_locked);
 					break;
 				}
 			}
-			kref_put(&pt->refcount, kgsl_destroy_pagetable);
+			kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
 		}
 	}
 	spin_unlock(&kgsl_driver.ptlock);
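
The kgsl_mmu.c hunks above split the pagetable teardown into _kgsl_destroy_pagetable() and add kgsl_destroy_pagetable_locked() for the kref_put() calls made while kgsl_driver.ptlock is already held, because the unlocked release path re-acquires that lock to unlink the pagetable from the list. A toy userspace model of the same locked/unlocked release split follows; the pthread mutex and every name below are illustrative only.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pt_list_lock = PTHREAD_MUTEX_INITIALIZER;

struct pt_entry {
	int refcount;
};

static void pt_teardown(struct pt_entry *pt)
{
	/* common cleanup shared by both release paths */
	free(pt);
}

/* Caller does not hold pt_list_lock: take it to unlink, then tear down. */
static void pt_put(struct pt_entry *pt)
{
	if (--pt->refcount > 0)
		return;
	pthread_mutex_lock(&pt_list_lock);
	/* unlink from the global list here */
	pthread_mutex_unlock(&pt_list_lock);
	pt_teardown(pt);
}

/* Caller already holds pt_list_lock (e.g. while walking the list);
 * taking it again here would deadlock, so only unlink and tear down. */
static void pt_put_locked(struct pt_entry *pt)
{
	if (--pt->refcount > 0)
		return;
	/* unlink from the global list here; the lock is already held */
	pt_teardown(pt);
}

int main(void)
{
	struct pt_entry *pt = calloc(1, sizeof(*pt));

	if (!pt)
		return 1;
	pt->refcount = 1;

	pthread_mutex_lock(&pt_list_lock);
	pt_put_locked(pt);	/* safe: the list lock is already held */
	pthread_mutex_unlock(&pt_list_lock);
	return 0;
}
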
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 9353b2e..fb9bdb1 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -78,7 +78,7 @@
 };
 
 static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
-					  int requested_state);
+					int requested_state);
 static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
 static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
 
@@ -883,7 +883,7 @@
 	}
 }
 
-static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
+void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 					  int requested_state)
 {
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
@@ -1124,8 +1124,7 @@
 					 pwr->pwrlevels[pwr->active_pwrlevel].
 						bus_freq);
 
-	/* Set the CPU latency to 501usec to allow low latency PC modes */
-	pwr->pm_qos_latency = 501;
+	pwr->pm_qos_latency = pdata->pm_qos_latency;
 
 	pm_runtime_enable(device->parentdev);
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index d3adf84..c8ea471 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -73,12 +73,12 @@
 _get_priv_from_kobj(struct kobject *kobj)
 {
 	struct kgsl_process_private *private;
-	unsigned long name;
+	unsigned int name;
 
 	if (!kobj)
 		return NULL;
 
-	if (sscanf(kobj->name, "%ld", &name) != 1)
+	if (kstrtou32(kobj->name, 0, &name))
 		return NULL;
 
 	list_for_each_entry(private, &kgsl_driver.process_list, list) {
@@ -255,13 +255,13 @@
 					 const char *buf, size_t count)
 {
 	int ret;
-	unsigned int thresh;
-	ret = sscanf(buf, "%d", &thresh);
-	if (ret != 1)
-		return count;
+	unsigned int thresh = 0;
+
+	ret = kgsl_sysfs_store(buf, &thresh);
+	if (ret)
+		return ret;
 
 	kgsl_driver.full_cache_threshold = thresh;
-
 	return count;
 }
 
diff --git a/drivers/gud/Makefile b/drivers/gud/Makefile
index ef0e083..c415ad8 100644
--- a/drivers/gud/Makefile
+++ b/drivers/gud/Makefile
@@ -3,34 +3,35 @@
 #
 GUD_ROOT_FOLDER := drivers/gud
 # add our modules to kernel.
-obj-$(CONFIG_MOBICORE_API) += mckernelapi.o
-obj-$(CONFIG_MOBICORE_SUPPORT) += mcdrvmodule.o
+obj-$(CONFIG_MOBICORE_API) += mcKernelApi.o
+obj-$(CONFIG_MOBICORE_SUPPORT) += mcDrvModule.o
 
-mcdrvmodule-objs := mobicore_driver/logging.o \
-		mobicore_driver/ops.o \
-		mobicore_driver/mem.o \
-		mobicore_driver/api.o \
-		mobicore_driver/main.o \
-		mobicore_driver/pm.o
+mcDrvModule-objs := MobiCoreDriver/logging.o \
+		MobiCoreDriver/ops.o \
+		MobiCoreDriver/mem.o \
+		MobiCoreDriver/api.o \
+		MobiCoreDriver/pm.o \
+		MobiCoreDriver/main.o
 
-mckernelapi-objs := mobicore_kernelapi/main.o \
-		mobicore_kernelapi/clientlib.o \
-		mobicore_kernelapi/device.o \
-		mobicore_kernelapi/session.o \
-		mobicore_kernelapi/connection.o
+mcKernelApi-objs := MobiCoreKernelApi/main.o \
+		MobiCoreKernelApi/clientlib.o \
+		MobiCoreKernelApi/device.o \
+		MobiCoreKernelApi/session.o \
+		MobiCoreKernelApi/connection.o
 
 # Release mode by default
-ccflags-y := -DNDEBUG -include $(PWD)/$(GUD_ROOT_FOLDER)/mobicore_driver/build_tag.h
+ccflags-y := -DNDEBUG -I$(GUD_ROOT_FOLDER)
 ccflags-y += -Wno-declaration-after-statement
 
 ccflags-$(CONFIG_MOBICORE_DEBUG) += -DDEBUG
 ccflags-$(CONFIG_MOBICORE_VERBOSE) += -DDEBUG_VERBOSE
 
 # Choose one platform from the folder
-MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/mobicore_driver/platforms | tail -1) )
+MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms | tail -1) )
 # Use the available platform folder
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_driver/platforms/$(MOBICORE_PLATFORM)
-
-
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_driver/public
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_kernelapi/include
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms/$(MOBICORE_PLATFORM)
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
+# MobiCore KernelApi required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/include
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/public
diff --git a/drivers/gud/MobiCoreDriver/Makefile b/drivers/gud/MobiCoreDriver/Makefile
new file mode 100644
index 0000000..c17f35e
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/Makefile
@@ -0,0 +1,26 @@
+#
+# this makefile is called from the kernel make system. Thus we basically
+# add things to "obj-m" here.
+
+ifeq ($(MODE),release)
+    ccflags-y = -O2 -DNDEBUG
+else
+    ccflags-y = -DDEBUG
+endif # DEBUG/RELEASE
+
+# CFLAGS from the build script
+ifdef MOBICORE_CFLAGS
+	ccflags-y += $(MOBICORE_CFLAGS)
+endif
+#EXTRA_CFLAGS+=-DDEBUG_VERBOSE
+
+ccflags-y += -I$(M) -Wall -D__$(PLATFORM)__
+# add our module to kernel.
+obj-m += mcDrvModule.o
+
+mcDrvModule-objs :=logging.o ops.o mem.o api.o pm.o main.o
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
+		Module.markers Module.symvers modules.order
+
diff --git a/drivers/gud/mobicore_driver/api.c b/drivers/gud/MobiCoreDriver/api.c
similarity index 80%
rename from drivers/gud/mobicore_driver/api.c
rename to drivers/gud/MobiCoreDriver/api.c
index b47383a0..e7fa8e2 100644
--- a/drivers/gud/mobicore_driver/api.c
+++ b/drivers/gud/MobiCoreDriver/api.c
@@ -14,23 +14,12 @@
 #include "mem.h"
 #include "debug.h"
 
-
-/*
- * Map a virtual memory buffer structure to Mobicore
- * @param instance
- * @param addr		address of the buffer(NB it must be kernel virtual!)
- * @param len		buffer length
- * @param handle	pointer to handle
- * @param phys_wsm_l2_table	pointer to physical L2 table(?)
- *
- * @return 0 if no error
- *
- */
 int mobicore_map_vmem(struct mc_instance *instance, void *addr,
-	uint32_t len, uint32_t *handle, uint32_t *phys)
+	uint32_t len, uint32_t *handle)
 {
-	return mc_register_wsm_l2(instance, (uint32_t)addr, len,
-		handle, phys);
+	phys_addr_t phys;
+	return mc_register_wsm_mmu(instance, addr, len,
+		handle, &phys);
 }
 EXPORT_SYMBOL(mobicore_map_vmem);
 
@@ -44,7 +33,7 @@
  */
 int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle)
 {
-	return mc_unregister_wsm_l2(instance, handle);
+	return mc_unregister_wsm_mmu(instance, handle);
 }
 EXPORT_SYMBOL(mobicore_unmap_vmem);
 
@@ -70,13 +59,11 @@
  * @param requested_size		size of the WSM
  * @param handle		pointer where the handle will be saved
  * @param virt_kernel_addr	pointer for the kernel virtual address
- * @param phys_addr		pointer for the physical address
  *
  * @return error code or 0 for success
  */
 int mobicore_allocate_wsm(struct mc_instance *instance,
-	unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr,
-	void **phys_addr)
+	unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr)
 {
 	struct mc_buffer *buffer = NULL;
 
@@ -85,7 +72,6 @@
 		return -EFAULT;
 
 	*handle = buffer->handle;
-	*phys_addr = buffer->phys;
 	*virt_kernel_addr = buffer->addr;
 	return 0;
 }
@@ -117,3 +103,14 @@
 }
 EXPORT_SYMBOL(mobicore_release);
 
+/*
+ * Test if mobicore can sleep
+ *
+ * @return true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void)
+{
+	return mc_sleep_ready();
+}
+EXPORT_SYMBOL(mobicore_sleep_ready);
+
diff --git a/drivers/gud/mobicore_driver/arm.h b/drivers/gud/MobiCoreDriver/arm.h
similarity index 100%
rename from drivers/gud/mobicore_driver/arm.h
rename to drivers/gud/MobiCoreDriver/arm.h
diff --git a/drivers/gud/MobiCoreDriver/build.sh b/drivers/gud/MobiCoreDriver/build.sh
new file mode 100644
index 0000000..db8410c
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#  source the setup script
+if [ -z $COMP_PATH_ROOT ]; then
+	echo "The build environment is not set!"
+	echo "Trying to source setupDrivers.sh automatically!"
+	source ../setupDrivers.sh || exit 1
+fi
+
+ROOT_PATH=$(dirname $(readlink -f $0))
+#  These folders need to be relative to the kernel dir or absolute!
+PLATFORM=EXYNOS_5410_STD
+CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
+PLATFORM_INCLUDE="$CODE_INCLUDE/platforms/$PLATFORM"
+MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
+
+MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I${PLATFORM_INCLUDE}"
+
+# Clean first
+make -C $CODE_INCLUDE clean
+
+make -C $LINUX_PATH \
+	MODE=$MODE \
+	ARCH=arm \
+	CROSS_COMPILE=$CROSS_COMPILE \
+	M=$CODE_INCLUDE \
+	"MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
+	modules
diff --git a/drivers/gud/mobicore_driver/debug.h b/drivers/gud/MobiCoreDriver/debug.h
similarity index 89%
rename from drivers/gud/mobicore_driver/debug.h
rename to drivers/gud/MobiCoreDriver/debug.h
index 1f9a632..d29efef 100644
--- a/drivers/gud/mobicore_driver/debug.h
+++ b/drivers/gud/MobiCoreDriver/debug.h
@@ -15,7 +15,7 @@
 extern struct device *mcd;
 
 #define MCDRV_DBG_ERROR(dev, txt, ...) \
-	dev_err(dev, "MobiCore %s() ### ERROR: " txt, \
+	dev_err(dev, "MobiCore %s() ### ERROR: " txt "\n", \
 		__func__, \
 		##__VA_ARGS__)
 
@@ -32,12 +32,12 @@
 #endif
 
 #define MCDRV_DBG(dev, txt, ...) \
-	dev_info(dev, "MobiCore %s(): " txt, \
+	dev_info(dev, "MobiCore %s(): " txt "\n", \
 		 __func__, \
 		 ##__VA_ARGS__)
 
 #define MCDRV_DBG_WARN(dev, txt, ...) \
-	dev_warn(dev, "MobiCore %s() WARNING: " txt, \
+	dev_warn(dev, "MobiCore %s() WARNING: " txt "\n", \
 		 __func__, \
 		 ##__VA_ARGS__)
 
diff --git a/drivers/gud/mobicore_driver/fastcall.h b/drivers/gud/MobiCoreDriver/fastcall.h
similarity index 70%
rename from drivers/gud/mobicore_driver/fastcall.h
rename to drivers/gud/MobiCoreDriver/fastcall.h
index 1c90520..33538df 100644
--- a/drivers/gud/mobicore_driver/fastcall.h
+++ b/drivers/gud/MobiCoreDriver/fastcall.h
@@ -36,9 +36,10 @@
  */
 #define MC_FC_INIT		-1
 #define MC_FC_INFO		-2
-#define MC_FC_POWER		-3
-#define MC_FC_DUMP		-4
 #define MC_FC_NWD_TRACE		-31 /* Mem trace setup fastcall */
+#ifdef TBASE_CORE_SWITCHER
+#define MC_FC_SWITCH_CORE   0x84000005
+#endif
 
 
 /*
@@ -96,6 +97,23 @@
 	} as_out;
 };
 
+#ifdef TBASE_CORE_SWITCHER
+/* fast call switch Core parameters */
+union mc_fc_swich_core {
+	union fc_generic as_generic;
+	struct {
+		uint32_t cmd;
+		uint32_t core_id;
+		uint32_t rfu[2];
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t state;
+		uint32_t ext_info;
+	} as_out;
+};
+#endif
 /*
  * _smc() - fast call to MobiCore
  *
@@ -104,23 +122,22 @@
 static inline long _smc(void *data)
 {
 	int ret = 0;
-	union fc_generic fc_generic;
 
 	if (data == NULL)
 		return -EPERM;
 
 #ifdef MC_SMC_FASTCALL
 	{
-		ret = smc_fastcall(data, sizeof(fc_generic));
+		ret = smc_fastcall(data, sizeof(union fc_generic));
 	}
 #else
-	memcpy(&fc_generic, data, sizeof(union fc_generic));
 	{
-		/* SVC expect values in r0-r3 */
-		register u32 reg0 __asm__("r0") = fc_generic.as_in.cmd;
-		register u32 reg1 __asm__("r1") = fc_generic.as_in.param[0];
-		register u32 reg2 __asm__("r2") = fc_generic.as_in.param[1];
-		register u32 reg3 __asm__("r3") = fc_generic.as_in.param[2];
+		union fc_generic *fc_generic = data;
+		/* SMC expects values in r0-r3 */
+		register u32 reg0 __asm__("r0") = fc_generic->as_in.cmd;
+		register u32 reg1 __asm__("r1") = fc_generic->as_in.param[0];
+		register u32 reg2 __asm__("r2") = fc_generic->as_in.param[1];
+		register u32 reg3 __asm__("r3") = fc_generic->as_in.param[2];
 
 		__asm__ volatile (
 #ifdef MC_ARCH_EXTENSION_SEC
@@ -131,13 +148,23 @@
 			"smc 0\n"
 			: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
 		);
+#ifdef __ARM_VE_A9X4_QEMU__
+		/* Qemu does not return to the address following the SMC
+		   instruction, so we have to insert several nop instructions to
+		   work around this Qemu bug. */
+		__asm__ volatile (
+		    "nop\n"
+		    "nop\n"
+		    "nop\n"
+		    "nop"
+		 );
+#endif
 
 		/* set response */
-		fc_generic.as_out.resp     = reg0;
-		fc_generic.as_out.ret      = reg1;
-		fc_generic.as_out.param[0] = reg2;
-		fc_generic.as_out.param[1] = reg3;
-		memcpy(data, &fc_generic, sizeof(union fc_generic));
+		fc_generic->as_out.resp     = reg0;
+		fc_generic->as_out.ret      = reg1;
+		fc_generic->as_out.param[0] = reg2;
+		fc_generic->as_out.param[1] = reg3;
 	}
 #endif
 	return ret;
diff --git a/drivers/gud/mobicore_driver/logging.c b/drivers/gud/MobiCoreDriver/logging.c
similarity index 95%
rename from drivers/gud/mobicore_driver/logging.c
rename to drivers/gud/MobiCoreDriver/logging.c
index 1f599f9..507c4ed 100644
--- a/drivers/gud/mobicore_driver/logging.c
+++ b/drivers/gud/MobiCoreDriver/logging.c
@@ -251,7 +251,7 @@
  */
 long mobicore_log_setup(void)
 {
-	unsigned long phys_log_buf;
+	phys_addr_t phys_log_buf;
 	union fc_generic fc_log;
 	struct sched_param param = { .sched_priority = 1 };
 
@@ -300,11 +300,12 @@
 
 	memset(&fc_log, 0, sizeof(fc_log));
 	fc_log.as_in.cmd = MC_FC_NWD_TRACE;
-	fc_log.as_in.param[0] = phys_log_buf;
-	fc_log.as_in.param[1] = log_size;
+	fc_log.as_in.param[0] = (uint32_t)phys_log_buf;
+	fc_log.as_in.param[1] = (uint32_t)(((uint64_t)phys_log_buf) >> 32);
+	fc_log.as_in.param[2] = log_size;
 
-	MCDRV_DBG(mcd, "fc_log virt=%p phys=%p ",
-		  log_buf, (void *)phys_log_buf);
+	MCDRV_DBG(mcd, "fc_log virt=%p phys=0x%llX",
+		  log_buf, (u64)phys_log_buf);
 	mc_fastcall(&fc_log);
 	MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
 
@@ -319,7 +320,7 @@
 
 	set_task_state(log_thread, TASK_INTERRUPTIBLE);
 
-	MCDRV_DBG(mcd, "fc_log Logger version %u\n", log_buf->version);
+	MCDRV_DBG(mcd, "fc_log Logger version %u", log_buf->version);
 	return 0;
 
 err_stop_kthread:
diff --git a/drivers/gud/mobicore_driver/logging.h b/drivers/gud/MobiCoreDriver/logging.h
similarity index 100%
rename from drivers/gud/mobicore_driver/logging.h
rename to drivers/gud/MobiCoreDriver/logging.h
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/MobiCoreDriver/main.c
similarity index 81%
rename from drivers/gud/mobicore_driver/main.c
rename to drivers/gud/MobiCoreDriver/main.c
index 0451452..ed2928a 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/MobiCoreDriver/main.c
@@ -112,12 +112,12 @@
 	int i;
 	struct page *page = virt_to_page(addr);
 	for (i = 0; i < (1<<order); i++) {
-		MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p\n", page);
-		ClearPageReserved(page);
+		MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p", page);
+		clear_bit(PG_reserved, &page->flags);
 		page++;
 	}
 
-	MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x\n", addr, order);
+	MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x", addr, order);
 	free_pages((unsigned long)addr, order);
 }
 
@@ -131,8 +131,9 @@
 		return -EINVAL;
 
 	MCDRV_DBG_VERBOSE(mcd,
-			  "handle=%u phys_addr=0x%p, virt_addr=0x%p len=%u\n",
-		  buffer->handle, buffer->phys, buffer->addr, buffer->len);
+			  "handle=%u phys_addr=0x%llx, virt_addr=0x%p len=%u",
+		  buffer->handle, (u64)buffer->phys,
+		  buffer->addr, buffer->len);
 
 	if (!atomic_dec_and_test(&buffer->usage)) {
 		MCDRV_DBG_VERBOSE(mcd, "Could not free %u", buffer->handle);
@@ -147,7 +148,7 @@
 }
 
 static uint32_t mc_find_cont_wsm_addr(struct mc_instance *instance, void *uaddr,
-	uint32_t *addr, uint32_t len)
+	void **addr, uint32_t len)
 {
 	int ret = 0;
 	struct mc_buffer *buffer;
@@ -162,7 +163,7 @@
 	/* search for the given handle in the buffers list */
 	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
 		if (buffer->uaddr == uaddr && buffer->len == len) {
-			*addr = (uint32_t)buffer->addr;
+			*addr = buffer->addr;
 			goto found;
 		}
 	}
@@ -186,7 +187,7 @@
 	struct task_struct *peer = NULL;
 	bool ret = false;
 
-	MCDRV_DBG(mcd, "Finding wsm for fd = %d\n", fd);
+	MCDRV_DBG_VERBOSE(mcd, "Finding wsm for fd = %d", fd);
 	if (!instance)
 		return false;
 
@@ -197,7 +198,7 @@
 	s = __get_socket(fp);
 	if (s) {
 		peer = get_pid_task(s->sk_peer_pid, PIDTYPE_PID);
-		MCDRV_DBG(mcd, "Found pid for fd %d\n", peer->pid);
+		MCDRV_DBG_VERBOSE(mcd, "Found pid for fd %d", peer->pid);
 	}
 	if (peer) {
 		task_lock(peer);
@@ -209,11 +210,10 @@
 			if (!fp)
 				continue;
 			if (fp->private_data == instance) {
-				MCDRV_DBG(mcd, "Found owner!");
+				MCDRV_DBG_VERBOSE(mcd, "Found owner!");
 				ret = true;
 				goto out;
 			}
-
 		}
 	} else {
 		MCDRV_DBG(mcd, "Owner not found!");
@@ -230,7 +230,7 @@
 #endif
 }
 static uint32_t mc_find_cont_wsm(struct mc_instance *instance, uint32_t handle,
-	int32_t fd, uint32_t *phys, uint32_t *len)
+	int32_t fd, phys_addr_t *phys, uint32_t *len)
 {
 	int ret = 0;
 	struct mc_buffer *buffer;
@@ -239,7 +239,7 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
@@ -251,7 +251,7 @@
 	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
 		if (buffer->handle == handle) {
 			if (mc_check_owner_fd(buffer->instance, fd)) {
-				*phys = (uint32_t)buffer->phys;
+				*phys = buffer->phys;
 				*len = buffer->len;
 				goto found;
 			} else {
@@ -326,7 +326,7 @@
 			/* Something is not right if we end up here, better not
 			 * clean the buffer so we just leak memory instead of
 			 * creating security issues */
-			MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped\n");
+			MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped");
 			return -EINVAL;
 		}
 	}
@@ -370,7 +370,7 @@
 {
 	struct mc_buffer *cbuffer = NULL;
 	void *addr = 0;
-	void *phys = 0;
+	phys_addr_t phys = 0;
 	unsigned int order;
 	unsigned long allocated_size;
 	int ret = 0;
@@ -379,13 +379,13 @@
 		return -EFAULT;
 
 	if (len == 0) {
-		MCDRV_DBG_WARN(mcd, "cannot allocate size 0\n");
+		MCDRV_DBG_WARN(mcd, "cannot allocate size 0");
 		return -ENOMEM;
 	}
 
 	order = get_order(len);
 	if (order > MAX_ORDER) {
-		MCDRV_DBG_WARN(mcd, "Buffer size too large\n");
+		MCDRV_DBG_WARN(mcd, "Buffer size too large");
 		return -ENOMEM;
 	}
 	allocated_size = (1 << order) * PAGE_SIZE;
@@ -398,23 +398,23 @@
 
 	if (cbuffer == NULL) {
 		MCDRV_DBG_WARN(mcd,
-			       "MMAP_WSM request: could not allocate buffer\n");
+			       "MMAP_WSM request: could not allocate buffer");
 		ret = -ENOMEM;
 		goto unlock_instance;
 	}
 	mutex_lock(&ctx.bufs_lock);
 
-	MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)\n",
+	MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)",
 			  len, order, allocated_size);
 
 	addr = (void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
 
 	if (addr == NULL) {
-		MCDRV_DBG_WARN(mcd, "get_free_pages failed\n");
+		MCDRV_DBG_WARN(mcd, "get_free_pages failed");
 		ret = -ENOMEM;
 		goto err;
 	}
-	phys = (void *)virt_to_phys(addr);
+	phys = virt_to_phys(addr);
 	cbuffer->handle = get_unique_id();
 	cbuffer->phys = phys;
 	cbuffer->addr = addr;
@@ -429,9 +429,11 @@
 	list_add(&cbuffer->list, &ctx.cont_bufs);
 
 	MCDRV_DBG_VERBOSE(mcd,
-			  "allocated phys=0x%p - 0x%p, size=%ld, kvirt=0x%p, h=%d\n",
-		  phys, (void *)((unsigned int)phys+allocated_size),
-		  allocated_size, addr, cbuffer->handle);
+			  "allocated phys=0x%llx - 0x%llx, size=%ld, kvirt=0x%p"
+			  ", h=%d",
+			  (u64)phys,
+			  (u64)(phys+allocated_size),
+			  allocated_size, addr, cbuffer->handle);
 	*buffer = cbuffer;
 	goto unlock;
 
@@ -457,7 +459,7 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
@@ -476,7 +478,7 @@
 	return ret;
 }
 
-void *get_mci_base_phys(unsigned int len)
+static phys_addr_t get_mci_base_phys(unsigned int len)
 {
 	if (ctx.mci_base.phys) {
 		return ctx.mci_base.phys;
@@ -487,45 +489,45 @@
 		ctx.mci_base.addr =
 			(void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
 		if (ctx.mci_base.addr == NULL) {
-			MCDRV_DBG_WARN(mcd, "get_free_pages failed\n");
+			MCDRV_DBG_WARN(mcd, "get_free_pages failed");
 			memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
-			return NULL;
+			return 0;
 		}
-		ctx.mci_base.phys = (void *)virt_to_phys(ctx.mci_base.addr);
+		ctx.mci_base.phys = virt_to_phys(ctx.mci_base.addr);
 		return ctx.mci_base.phys;
 	}
 }
 
 /*
- * Create a l2 table from a virtual memory buffer which can be vmalloc
+ * Create a MMU table from a virtual memory buffer which can be vmalloc
  * or user space virtual memory
  */
-int mc_register_wsm_l2(struct mc_instance *instance,
-	uint32_t buffer, uint32_t len,
-	uint32_t *handle, uint32_t *phys)
+int mc_register_wsm_mmu(struct mc_instance *instance,
+	void *buffer, uint32_t len,
+	uint32_t *handle, phys_addr_t *phys)
 {
 	int ret = 0;
-	struct mc_l2_table *table = NULL;
+	struct mc_mmu_table *table = NULL;
 	struct task_struct *task = current;
-	uint32_t kbuff = 0x0;
+	void *kbuff = NULL;
 
 	if (WARN(!instance, "No instance data available"))
 		return -EFAULT;
 
 	if (len == 0) {
-		MCDRV_DBG_ERROR(mcd, "len=0 is not supported!\n");
+		MCDRV_DBG_ERROR(mcd, "len=0 is not supported!");
 		return -EINVAL;
 	}
 
-	MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x\n", (void *)buffer, len);
+	MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x", buffer, len);
 
-	if (!mc_find_cont_wsm_addr(instance, (void *)buffer, &kbuff, len))
-		table = mc_alloc_l2_table(instance, NULL, (void *)kbuff, len);
+	if (!mc_find_cont_wsm_addr(instance, buffer, &kbuff, len))
+		table = mc_alloc_mmu_table(instance, NULL, kbuff, len);
 	else
-		table = mc_alloc_l2_table(instance, task, (void *)buffer, len);
+		table = mc_alloc_mmu_table(instance, task, buffer, len);
 
 	if (IS_ERR(table)) {
-		MCDRV_DBG_ERROR(mcd, "new_used_l2_table() failed\n");
+		MCDRV_DBG_ERROR(mcd, "mc_alloc_mmu_table() failed");
 		return -EINVAL;
 	}
 
@@ -533,19 +535,19 @@
 	*handle = table->handle;
 	/* WARNING: daemon shouldn't know this either, but live with it */
 	if (is_daemon(instance))
-		*phys = (uint32_t)table->phys;
+		*phys = table->phys;
 	else
 		*phys = 0;
 
-	MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=%p\n",
-			  *handle, (void *)*phys);
+	MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=0x%llX",
+			  *handle, (u64)(*phys));
 
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
 
 	return ret;
 }
 
-int mc_unregister_wsm_l2(struct mc_instance *instance, uint32_t handle)
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle)
 {
 	int ret = 0;
 
@@ -553,11 +555,11 @@
 		return -EFAULT;
 
 	/* free table (if no further locks exist) */
-	mc_free_l2_table(instance, handle);
+	mc_free_mmu_table(instance, handle);
 
 	return ret;
 }
-/* Lock the object from handle, it could be a WSM l2 table or a cont buffer! */
+/* Lock the object from handle, it could be a WSM MMU table or a cont buffer! */
 static int mc_lock_handle(struct mc_instance *instance, uint32_t handle)
 {
 	int ret = 0;
@@ -566,14 +568,14 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
 	mutex_lock(&instance->lock);
-	ret = mc_lock_l2_table(instance, handle);
+	ret = mc_lock_mmu_table(instance, handle);
 
-	/* Handle was not a l2 table but a cont buffer */
+	/* Handle was not a MMU table but a cont buffer */
 	if (ret == -EINVAL) {
 		/* Call the non locking variant! */
 		ret = __lock_buffer(instance, handle);
@@ -592,14 +594,14 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
 	mutex_lock(&instance->lock);
-	ret = mc_free_l2_table(instance, handle);
+	ret = mc_free_mmu_table(instance, handle);
 
-	/* Not a l2 table, then it must be a buffer */
+	/* Not a MMU table, then it must be a buffer */
 	if (ret == -EINVAL) {
 		/* Call the non locking variant! */
 		ret = __free_buffer(instance, handle, true);
@@ -609,35 +611,31 @@
 	return ret;
 }
 
-static uint32_t mc_find_wsm_l2(struct mc_instance *instance,
+static phys_addr_t mc_find_wsm_mmu(struct mc_instance *instance,
 	uint32_t handle, int32_t fd)
 {
-	uint32_t ret = 0;
-
 	if (WARN(!instance, "No instance data available"))
 		return 0;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return 0;
 	}
 
-	ret = mc_find_l2_table(handle, fd);
-
-	return ret;
+	return mc_find_mmu_table(handle, fd);
 }
 
-static int mc_clean_wsm_l2(struct mc_instance *instance)
+static int mc_clean_wsm_mmu(struct mc_instance *instance)
 {
 	if (WARN(!instance, "No instance data available"))
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
-	mc_clean_l2_tables();
+	mc_clean_mmu_tables();
 
 	return 0;
 }
@@ -646,19 +644,20 @@
 {
 	struct mc_instance *instance = get_instance(file);
 	unsigned long len = vmarea->vm_end - vmarea->vm_start;
-	void *paddr = (void *)(vmarea->vm_pgoff << PAGE_SHIFT);
+	phys_addr_t paddr = (vmarea->vm_pgoff << PAGE_SHIFT);
 	unsigned int pfn;
 	struct mc_buffer *buffer = 0;
 	int ret = 0;
 
-	MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=%p)\n",
-			  (void *)vmarea->vm_start, len, ctx.mci_base.phys);
+	MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=0x%llX)",
+			  (void *)vmarea->vm_start, len,
+			  (u64)ctx.mci_base.phys);
 
 	if (WARN(!instance, "No instance data available"))
 		return -EFAULT;
 
 	if (len == 0) {
-		MCDRV_DBG_ERROR(mcd, "cannot allocate size 0\n");
+		MCDRV_DBG_ERROR(mcd, "cannot allocate size 0");
 		return -ENOMEM;
 	}
 	if (paddr) {
@@ -722,7 +721,7 @@
 			vmarea->vm_page_prot);
 	}
 
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
 
 	return ret;
 }
@@ -768,21 +767,24 @@
 
 	case MC_IO_REG_WSM:{
 		struct mc_ioctl_reg_wsm reg;
+		phys_addr_t phys;
 		if (copy_from_user(&reg, uarg, sizeof(reg)))
 			return -EFAULT;
 
-		ret = mc_register_wsm_l2(instance, reg.buffer,
-			reg.len, &reg.handle, &reg.table_phys);
+		ret = mc_register_wsm_mmu(instance, (void *)reg.buffer,
+			reg.len, &reg.handle, &phys);
+		reg.table_phys = phys;
+
 		if (!ret) {
 			if (copy_to_user(uarg, &reg, sizeof(reg))) {
 				ret = -EFAULT;
-				mc_unregister_wsm_l2(instance, reg.handle);
+				mc_unregister_wsm_mmu(instance, reg.handle);
 			}
 		}
 		break;
 	}
 	case MC_IO_UNREG_WSM:
-		ret = mc_unregister_wsm_l2(instance, (uint32_t)arg);
+		ret = mc_unregister_wsm_mmu(instance, (uint32_t)arg);
 		break;
 
 	case MC_IO_VERSION:
@@ -803,7 +805,7 @@
 			return -EFAULT;
 
 		map.handle = buffer->handle;
-		map.phys_addr = (unsigned long)buffer->phys;
+		map.phys_addr = buffer->phys;
 		map.reused = 0;
 		if (copy_to_user(uarg, &map, sizeof(map)))
 			ret = -EFAULT;
@@ -812,7 +814,7 @@
 		break;
 	}
 	default:
-		MCDRV_DBG_ERROR(mcd, "unsupported cmd=%d\n", cmd);
+		MCDRV_DBG_ERROR(mcd, "unsupported cmd=0x%x", cmd);
 		ret = -ENOIOCTLCMD;
 		break;
 
@@ -836,7 +838,7 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
@@ -856,8 +858,8 @@
 			return -EFAULT;
 
 		ctx.mcp = ctx.mci_base.addr + init.mcp_offset;
-		ret = mc_init((uint32_t)ctx.mci_base.phys, init.nq_offset,
-			init.nq_length, init.mcp_offset, init.mcp_length);
+		ret = mc_init(ctx.mci_base.phys, init.nq_length,
+			init.mcp_offset, init.mcp_length);
 		break;
 	}
 	case MC_IO_INFO: {
@@ -890,14 +892,14 @@
 		ret = mc_unlock_handle(instance, (uint32_t)arg);
 		break;
 	case MC_IO_CLEAN_WSM:
-		ret = mc_clean_wsm_l2(instance);
+		ret = mc_clean_wsm_mmu(instance);
 		break;
 	case MC_IO_RESOLVE_WSM: {
-		uint32_t phys;
+		phys_addr_t phys;
 		struct mc_ioctl_resolv_wsm wsm;
 		if (copy_from_user(&wsm, uarg, sizeof(wsm)))
 			return -EFAULT;
-		phys = mc_find_wsm_l2(instance, wsm.handle, wsm.fd);
+		phys = mc_find_wsm_mmu(instance, wsm.handle, wsm.fd);
 		if (!phys)
 			return -EINVAL;
 
@@ -909,7 +911,8 @@
 	}
 	case MC_IO_RESOLVE_CONT_WSM: {
 		struct mc_ioctl_resolv_cont_wsm cont_wsm;
-		uint32_t phys = 0, len = 0;
+		phys_addr_t phys = 0;
+		uint32_t len = 0;
 		if (copy_from_user(&cont_wsm, uarg, sizeof(cont_wsm)))
 			return -EFAULT;
 		ret = mc_find_cont_wsm(instance, cont_wsm.handle, cont_wsm.fd,
@@ -928,7 +931,7 @@
 			return -EFAULT;
 
 		map.reused = (ctx.mci_base.phys != 0);
-		map.phys_addr = (unsigned long)get_mci_base_phys(map.len);
+		map.phys_addr = get_mci_base_phys(map.len);
 		if (!map.phys_addr) {
 			MCDRV_DBG_ERROR(mcd, "Failed to setup MCI buffer!");
 			return -EFAULT;
@@ -939,10 +942,6 @@
 		ret = 0;
 		break;
 	}
-	case MC_IO_MAP_PWSM:{
-		break;
-	}
-
 	case MC_IO_LOG_SETUP: {
 #ifdef MC_MEM_TRACES
 		ret = mobicore_log_setup();
@@ -985,27 +984,27 @@
 		return -EFAULT;
 
 	/* avoid debug output on non-error, because this is called quite often */
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+	MCDRV_DBG_VERBOSE(mcd, "enter");
 
 	/* only the MobiCore Daemon is allowed to call this function */
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
 	if (buffer_len < sizeof(unsigned int)) {
-		MCDRV_DBG_ERROR(mcd, "invalid length\n");
+		MCDRV_DBG_ERROR(mcd, "invalid length");
 		return -EINVAL;
 	}
 
 	for (;;) {
 		if (wait_for_completion_interruptible(&ctx.isr_comp)) {
-			MCDRV_DBG_VERBOSE(mcd, "read interrupted\n");
+			MCDRV_DBG_VERBOSE(mcd, "read interrupted");
 			return -ERESTARTSYS;
 		}
 
 		ssiq_counter = atomic_read(&ctx.isr_counter);
-		MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i\n",
+		MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i",
 				  ssiq_counter, ctx.evt_counter);
 
 		if (ssiq_counter != ctx.evt_counter) {
@@ -1017,12 +1016,12 @@
 
 		/* end loop if non-blocking */
 		if (file->f_flags & O_NONBLOCK) {
-			MCDRV_DBG_ERROR(mcd, "non-blocking read\n");
+			MCDRV_DBG_ERROR(mcd, "non-blocking read");
 			return -EAGAIN;
 		}
 
 		if (signal_pending(current)) {
-			MCDRV_DBG_VERBOSE(mcd, "received signal.\n");
+			MCDRV_DBG_VERBOSE(mcd, "received signal.");
 			return -ERESTARTSYS;
 		}
 	}
@@ -1031,7 +1030,7 @@
 	ret = copy_to_user(buffer, &ctx.evt_counter, sizeof(unsigned int));
 
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "copy_to_user failed\n");
+		MCDRV_DBG_ERROR(mcd, "copy_to_user failed");
 		return -EFAULT;
 	}
 
@@ -1061,6 +1060,43 @@
 	return instance;
 }
 
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+static ssize_t mc_fd_write(struct file *file, const char __user *buffer,
+			size_t buffer_len, loff_t *x)
+{
+	uint32_t cpu_new;
+	/* we only consider one digit */
+	char buf[2];
+	struct mc_instance *instance = get_instance(file);
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	/* Invalid data, nothing to do */
+	if (buffer_len < 1)
+		return -EINVAL;
+
+	/* Invalid data, nothing to do */
+	if (copy_from_user(buf, buffer, min(sizeof(buf), buffer_len)))
+		return -EFAULT;
+
+	if (buf[0] == 'n') {
+		mc_nsiq();
+	/* If it's a digit then switch cores */
+	} else if ((buf[0] >= '0') && (buf[0] <= '9')) {
+		cpu_new = buf[0] - '0';
+		if (cpu_new <= 8) {
+			MCDRV_DBG_VERBOSE(mcd, "Set Active Cpu: %d\n", cpu_new);
+			mc_switch_core(cpu_new);
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return buffer_len;
+}
+#endif
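
For reference, the debug write handler above only inspects the first character written: 'n' triggers an NSIQ, while a single digit selects the core to switch to. A minimal user-space sketch, assuming the node is /dev/mobicore (taken from the driver's file header comment; the actual node name and permissions depend on the build) and a kernel built with TBASE_CORE_SWITCHER and DEBUG:

	/* Hypothetical example: ask the driver to switch the active core to CPU 2. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/mobicore", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* writing "n" instead would trigger an NSIQ */
		if (write(fd, "2", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
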
+
 /*
  * Release a mobicore instance object and all objects related to it
  * @instance:	instance
@@ -1074,7 +1110,7 @@
 		return -EFAULT;
 
 	mutex_lock(&instance->lock);
-	mc_clear_l2_tables(instance);
+	mc_clear_mmu_tables(instance);
 
 	mutex_lock(&ctx.bufs_lock);
 	/* release all mapped data */
@@ -1112,7 +1148,7 @@
 {
 	struct mc_instance *instance;
 
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+	MCDRV_DBG_VERBOSE(mcd, "enter");
 
 	instance = mc_alloc_instance();
 	if (instance == NULL)
@@ -1141,7 +1177,7 @@
 		return -ENOMEM;
 	instance = get_instance(file);
 
-	MCDRV_DBG(mcd, "accept this as MobiCore Daemon\n");
+	MCDRV_DBG(mcd, "accept this as MobiCore Daemon");
 
 	ctx.daemon_inst = instance;
 	ctx.daemon = current;
@@ -1172,7 +1208,7 @@
 
 	/* check if daemon closes us. */
 	if (is_daemon(instance)) {
-		MCDRV_DBG_WARN(mcd, "WARNING: MobiCore Daemon died\n");
+		MCDRV_DBG_WARN(mcd, "MobiCore Daemon died");
 		ctx.daemon_inst = NULL;
 		ctx.daemon = NULL;
 	}
@@ -1183,7 +1219,7 @@
 	 * ret is quite irrelevant here as most apps don't care about the
 	 * return value from close() and it's quite difficult to recover
 	 */
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
 
 	return (int)ret;
 }
@@ -1200,7 +1236,9 @@
 
 	/* signal the daemon */
 	complete(&ctx.isr_comp);
-
+#ifdef MC_MEM_TRACES
+	mobicore_log_read();
+#endif
 	return IRQ_HANDLED;
 }
 
@@ -1221,6 +1259,9 @@
 	.release	= mc_fd_release,
 	.unlocked_ioctl	= mc_fd_user_ioctl,
 	.mmap		= mc_fd_mmap,
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+	.write          = mc_fd_write,
+#endif
 };
 
 static int create_devices(void)
@@ -1239,17 +1280,17 @@
 
 	ret = alloc_chrdev_region(&mc_dev_admin, 0, MC_DEV_MAX, "mobicore");
 	if (ret < 0) {
-		MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region\n");
+		MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region");
 		goto error;
 	}
 	mc_dev_user = MKDEV(MAJOR(mc_dev_admin), 1);
 
-	MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_region));
+	MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_admin));
 
 	/* First the ADMIN node */
 	ret = cdev_add(&mc_admin_cdev,  mc_dev_admin, 1);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "admin device register failed\n");
+		MCDRV_DBG_ERROR(mcd, "admin device register failed");
 		goto error;
 	}
 	mc_admin_cdev.owner = THIS_MODULE;
@@ -1260,7 +1301,7 @@
 
 	ret = cdev_add(&mc_user_cdev, mc_dev_user, 1);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "user device register failed\n");
+		MCDRV_DBG_ERROR(mcd, "user device register failed");
 		goto error_unregister;
 	}
 	mc_user_cdev.owner = THIS_MODULE;
@@ -1301,13 +1342,13 @@
 	/* Hardware does not support ARM TrustZone -> Cannot continue! */
 	if (!has_security_extensions()) {
 		MCDRV_DBG_ERROR(mcd,
-				"Hardware doesn't support ARM TrustZone!\n");
+				"Hardware doesn't support ARM TrustZone!");
 		return -ENODEV;
 	}
 
 	/* Running in secure mode -> Cannot load the driver! */
 	if (is_secure_mode()) {
-		MCDRV_DBG_ERROR(mcd, "Running in secure MODE!\n");
+		MCDRV_DBG_ERROR(mcd, "Running in secure MODE!");
 		return -ENODEV;
 	}
 
@@ -1320,18 +1361,18 @@
 	/* initialize event counter for signaling of an IRQ to zero */
 	atomic_set(&ctx.isr_counter, 0);
 
-	/* set up S-SIQ interrupt handler */
+	/* set up S-SIQ interrupt handler */
 	ret = request_irq(MC_INTR_SSIQ, mc_ssiq_isr, IRQF_TRIGGER_RISING,
 			MC_ADMIN_DEVNODE, &ctx);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "interrupt request failed\n");
+		MCDRV_DBG_ERROR(mcd, "interrupt request failed");
 		goto err_req_irq;
 	}
 
 #ifdef MC_PM_RUNTIME
 	ret = mc_pm_initialize(&ctx);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "Power Management init failed!\n");
+		MCDRV_DBG_ERROR(mcd, "Power Management init failed!");
 		goto free_isr;
 	}
 #endif
@@ -1340,7 +1381,7 @@
 	if (ret != 0)
 		goto free_pm;
 
-	ret = mc_init_l2_tables();
+	ret = mc_init_mmu_tables();
 
 #ifdef MC_CRYPTO_CLOCK_MANAGEMENT
 	ret = mc_pm_clock_initialize();
@@ -1361,7 +1402,7 @@
 	mutex_init(&ctx.bufs_lock);
 
 	memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
-	MCDRV_DBG(mcd, "initialized\n");
+	MCDRV_DBG(mcd, "initialized");
 	return 0;
 
 free_pm:
@@ -1381,12 +1422,12 @@
  */
 static void __exit mobicore_exit(void)
 {
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+	MCDRV_DBG_VERBOSE(mcd, "enter");
 #ifdef MC_MEM_TRACES
 	mobicore_log_free();
 #endif
 
-	mc_release_l2_tables();
+	mc_release_mmu_tables();
 
 #ifdef MC_PM_RUNTIME
 	mc_pm_free();
@@ -1408,6 +1449,15 @@
 	MCDRV_DBG_VERBOSE(mcd, "exit");
 }
 
+bool mc_sleep_ready(void)
+{
+#ifdef MC_PM_RUNTIME
+	return mc_pm_sleep_ready();
+#else
+	return true;
+#endif
+}
+
 /* Linux Driver Module Macros */
 module_init(mobicore_init);
 module_exit(mobicore_exit);
diff --git a/drivers/gud/mobicore_driver/main.h b/drivers/gud/MobiCoreDriver/main.h
similarity index 89%
rename from drivers/gud/mobicore_driver/main.h
rename to drivers/gud/MobiCoreDriver/main.h
index 871191e..11e304c 100644
--- a/drivers/gud/mobicore_driver/main.h
+++ b/drivers/gud/MobiCoreDriver/main.h
@@ -52,7 +52,7 @@
 	/* virtual Userspace start address */
 	void			*uaddr;
 	/* physical start address */
-	void			*phys;
+	phys_addr_t		phys;
 	/* order of number of pages */
 	unsigned int		order;
 	uint32_t		len;
@@ -83,8 +83,8 @@
 };
 
 struct mc_sleep_mode {
-	uint16_t	SleepReq;
-	uint16_t	ReadyToSleep;
+	uint16_t	sleep_req;
+	uint16_t	ready_to_sleep;
 };
 
 /* MobiCore is idle. No scheduling required. */
@@ -129,14 +129,14 @@
 int mc_release_instance(struct mc_instance *instance);
 
 /*
- * mc_register_wsm_l2() - Create a L2 table from a virtual memory buffer which
+ * mc_register_wsm_mmu() - Create an MMU table from a virtual memory buffer which
  * can be vmalloc or user space virtual memory
  */
-int mc_register_wsm_l2(struct mc_instance *instance,
-	uint32_t buffer, uint32_t len,
-	uint32_t *handle, uint32_t *phys);
+int mc_register_wsm_mmu(struct mc_instance *instance,
+	void *buffer, uint32_t len,
+	uint32_t *handle, phys_addr_t *phys);
 /* Unregister the buffer mapped above */
-int mc_unregister_wsm_l2(struct mc_instance *instance, uint32_t handle);
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle);
 
 /* Allocate one mc_buffer of contiguous space */
 int mc_get_buffer(struct mc_instance *instance,
@@ -147,4 +147,7 @@
 /* Check if the other end of the fd owns instance */
 bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd);
 
+/* Test if sleep is possible */
+bool mc_sleep_ready(void);
+
 #endif /* _MC_MAIN_H_ */
diff --git a/drivers/gud/MobiCoreDriver/mem.c b/drivers/gud/MobiCoreDriver/mem.c
new file mode 100644
index 0000000..2d92f74
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/mem.c
@@ -0,0 +1,743 @@
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "main.h"
+#include "debug.h"
+#include "mem.h"
+
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+
+#ifdef LPAE_SUPPORT
+#define MMU_TYPE_PAGE	(3 << 0)
+#define MMU_BUFFERABLE	(1 << 2) /* AttrIndx[0] */
+#define MMU_CACHEABLE	(1 << 3) /* AttrIndx[1] */
+#define MMU_NS		(1 << 5)
+#define MMU_AP_RW_ALL	(1 << 6) /* AP[2:1], RW, at any privilege level */
+#define MMU_EXT_SHARED	(3 << 8) /* SH[1:0], inner shareable */
+#define MMU_EXT_AF	(1 << 10) /* Access Flag */
+#define MMU_EXT_NG	(1 << 11)
+#define MMU_EXT_XN      (((uint64_t)1) << 54) /* XN */
+#else
+#define MMU_TYPE_EXT	(3 << 0)	/* v5 */
+#define MMU_TYPE_SMALL	(2 << 0)
+#define MMU_BUFFERABLE	(1 << 2)
+#define MMU_CACHEABLE	(1 << 3)
+#define MMU_EXT_AP0	(1 << 4)
+#define MMU_EXT_AP1	(2 << 4)
+#define MMU_EXT_TEX(x)	((x) << 6)	/* v5 */
+#define MMU_EXT_SHARED	(1 << 10)	/* v6 */
+#define MMU_EXT_NG	(1 << 11)	/* v6 */
+#endif
+
+/* MobiCore memory context data */
+struct mc_mem_context mem_ctx;
+
+static inline void release_page(struct page *page)
+{
+	set_bit(PG_dirty, &page->flags);
+
+	page_cache_release(page);
+}
+
+static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
+	int pages_no, struct page **pages)
+{
+	int locked_pages;
+
+	/* lock user pages, must hold the mmap_sem to do this. */
+	down_read(&(task->mm->mmap_sem));
+	locked_pages = get_user_pages(
+				task,
+				task->mm,
+				(unsigned long)virt_start_page_addr,
+				pages_no,
+				1, /* write access */
+				0,
+				pages,
+				NULL);
+	up_read(&(task->mm->mmap_sem));
+
+	/* check if we could lock all pages. */
+	if (locked_pages != pages_no) {
+		MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
+				locked_pages);
+		if (locked_pages > 0) {
+			/* release all locked pages. */
+			release_pages(pages, locked_pages, 0);
+		}
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Get kernel pointer to shared MMU table given a per-process reference */
+static void *get_mmu_table_kernel_virt(struct mc_mmu_table *table)
+{
+	if (WARN(!table, "Invalid MMU table"))
+		return NULL;
+
+	if (WARN(!table->set, "Invalid MMU table set"))
+		return NULL;
+
+	if (WARN(!table->set->kernel_virt, "Invalid MMU pointer"))
+		return NULL;
+
+	return &(table->set->kernel_virt->table[table->idx]);
+}
+
+static inline int in_use(struct mc_mmu_table *table)
+{
+	return atomic_read(&table->usage) > 0;
+}
+
+/*
+ * Search the list of used MMU tables and return the one with the handle.
+ * Assumes the table_lock is taken.
+ */
+struct mc_mmu_table *find_mmu_table(unsigned int handle)
+{
+	struct mc_mmu_table *table;
+
+	list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+		if (table->handle == handle)
+			return table;
+	}
+	return NULL;
+}
+
+/*
+ * Allocate a new MMU table store and add its MMU_TABLES_PER_PAGE tables to
+ * the free tables list. Assumes the table_lock is already taken by the caller.
+ */
+static int alloc_mmu_table_store(void)
+{
+	unsigned long store;
+	struct mc_mmu_tables_set *mmutable_set;
+	struct mc_mmu_table *mmutable, *mmutable2;
+	struct page *page;
+	int ret = 0, i;
+	/* temp list for holding the MMU tables */
+	LIST_HEAD(temp);
+
+	store = get_zeroed_page(GFP_KERNEL);
+	if (!store)
+		return -ENOMEM;
+
+	/*
+	 * Actually, locking is not necessary, because kernel
+	 * memory is not supposed to get swapped out. But we
+	 * play it safe.
+	 */
+	page = virt_to_page(store);
+	set_bit(PG_reserved, &page->flags);
+
+	/* add all the descriptors to the free descriptors list */
+	mmutable_set = kmalloc(sizeof(*mmutable_set), GFP_KERNEL | __GFP_ZERO);
+	if (mmutable_set == NULL) {
+		ret = -ENOMEM;
+		goto free_store;
+	}
+	/* initialize */
+	mmutable_set->kernel_virt = (void *)store;
+	mmutable_set->page = page;
+	mmutable_set->phys = virt_to_phys((void *)store);
+	/* the set is not yet used */
+	atomic_set(&mmutable_set->used_tables, 0);
+
+	/* init add to list. */
+	INIT_LIST_HEAD(&(mmutable_set->list));
+	list_add(&mmutable_set->list, &mem_ctx.mmu_tables_sets);
+
+	for (i = 0; i < MMU_TABLES_PER_PAGE; i++) {
+		/* allocate a WSM MMU descriptor */
+		mmutable  = kmalloc(sizeof(*mmutable), GFP_KERNEL | __GFP_ZERO);
+		if (mmutable == NULL) {
+			ret = -ENOMEM;
+			MCDRV_DBG_ERROR(mcd, "out of memory");
+			/* Free the full temp list and the store in this case */
+			goto free_temp_list;
+		}
+
+		/* set set reference */
+		mmutable->set = mmutable_set;
+		mmutable->idx = i;
+		mmutable->virt = get_mmu_table_kernel_virt(mmutable);
+		mmutable->phys = mmutable_set->phys+i*sizeof(struct mmutable);
+		atomic_set(&mmutable->usage, 0);
+
+		/* add to temp list. */
+		INIT_LIST_HEAD(&mmutable->list);
+		list_add_tail(&mmutable->list, &temp);
+	}
+
+	/*
+	 * If everything went ok then merge the temp list with the global
+	 * free list
+	 */
+	list_splice_tail(&temp, &mem_ctx.free_mmu_tables);
+	return 0;
+free_temp_list:
+	list_for_each_entry_safe(mmutable, mmutable2, &temp, list) {
+		kfree(mmutable);
+	}
+
+	list_del(&mmutable_set->list);
+
+free_store:
+	free_page(store);
+	return ret;
+}
+/*
+ * Get an MMU table from the free tables list or allocate a new one and
+ * initialize it. Assumes the table_lock is already taken.
+ */
+static struct mc_mmu_table *alloc_mmu_table(struct mc_instance *instance)
+{
+	int ret = 0;
+	struct mc_mmu_table *table = NULL;
+
+	if (list_empty(&mem_ctx.free_mmu_tables)) {
+		ret = alloc_mmu_table_store();
+		if (ret) {
+			MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
+			return ERR_PTR(-ENOMEM);
+		}
+		/* if it's still empty something wrong has happened */
+		if (list_empty(&mem_ctx.free_mmu_tables)) {
+			MCDRV_DBG_ERROR(mcd,
+					"Free list not updated correctly!");
+			return ERR_PTR(-EFAULT);
+		}
+	}
+
+	/* get a WSM MMU descriptor */
+	table  = list_first_entry(&mem_ctx.free_mmu_tables,
+		struct mc_mmu_table, list);
+	if (table == NULL) {
+		MCDRV_DBG_ERROR(mcd, "out of memory");
+		return ERR_PTR(-ENOMEM);
+	}
+	/* Move it to the used MMU tables list */
+	list_move_tail(&table->list, &mem_ctx.mmu_tables);
+
+	table->handle = get_unique_id();
+	table->owner = instance;
+
+	atomic_inc(&table->set->used_tables);
+	atomic_inc(&table->usage);
+
+	MCDRV_DBG_VERBOSE(mcd,
+			  "chunkPhys=0x%llX, idx=%d",
+			  (u64)table->set->phys, table->idx);
+
+	return table;
+}
+
+/*
+ * Frees the object associated with an MMU table. Initially the object is moved
+ * to the free tables list, but if all the tables of the store are free
+ * then the store is also released.
+ * Assumes the table_lock is already taken.
+ */
+static void free_mmu_table(struct mc_mmu_table *table)
+{
+	struct mc_mmu_tables_set *mmutable_set;
+
+	if (WARN(!table, "Invalid table"))
+		return;
+
+	mmutable_set = table->set;
+	if (WARN(!mmutable_set, "Invalid table set"))
+		return;
+
+	list_move_tail(&table->list, &mem_ctx.free_mmu_tables);
+
+	/* if nobody uses this set, we can release it. */
+	if (atomic_dec_and_test(&mmutable_set->used_tables)) {
+		struct mc_mmu_table *tmp;
+
+		/* remove from list */
+		list_del(&mmutable_set->list);
+		/*
+		 * All the MMU tables are in the free list for this set
+		 * so we can just remove them from there
+		 */
+		list_for_each_entry_safe(table, tmp, &mem_ctx.free_mmu_tables,
+					 list) {
+			if (table->set == mmutable_set) {
+				list_del(&table->list);
+				kfree(table);
+			}
+		}
+
+		/*
+		 * We shouldn't recover from this since it was some data
+		 * corruption before
+		 */
+		BUG_ON(!mmutable_set->page);
+		clear_bit(PG_reserved, &(mmutable_set->page)->flags);
+
+
+		BUG_ON(!mmutable_set->kernel_virt);
+		free_page((unsigned long)mmutable_set->kernel_virt);
+
+		kfree(mmutable_set);
+	}
+}
+
+/*
+ * Create an MMU table in a WSM container that has been allocated previously.
+ * Assumes the table lock is already taken, or that there is no need to take it
+ * (when the MMU table is first created, the full list is locked).
+ *
+ * @task	pointer to task owning WSM
+ * @wsm_buffer  user space WSM start
+ * @wsm_len     WSM length
+ * @table       Pointer to MMU table details
+ */
+static int map_buffer(struct task_struct *task, void *wsm_buffer,
+		      unsigned int wsm_len, struct mc_mmu_table *table)
+{
+	int		ret = 0;
+	unsigned int	i, nr_of_pages;
+	/* start address of the 4 KiB page of wsm_buffer */
+	void		*virt_addr_page;
+	struct page	*page;
+	struct mmutable	*mmutable;
+	struct page	**mmutable_as_array_of_pointers_to_page;
+	/* page offset in wsm buffer */
+	unsigned int offset;
+
+	if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
+		return -EINVAL;
+
+	if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
+		return -EINVAL;
+
+	if (WARN(!table, "Invalid mapping table for WSM"))
+		return -EINVAL;
+
+	/* no size > 1 MiB supported */
+	if (wsm_len > SZ_1M) {
+		MCDRV_DBG_ERROR(mcd, "size > 1 MiB");
+		return -EINVAL;
+	}
+
+	MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x", wsm_buffer,
+			  wsm_len);
+
+	/* calculate page usage */
+	virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
+	offset = (unsigned int)	(((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
+	nr_of_pages  = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
+
+	MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d",
+			  virt_addr_page, nr_of_pages);
+
+	/* MMU table can hold max 1MiB in 256 pages. */
+	if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
+		MCDRV_DBG_ERROR(mcd, "WSM pages exceed 1 MiB");
+		return -EINVAL;
+	}
+
+	mmutable = table->virt;
+	/*
+	 * We use the memory for the MMU table to hold the page pointers
+	 * and convert them later. This works, as everything comes
+	 * down to a 32-bit value.
+	 */
+	mmutable_as_array_of_pointers_to_page = (struct page **)mmutable;
+
+	/* Request comes from user space */
+	if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
+		/*
+		 * lock user page in memory, so they do not get swapped
+		 * out.
+		 * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
+		 * function, maybe it is called fast_gup() in some versions.
+		 * handle user process doing a fork().
+		 * Child should not get things.
+		 * http://osdir.com/ml/linux-media/2009-07/msg00813.html
+		 * http://lwn.net/Articles/275808/
+		 */
+		ret = lock_pages(task, virt_addr_page, nr_of_pages,
+				 mmutable_as_array_of_pointers_to_page);
+		if (ret != 0) {
+			MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed");
+			return ret;
+		}
+	}
+	/* Request comes from kernel space(cont buffer) */
+	else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
+		void *uaddr = wsm_buffer;
+		for (i = 0; i < nr_of_pages; i++) {
+			page = virt_to_page(uaddr);
+			if (!page) {
+				MCDRV_DBG_ERROR(mcd, "failed to map address");
+				return -EINVAL;
+			}
+			get_page(page);
+			mmutable_as_array_of_pointers_to_page[i] = page;
+			uaddr += PAGE_SIZE;
+		}
+	}
+	/* Request comes from kernel space(vmalloc buffer) */
+	else {
+		void *uaddr = wsm_buffer;
+		for (i = 0; i < nr_of_pages; i++) {
+			page = vmalloc_to_page(uaddr);
+			if (!page) {
+				MCDRV_DBG_ERROR(mcd, "failed to map address");
+				return -EINVAL;
+			}
+			get_page(page);
+			mmutable_as_array_of_pointers_to_page[i] = page;
+			uaddr += PAGE_SIZE;
+		}
+	}
+
+	table->pages = nr_of_pages;
+
+	/*
+	 * create MMU Table entries.
+	 * used_mmutable->table contains a list of page pointers here.
+	 * For a proper cleanup we have to ensure that the following
+	 * code either works and used_mmutable contains a valid MMU table
+	 * - or fails and used_mmutable->table contains the list of page
+	 * pointers.
+	 * Any mixed contents will make cleanup difficult.
+	 * Fill the table in reverse order as the table is used as input and
+	 * output.
+	 */
+	i = MC_ARM_MMU_TABLE_ENTRIES-1;
+	do {
+		if (i < nr_of_pages) {
+#ifdef LPAE_SUPPORT
+			uint64_t pte;
+#elif defined(CONFIG_ARM_LPAE) && !defined(LPAE_SUPPORT)
+			/* Nwd supports 64bit addresses, SWD only 32bit */
+			uint64_t pte64;
+			uint32_t pte;
+#else
+			uint32_t pte;
+#endif
+			page = mmutable_as_array_of_pointers_to_page[i];
+
+			/*
+			 * create MMU table entry, see ARM MMU docu for details
+			 * about flags stored in the lowest 12 bits.
+			 * As a side reference, the Article
+			 * "ARM's multiply-mapped memory mess"
+			 * found in the collection at
+			 * http://lwn.net/Articles/409032/
+			 * is also worth reading.
+			 */
+#ifdef LPAE_SUPPORT
+			pte = page_to_phys(page);
+			pte |=	MMU_EXT_XN
+				| MMU_EXT_NG
+				| MMU_EXT_AF
+				| MMU_AP_RW_ALL
+				| MMU_NS
+				| MMU_CACHEABLE | MMU_BUFFERABLE
+				| MMU_TYPE_PAGE;
+#elif defined(CONFIG_ARM_LPAE) && !defined(LPAE_SUPPORT)
+			/*
+			 * NWD uses 64bit addresses but SWD can handle only
+			 * short descriptors
+			 * and physical addresses not bigger than 4GB
+			 */
+			pte64 = page_to_phys(page);
+			if ((pte64 >> 32) != 0) {
+				MCDRV_DBG_ERROR(mcd,
+						"physical addresses bigger than 4GB not supported");
+				return -EINVAL;
+			}
+			pte = (uint32_t)pte64;
+			pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+				| MMU_CACHEABLE | MMU_BUFFERABLE
+				| MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#else
+			pte = page_to_phys(page);
+			pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+				| MMU_CACHEABLE | MMU_BUFFERABLE
+				| MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#endif /* LPAE_SUPPORT */
+			/*
+			 * Linux uses different mappings for SMP systems (the
+			 * sharing flag is set for the pte). In order not to
+			 * confuse things too much in MobiCore, make sure the
+			 * shared buffers have the same flags.
+			 * This should also be done on the SWd side.
+			 */
+#ifdef CONFIG_SMP
+#ifdef LPAE_SUPPORT
+			pte |= MMU_EXT_SHARED;
+#else
+			pte |= MMU_EXT_SHARED | MMU_EXT_TEX(1);
+#endif /* LPAE_SUPPORT */
+#endif /* CONFIG_SMP */
+
+			mmutable->table_entries[i] = pte;
+			MCDRV_DBG_VERBOSE(mcd, "MMU entry %d:  0x%llx", i,
+					  (u64)(pte));
+		} else {
+			/* ensure rest of table is empty */
+			mmutable->table_entries[i] = 0;
+		}
+	} while (i-- != 0);
+
+	return ret;
+}
+
+/*
+ * Remove an MMU table from a WSM container. Afterwards the container may be
+ * released. Assumes the table_lock and the lock are taken.
+ */
+static void unmap_buffers(struct mc_mmu_table *table)
+{
+	struct mmutable *mmutable;
+	int i;
+
+	if (WARN_ON(!table))
+		return;
+
+	/* found the table, now release the resources. */
+	MCDRV_DBG_VERBOSE(mcd,
+			  "clear MMU table, phys_base=0x%llX, nr_of_pages=%d",
+			  (u64)table->phys, table->pages);
+
+	mmutable = table->virt;
+
+	/* release all locked user space pages */
+	for (i = 0; i < table->pages; i++) {
+		/* convert physical entries from MMU table to page pointers */
+		struct page *page;
+		page = phys_to_page(mmutable->table_entries[i]);
+		release_page(page);
+	}
+
+	/* remember that all pages have been freed */
+	table->pages = 0;
+}
+
+/* Delete a used MMU table. Assumes the table_lock and the lock are taken */
+static void unmap_mmu_table(struct mc_mmu_table *table)
+{
+	/* Check if it's not locked by other processes too! */
+	if (!atomic_dec_and_test(&table->usage))
+		return;
+
+	/* release if NWd and SWd/MC no longer use it. */
+	unmap_buffers(table);
+	free_mmu_table(table);
+}
+
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+	struct mc_mmu_table *table;
+	int ret = 0;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = find_mmu_table(handle);
+
+	if (table == NULL) {
+		MCDRV_DBG_VERBOSE(mcd, "entry not found");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+	if (instance != table->owner && !is_daemon(instance)) {
+		MCDRV_DBG_ERROR(mcd, "instance does not own it");
+		ret = -EPERM;
+		goto err_unlock;
+	}
+	/* free table (if no further locks exist) */
+	unmap_mmu_table(table);
+err_unlock:
+	mutex_unlock(&mem_ctx.table_lock);
+
+	return ret;
+}
+
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+	int ret = 0;
+	struct mc_mmu_table *table = NULL;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = find_mmu_table(handle);
+
+	if (table == NULL) {
+		MCDRV_DBG_VERBOSE(mcd, "entry not found %u", handle);
+		ret = -EINVAL;
+		goto table_err;
+	}
+	if (instance != table->owner && !is_daemon(instance)) {
+		MCDRV_DBG_ERROR(mcd, "instance does not own it");
+		ret = -EPERM;
+		goto table_err;
+	}
+
+	/* lock entry */
+	atomic_inc(&table->usage);
+table_err:
+	mutex_unlock(&mem_ctx.table_lock);
+	return ret;
+}
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
+{
+	int ret = 0;
+	struct mc_mmu_table *table;
+
+	if (WARN(!instance, "No instance data available"))
+		return ERR_PTR(-EFAULT);
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = alloc_mmu_table(instance);
+	if (IS_ERR(table)) {
+		MCDRV_DBG_ERROR(mcd, "alloc_mmu_table() failed");
+		ret = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	/* create the MMU page for the WSM */
+	ret = map_buffer(task, wsm_buffer, wsm_len, table);
+
+	if (ret != 0) {
+		MCDRV_DBG_ERROR(mcd, "map_buffer() failed");
+		unmap_mmu_table(table);
+		goto err_no_mem;
+	}
+	MCDRV_DBG_VERBOSE(mcd,
+			  "mapped buffer %p to table with handle %d @ 0x%llX",
+			  wsm_buffer, table->handle, (u64)table->phys);
+
+	mutex_unlock(&mem_ctx.table_lock);
+	return table;
+err_no_mem:
+	mutex_unlock(&mem_ctx.table_lock);
+	return ERR_PTR(ret);
+}
+
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd)
+{
+	phys_addr_t ret = 0;
+	struct mc_mmu_table *table = NULL;
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = find_mmu_table(handle);
+
+	if (table == NULL) {
+		MCDRV_DBG_ERROR(mcd, "entry not found %u", handle);
+		ret = 0;
+		goto table_err;
+	}
+
+	/*
+	 * It's safe here not to lock the instance since the owner of
+	 * the table will be cleared only with the table lock taken
+	 */
+	if (!mc_check_owner_fd(table->owner, fd)) {
+		MCDRV_DBG_ERROR(mcd, "not valid owner %u", handle);
+		ret = 0;
+		goto table_err;
+	}
+
+	ret = table->phys;
+table_err:
+	mutex_unlock(&mem_ctx.table_lock);
+	return ret;
+}
+
+void mc_clean_mmu_tables(void)
+{
+	struct mc_mmu_table *table, *tmp;
+
+	mutex_lock(&mem_ctx.table_lock);
+	/* Check if some WSM is orphaned. */
+	list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+		if (table->owner == NULL) {
+			MCDRV_DBG(mcd,
+				  "clearing MMU: p=0x%llX pages=%d",
+				  (u64)table->phys,
+				  table->pages);
+			unmap_mmu_table(table);
+		}
+	}
+	mutex_unlock(&mem_ctx.table_lock);
+}
+
+void mc_clear_mmu_tables(struct mc_instance *instance)
+{
+	struct mc_mmu_table *table, *tmp;
+
+	mutex_lock(&mem_ctx.table_lock);
+	/* Check if some WSM is still in use. */
+	list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+		if (table->owner == instance) {
+			MCDRV_DBG(mcd, "release WSM MMU: p=0x%llX pages=%d",
+				  (u64)table->phys,
+				  table->pages);
+			/* unlock app usage and free or mark it as orphan */
+			table->owner = NULL;
+			unmap_mmu_table(table);
+		}
+	}
+	mutex_unlock(&mem_ctx.table_lock);
+}
+
+int mc_init_mmu_tables(void)
+{
+	/* init list for WSM MMU chunks. */
+	INIT_LIST_HEAD(&mem_ctx.mmu_tables_sets);
+
+	/* MMU table descriptor list. */
+	INIT_LIST_HEAD(&mem_ctx.mmu_tables);
+
+	/* MMU free table descriptor list. */
+	INIT_LIST_HEAD(&mem_ctx.free_mmu_tables);
+
+	mutex_init(&mem_ctx.table_lock);
+
+	return 0;
+}
+
+void mc_release_mmu_tables(void)
+{
+	struct mc_mmu_table *table;
+	/* Check if some WSM is still in use. */
+	list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+		WARN(1, "WSM MMU still in use: phys=0x%llX, nr_of_pages=%d",
+		     (u64)table->phys, table->pages);
+	}
+}
diff --git a/drivers/gud/MobiCoreDriver/mem.h b/drivers/gud/MobiCoreDriver/mem.h
new file mode 100644
index 0000000..5c9006a
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/mem.h
@@ -0,0 +1,139 @@
+/*
+ * MobiCore driver module (interface to the secure world, SWd).
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_MEM_H_
+#define _MC_MEM_H_
+
+#ifdef LPAE_SUPPORT
+/*
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 2 MiB by using 512 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES		512
+
+/* ARM level 3 (MMU) table with 512 entries. Size: 4k */
+struct mmutable {
+	uint64_t	table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There is 1 table in each page. */
+#define MMU_TABLES_PER_PAGE		1
+#else
+/*
+ * MobiCore specific page tables for world shared memory.
+ * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
+ * MobiCore uses the default ARM format.
+ *
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES		256
+
+/* ARM level 2 (MMU) table with 256 entries. Size: 1k */
+struct mmutable {
+	uint32_t	table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There are 4 tables in each page. */
+#define MMU_TABLES_PER_PAGE		4
+#endif
+
+/* Store for MMU_TABLES_PER_PAGE MMU tables in one 4 KiB page */
+struct mc_mmu_table_store {
+	struct mmutable table[MMU_TABLES_PER_PAGE];
+};
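
As a quick sanity check on the sizes above: without LPAE a struct mmutable holds 256 32-bit entries (1 KiB), so four tables fill one 4 KiB page; with LPAE it holds 512 64-bit entries (4 KiB), so a single table fills the page. A compile-time sketch of that invariant (illustrative only, not part of the driver; older kernels would use BUILD_BUG_ON() inside a function rather than C11 _Static_assert):

	/* Illustrative check: one table store must fill exactly one 4 KiB page. */
	_Static_assert(sizeof(struct mmutable) * MMU_TABLES_PER_PAGE == 4096,
		       "MMU tables must fill one 4 KiB page");
	_Static_assert(sizeof(struct mc_mmu_table_store) == 4096,
		       "mc_mmu_table_store must be exactly one page");
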
+
+/* Usage and maintenance information about mc_mmu_table_store */
+struct mc_mmu_tables_set {
+	struct list_head		list;
+	/* kernel virtual address */
+	struct mc_mmu_table_store	*kernel_virt;
+	/* physical address */
+	phys_addr_t			phys;
+	/* pointer to page struct */
+	struct page			*page;
+	/* How many pages from this set are used */
+	atomic_t			used_tables;
+};
+
+/*
+ * MMU table allocated to the Daemon or a TLC describing a world shared
+ * buffer.
+ * When users map a malloc()ed area into the SWd, an MMU table is allocated.
+ * In addition, an area of at most 1 MiB of virtual address space is mapped
+ * into the MMU table and a handle for this table is returned to the user.
+ */
+struct mc_mmu_table {
+	struct list_head	list;
+	/* Table lock */
+	struct mutex		lock;
+	/* handle as communicated to user mode */
+	unsigned int		handle;
+	/* Number of references kept to this MMU table */
+	atomic_t		usage;
+	/* owner of this MMU table */
+	struct mc_instance	*owner;
+	/* set describing where our MMU table is stored */
+	struct mc_mmu_tables_set	*set;
+	/* index into MMU table set */
+	unsigned int		idx;
+	/* size of buffer */
+	unsigned int		pages;
+	/* virtual address*/
+	void			*virt;
+	/* physical address */
+	phys_addr_t		phys;
+};
+
+/* MobiCore Driver Memory context data. */
+struct mc_mem_context {
+	struct mc_instance	*daemon_inst;
+	/* Backing store for MMU tables */
+	struct list_head	mmu_tables_sets;
+	/* Bookkeeping for used MMU tables */
+	struct list_head	mmu_tables;
+	/* Bookkeeping for free MMU tables */
+	struct list_head	free_mmu_tables;
+	/* mutex to synchronize access to the above lists */
+	struct mutex		table_lock;
+};
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
+
+/* Delete all the MMU tables associated with an instance */
+void mc_clear_mmu_tables(struct mc_instance *instance);
+
+/* Release all orphaned MMU tables */
+void mc_clean_mmu_tables(void);
+
+/* Delete a used MMU table. */
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/*
+ * Lock an MMU table - the daemon adds +1 to the refcount of the MMU table,
+ * marking it in use by SWD so it doesn't get released when the TLC dies.
+ */
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/* Return the physical address of an MMU table. */
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd);
+/* Release all used MMU tables to Linux memory space */
+void mc_release_mmu_tables(void);
+
+/* Initialize all MMU tables structure */
+int mc_init_mmu_tables(void);
+
+#endif /* _MC_MEM_H_ */
diff --git a/drivers/gud/MobiCoreDriver/ops.c b/drivers/gud/MobiCoreDriver/ops.c
new file mode 100644
index 0000000..96b4f4f
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/ops.c
@@ -0,0 +1,398 @@
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/cpu.h>
+
+#include "main.h"
+#include "fastcall.h"
+#include "ops.h"
+#include "mem.h"
+#include "pm.h"
+#include "debug.h"
+
+/* MobiCore context data */
+static struct mc_context *ctx;
+#ifdef TBASE_CORE_SWITCHER
+static uint32_t active_cpu;
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+				 unsigned long action, void *hcpu);
+static struct notifier_block mobicore_cpu_notifer = {
+	.notifier_call = mobicore_cpu_callback,
+};
+#endif
+
+static inline long smc(union fc_generic *fc)
+{
+	/* If sleep has been requested, yields must be filtered out as
+	 * they make no sense */
+	if (ctx->mcp)
+		if (ctx->mcp->flags.sleep_mode.sleep_req) {
+			if (fc->as_in.cmd == MC_SMC_N_YIELD)
+				return MC_FC_RET_ERR_INVALID;
+		}
+	return _smc(fc);
+}
+
+struct fastcall_work {
+#ifdef MC_FASTCALL_WORKER_THREAD
+	struct kthread_work work;
+#else
+	struct work_struct work;
+#endif
+	void *data;
+};
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work);
+#else
+static void fastcall_work_func(struct work_struct *work);
+#endif
+
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+
+static struct task_struct *fastcall_thread;
+static DEFINE_KTHREAD_WORKER(fastcall_worker);
+
+bool mc_fastcall(void *data)
+{
+	struct fastcall_work fc_work = {
+		KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
+		.data = data,
+	};
+
+	if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
+		return false;
+	flush_kthread_work(&fc_work.work);
+	return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+	int ret = 0;
+	ctx = context;
+
+	fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
+					 "mc_fastcall");
+	if (IS_ERR(fastcall_thread)) {
+		ret = PTR_ERR(fastcall_thread);
+		fastcall_thread = NULL;
+		MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)", ret);
+		return ret;
+	}
+
+	wake_up_process(fastcall_thread);
+
+	/* this thread MUST run on CPU 0 at startup */
+	set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
+#ifdef TBASE_CORE_SWITCHER
+	register_cpu_notifier(&mobicore_cpu_notifer);
+#endif
+	return 0;
+}
+
+void mc_fastcall_destroy(void)
+{
+	if (!IS_ERR_OR_NULL(fastcall_thread)) {
+		kthread_stop(fastcall_thread);
+		fastcall_thread = NULL;
+	}
+}
+#else
+
+bool mc_fastcall(void *data)
+{
+	struct fastcall_work work = {
+		.data = data,
+	};
+	INIT_WORK(&work.work, fastcall_work_func);
+	if (!schedule_work_on(0, &work.work))
+		return false;
+	flush_work(&work.work);
+	return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+	ctx = context;
+	return 0;
+};
+
+void mc_fastcall_destroy(void) {};
+#endif
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work)
+#else
+static void fastcall_work_func(struct work_struct *work)
+#endif
+{
+	struct fastcall_work *fc_work =
+		container_of(work, struct fastcall_work, work);
+	union fc_generic *fc_generic = fc_work->data;
+#ifdef TBASE_CORE_SWITCHER
+	uint32_t cpu_swap = 0, new_cpu;
+	uint32_t cpu_id[] = CPU_IDS;
+#endif
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+	mc_pm_clock_enable();
+#endif
+
+
+	if (fc_generic == NULL)
+		return;
+#ifdef TBASE_CORE_SWITCHER
+	if (fc_generic->as_in.cmd == MC_FC_SWITCH_CORE) {
+		cpu_swap = 1;
+		new_cpu = fc_generic->as_in.param[0];
+		fc_generic->as_in.param[0] = cpu_id[fc_generic->as_in.param[0]];
+	}
+#endif
+	smc(fc_work->data);
+#ifdef TBASE_CORE_SWITCHER
+	if (cpu_swap) {
+		if (fc_generic->as_out.ret == 0) {
+			cpumask_t cpu;
+			active_cpu = new_cpu;
+			MCDRV_DBG(mcd, "CoreSwap ok %d -> %d\n",
+				  raw_smp_processor_id(), active_cpu);
+			cpumask_clear(&cpu);
+			cpumask_set_cpu(active_cpu, &cpu);
+#ifdef MC_FASTCALL_WORKER_THREAD
+			set_cpus_allowed(fastcall_thread, cpu);
+#endif
+		} else {
+			MCDRV_DBG(mcd, "CoreSwap failed %d -> %d\n",
+				  raw_smp_processor_id(),
+				  fc_generic->as_in.param[0]);
+		}
+	}
+#endif
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+	mc_pm_clock_disable();
+#endif
+}
+
+int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
+{
+	int ret = 0;
+	union mc_fc_info fc_info;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	memset(&fc_info, 0, sizeof(fc_info));
+	fc_info.as_in.cmd = MC_FC_INFO;
+	fc_info.as_in.ext_info_id = ext_info_id;
+
+	MCDRV_DBG(mcd, "<- cmd=0x%08x, ext_info_id=0x%08x",
+		  fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
+
+	mc_fastcall(&(fc_info.as_generic));
+
+	MCDRV_DBG(mcd,
+		  "-> r=0x%08x ret=0x%08x state=0x%08x "
+		  "ext_info=0x%08x",
+		  fc_info.as_out.resp,
+		  fc_info.as_out.ret,
+		  fc_info.as_out.state,
+		  fc_info.as_out.ext_info);
+
+	ret = convert_fc_ret(fc_info.as_out.ret);
+
+	*state  = fc_info.as_out.state;
+	*ext_info = fc_info.as_out.ext_info;
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	return ret;
+}
+
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num)
+{
+	int32_t ret = 0;
+	union mc_fc_swich_core fc_switch_core;
+
+	if (!cpu_online(core_num))
+		return 1;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+
+	memset(&fc_switch_core, 0, sizeof(fc_switch_core));
+	fc_switch_core.as_in.cmd = MC_FC_SWITCH_CORE;
+
+	if (core_num < COUNT_OF_CPUS)
+		fc_switch_core.as_in.core_id = core_num;
+	else
+		fc_switch_core.as_in.core_id = 0;
+
+	MCDRV_DBG(
+			mcd, "<- cmd=0x%08x, core_id=0x%08x, "
+			"core_num=0x%08x, active_cpu=0x%08x\n",
+			fc_switch_core.as_in.cmd,
+			fc_switch_core.as_in.core_id,
+			core_num, active_cpu);
+	mc_fastcall(&(fc_switch_core.as_generic));
+
+	ret = convert_fc_ret(fc_switch_core.as_out.ret);
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+void mc_cpu_offfline(int cpu)
+{
+	if (active_cpu == cpu) {
+		int i;
+		/* Choose the first online CPU and switch! */
+		for_each_online_cpu(i) {
+			if (i == cpu) {
+				MCDRV_DBG(mcd, "Skipping CPU %d\n", cpu);
+				continue;
+			}
+			MCDRV_DBG(mcd, "CPU %d is dying, switching to %d\n",
+				  cpu, i);
+			mc_switch_core(i);
+			break;
+		}
+	} else {
+		MCDRV_DBG(mcd, "not active CPU, no action taken\n");
+	}
+}
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		dev_info(mcd, "Cpu %u is going to die\n", cpu);
+		mc_cpu_offfline(cpu);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		dev_info(mcd, "Cpu %u is dead\n", cpu);
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif
+
+/* Yield to MobiCore */
+int mc_yield(void)
+{
+	int ret = 0;
+	union fc_generic yield;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+	memset(&yield, 0, sizeof(yield));
+	yield.as_in.cmd = MC_SMC_N_YIELD;
+	mc_fastcall(&yield);
+	ret = convert_fc_ret(yield.as_out.ret);
+
+	return ret;
+}
+
+/* call common notify */
+int mc_nsiq(void)
+{
+	int ret = 0;
+	union fc_generic nsiq;
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+	memset(&nsiq, 0, sizeof(nsiq));
+	nsiq.as_in.cmd = MC_SMC_N_SIQ;
+	mc_fastcall(&nsiq);
+	ret = convert_fc_ret(nsiq.as_out.ret);
+	return ret;
+}
+
+/* call common notify */
+int _nsiq(void)
+{
+	int ret = 0;
+	union fc_generic nsiq;
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+	memset(&nsiq, 0, sizeof(nsiq));
+	nsiq.as_in.cmd = MC_SMC_N_SIQ;
+	_smc(&nsiq);
+	ret = convert_fc_ret(nsiq.as_out.ret);
+	return ret;
+}
+
+/* Call the INIT fastcall to setup MobiCore initialization */
+int mc_init(phys_addr_t base, uint32_t nq_length,
+	uint32_t mcp_offset, uint32_t mcp_length)
+{
+	int ret = 0;
+	union mc_fc_init fc_init;
+	uint64_t base_addr = (uint64_t)base;
+	uint32_t base_high = (uint32_t)(base_addr >> 32);
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	memset(&fc_init, 0, sizeof(fc_init));
+
+	fc_init.as_in.cmd = MC_FC_INIT;
+	/* base address of mci buffer 4KB aligned */
+	fc_init.as_in.base = (uint32_t)base_addr;
+	/* notification buffer start/length [16:16] [start, length] */
+	fc_init.as_in.nq_info = ((base_high & 0xFFFF) << 16) |
+				(nq_length & 0xFFFF);
+	/* mcp buffer start/length [16:16] [start, length] */
+	fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
+
+	/*
+	 * Set KMOD notification queue to start of MCI
+	 * mciInfo was already set up in mmap
+	 */
+	MCDRV_DBG(mcd,
+		  "cmd=0x%08x, base=0x%08x, nq_info=0x%08x, mcp_info=0x%08x",
+		  fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
+		  fc_init.as_in.mcp_info);
+	mc_fastcall(&fc_init.as_generic);
+	MCDRV_DBG(mcd, "out resp=0x%08x, ret=0x%08x", fc_init.as_out.resp,
+		  fc_init.as_out.ret);
+
+	ret = convert_fc_ret(fc_init.as_out.ret);
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	return ret;
+}
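
The nq_info and mcp_info words built in mc_init() use a simple 16:16 packing: a 16-bit field in the upper half (the high address bits for nq_info, the MCP offset for mcp_info) and a 16-bit length in the lower half. A short illustrative sketch of the packing and the matching unpacking; the helper names are made up here and are not part of the driver:

	/* Hypothetical helpers for the 16:16 packing used by mc_init();
	 * uint32_t comes from linux/types.h in kernel context. */
	static inline uint32_t mc_pack_16_16(uint32_t hi, uint32_t lo)
	{
		return ((hi & 0xFFFF) << 16) | (lo & 0xFFFF);
	}

	static inline void mc_unpack_16_16(uint32_t word, uint32_t *hi, uint32_t *lo)
	{
		*hi = word >> 16;
		*lo = word & 0xFFFF;
	}

	/* e.g. nq_info  = mc_pack_16_16(base_high, nq_length);
	 *      mcp_info = mc_pack_16_16(mcp_offset, mcp_length); */
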
+
+/* Return MobiCore driver version */
+uint32_t mc_get_version(void)
+{
+	MCDRV_DBG(mcd, "MobiCore driver version is %i.%i",
+		  MCDRVMODULEAPI_VERSION_MAJOR,
+		  MCDRVMODULEAPI_VERSION_MINOR);
+
+	return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+					MCDRVMODULEAPI_VERSION_MINOR);
+}
diff --git a/drivers/gud/mobicore_driver/ops.h b/drivers/gud/MobiCoreDriver/ops.h
similarity index 78%
rename from drivers/gud/mobicore_driver/ops.h
rename to drivers/gud/MobiCoreDriver/ops.h
index 910c1f4..f04eb3e 100644
--- a/drivers/gud/mobicore_driver/ops.h
+++ b/drivers/gud/MobiCoreDriver/ops.h
@@ -21,10 +21,13 @@
 uint32_t mc_get_version(void);
 
 int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info);
-int mc_init(uint32_t base, uint32_t  nq_offset, uint32_t  nq_length,
-	    uint32_t mcp_offset, uint32_t  mcp_length);
+int mc_init(phys_addr_t base, uint32_t  nq_length, uint32_t mcp_offset,
+		uint32_t  mcp_length);
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num);
+#endif
 
-void mc_fastcall(void *data);
+bool mc_fastcall(void *data);
 
 int mc_fastcall_init(struct mc_context *context);
 void mc_fastcall_destroy(void);
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
similarity index 85%
rename from drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
rename to drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
index 9d128ae..72ea3ed 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
@@ -36,19 +36,12 @@
 }
 
 /* Enable mobicore mem traces */
-/* #define MC_MEM_TRACES */
+#define MC_MEM_TRACES
 
 /* Enable the use of vm_unmap instead of the deprecated do_munmap
  * and other 3.7 features
  */
-#ifndef CONFIG_ARCH_MSM8960
 #define MC_VM_UNMAP
-#endif
-
-#if defined (CONFIG_ARCH_MSM8974) || defined (CONFIG_ARCH_MSM8226)
-/* Perform clock enable/disable */
-#define MC_CRYPTO_CLOCK_MANAGEMENT
-#endif
 
 /* Enable Power Management for Crypto Engine */
 #define MC_CRYPTO_CLOCK_MANAGEMENT
diff --git a/drivers/gud/mobicore_driver/pm.c b/drivers/gud/MobiCoreDriver/pm.c
similarity index 66%
rename from drivers/gud/mobicore_driver/pm.c
rename to drivers/gud/MobiCoreDriver/pm.c
index 55a1ef7..40365ef 100644
--- a/drivers/gud/mobicore_driver/pm.c
+++ b/drivers/gud/MobiCoreDriver/pm.c
@@ -46,7 +46,7 @@
 	if (!ctx->mcp)
 		return false;
 
-	if (!ctx->mcp->flags.sleep_mode.ReadyToSleep & READY_TO_SLEEP)
+	if (!(ctx->mcp->flags.sleep_mode.ready_to_sleep & READY_TO_SLEEP))
 		return false;
 
 	return true;
@@ -57,7 +57,7 @@
 	if (!ctx->mcp)
 		return;
 
-	ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
+	ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
 	_nsiq();
 }
 DECLARE_WORK(suspend_work, mc_suspend_handler);
@@ -66,9 +66,9 @@
 {
 	MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
 	MCDRV_DBG(mcd,
-		  "MobiCore Request Sleep=%d!", flags->sleep_mode.SleepReq);
+		  "MobiCore Request Sleep=%d!", flags->sleep_mode.sleep_req);
 	MCDRV_DBG(mcd,
-		  "MobiCore Sleep Ready=%d!", flags->sleep_mode.ReadyToSleep);
+		  "MobiCore Sleep Ready=%d!", flags->sleep_mode.ready_to_sleep);
 }
 
 static int mc_suspend_notifier(struct notifier_block *nb,
@@ -96,12 +96,12 @@
 		 */
 		dump_sleep_params(&mcp->flags);
 		if (!sleep_ready()) {
-			ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
+			ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
 			schedule_work_on(0, &suspend_work);
 			flush_work(&suspend_work);
 			if (!sleep_ready()) {
 				dump_sleep_params(&mcp->flags);
-				ctx->mcp->flags.sleep_mode.SleepReq = 0;
+				ctx->mcp->flags.sleep_mode.sleep_req = 0;
 				MCDRV_DBG_ERROR(mcd, "MobiCore can't SLEEP!");
 				return NOTIFY_BAD;
 			}
@@ -109,7 +109,7 @@
 		break;
 	case PM_POST_SUSPEND:
 		MCDRV_DBG(mcd, "Resume MobiCore system!");
-		ctx->mcp->flags.sleep_mode.SleepReq = 0;
+		ctx->mcp->flags.sleep_mode.sleep_req = 0;
 		break;
 	default:
 		break;
@@ -121,57 +121,6 @@
 	.notifier_call = mc_suspend_notifier,
 };
 
-#ifdef MC_BL_NOTIFIER
-
-static int bL_switcher_notifier_handler(struct notifier_block *this,
-			unsigned long event, void *ptr)
-{
-	unsigned int mpidr, cpu, cluster;
-	struct mc_mcp_buffer *mcp = ctx->mcp;
-
-	if (!mcp)
-		return 0;
-
-	asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
-	cpu = mpidr & 0x3;
-	cluster = (mpidr >> 8) & 0xf;
-	MCDRV_DBG(mcd, "%s switching!!, cpu: %u, Out=%u\n",
-		  (event == SWITCH_ENTER ? "Before" : "After"), cpu, cluster);
-
-	if (cpu != 0)
-		return 0;
-
-	switch (event) {
-	case SWITCH_ENTER:
-		if (!sleep_ready()) {
-			ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
-			_nsiq();
-			/* By this time we should be ready for sleep or we are
-			 * in the middle of something important */
-			if (!sleep_ready()) {
-				dump_sleep_params(&mcp->flags);
-				MCDRV_DBG(mcd,
-					  "MobiCore: Don't allow switch!\n");
-				ctx->mcp->flags.sleep_mode.SleepReq = 0;
-				return -EPERM;
-			}
-		}
-		break;
-	case SWITCH_EXIT:
-			ctx->mcp->flags.sleep_mode.SleepReq = 0;
-			break;
-	default:
-		MCDRV_DBG(mcd, "MobiCore: Unknown switch event!\n");
-	}
-
-	return 0;
-}
-
-static struct notifier_block switcher_nb = {
-	.notifier_call = bL_switcher_notifier_handler,
-};
-#endif
-
 int mc_pm_initialize(struct mc_context *context)
 {
 	int ret = 0;
@@ -180,12 +129,7 @@
 
 	ret = register_pm_notifier(&mc_notif_block);
 	if (ret)
-		MCDRV_DBG_ERROR(mcd, "device pm register failed\n");
-#ifdef MC_BL_NOTIFIER
-	if (register_bL_swicher_notifier(&switcher_nb))
-		MCDRV_DBG_ERROR(mcd,
-				"Failed to register to bL_switcher_notifier\n");
-#endif
+		MCDRV_DBG_ERROR(mcd, "device pm register failed");
 
 	return ret;
 }
@@ -194,15 +138,16 @@
 {
 	int ret = unregister_pm_notifier(&mc_notif_block);
 	if (ret)
-		MCDRV_DBG_ERROR(mcd, "device pm unregister failed\n");
-#ifdef MC_BL_NOTIFIER
-	ret = unregister_bL_swicher_notifier(&switcher_nb);
-	if (ret)
-		MCDRV_DBG_ERROR(mcd, "device bl unregister failed\n");
-#endif
+		MCDRV_DBG_ERROR(mcd, "device pm unregister failed");
 	return ret;
 }
 
+bool mc_pm_sleep_ready(void)
+{
+	if (!ctx)
+		return true;
+	return sleep_ready();
+}
 #endif /* MC_PM_RUNTIME */
 
 #ifdef MC_CRYPTO_CLOCK_MANAGEMENT
@@ -215,7 +160,7 @@
 	mc_ce_core_clk = clk_get(mcd, "core_clk");
 	if (IS_ERR(mc_ce_core_clk)) {
 		ret = PTR_ERR(mc_ce_core_clk);
-		MCDRV_DBG_ERROR(mcd, "cannot get core clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot get core clock");
 		goto error;
 	}
 	/* Get Interface clk */
@@ -223,7 +168,7 @@
 	if (IS_ERR(mc_ce_iface_clk)) {
 		clk_put(mc_ce_core_clk);
 		ret = PTR_ERR(mc_ce_iface_clk);
-		MCDRV_DBG_ERROR(mcd, "cannot get iface clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot get iface clock");
 		goto error;
 	}
 	/* Get AXI clk */
@@ -232,7 +177,7 @@
 		clk_put(mc_ce_iface_clk);
 		clk_put(mc_ce_core_clk);
 		ret = PTR_ERR(mc_ce_bus_clk);
-		MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock");
 		goto error;
 	}
 	return ret;
@@ -263,17 +208,17 @@
 
 	rc = clk_prepare_enable(mc_ce_core_clk);
 	if (rc) {
-		MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot enable clock");
 	} else {
 		rc = clk_prepare_enable(mc_ce_iface_clk);
 		if (rc) {
 			clk_disable_unprepare(mc_ce_core_clk);
-			MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+			MCDRV_DBG_ERROR(mcd, "cannot enable clock");
 		} else {
 			rc = clk_prepare_enable(mc_ce_bus_clk);
 			if (rc) {
 				clk_disable_unprepare(mc_ce_iface_clk);
-				MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+				MCDRV_DBG_ERROR(mcd, "cannot enable clock");
 			}
 		}
 	}
diff --git a/drivers/gud/mobicore_driver/pm.h b/drivers/gud/MobiCoreDriver/pm.h
similarity index 94%
rename from drivers/gud/mobicore_driver/pm.h
rename to drivers/gud/MobiCoreDriver/pm.h
index 332da34..b71c603 100644
--- a/drivers/gud/mobicore_driver/pm.h
+++ b/drivers/gud/MobiCoreDriver/pm.h
@@ -13,10 +13,6 @@
 #define _MC_PM_H_
 
 #include "main.h"
-#ifdef MC_BL_NOTIFIER
-#include <asm/bL_switcher.h>
-#endif
-
 
 #define NO_SLEEP_REQ	0
 #define REQ_TO_SLEEP	1
@@ -39,5 +35,7 @@
 int mc_pm_clock_enable(void);
 /* Disable secure crypto clocks */
 void mc_pm_clock_disable(void);
+/* Test if sleep is possible */
+bool mc_pm_sleep_ready(void);
 
 #endif /* _MC_PM_H_ */
diff --git a/drivers/gud/mobicore_driver/public/mc_kernel_api.h b/drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
similarity index 89%
rename from drivers/gud/mobicore_driver/public/mc_kernel_api.h
rename to drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
index cca0636..15fd4a2 100644
--- a/drivers/gud/mobicore_driver/public/mc_kernel_api.h
+++ b/drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
@@ -35,13 +35,12 @@
  * @requested_size:	memory size requested in bytes
  * @handle:		pointer to handle
  * @kernel_virt_addr:	virtual user start address
- * @phys_addr:		physical start address
  *
  * Returns 0 if OK
  */
 int mobicore_allocate_wsm(struct mc_instance *instance,
 			  unsigned long requested_size, uint32_t *handle,
-			  void **virt_kernel_addr, void **phys_addr);
+			  void **virt_kernel_addr);
 
 /*
  * mobicore_free() - Free a WSM buffer allocated with mobicore_allocate_wsm
@@ -58,12 +57,11 @@
  * @addr:		address of the buffer (NB it must be kernel virtual!)
  * @len:		buffer length (in bytes)
  * @handle:		unique handle
- * @phys:		pointer for physical address of L2 table
  *
  * Returns 0 if no error
  */
 int mobicore_map_vmem(struct mc_instance *instance, void *addr,
-		      uint32_t len, uint32_t *handle, uint32_t *phys);
+		      uint32_t len, uint32_t *handle);
 
 /*
  * mobicore_unmap_vmem() - Unmap a virtual memory buffer from MobiCore
@@ -74,4 +72,12 @@
  */
 int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle);
 
+/*
+ * mobicore_sleep_ready() - Test if mobicore can sleep
+ *
+ * Returns true if MobiCore can sleep, false otherwise
+ */
+bool mobicore_sleep_ready(void);
+
+
 #endif /* _MC_KERNEL_API_H_ */
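
A minimal kernel-side usage sketch of the updated vmem API, assuming a valid struct mc_instance obtained from the driver's open call (not shown in this header excerpt) and a vmalloc'ed buffer; error handling is trimmed for brevity:

	#include <linux/types.h>
	#include <linux/vmalloc.h>

	static int example_map_vmem(struct mc_instance *instance)
	{
		uint32_t handle;
		void *buf = vmalloc(PAGE_SIZE);
		int ret;

		if (!buf)
			return -ENOMEM;

		/* the MMU table's physical address is no longer returned to the caller */
		ret = mobicore_map_vmem(instance, buf, PAGE_SIZE, &handle);
		if (!ret) {
			/* ... pass the handle to the secure world ... */
			mobicore_unmap_vmem(instance, handle);
		}
		vfree(buf);
		return ret;
	}
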
diff --git a/drivers/gud/mobicore_driver/public/mc_linux.h b/drivers/gud/MobiCoreDriver/public/mc_linux.h
similarity index 81%
rename from drivers/gud/mobicore_driver/public/mc_linux.h
rename to drivers/gud/MobiCoreDriver/public/mc_linux.h
index af027dc..98e7af1 100644
--- a/drivers/gud/mobicore_driver/public/mc_linux.h
+++ b/drivers/gud/MobiCoreDriver/public/mc_linux.h
@@ -1,7 +1,7 @@
 /*
  * The MobiCore Driver Kernel Module is a Linux device driver, which represents
  * the command proxy on the lowest layer to the secure world (Swd). Additional
- * services like memory allocation via mmap and generation of a L2 tables for
+ * services like memory allocation via mmap and generation of MMU tables for
  * given virtual memory are also supported. IRQ functionality receives
  * information from the SWd in the non secure world (NWd).
  * As customary the driver is handled as linux device driver with "open",
@@ -55,8 +55,6 @@
  * INIT request data to SWD
  */
 struct mc_ioctl_init {
-	/* notification buffer start/length [16:16] [start, length] */
-	uint32_t  nq_offset;
 	/* length of notification queue */
 	uint32_t  nq_length;
 	/* mcp buffer start/length [16:16] [start, length] */
@@ -76,8 +74,7 @@
 };
 
 /*
- * Data exchange structure of the MC_IO_MAP_WSM, MC_IO_MAP_MCI, and
- *				  MC_IO_MAP_PWSM commands.
+ * Data exchange structure of the MC_IO_MAP_WSM and MC_IO_MAP_MCI commands.
  *
  * Allocate a contiguous memory buffer for a process.
  * The physical address can be used as for later calls to mmap.
@@ -86,19 +83,19 @@
  * already. I.e. Daemon was restarted.
  */
 struct mc_ioctl_map {
-	size_t	      len;	/* Buffer length */
-	uint32_t      handle;	/* WSM handle */
-	unsigned long addr;	/* Virtual address */
-	unsigned long phys_addr;/* physical address of WSM (or NULL) */
-	bool	      reused;	/* if WSM memory was reused, or new allocated */
+	size_t		len;	/* Buffer length */
+	uint32_t	handle;	/* WSM handle */
+	uint64_t	phys_addr; /* physical address of WSM (or 0) */
+	unsigned long	addr;	/* Virtual address */
+	bool		reused;	/* if WSM memory was reused or newly allocated */
 };
 
 /*
  * Data exchange structure of the MC_IO_REG_WSM command.
  *
- * Allocates a physical L2 table and maps the buffer into this page.
- * Returns the physical address of the L2 table.
- * The page alignment will be created and the appropriated pSize and pOffsetL2
+ * Allocates a physical MMU table and maps the buffer into this page.
+ * Returns the physical address of the MMU table.
+ * The page alignment will be created and the appropriate pSize and pOffsetMMU
  * will be modified to the used values.
  */
 struct mc_ioctl_reg_wsm {
@@ -106,19 +103,7 @@
 	uint32_t len;		/* size of the virtual address space */
 	uint32_t pid;		/* process id */
 	uint32_t handle;	/* driver handle for locked memory */
-	uint32_t table_phys;	/* physical address of the L2 table */
-};
-
-
-/*
- * Data exchange structure of the MC_DRV_MODULE_FC_EXECUTE ioctl command.
- * internal, unsupported
- */
-struct mc_ioctl_execute {
-	/* base address of mobicore binary */
-	uint32_t phys_start_addr;
-	/* length of DDR area */
-	uint32_t length;
+	uint64_t table_phys;	/* physical address of the MMU table */
 };
 
 /*
@@ -127,10 +112,10 @@
 struct mc_ioctl_resolv_cont_wsm {
 	/* driver handle for buffer */
 	uint32_t handle;
-	/* base address of memory */
-	uint32_t phys;
 	/* length memory */
 	uint32_t length;
+	/* base address of memory */
+	uint64_t phys;
 	/* fd to owner of the buffer */
 	int32_t fd;
 };
@@ -144,7 +129,7 @@
 	/* fd to owner of the buffer */
 	int32_t fd;
 	/* base address of memory */
-	uint32_t phys;
+	uint64_t phys;
 };
 
 
@@ -180,28 +165,24 @@
  */
 #define MC_IO_FREE		_IO(MC_IOC_MAGIC, 5)
 /*
- * Creates a L2 Table of the given base address and the size of the
+ * Creates an MMU table for the given base address and the size of the
  * data.
- * Parameter: mc_ioctl_app_reg_wsm_l2_params
+ * Parameter: mc_ioctl_reg_wsm
  */
 #define MC_IO_REG_WSM		_IOWR(MC_IOC_MAGIC, 6, struct mc_ioctl_reg_wsm)
 #define MC_IO_UNREG_WSM		_IO(MC_IOC_MAGIC, 7)
 #define MC_IO_LOCK_WSM		_IO(MC_IOC_MAGIC, 8)
 #define MC_IO_UNLOCK_WSM	_IO(MC_IOC_MAGIC, 9)
-#define MC_IO_EXECUTE		_IOWR(MC_IOC_MAGIC, 10, struct mc_ioctl_execute)
 
 /*
  * Allocate contiguous memory for a process for later mapping with mmap.
- * MC_DRV_KMOD_MMAP_WSM	usual operation, pages are registered in
+ * MC_IO_MAP_WSM	usual operation, pages are registered in
  *					device structure and freed later.
- * MC_DRV_KMOD_MMAP_MCI	get Instance of MCI, allocates or mmaps
+ * MC_IO_MAP_MCI	get Instance of MCI, allocates or mmaps
  *					the MCI to daemon
- * MC_DRV_KMOD_MMAP_PERSISTENTWSM	special operation, without
- *						registration of pages
  */
 #define MC_IO_MAP_WSM		_IOWR(MC_IOC_MAGIC, 11, struct mc_ioctl_map)
 #define MC_IO_MAP_MCI		_IOWR(MC_IOC_MAGIC, 12, struct mc_ioctl_map)
-#define MC_IO_MAP_PWSM		_IOWR(MC_IOC_MAGIC, 13, struct mc_ioctl_map)
 
 /*
  * Clean orphaned WSM buffers. Only available to the daemon and should
@@ -215,7 +196,7 @@
 #define MC_IO_CLEAN_WSM		_IO(MC_IOC_MAGIC, 14)
 
 /*
- * Get L2 phys address of a buffer handle allocated to the user.
+ * Get MMU phys address of a buffer handle allocated to the user.
  * Only available to the daemon.
  */
 #define MC_IO_RESOLVE_WSM	_IOWR(MC_IOC_MAGIC, 15, \
diff --git a/drivers/gud/mobicore_driver/public/version.h b/drivers/gud/MobiCoreDriver/public/version.h
similarity index 100%
rename from drivers/gud/mobicore_driver/public/version.h
rename to drivers/gud/MobiCoreDriver/public/version.h
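
The mc_ioctl_map changes in mc_linux.h above widen the WSM physical address to a 64-bit field (and reorder the struct accordingly), presumably so addresses above 4 GiB fit the ioctl ABI. A hedged userspace sketch of the MC_IO_MAP_WSM flow; the device node path is an assumption based on the driver's own comments, and error handling is condensed:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "mc_linux.h"	/* assumed include path */

static int example_map_wsm(void)
{
	struct mc_ioctl_map map = { .len = 4096 };
	int fd = open("/dev/mobicore", O_RDWR);	/* assumed node path */

	if (fd < 0)
		return -1;
	if (ioctl(fd, MC_IO_MAP_WSM, &map) == 0) {
		/* map.handle identifies the buffer; map.phys_addr is 64 bit */
	}
	close(fd);
	return 0;
}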
diff --git a/drivers/gud/MobiCoreKernelApi/Makefile b/drivers/gud/MobiCoreKernelApi/Makefile
new file mode 100644
index 0000000..9b37eea
--- /dev/null
+++ b/drivers/gud/MobiCoreKernelApi/Makefile
@@ -0,0 +1,52 @@
+#
+# this makefile is called from the kernel make system
+ifeq ($(MODE),release)
+    ccflags-y += -O2 -DNDEBUG
+else  # DEBUG
+    # "-O" is needed to expand inlines
+    ccflags-y += -O -g3 -DDEBUG
+endif # DEBUG/RELEASE
+
+ifdef MOBICORE_CFLAGS
+    ccflags-y +=$(MOBICORE_CFLAGS)
+endif
+
+# Set the extra symbols
+ifdef MCDRV_SYMBOLS_FILE
+    KBUILD_EXTRA_SYMBOLS=$(MCDRV_SYMBOLS_FILE)
+endif
+
+ifeq ($(PLATFORM), ARM_VE_A9X4_QEMU)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), MSM8974_SURF_STD)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), EXYNOS_5422_STD)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), EXYNOS_5430_STD)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+#EXTRA_CFLAGS += -DDEBUG -DDEBUG_VERBOSE
+#EXTRA_CFLAGS += -Wno-declaration-after-statement
+ccflags-y += -Wno-declaration-after-statement
+# add our module to the kernel build.
+obj-m += mcKernelApi.o
+
+mcKernelApi-objs := main.o clientlib.o device.o session.o connection.o
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
+		Module.markers Module.symvers modules.order
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+    include .depend
+endif
diff --git a/drivers/gud/MobiCoreKernelApi/build.sh b/drivers/gud/MobiCoreKernelApi/build.sh
new file mode 100644
index 0000000..86fe1b8
--- /dev/null
+++ b/drivers/gud/MobiCoreKernelApi/build.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+if [ -z $COMP_PATH_ROOT ]; then
+	echo "The build environment is not set!"
+	echo "Trying to source setupDrivers.sh automatically!"
+	source ../setupDrivers.sh || exit 1
+fi
+
+ROOT_PATH=$(dirname $(readlink -f $BASH_SOURCE))
+# These folders need to be relative to the kernel dir or absolute!
+PLATFORM=EXYNOS_4X12_STD
+CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
+
+MOBICORE_DRIVER=$COMP_PATH_MobiCoreDriverMod
+MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
+MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I$CODE_INCLUDE/include -I$CODE_INCLUDE/public"
+MCDRV_SYMBOLS_FILE="$COMP_PATH_ROOT/MobiCoreDriverMod/Locals/Code/Module.symvers"
+
+if [ ! -f $MCDRV_SYMBOLS_FILE ]; then
+	echo "Please build the Mobicore Driver Module first!"
+	echo "Otherwise you will see warnings of missing symbols"
+fi
+
+# Clean first
+make -C $CODE_INCLUDE clean
+
+make -C $LINUX_PATH \
+	MODE=$MODE \
+	ARCH=arm \
+	CROSS_COMPILE=$CROSS_COMPILE \
+	M=$CODE_INCLUDE \
+	"MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
+	MCDRV_SYMBOLS_FILE=$MCDRV_SYMBOLS_FILE \
+	modules
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/MobiCoreKernelApi/clientlib.c
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/clientlib.c
rename to drivers/gud/MobiCoreKernelApi/clientlib.c
index 16b52e5..65b4a1c 100644
--- a/drivers/gud/mobicore_kernelapi/clientlib.c
+++ b/drivers/gud/MobiCoreKernelApi/clientlib.c
@@ -25,6 +25,7 @@
 
 /* device list */
 LIST_HEAD(devices);
+atomic_t device_usage = ATOMIC_INIT(0);
 
 static struct mcore_device_t *resolve_device_id(uint32_t device_id)
 {
@@ -71,14 +72,20 @@
 	do {
 		struct mcore_device_t *device = resolve_device_id(device_id);
 		if (device != NULL) {
-			MCDRV_DBG_ERROR(mc_kapi,
-					"Device %d already opened", device_id);
-			mc_result = MC_DRV_ERR_INVALID_OPERATION;
+			MCDRV_DBG(mc_kapi,
+				  "Device %d already opened\n", device_id);
+			atomic_inc(&device_usage);
+			mc_result = MC_DRV_OK;
 			break;
 		}
 
 		/* Open new connection to device */
 		dev_con = connection_new();
+		if (dev_con == NULL) {
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
+
 		if (!connection_connect(dev_con, MC_DAEMON_PID)) {
 			MCDRV_DBG_ERROR(
 				mc_kapi,
@@ -144,6 +151,10 @@
 		/* there is no payload to read */
 
 		device = mcore_device_create(device_id, dev_con);
+		if (device == NULL) {
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
 		if (!mcore_device_open(device, MC_DRV_MOD_DEVNODE_FULLPATH)) {
 			mcore_device_cleanup(device);
 			MCDRV_DBG_ERROR(mc_kapi,
@@ -154,6 +165,7 @@
 		}
 
 		add_device(device);
+		atomic_inc(&device_usage);
 
 	} while (false);
 
@@ -177,6 +189,12 @@
 			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
 			break;
 		}
+		/* Check if it's not used by other modules */
+		if (!atomic_dec_and_test(&device_usage)) {
+			mc_result = MC_DRV_OK;
+			break;
+		}
+
 		struct connection *dev_con = device->connection;
 
 		/* Return if not all sessions have been closed */
@@ -274,12 +292,12 @@
 		}
 		struct connection *dev_con = device->connection;
 
-		/* Get the physical address of the given TCI */
+		/* Get the wsm of the given TCI */
 		struct wsm *wsm =
 			mcore_device_find_contiguous_wsm(device, tci);
 		if (wsm == NULL) {
 			MCDRV_DBG_ERROR(mc_kapi,
-					"Could not resolve TCI phy address ");
+					"Could not resolve TCI address ");
 			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
 			break;
 		}
@@ -292,14 +310,14 @@
 		}
 
 		/* Prepare open session command */
-		struct mc_drv_cmd_open_session_t cmdOpenSession = {
+		struct mc_drv_cmd_open_session_t cmd_open_session = {
 			{
 				MC_DRV_CMD_OPEN_SESSION
 			},
 			{
 				session->device_id,
 				*uuid,
-				(uint32_t)(wsm->phys_addr) & 0xFFF,
+				(uint32_t)(wsm->virt_addr) & 0xFFF,
 				wsm->handle,
 				len
 			}
@@ -307,9 +325,9 @@
 
 		/* Transmit command data */
 		int len = connection_write_data(dev_con,
-						&cmdOpenSession,
-						sizeof(cmdOpenSession));
-		if (len != sizeof(cmdOpenSession)) {
+						&cmd_open_session,
+						sizeof(cmd_open_session));
+		if (len != sizeof(cmd_open_session)) {
 			MCDRV_DBG_ERROR(mc_kapi,
 					"CMD_OPEN_SESSION writeData failed %d",
 					len);
@@ -370,6 +388,10 @@
 
 		/* Set up second channel for notifications */
 		struct connection *session_connection = connection_new();
+		if (session_connection == NULL) {
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
 
 		if (!connection_connect(session_connection, MC_DAEMON_PID)) {
 			MCDRV_DBG_ERROR(
@@ -422,9 +444,13 @@
 		/* there is no payload. */
 
 		/* Session established, new session object must be created */
-		mcore_device_create_new_session(device,
-						session->session_id,
-						session_connection);
+		if (!mcore_device_create_new_session(device,
+						     session->session_id,
+						     session_connection)) {
+			connection_cleanup(session_connection);
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
 
 	} while (false);
 
@@ -706,7 +732,6 @@
 	MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
 
 	do {
-
 		/* Get the device associated with the given session */
 		device = resolve_device_id(device_id);
 		if (device == NULL) {
@@ -805,7 +830,7 @@
 			{
 				session->session_id,
 				bulk_buf->handle,
-				(uint32_t)bulk_buf->phys_addr_wsm_l2,
+				0,
 				(uint32_t)(bulk_buf->virt_addr) & 0xFFF,
 				bulk_buf->len
 			}
@@ -819,8 +844,8 @@
 		/* Read command response */
 		struct mc_drv_response_header_t rsp_header;
 		int len = connection_read_datablock(dev_con,
-						    &rsp_header,
-						    sizeof(rsp_header));
+							&rsp_header,
+							sizeof(rsp_header));
 		if (len != sizeof(rsp_header)) {
 			MCDRV_DBG_ERROR(mc_kapi,
 					"CMD_MAP_BULK_BUF readRsp failed %d",
diff --git a/drivers/gud/mobicore_kernelapi/common.h b/drivers/gud/MobiCoreKernelApi/common.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/common.h
rename to drivers/gud/MobiCoreKernelApi/common.h
diff --git a/drivers/gud/mobicore_kernelapi/connection.c b/drivers/gud/MobiCoreKernelApi/connection.c
similarity index 96%
rename from drivers/gud/mobicore_kernelapi/connection.c
rename to drivers/gud/MobiCoreKernelApi/connection.c
index 03288a0..0372b82 100644
--- a/drivers/gud/mobicore_kernelapi/connection.c
+++ b/drivers/gud/MobiCoreKernelApi/connection.c
@@ -28,6 +28,10 @@
 	struct connection *conn;
 
 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+	if (conn == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	conn->sequence_magic = mcapi_unique_id();
 	mutex_init(&conn->data_lock);
 	sema_init(&conn->data_available_sem, SEM_NO_DATA_AVAILABLE);
@@ -36,14 +40,6 @@
 	return conn;
 }
 
-struct connection *connection_create(int socket_descriptor, pid_t dest)
-{
-	struct connection *conn = connection_new();
-
-	conn->peer_pid = dest;
-	return conn;
-}
-
 void connection_cleanup(struct connection *conn)
 {
 	if (!conn)
diff --git a/drivers/gud/mobicore_kernelapi/connection.h b/drivers/gud/MobiCoreKernelApi/connection.h
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/connection.h
rename to drivers/gud/MobiCoreKernelApi/connection.h
index 6c3ff00..57e783b 100644
--- a/drivers/gud/mobicore_kernelapi/connection.h
+++ b/drivers/gud/MobiCoreKernelApi/connection.h
@@ -44,7 +44,6 @@
 };
 
 struct connection *connection_new(void);
-struct connection *connection_create(int socket_descriptor, pid_t dest);
 void connection_cleanup(struct connection *conn);
 bool connection_connect(struct connection *conn, pid_t dest);
 size_t connection_read_datablock(struct connection *conn, void *buffer,
diff --git a/drivers/gud/mobicore_kernelapi/device.c b/drivers/gud/MobiCoreKernelApi/device.c
similarity index 85%
rename from drivers/gud/mobicore_kernelapi/device.c
rename to drivers/gud/MobiCoreKernelApi/device.c
index a176322..04db4c3 100644
--- a/drivers/gud/mobicore_kernelapi/device.c
+++ b/drivers/gud/MobiCoreKernelApi/device.c
@@ -18,16 +18,18 @@
 #include "device.h"
 #include "common.h"
 
-struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle,
-		       void *phys_addr)
+static struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle)
 {
 	struct wsm *wsm;
 
 	wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
+	if (wsm == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	wsm->virt_addr = virt_addr;
 	wsm->len = len;
 	wsm->handle = handle;
-	wsm->phys_addr = phys_addr;
 	return wsm;
 }
 
@@ -37,11 +39,15 @@
 	struct mcore_device_t *dev;
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (dev == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	dev->device_id = device_id;
 	dev->connection = connection;
 
 	INIT_LIST_HEAD(&dev->session_vector);
-	INIT_LIST_HEAD(&dev->wsm_l2_vector);
+	INIT_LIST_HEAD(&dev->wsm_mmu_vector);
 
 	return dev;
 }
@@ -63,7 +69,7 @@
 	}
 
 	/* Free all allocated WSM descriptors */
-	list_for_each_safe(pos, q, &dev->wsm_l2_vector) {
+	list_for_each_safe(pos, q, &dev->wsm_mmu_vector) {
 		wsm = list_entry(pos, struct wsm, list);
 		list_del(pos);
 		kfree(wsm);
@@ -74,7 +80,7 @@
 	kfree(dev);
 }
 
-bool mcore_device_open(struct mcore_device_t *dev, const char *deviceName)
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name)
 {
 	dev->instance = mobicore_open();
 	return (dev->instance != NULL);
@@ -102,6 +108,8 @@
 	}
 	struct session *session =
 			session_create(session_id, dev->instance, connection);
+	if (session == NULL)
+		return false;
 	list_add_tail(&(session->list), &(dev->session_vector));
 	return true;
 }
@@ -154,16 +162,19 @@
 		/* Allocate shared memory */
 		void *virt_addr;
 		uint32_t handle;
-		void *phys_addr;
 		int ret = mobicore_allocate_wsm(dev->instance, len, &handle,
-						&virt_addr, &phys_addr);
+						&virt_addr);
 		if (ret != 0)
 			break;
 
-		/* Register (vaddr,paddr) with device */
-		wsm = wsm_create(virt_addr, len, handle, phys_addr);
+		/* Register (vaddr) with device */
+		wsm = wsm_create(virt_addr, len, handle);
+		if (wsm == NULL) {
+			mobicore_free_wsm(dev->instance, handle);
+			break;
+		}
 
-		list_add_tail(&(wsm->list), &(dev->wsm_l2_vector));
+		list_add_tail(&(wsm->list), &(dev->wsm_mmu_vector));
 
 	} while (0);
 
@@ -177,7 +188,7 @@
 	struct wsm *tmp;
 	struct list_head *pos;
 
-	list_for_each(pos, &dev->wsm_l2_vector) {
+	list_for_each(pos, &dev->wsm_mmu_vector) {
 		tmp = list_entry(pos, struct wsm, list);
 		if (tmp == wsm) {
 			ret = true;
@@ -205,7 +216,7 @@
 	struct wsm *wsm;
 	struct list_head *pos;
 
-	list_for_each(pos, &dev->wsm_l2_vector) {
+	list_for_each(pos, &dev->wsm_mmu_vector) {
 		wsm = list_entry(pos, struct wsm, list);
 		if (virt_addr == wsm->virt_addr)
 			return wsm;
diff --git a/drivers/gud/mobicore_kernelapi/device.h b/drivers/gud/MobiCoreKernelApi/device.h
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/device.h
rename to drivers/gud/MobiCoreKernelApi/device.h
index 16626bd..c795ee8 100644
--- a/drivers/gud/mobicore_kernelapi/device.h
+++ b/drivers/gud/MobiCoreKernelApi/device.h
@@ -21,7 +21,7 @@
 struct mcore_device_t {
 	/* MobiCore Trustlet session associated with the device */
 	struct list_head	session_vector;
-	struct list_head	 wsm_l2_vector; /* WSM L2 Table  */
+	struct list_head	wsm_mmu_vector; /* WSM L2 or L3 Table  */
 
 	uint32_t		device_id;	/* Device identifier */
 	struct connection	*connection;	/* The device connection */
@@ -36,7 +36,7 @@
 void mcore_device_cleanup(struct mcore_device_t *dev);
 
 
-bool mcore_device_open(struct mcore_device_t *dev, const char *deviceName);
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name);
 void mcore_device_close(struct mcore_device_t *dev);
 bool mcore_device_has_sessions(struct mcore_device_t *dev);
 bool mcore_device_create_new_session(
diff --git a/drivers/gud/mobicore_kernelapi/include/mcinq.h b/drivers/gud/MobiCoreKernelApi/include/mcinq.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/include/mcinq.h
rename to drivers/gud/MobiCoreKernelApi/include/mcinq.h
diff --git a/drivers/gud/mobicore_kernelapi/include/mcuuid.h b/drivers/gud/MobiCoreKernelApi/include/mcuuid.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/include/mcuuid.h
rename to drivers/gud/MobiCoreKernelApi/include/mcuuid.h
diff --git a/drivers/gud/mobicore_kernelapi/main.c b/drivers/gud/MobiCoreKernelApi/main.c
similarity index 97%
rename from drivers/gud/mobicore_kernelapi/main.c
rename to drivers/gud/MobiCoreKernelApi/main.c
index 8943c26..5da3ef7 100644
--- a/drivers/gud/mobicore_kernelapi/main.c
+++ b/drivers/gud/MobiCoreKernelApi/main.c
@@ -150,6 +150,10 @@
 	dev_info(mc_kapi, "Mobicore API module initialized!\n");
 
 	mod_ctx = kzalloc(sizeof(struct mc_kernelapi_ctx), GFP_KERNEL);
+	if (mod_ctx == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return -ENOMEM;
+	}
 #ifdef MC_NETLINK_COMPAT_V37
 	mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
 					    &cfg);
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h
rename to drivers/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
similarity index 99%
rename from drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
rename to drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
index eaf7e6c..993d581 100644
--- a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
+++ b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
@@ -171,7 +171,7 @@
 struct mc_drv_cmd_map_bulk_mem_payload_t {
 	uint32_t session_id;
 	uint32_t handle;
-	uint32_t phys_addr_l2;
+	uint32_t rfu;
 	uint32_t offset_payload;
 	uint32_t len_bulk_mem;
 };
diff --git a/drivers/gud/mobicore_kernelapi/session.c b/drivers/gud/MobiCoreKernelApi/session.c
similarity index 86%
rename from drivers/gud/mobicore_kernelapi/session.c
rename to drivers/gud/MobiCoreKernelApi/session.c
index dae2c00..2ea50e8 100644
--- a/drivers/gud/mobicore_kernelapi/session.c
+++ b/drivers/gud/MobiCoreKernelApi/session.c
@@ -14,15 +14,18 @@
 #include "session.h"
 
 struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
-	void *virt_addr, uint32_t len, uint32_t handle, void *phys_addr_wsm_l2)
+	void *virt_addr, uint32_t len, uint32_t handle)
 {
 	struct bulk_buffer_descriptor *desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (desc == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	desc->virt_addr = virt_addr;
 	desc->len = len;
 	desc->handle = handle;
-	desc->phys_addr_wsm_l2 = phys_addr_wsm_l2;
 
 	return desc;
 }
@@ -33,6 +36,10 @@
 	struct session *session;
 
 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (session == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	session->session_id = session_id;
 	session->instance = instance;
 	session->notification_connection = connection;
@@ -47,19 +54,14 @@
 {
 	struct bulk_buffer_descriptor *bulk_buf_descr;
 	struct list_head *pos, *q;
-	unsigned int phys_addr_wsm_l2;
 
 	/* Unmap still mapped buffers */
 	list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
 		bulk_buf_descr =
 			list_entry(pos, struct bulk_buffer_descriptor, list);
 
-		phys_addr_wsm_l2 =
-			(unsigned int)bulk_buf_descr->phys_addr_wsm_l2;
-
 		MCDRV_DBG_VERBOSE(mc_kapi,
-				  "Phys Addr of L2 Table = 0x%X, handle= %d",
-				  phys_addr_wsm_l2,
+				  "handle= %d",
 				  bulk_buf_descr->handle);
 
 		/* ignore any error, as we cannot do anything in this case. */
@@ -110,11 +112,10 @@
 		 * Prepare the interface structure for memory registration in
 		 * Kernel Module
 		 */
-		uint32_t l2_table_phys;
 		uint32_t handle;
 
 		int ret = mobicore_map_vmem(session->instance, buf, len,
-					    &handle, &l2_table_phys);
+					    &handle);
 
 		if (ret != 0) {
 			MCDRV_DBG_ERROR(mc_kapi,
@@ -123,15 +124,15 @@
 			break;
 		}
 
-		MCDRV_DBG_VERBOSE(mc_kapi,
-				  "Phys Addr of L2 Table = 0x%X, handle=%d",
-				  (unsigned int)l2_table_phys, handle);
+		MCDRV_DBG_VERBOSE(mc_kapi, "handle=%d", handle);
 
 		/* Create new descriptor */
 		bulk_buf_descr =
-			bulk_buffer_descriptor_create(buf, len,
-						      handle,
-						      (void *)l2_table_phys);
+			bulk_buffer_descriptor_create(buf, len, handle);
+		if (bulk_buf_descr == NULL) {
+			mobicore_unmap_vmem(session->instance, handle);
+			break;
+		}
 
 		/* Add to vector of descriptors */
 		list_add_tail(&(bulk_buf_descr->list),
@@ -165,8 +166,7 @@
 		MCDRV_DBG_ERROR(mc_kapi, "Virtual Address not found");
 		ret = false;
 	} else {
-		MCDRV_DBG_VERBOSE(mc_kapi, "WsmL2 phys=0x%X, handle=%d",
-				  (unsigned int)bulk_buf->phys_addr_wsm_l2,
+		MCDRV_DBG_VERBOSE(mc_kapi, "Wsm handle=%d",
 				  bulk_buf->handle);
 
 		/* ignore any error, as we cannot do anything */
diff --git a/drivers/gud/mobicore_kernelapi/session.h b/drivers/gud/MobiCoreKernelApi/session.h
similarity index 96%
rename from drivers/gud/mobicore_kernelapi/session.h
rename to drivers/gud/MobiCoreKernelApi/session.h
index 4a834e5..edcadcd 100644
--- a/drivers/gud/mobicore_kernelapi/session.h
+++ b/drivers/gud/MobiCoreKernelApi/session.h
@@ -19,9 +19,6 @@
 	uint32_t	len;		/* Length of the Bulk buffer */
 	uint32_t	handle;
 
-	/* The physical address of the L2 table of the Bulk buffer*/
-	void		*phys_addr_wsm_l2;
-
 	/* The list param for using the kernel lists*/
 	struct list_head list;
 };
@@ -29,8 +26,7 @@
 struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
 	void		*virt_addr,
 	uint32_t	len,
-	uint32_t	handle,
-	void		*phys_addr_wsm_l2
+	uint32_t	handle
 );
 
 /*
diff --git a/drivers/gud/mobicore_kernelapi/wsm.h b/drivers/gud/MobiCoreKernelApi/wsm.h
similarity index 74%
rename from drivers/gud/mobicore_kernelapi/wsm.h
rename to drivers/gud/MobiCoreKernelApi/wsm.h
index f8a107c..3a1767d 100644
--- a/drivers/gud/mobicore_kernelapi/wsm.h
+++ b/drivers/gud/MobiCoreKernelApi/wsm.h
@@ -17,17 +17,7 @@
 	void			*virt_addr;
 	uint32_t		len;
 	uint32_t		handle;
-	void			*phys_addr;
 	struct list_head	list;
 };
 
-struct wsm *wsm_create(
-	void			*virt_addr,
-	uint32_t		len,
-	uint32_t		handle,
-
-	/* NULL this may be unknown, so is can be omitted */
-	void			*phys_addr
-);
-
 #endif /* _MC_KAPI_WSM_H_ */
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/build_tag.h
similarity index 96%
rename from drivers/gud/mobicore_driver/build_tag.h
rename to drivers/gud/build_tag.h
index 4a24275..18faf5a 100644
--- a/drivers/gud/mobicore_driver/build_tag.h
+++ b/drivers/gud/build_tag.h
@@ -26,4 +26,4 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #define MOBICORE_COMPONENT_BUILD_TAG \
-		"*** t-base-202_V001 ###"
+		"*** t-base-300-QC-8974-Android-V001 ###"
diff --git a/drivers/gud/mobicore_driver/mem.c b/drivers/gud/mobicore_driver/mem.c
deleted file mode 100644
index 33c51b6..0000000
--- a/drivers/gud/mobicore_driver/mem.c
+++ /dev/null
@@ -1,708 +0,0 @@
-/*
- * MobiCore Driver Kernel Module.
- *
- * This module is written as a Linux device driver.
- * This driver represents the command proxy on the lowest layer, from the
- * secure world to the non secure world, and vice versa.
- * This driver is located in the non secure world (Linux).
- * This driver offers IOCTL commands, for access to the secure world, and has
- * the interface from the secure world to the normal world.
- * The access to the driver is possible with a file descriptor,
- * which has to be created by the fd = open(/dev/mobicore) command.
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include "main.h"
-#include "debug.h"
-#include "mem.h"
-
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/device.h>
-
-
-/* MobiCore memory context data */
-struct mc_mem_context mem_ctx;
-
-/* convert L2 PTE to page pointer */
-static inline struct page *l2_pte_to_page(pte_t pte)
-{
-	unsigned long phys_page_addr = ((unsigned long)pte & PAGE_MASK);
-	unsigned int pfn = phys_page_addr >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(pfn);
-	return page;
-}
-
-/* convert page pointer to L2 PTE */
-static inline pte_t page_to_l2_pte(struct page *page)
-{
-	unsigned long pfn = page_to_pfn(page);
-	unsigned long phys_addr = (pfn << PAGE_SHIFT);
-	pte_t pte = (pte_t)(phys_addr & PAGE_MASK);
-	return pte;
-}
-
-static inline void release_page(struct page *page)
-{
-	SetPageDirty(page);
-
-	page_cache_release(page);
-}
-
-static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
-	int pages_no, struct page **pages)
-{
-	int locked_pages;
-
-	/* lock user pages, must hold the mmap_sem to do this. */
-	down_read(&(task->mm->mmap_sem));
-	locked_pages = get_user_pages(
-				task,
-				task->mm,
-				(unsigned long)virt_start_page_addr,
-				pages_no,
-				1, /* write access */
-				0,
-				pages,
-				NULL);
-	up_read(&(task->mm->mmap_sem));
-
-	/* check if we could lock all pages. */
-	if (locked_pages != pages_no) {
-		MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
-				locked_pages);
-		if (locked_pages > 0) {
-			/* release all locked pages. */
-			release_pages(pages, locked_pages, 0);
-		}
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/* Get kernel pointer to shared L2 table given a per-process reference */
-struct l2table *get_l2_table_kernel_virt(struct mc_l2_table *table)
-{
-	if (WARN(!table, "Invalid L2 table"))
-		return NULL;
-
-	if (WARN(!table->set, "Invalid L2 table set"))
-		return NULL;
-
-	if (WARN(!table->set->kernel_virt, "Invalid L2 pointer"))
-		return NULL;
-
-	return &(table->set->kernel_virt->table[table->idx]);
-}
-
-/* Get physical address of a shared L2 table given a per-process reference */
-struct l2table *get_l2_table_phys(struct mc_l2_table *table)
-{
-	if (WARN(!table, "Invalid L2 table"))
-		return NULL;
-	if (WARN(!table->set, "Invalid L2 table set"))
-		return NULL;
-	if (WARN(!table->set->kernel_virt, "Invalid L2 phys pointer"))
-		return NULL;
-
-	return &(table->set->phys->table[table->idx]);
-}
-
-static inline int in_use(struct mc_l2_table *table)
-{
-	return atomic_read(&table->usage) > 0;
-}
-
-/*
- * Search the list of used l2 tables and return the one with the handle.
- * Assumes the table_lock is taken.
- */
-struct mc_l2_table *find_l2_table(unsigned int handle)
-{
-	struct mc_l2_table *table;
-
-	list_for_each_entry(table, &mem_ctx.l2_tables, list) {
-		if (table->handle == handle)
-			return table;
-	}
-	return NULL;
-}
-
-/*
- * Allocate a new l2 table store plus L2_TABLES_PER_PAGE in the l2 free tables
- * list. Assumes the table_lock is already taken by the caller above.
- */
-static int alloc_table_store(void)
-{
-	unsigned long store;
-	struct mc_l2_tables_set *l2table_set;
-	struct mc_l2_table *l2table, *l2table2;
-	struct page *page;
-	int ret = 0, i;
-	/* temp list for holding the l2 tables */
-	LIST_HEAD(temp);
-
-	store = get_zeroed_page(GFP_KERNEL);
-	if (!store)
-		return -ENOMEM;
-
-	/*
-	 * Actually, locking is not necessary, because kernel
-	 * memory is not supposed to get swapped out. But we
-	 * play safe....
-	 */
-	page = virt_to_page(store);
-	SetPageReserved(page);
-
-	/* add all the descriptors to the free descriptors list */
-	l2table_set = kmalloc(sizeof(*l2table_set), GFP_KERNEL | __GFP_ZERO);
-	if (l2table_set == NULL) {
-		ret = -ENOMEM;
-		goto free_store;
-	}
-	/* initialize */
-	l2table_set->kernel_virt = (void *)store;
-	l2table_set->page = page;
-	l2table_set->phys = (void *)virt_to_phys((void *)store);
-	/* the set is not yet used */
-	atomic_set(&l2table_set->used_tables, 0);
-
-	/* init add to list. */
-	INIT_LIST_HEAD(&(l2table_set->list));
-	list_add(&l2table_set->list, &mem_ctx.l2_tables_sets);
-
-	for (i = 0; i < L2_TABLES_PER_PAGE; i++) {
-		/* allocate a WSM L2 descriptor */
-		l2table  = kmalloc(sizeof(*l2table), GFP_KERNEL | __GFP_ZERO);
-		if (l2table == NULL) {
-			ret = -ENOMEM;
-			MCDRV_DBG_ERROR(mcd, "out of memory\n");
-			/* Free the full temp list and the store in this case */
-			goto free_temp_list;
-		}
-
-		/* set set reference */
-		l2table->set = l2table_set;
-		l2table->idx = i;
-		l2table->virt = get_l2_table_kernel_virt(l2table);
-		l2table->phys = (unsigned long)get_l2_table_phys(l2table);
-		atomic_set(&l2table->usage, 0);
-
-		/* add to temp list. */
-		INIT_LIST_HEAD(&l2table->list);
-		list_add_tail(&l2table->list, &temp);
-	}
-
-	/*
-	 * If everything went ok then merge the temp list with the global
-	 * free list
-	 */
-	list_splice_tail(&temp, &mem_ctx.free_l2_tables);
-	return 0;
-free_temp_list:
-	list_for_each_entry_safe(l2table, l2table2, &temp, list) {
-		kfree(l2table);
-	}
-
-	list_del(&l2table_set->list);
-
-free_store:
-	free_page(store);
-	return ret;
-
-}
-/*
- * Get a l2 table from the free tables list or allocate a new one and
- * initialize it. Assumes the table_lock is already taken.
- */
-static struct mc_l2_table *alloc_l2_table(struct mc_instance *instance)
-{
-	int ret = 0;
-	struct mc_l2_table *table = NULL;
-
-	if (list_empty(&mem_ctx.free_l2_tables)) {
-		ret = alloc_table_store();
-		if (ret) {
-			MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
-			return ERR_PTR(-ENOMEM);
-		}
-		/* if it's still empty something wrong has happened */
-		if (list_empty(&mem_ctx.free_l2_tables)) {
-			MCDRV_DBG_ERROR(mcd,
-					"Free list not updated correctly!");
-			return ERR_PTR(-EFAULT);
-		}
-	}
-
-	/* get a WSM L2 descriptor */
-	table  = list_first_entry(&mem_ctx.free_l2_tables,
-		struct mc_l2_table, list);
-	if (table == NULL) {
-		MCDRV_DBG_ERROR(mcd, "out of memory\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	/* Move it to the used l2 tables list */
-	list_move_tail(&table->list, &mem_ctx.l2_tables);
-
-	table->handle = get_unique_id();
-	table->owner = instance;
-
-	atomic_inc(&table->set->used_tables);
-	atomic_inc(&table->usage);
-
-	MCDRV_DBG_VERBOSE(mcd,
-			  "chunkPhys=%p,idx=%d", table->set->phys, table->idx);
-
-	return table;
-}
-
-/*
- * Frees the object associated with a l2 table. Initially the object is moved
- * to the free tables list, but if all the 4 lists of the store are free
- * then the store is also released.
- * Assumes the table_lock is already taken.
- */
-static void free_l2_table(struct mc_l2_table *table)
-{
-	struct mc_l2_tables_set *l2table_set;
-
-	if (WARN(!table, "Invalid table"))
-		return;
-
-	l2table_set = table->set;
-	if (WARN(!l2table_set, "Invalid table set"))
-		return;
-
-	list_move_tail(&table->list, &mem_ctx.free_l2_tables);
-
-	/* if nobody uses this set, we can release it. */
-	if (atomic_dec_and_test(&l2table_set->used_tables)) {
-		struct mc_l2_table *tmp;
-
-		/* remove from list */
-		list_del(&l2table_set->list);
-		/*
-		 * All the l2 tables are in the free list for this set
-		 * so we can just remove them from there
-		 */
-		list_for_each_entry_safe(table, tmp, &mem_ctx.free_l2_tables,
-					 list) {
-			if (table->set == l2table_set) {
-				list_del(&table->list);
-				kfree(table);
-			}
-		} /* end while */
-
-		/*
-		 * We shouldn't recover from this since it was some data
-		 * corruption before
-		 */
-		BUG_ON(!l2table_set->page);
-		ClearPageReserved(l2table_set->page);
-
-		BUG_ON(!l2table_set->kernel_virt);
-		free_page((unsigned long)l2table_set->kernel_virt);
-
-		kfree(l2table_set);
-	}
-}
-
-/*
- * Create a L2 table in a WSM container that has been allocates previously.
- * Assumes the table lock is already taken or there is no need to take like
- * when first creating the l2 table the full list is locked.
- *
- * @task	pointer to task owning WSM
- * @wsm_buffer	user space WSM start
- * @wsm_len	WSM length
- * @table	Pointer to L2 table details
- */
-static int map_buffer(struct task_struct *task, void *wsm_buffer,
-		      unsigned int wsm_len, struct mc_l2_table *table)
-{
-	int		ret = 0;
-	unsigned int	i, nr_of_pages;
-	/* start address of the 4 KiB page of wsm_buffer */
-	void		*virt_addr_page;
-	struct page	*page;
-	struct l2table	*l2table;
-	struct page	**l2table_as_array_of_pointers_to_page;
-	/* page offset in wsm buffer */
-	unsigned int offset;
-
-	if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
-		return -EINVAL;
-
-	if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
-		return -EINVAL;
-
-	if (WARN(!table, "Invalid mapping table for WSM"))
-		return -EINVAL;
-
-	/* no size > 1Mib supported */
-	if (wsm_len > SZ_1M) {
-		MCDRV_DBG_ERROR(mcd, "size > 1 MiB\n");
-		return -EINVAL;
-	}
-
-	MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x\n", wsm_buffer,
-			  wsm_len);
-
-
-	/* calculate page usage */
-	virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
-	offset = (unsigned int)	(((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
-	nr_of_pages  = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
-
-	MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d\n",
-			  virt_addr_page, nr_of_pages);
-
-	/* L2 table can hold max 1MiB in 256 pages. */
-	if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
-		MCDRV_DBG_ERROR(mcd, "WSM paged exceed 1 MiB\n");
-		return -EINVAL;
-	}
-
-	l2table = table->virt;
-	/*
-	 * We use the memory for the L2 table to hold the pointer
-	 * and convert them later. This works, as everything comes
-	 * down to a 32 bit value.
-	 */
-	l2table_as_array_of_pointers_to_page = (struct page **)l2table;
-
-	/* Request comes from user space */
-	if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
-		/*
-		 * lock user page in memory, so they do not get swapped
-		 * out.
-		 * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
-		 * function, maybe it is called fast_gup() in some versions.
-		 * handle user process doing a fork().
-		 * Child should not get things.
-		 * http://osdir.com/ml/linux-media/2009-07/msg00813.html
-		 * http://lwn.net/Articles/275808/
-		 */
-		ret = lock_pages(task, virt_addr_page, nr_of_pages,
-				 l2table_as_array_of_pointers_to_page);
-		if (ret != 0) {
-			MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed\n");
-			return ret;
-		}
-	}
-	/* Request comes from kernel space(cont buffer) */
-	else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
-		void *uaddr = wsm_buffer;
-		for (i = 0; i < nr_of_pages; i++) {
-			page = virt_to_page(uaddr);
-			if (!page) {
-				MCDRV_DBG_ERROR(mcd, "failed to map address");
-				return -EINVAL;
-			}
-			get_page(page);
-			l2table_as_array_of_pointers_to_page[i] = page;
-			uaddr += PAGE_SIZE;
-		}
-	}
-	/* Request comes from kernel space(vmalloc buffer) */
-	else {
-		void *uaddr = wsm_buffer;
-		for (i = 0; i < nr_of_pages; i++) {
-			page = vmalloc_to_page(uaddr);
-			if (!page) {
-				MCDRV_DBG_ERROR(mcd, "failed to map address");
-				return -EINVAL;
-			}
-			get_page(page);
-			l2table_as_array_of_pointers_to_page[i] = page;
-			uaddr += PAGE_SIZE;
-		}
-	}
-
-	table->pages = nr_of_pages;
-
-	/*
-	 * create L2 Table entries.
-	 * used_l2table->table contains a list of page pointers here.
-	 * For a proper cleanup we have to ensure that the following
-	 * code either works and used_l2table contains a valid L2 table
-	 * - or fails and used_l2table->table contains the list of page
-	 * pointers.
-	 * Any mixed contents will make cleanup difficult.
-	 */
-	for (i = 0; i < nr_of_pages; i++) {
-		pte_t pte;
-		page = l2table_as_array_of_pointers_to_page[i];
-
-		/*
-		 * create L2 table entry, see ARM MMU docu for details
-		 * about flags stored in the lowest 12 bits.
-		 * As a side reference, the Article
-		 * "ARM's multiply-mapped memory mess"
-		 * found in the collection at
-		 * http://lwn.net/Articles/409032/
-		 * is also worth reading.
-		 */
-		pte = page_to_l2_pte(page)
-				| PTE_EXT_AP1 | PTE_EXT_AP0
-				| PTE_CACHEABLE | PTE_BUFFERABLE
-				| PTE_TYPE_SMALL | PTE_TYPE_EXT | PTE_EXT_NG;
-		/*
-		 * Linux uses different mappings for SMP systems(the
-		 * sharing flag is set for the pte. In order not to
-		 * confuse things too much in Mobicore make sure the
-		 * shared buffers have the same flags.
-		 * This should also be done in SWD side
-		 */
-#ifdef CONFIG_SMP
-		pte |= PTE_EXT_SHARED | PTE_EXT_TEX(1);
-#endif
-
-		l2table->table_entries[i] = pte;
-		MCDRV_DBG_VERBOSE(mcd, "L2 entry %d:  0x%08x\n", i,
-				  (unsigned int)(pte));
-	}
-
-	/* ensure rest of table is empty */
-	while (i < 255)
-		l2table->table_entries[i++] = (pte_t)0;
-
-
-	return ret;
-}
-
-/*
- * Remove a L2 table in a WSM container. Afterwards the container may be
- * released. Assumes the table_lock and the lock is taken.
- */
-static void unmap_buffers(struct mc_l2_table *table)
-{
-	struct l2table *l2table;
-	int i;
-
-	if (WARN_ON(!table))
-		return;
-
-	/* found the table, now release the resources. */
-	MCDRV_DBG_VERBOSE(mcd, "clear L2 table, phys_base=%p, nr_of_pages=%d\n",
-			  (void *)table->phys, table->pages);
-
-	l2table = table->virt;
-
-	/* release all locked user space pages */
-	for (i = 0; i < table->pages; i++) {
-		/* convert physical entries from L2 table to page pointers */
-		pte_t pte = l2table->table_entries[i];
-		struct page *page = l2_pte_to_page(pte);
-		release_page(page);
-	}
-
-	/* remember that all pages have been freed */
-	table->pages = 0;
-}
-
-/* Delete a used l2 table. Assumes the table_lock and the lock is taken */
-static void unmap_l2_table(struct mc_l2_table *table)
-{
-	/* Check if it's not locked by other processes too! */
-	if (!atomic_dec_and_test(&table->usage))
-		return;
-
-	/* release if Nwd and Swd/MC do no longer use it. */
-	unmap_buffers(table);
-	free_l2_table(table);
-}
-
-int mc_free_l2_table(struct mc_instance *instance, uint32_t handle)
-{
-	struct mc_l2_table *table;
-	int ret = 0;
-
-	if (WARN(!instance, "No instance data available"))
-		return -EFAULT;
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = find_l2_table(handle);
-
-	if (table == NULL) {
-		MCDRV_DBG_VERBOSE(mcd, "entry not found");
-		ret = -EINVAL;
-		goto err_unlock;
-	}
-	if (instance != table->owner && !is_daemon(instance)) {
-		MCDRV_DBG_ERROR(mcd, "instance does no own it");
-		ret = -EPERM;
-		goto err_unlock;
-	}
-	/* free table (if no further locks exist) */
-	unmap_l2_table(table);
-err_unlock:
-	mutex_unlock(&mem_ctx.table_lock);
-
-	return ret;
-}
-
-int mc_lock_l2_table(struct mc_instance *instance, uint32_t handle)
-{
-	int ret = 0;
-	struct mc_l2_table *table = NULL;
-
-	if (WARN(!instance, "No instance data available"))
-		return -EFAULT;
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = find_l2_table(handle);
-
-	if (table == NULL) {
-		MCDRV_DBG_VERBOSE(mcd, "entry not found %u\n", handle);
-		ret = -EINVAL;
-		goto table_err;
-	}
-	if (instance != table->owner && !is_daemon(instance)) {
-		MCDRV_DBG_ERROR(mcd, "instance does no own it\n");
-		ret = -EPERM;
-		goto table_err;
-	}
-
-	/* lock entry */
-	atomic_inc(&table->usage);
-table_err:
-	mutex_unlock(&mem_ctx.table_lock);
-	return ret;
-}
-/*
- * Allocate L2 table and map buffer into it.
- * That is, create respective table entries.
- * Must hold Semaphore mem_ctx.wsm_l2_sem
- */
-struct mc_l2_table *mc_alloc_l2_table(struct mc_instance *instance,
-	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
-{
-	int ret = 0;
-	struct mc_l2_table *table;
-
-	if (WARN(!instance, "No instance data available"))
-		return ERR_PTR(-EFAULT);
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = alloc_l2_table(instance);
-	if (IS_ERR(table)) {
-		MCDRV_DBG_ERROR(mcd, "allocate_used_l2_table() failed\n");
-		ret = -ENOMEM;
-		goto err_no_mem;
-	}
-
-	/* create the L2 page for the WSM */
-	ret = map_buffer(task, wsm_buffer, wsm_len, table);
-
-	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "map_buffer() failed\n");
-		unmap_l2_table(table);
-		goto err_no_mem;
-	}
-	MCDRV_DBG(mcd, "mapped buffer %p to table with handle %d @ %lx",
-		  wsm_buffer, table->handle, table->phys);
-
-	mutex_unlock(&mem_ctx.table_lock);
-	return table;
-err_no_mem:
-	mutex_unlock(&mem_ctx.table_lock);
-	return ERR_PTR(ret);
-}
-
-uint32_t mc_find_l2_table(uint32_t handle, int32_t fd)
-{
-	uint32_t ret = 0;
-	struct mc_l2_table *table = NULL;
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = find_l2_table(handle);
-
-	if (table == NULL) {
-		MCDRV_DBG_ERROR(mcd, "entry not found %u\n", handle);
-		ret = 0;
-		goto table_err;
-	}
-
-	/* It's safe here not to lock the instance since the owner of
-	 * the table will be cleared only with the table lock taken */
-	if (!mc_check_owner_fd(table->owner, fd)) {
-		MCDRV_DBG_ERROR(mcd, "not valid owner%u\n", handle);
-		ret = 0;
-		goto table_err;
-	}
-
-	ret = table->phys;
-table_err:
-	mutex_unlock(&mem_ctx.table_lock);
-	return ret;
-}
-
-void mc_clean_l2_tables(void)
-{
-	struct mc_l2_table *table, *tmp;
-
-	mutex_lock(&mem_ctx.table_lock);
-	/* Check if some WSM is orphaned. */
-	list_for_each_entry_safe(table, tmp, &mem_ctx.l2_tables, list) {
-		if (table->owner == NULL) {
-			MCDRV_DBG(mcd,
-				  "clearing orphaned WSM L2: p=%lx pages=%d\n",
-				  table->phys, table->pages);
-			unmap_l2_table(table);
-		}
-	}
-	mutex_unlock(&mem_ctx.table_lock);
-}
-
-void mc_clear_l2_tables(struct mc_instance *instance)
-{
-	struct mc_l2_table *table, *tmp;
-
-	mutex_lock(&mem_ctx.table_lock);
-	/* Check if some WSM is still in use. */
-	list_for_each_entry_safe(table, tmp, &mem_ctx.l2_tables, list) {
-		if (table->owner == instance) {
-			MCDRV_DBG(mcd, "release WSM L2: p=%lx pages=%d\n",
-				  table->phys, table->pages);
-			/* unlock app usage and free or mark it as orphan */
-			table->owner = NULL;
-			unmap_l2_table(table);
-		}
-	}
-	mutex_unlock(&mem_ctx.table_lock);
-}
-
-int mc_init_l2_tables(void)
-{
-	/* init list for WSM L2 chunks. */
-	INIT_LIST_HEAD(&mem_ctx.l2_tables_sets);
-
-	/* L2 table descriptor list. */
-	INIT_LIST_HEAD(&mem_ctx.l2_tables);
-
-	/* L2 table descriptor list. */
-	INIT_LIST_HEAD(&mem_ctx.free_l2_tables);
-
-	mutex_init(&mem_ctx.table_lock);
-
-	return 0;
-}
-
-void mc_release_l2_tables()
-{
-	struct mc_l2_table *table;
-	/* Check if some WSM is still in use. */
-	list_for_each_entry(table, &mem_ctx.l2_tables, list) {
-		WARN(1, "WSM L2 still in use: phys=%lx ,nr_of_pages=%d\n",
-		     table->phys, table->pages);
-	}
-}
diff --git a/drivers/gud/mobicore_driver/mem.h b/drivers/gud/mobicore_driver/mem.h
deleted file mode 100644
index 397a6cc..0000000
--- a/drivers/gud/mobicore_driver/mem.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * MobiCore driver module.(interface to the secure world SWD)
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _MC_MEM_H_
-#define _MC_MEM_H_
-
-#define FREE_FROM_SWD	1
-#define FREE_FROM_NWD	0
-
-#define LOCKED_BY_APP	(1U << 0)
-#define LOCKED_BY_MC	(1U << 1)
-
-/*
- * MobiCore specific page tables for world shared memory.
- * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
- * MobiCore uses the default ARM format.
- *
- * Number of page table entries in one L2 table. This is ARM specific, an
- * L2 table covers 1 MiB by using 256 entry referring to 4KiB pages each.
- */
-#define MC_ARM_L2_TABLE_ENTRIES		256
-
-/* ARM level 2 (L2) table with 256 entries. Size: 1k */
-struct l2table {
-	pte_t	table_entries[MC_ARM_L2_TABLE_ENTRIES];
-};
-
-/* Number of pages for L2 tables. There are 4 table in each page. */
-#define L2_TABLES_PER_PAGE		4
-
-/* Store for four L2 tables in one 4kb page*/
-struct mc_l2_table_store {
-	struct l2table table[L2_TABLES_PER_PAGE];
-};
-
-/* Usage and maintenance information about mc_l2_table_store */
-struct mc_l2_tables_set {
-	struct list_head		list;
-	/* kernel virtual address */
-	struct mc_l2_table_store	*kernel_virt;
-	/* physical address */
-	struct mc_l2_table_store	*phys;
-	/* pointer to page struct */
-	struct page			*page;
-	/* How many pages from this set are used */
-	atomic_t			used_tables;
-};
-
-/*
- * L2 table allocated to the Daemon or a TLC describing a world shared buffer.
- * When users map a malloc()ed area into SWd, a L2 table is allocated.
- * In addition, the area of maximum 1MB virtual address space is mapped into
- * the L2 table and a handle for this table is returned to the user.
- */
-struct mc_l2_table {
-	struct list_head	list;
-	/* Table lock */
-	struct mutex		lock;
-	/* handle as communicated to user mode */
-	unsigned int		handle;
-	/* Number of references kept to this l2 table */
-	atomic_t		usage;
-	/* owner of this L2 table */
-	struct mc_instance	*owner;
-	/* set describing where our L2 table is stored */
-	struct mc_l2_tables_set	*set;
-	/* index into L2 table set */
-	unsigned int		idx;
-	/* size of buffer */
-	unsigned int		pages;
-	/* virtual address*/
-	void			*virt;
-	unsigned long		phys;
-};
-
-/* MobiCore Driver Memory context data. */
-struct mc_mem_context {
-	struct mc_instance	*daemon_inst;
-	/* Backing store for L2 tables */
-	struct list_head	l2_tables_sets;
-	/* Bookkeeping for used L2 tables */
-	struct list_head	l2_tables;
-	/* Bookkeeping for free L2 tables */
-	struct list_head	free_l2_tables;
-	/* semaphore to synchronize access to above lists */
-	struct mutex		table_lock;
-};
-
-/*
- * Allocate L2 table and map buffer into it.
- * That is, create respective table entries.
- */
-struct mc_l2_table *mc_alloc_l2_table(struct mc_instance *instance,
-	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
-
-/* Delete all the l2 tables associated with an instance */
-void mc_clear_l2_tables(struct mc_instance *instance);
-
-/* Release all orphaned L2 tables */
-void mc_clean_l2_tables(void);
-
-/* Delete a used l2 table. */
-int mc_free_l2_table(struct mc_instance *instance, uint32_t handle);
-
-/*
- * Lock a l2 table - the daemon adds +1 to refcount of the L2 table
- * marking it in use by SWD so it doesn't get released when the TLC dies.
- */
-int mc_lock_l2_table(struct mc_instance *instance, uint32_t handle);
-/* Unlock l2 table. */
-int mc_unlock_l2_table(struct mc_instance *instance, uint32_t handle);
-/* Return the phys address of l2 table. */
-uint32_t mc_find_l2_table(uint32_t handle, int32_t fd);
-/* Release all used l2 tables to Linux memory space */
-void mc_release_l2_tables(void);
-
-/* Initialize all l2 tables structure */
-int mc_init_l2_tables(void);
-
-#endif /* _MC_MEM_H_ */
diff --git a/drivers/gud/mobicore_driver/ops.c b/drivers/gud/mobicore_driver/ops.c
deleted file mode 100644
index 9d4af72..0000000
--- a/drivers/gud/mobicore_driver/ops.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * MobiCore Driver Kernel Module.
- *
- * This module is written as a Linux device driver.
- * This driver represents the command proxy on the lowest layer, from the
- * secure world to the non secure world, and vice versa.
- * This driver is located in the non secure world (Linux).
- * This driver offers IOCTL commands, for access to the secure world, and has
- * the interface from the secure world to the normal world.
- * The access to the driver is possible with a file descriptor,
- * which has to be created by the fd = open(/dev/mobicore) command.
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/cpu.h>
-
-#include "main.h"
-#include "fastcall.h"
-#include "ops.h"
-#include "mem.h"
-#include "pm.h"
-#include "debug.h"
-
-/* MobiCore context data */
-static struct mc_context *ctx;
-
-static inline long smc(union fc_generic *fc)
-{
-	/* If we request sleep yields must be filtered out as they
-	 * make no sense */
-	if (ctx->mcp)
-		if (ctx->mcp->flags.sleep_mode.SleepReq) {
-			if (fc->as_in.cmd == MC_SMC_N_YIELD)
-				return MC_FC_RET_ERR_INVALID;
-		}
-	return _smc(fc);
-}
-
-#ifdef MC_FASTCALL_WORKER_THREAD
-
-static struct task_struct *fastcall_thread;
-static DEFINE_KTHREAD_WORKER(fastcall_worker);
-
-struct fastcall_work {
-	struct kthread_work work;
-	void *data;
-};
-
-static void fastcall_work_func(struct kthread_work *work)
-{
-	struct fastcall_work *fc_work =
-		container_of(work, struct fastcall_work, work);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_enable();
-#endif
-
-	smc(fc_work->data);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_disable();
-#endif
-}
-
-void mc_fastcall(void *data)
-{
-	struct fastcall_work fc_work = {
-		KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
-		.data = data,
-	};
-
-	queue_kthread_work(&fastcall_worker, &fc_work.work);
-	flush_kthread_work(&fc_work.work);
-}
-
-int mc_fastcall_init(struct mc_context *context)
-{
-	int ret = 0;
-
-	ctx = context;
-
-	fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
-					 "mc_fastcall");
-	if (IS_ERR(fastcall_thread)) {
-		ret = PTR_ERR(fastcall_thread);
-		fastcall_thread = NULL;
-		MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)\n", ret);
-		return ret;
-	}
-
-	/* this thread MUST run on CPU 0 */
-	kthread_bind(fastcall_thread, 0);
-	wake_up_process(fastcall_thread);
-
-	return 0;
-}
-
-void mc_fastcall_destroy(void)
-{
-	if (!IS_ERR_OR_NULL(fastcall_thread)) {
-		kthread_stop(fastcall_thread);
-		fastcall_thread = NULL;
-	}
-}
-#else
-
-struct fastcall_work_struct {
-	struct work_struct work;
-	void *data;
-};
-
-static void fastcall_work_func(struct work_struct *work)
-{
-	struct fastcall_work_struct *fc_work =
-		container_of(work, struct fastcall_work_struct, work);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_enable();
-#endif
-
-	smc(fc_work->data);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_disable();
-#endif
-}
-
-void mc_fastcall(void *data)
-{
-	struct fastcall_work_struct work = {
-		.data = data,
-	};
-	INIT_WORK(&work.work, fastcall_work_func);
-	schedule_work_on(0, &work.work);
-
-	flush_work(&work.work);
-}
-
-int mc_fastcall_init(struct mc_context *context)
-{
-	ctx = context;
-	return 0;
-};
-
-void mc_fastcall_destroy(void) {};
-#endif
-
-int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
-{
-	int ret = 0;
-	union mc_fc_info fc_info;
-
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&fc_info, 0, sizeof(fc_info));
-	fc_info.as_in.cmd = MC_FC_INFO;
-	fc_info.as_in.ext_info_id = ext_info_id;
-
-	MCDRV_DBG(mcd, "fc_info <- cmd=0x%08x, ext_info_id=0x%08x\n",
-		  fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
-
-	mc_fastcall(&(fc_info.as_generic));
-
-	MCDRV_DBG(mcd,
-		  "fc_info -> r=0x%08x ret=0x%08x state=0x%08x ext_info=0x%08x",
-		  fc_info.as_out.resp,
-		  fc_info.as_out.ret,
-		  fc_info.as_out.state,
-		  fc_info.as_out.ext_info);
-
-	ret = convert_fc_ret(fc_info.as_out.ret);
-
-	*state  = fc_info.as_out.state;
-	*ext_info = fc_info.as_out.ext_info;
-
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
-
-	return ret;
-}
-
-/* Yield to MobiCore */
-int mc_yield(void)
-{
-	int ret = 0;
-	union fc_generic yield;
-
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&yield, 0, sizeof(yield));
-	yield.as_in.cmd = MC_SMC_N_YIELD;
-	mc_fastcall(&yield);
-	ret = convert_fc_ret(yield.as_out.ret);
-
-	return ret;
-}
-
-/* call common notify */
-int mc_nsiq(void)
-{
-	int ret = 0;
-	union fc_generic nsiq;
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&nsiq, 0, sizeof(nsiq));
-	nsiq.as_in.cmd = MC_SMC_N_SIQ;
-	mc_fastcall(&nsiq);
-	ret = convert_fc_ret(nsiq.as_out.ret);
-
-	return ret;
-}
-
-/* call common notify */
-int _nsiq(void)
-{
-	int ret = 0;
-	union fc_generic nsiq;
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&nsiq, 0, sizeof(nsiq));
-	nsiq.as_in.cmd = MC_SMC_N_SIQ;
-	_smc(&nsiq);
-	ret = convert_fc_ret(nsiq.as_out.ret);
-
-	return ret;
-}
-
-/* Call the INIT fastcall to setup MobiCore initialization */
-int mc_init(uint32_t base, uint32_t nq_offset, uint32_t nq_length,
-	uint32_t mcp_offset, uint32_t mcp_length)
-{
-	int ret = 0;
-	union mc_fc_init fc_init;
-
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&fc_init, 0, sizeof(fc_init));
-
-	fc_init.as_in.cmd = MC_FC_INIT;
-	/* base address of mci buffer 4KB aligned */
-	fc_init.as_in.base = base;
-	/* notification buffer start/length [16:16] [start, length] */
-	fc_init.as_in.nq_info = (nq_offset << 16) | (nq_length & 0xFFFF);
-	/* mcp buffer start/length [16:16] [start, length] */
-	fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
-
-	/*
-	 * Set KMOD notification queue to start of MCI
-	 * mciInfo was already set up in mmap
-	 */
-	MCDRV_DBG(mcd,
-		  "cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x\n",
-		  fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
-		  fc_init.as_in.mcp_info);
-
-	mc_fastcall(&fc_init.as_generic);
-
-	MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x\n", fc_init.as_out.resp,
-		  fc_init.as_out.ret);
-
-	ret = convert_fc_ret(fc_init.as_out.ret);
-
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
-
-	return ret;
-}
-
-/* Return MobiCore driver version */
-uint32_t mc_get_version(void)
-{
-	MCDRV_DBG(mcd, "MobiCore driver version is %i.%i\n",
-		  MCDRVMODULEAPI_VERSION_MAJOR,
-		  MCDRVMODULEAPI_VERSION_MINOR);
-
-	return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
-					MCDRVMODULEAPI_VERSION_MINOR);
-}
diff --git a/drivers/gud/setupDrivers.sh b/drivers/gud/setupDrivers.sh
new file mode 100644
index 0000000..8f877b7
--- /dev/null
+++ b/drivers/gud/setupDrivers.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+export COMP_PATH_ROOT=$(dirname $(readlink -f $BASH_SOURCE)) # absolute path of the folder containing this file (derived automatically)
+
+# This part has to be set by the customer
+# To be set, absolute path of kernel folder
+export LINUX_PATH=
+# To be set, absolute path of the CROSS_COMPILE prefix needed by the kernel, e.g. /home/user/arm-2009q3/bin/arm-none-linux-gnueabi-
+export CROSS_COMPILE=
+# To be set, build mode debug or release
+export MODE=debug
+# To be set, the absolute path to the Linux Android NDK
+export NDK_PATH=
+
+# Global variables needed by build scripts
+export COMP_PATH_Logwrapper=$COMP_PATH_ROOT/Logwrapper/Out
+export COMP_PATH_MobiCore=$COMP_PATH_ROOT/MobiCore/Out
+export COMP_PATH_MobiCoreDriverMod=$COMP_PATH_ROOT/mobicore_driver/Out
+export COMP_PATH_MobiCoreDriverLib=$COMP_PATH_ROOT/daemon/Out
+export COMP_PATH_AndroidNdkLinux=$NDK_PATH
\ No newline at end of file
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 7bc8773..37a11d2 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -986,6 +986,92 @@
 }
 EXPORT_SYMBOL(qpnp_vadc_check_result);
 
+int qpnp_adc_get_revid_version(struct device *dev)
+{
+	struct pmic_revid_data *revid_data;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(dev->of_node,
+						"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_debug("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	revid_data = get_revid_data(revid_dev_node);
+	if (IS_ERR(revid_data)) {
+		pr_debug("revid error rc = %ld\n", PTR_ERR(revid_data));
+		return -EINVAL;
+	}
+
+	if ((revid_data->rev1 == PM8941_V3P1_REV1) &&
+		(revid_data->rev2 == PM8941_V3P1_REV2) &&
+		(revid_data->rev3 == PM8941_V3P1_REV3) &&
+		(revid_data->rev4 == PM8941_V3P1_REV4) &&
+		(revid_data->pmic_type == PM8941_V3P1_TYPE) &&
+		(revid_data->pmic_subtype == PM8941_V3P1_SUBTYPE))
+			return QPNP_REV_ID_8941_3_1;
+	else if ((revid_data->rev1 == PM8941_V3P0_REV1) &&
+		(revid_data->rev2 == PM8941_V3P0_REV2) &&
+		(revid_data->rev3 == PM8941_V3P0_REV3) &&
+		(revid_data->rev4 == PM8941_V3P0_REV4) &&
+		(revid_data->pmic_type == PM8941_V3P0_TYPE) &&
+		(revid_data->pmic_subtype == PM8941_V3P0_SUBTYPE))
+			return QPNP_REV_ID_8941_3_0;
+	else if ((revid_data->rev1 == PM8941_V2P0_REV1) &&
+		(revid_data->rev2 == PM8941_V2P0_REV2) &&
+		(revid_data->rev3 == PM8941_V2P0_REV3) &&
+		(revid_data->rev4 == PM8941_V2P0_REV4) &&
+		(revid_data->pmic_type == PM8941_V2P0_TYPE) &&
+		(revid_data->pmic_subtype == PM8941_V2P0_SUBTYPE))
+			return QPNP_REV_ID_8941_2_0;
+	else if ((revid_data->rev1 == PM8226_V2P2_REV1) &&
+		(revid_data->rev2 == PM8226_V2P2_REV2) &&
+		(revid_data->rev3 == PM8226_V2P2_REV3) &&
+		(revid_data->rev4 == PM8226_V2P2_REV4) &&
+		(revid_data->pmic_type == PM8226_V2P2_TYPE) &&
+		(revid_data->pmic_subtype == PM8226_V2P2_SUBTYPE))
+			return QPNP_REV_ID_8026_2_2;
+	else if ((revid_data->rev1 == PM8226_V2P1_REV1) &&
+		(revid_data->rev2 == PM8226_V2P1_REV2) &&
+		(revid_data->rev3 == PM8226_V2P1_REV3) &&
+		(revid_data->rev4 == PM8226_V2P1_REV4) &&
+		(revid_data->pmic_type == PM8226_V2P1_TYPE) &&
+		(revid_data->pmic_subtype == PM8226_V2P1_SUBTYPE))
+			return QPNP_REV_ID_8026_2_1;
+	else if ((revid_data->rev1 == PM8226_V2P0_REV1) &&
+		(revid_data->rev2 == PM8226_V2P0_REV2) &&
+		(revid_data->rev3 == PM8226_V2P0_REV3) &&
+		(revid_data->rev4 == PM8226_V2P0_REV4) &&
+		(revid_data->pmic_type == PM8226_V2P0_TYPE) &&
+		(revid_data->pmic_subtype == PM8226_V2P0_SUBTYPE))
+			return QPNP_REV_ID_8026_2_0;
+	else if ((revid_data->rev1 == PM8226_V1P0_REV1) &&
+		(revid_data->rev2 == PM8226_V1P0_REV2) &&
+		(revid_data->rev3 == PM8226_V1P0_REV3) &&
+		(revid_data->rev4 == PM8226_V1P0_REV4) &&
+		(revid_data->pmic_type == PM8226_V1P0_TYPE) &&
+		(revid_data->pmic_subtype == PM8226_V1P0_SUBTYPE))
+			return QPNP_REV_ID_8026_1_0;
+	else if ((revid_data->rev1 == PM8110_V1P0_REV1) &&
+		(revid_data->rev2 == PM8110_V1P0_REV2) &&
+		(revid_data->rev3 == PM8110_V1P0_REV3) &&
+		(revid_data->rev4 == PM8110_V1P0_REV4) &&
+		(revid_data->pmic_type == PM8110_V1P0_TYPE) &&
+		(revid_data->pmic_subtype == PM8110_V1P0_SUBTYPE))
+			return QPNP_REV_ID_8110_1_0;
+	else if ((revid_data->rev1 == PM8110_V2P0_REV1) &&
+		(revid_data->rev2 == PM8110_V2P0_REV2) &&
+		(revid_data->rev3 == PM8110_V2P0_REV3) &&
+		(revid_data->rev4 == PM8110_V2P0_REV4) &&
+		(revid_data->pmic_type == PM8110_V2P0_TYPE) &&
+		(revid_data->pmic_subtype == PM8110_V2P0_SUBTYPE))
+			return QPNP_REV_ID_8110_2_0;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_adc_get_revid_version);
+
 int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
 			struct qpnp_adc_drv *adc_qpnp)
 {
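
Illustration only (not part of the patch): qpnp_adc_get_revid_version() above matches six revision fields per PMIC with a long if/else chain. A table-driven sketch of the same lookup is shown below in plain C; the row values are placeholders, not the real PM8941/PM8226/PM8110 fuse values.

#include <stdio.h>

struct revid_match {
	int rev1, rev2, rev3, rev4;
	int pmic_type, pmic_subtype;
	int id;                      /* QPNP_REV_ID_* value to return */
};

/* Placeholder rows; a real table would carry the PM8941/PM8226/PM8110 values. */
static const struct revid_match matches[] = {
	{ 1, 1, 3, 1, 0x01, 0x01, 1 /* e.g. QPNP_REV_ID_8941_3_1 */ },
	{ 0, 0, 2, 2, 0x04, 0x04, 5 /* e.g. QPNP_REV_ID_8026_2_2 */ },
};

static int revid_to_version(const struct revid_match *r)
{
	for (size_t i = 0; i < sizeof(matches) / sizeof(matches[0]); i++) {
		const struct revid_match *m = &matches[i];

		if (r->rev1 == m->rev1 && r->rev2 == m->rev2 &&
		    r->rev3 == m->rev3 && r->rev4 == m->rev4 &&
		    r->pmic_type == m->pmic_type &&
		    r->pmic_subtype == m->pmic_subtype)
			return m->id;
	}
	return -1;  /* the driver returns -EINVAL here */
}

int main(void)
{
	struct revid_match probe = { 1, 1, 3, 1, 0x01, 0x01, 0 };

	printf("version=%d\n", revid_to_version(&probe));
	return 0;
}
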
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 067a887..ec6d8ec 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -340,48 +340,8 @@
 	return 0;
 }
 
-#define QPNP_IADC_PM8941_3_1_REV2	3
-#define QPNP_IADC_PM8941_3_1_REV3	2
-#define QPNP_IADC_PM8026_1_REV2		1
-#define QPNP_IADC_PM8026_1_REV3		2
-#define QPNP_IADC_PM8026_2_REV2		4
-#define QPNP_IADC_PM8026_2_REV3		2
-#define QPNP_IADC_PM8110_1_REV2		2
-#define QPNP_IADC_PM8110_1_REV3		2
-
-#define QPNP_IADC_REV_ID_8941_3_1	1
-#define QPNP_IADC_REV_ID_8026_1_0	2
-#define QPNP_IADC_REV_ID_8026_2_0	3
-#define QPNP_IADC_REV_ID_8110_1_0	4
-
-static void qpnp_temp_comp_version_check(struct qpnp_iadc_chip *iadc,
-						int32_t *version)
-{
-	if ((iadc->iadc_comp.revision_dig_major ==
-			QPNP_IADC_PM8941_3_1_REV2) &&
-			(iadc->iadc_comp.revision_ana_minor ==
-			QPNP_IADC_PM8941_3_1_REV3))
-		*version = QPNP_IADC_REV_ID_8941_3_1;
-	else if ((iadc->iadc_comp.revision_dig_major ==
-			QPNP_IADC_PM8026_1_REV2) &&
-			(iadc->iadc_comp.revision_ana_minor ==
-			QPNP_IADC_PM8026_1_REV3))
-		*version = QPNP_IADC_REV_ID_8026_1_0;
-	else if ((iadc->iadc_comp.revision_dig_major ==
-			QPNP_IADC_PM8026_2_REV2) &&
-			(iadc->iadc_comp.revision_ana_minor ==
-			QPNP_IADC_PM8026_2_REV3))
-		*version = QPNP_IADC_REV_ID_8026_2_0;
-	else if ((iadc->iadc_comp.revision_dig_major ==
-			QPNP_IADC_PM8110_1_REV2) &&
-			(iadc->iadc_comp.revision_ana_minor ==
-			QPNP_IADC_PM8110_1_REV3))
-		*version = QPNP_IADC_REV_ID_8110_1_0;
-	else
-		*version = -EINVAL;
-
-	return;
-}
+#define QPNP_IADC_PM8026_2_REV2	4
+#define QPNP_IADC_PM8026_2_REV3	2
 
 #define QPNP_COEFF_1					969000
 #define QPNP_COEFF_2					32
@@ -408,15 +368,19 @@
 #define QPNP_COEFF_22					5000000
 #define QPNP_COEFF_23					3722500
 #define QPNP_COEFF_24					84
+#define QPNP_COEFF_25					33
+#define QPNP_COEFF_26					22
+#define QPNP_COEFF_27					53
+#define QPNP_COEFF_28					48
 
 static int32_t qpnp_iadc_comp(int64_t *result, struct qpnp_iadc_chip *iadc,
 							int64_t die_temp)
 {
 	int64_t temp_var = 0, sys_gain_coeff = 0, old;
 	int32_t coeff_a = 0, coeff_b = 0;
-	int32_t version;
+	int version = 0;
 
-	qpnp_temp_comp_version_check(iadc, &version);
+	version = qpnp_adc_get_revid_version(iadc->dev);
 	if (version == -EINVAL)
 		return 0;
 
@@ -431,7 +395,7 @@
 				iadc->iadc_comp.sys_gain;
 
 	switch (version) {
-	case QPNP_IADC_REV_ID_8941_3_1:
+	case QPNP_REV_ID_8941_3_1:
 		switch (iadc->iadc_comp.id) {
 		case COMP_ID_GF:
 			if (!iadc->iadc_comp.ext_rsense) {
@@ -470,7 +434,60 @@
 			break;
 		}
 		break;
-	case QPNP_IADC_REV_ID_8026_1_0:
+	case QPNP_REV_ID_8026_2_1:
+	case QPNP_REV_ID_8026_2_2:
+		/* pm8026 rev 2.1 and 2.2 */
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_25;
+					coeff_b = 0;
+				}
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					/* discharge */
+					coeff_a = 0;
+					coeff_b = 0;
+				}
+			}
+			break;
+		case COMP_ID_TSMC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_26;
+					coeff_b = 0;
+				}
+			} else {
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					/* discharge */
+					coeff_a = 0;
+					coeff_b = 0;
+				}
+			}
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8026_1_0:
 		/* pm8026 rev 1.0 */
 		switch (iadc->iadc_comp.id) {
 		case COMP_ID_GF:
@@ -522,7 +539,7 @@
 			break;
 		}
 		break;
-	case QPNP_IADC_REV_ID_8110_1_0:
+	case QPNP_REV_ID_8110_1_0:
 		/* pm8110 rev 1.0 */
 		switch (iadc->iadc_comp.id) {
 		case COMP_ID_GF:
@@ -554,8 +571,41 @@
 			break;
 		}
 		break;
+	case QPNP_REV_ID_8110_2_0:
+		die_temp -= 25000;
+		/* pm8110 rev 2.0 */
+		switch (iadc->iadc_comp.id) {
+		case COMP_ID_GF:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_27;
+					coeff_b = 0;
+				}
+			}
+			break;
+		case COMP_ID_SMIC:
+		default:
+			if (!iadc->iadc_comp.ext_rsense) {
+				/* internal rsense */
+				if (*result < 0) {
+					/* charge */
+					coeff_a = 0;
+					coeff_b = 0;
+				} else {
+					coeff_a = QPNP_COEFF_28;
+					coeff_b = 0;
+				}
+			}
+			break;
+		}
+		break;
 	default:
-	case QPNP_IADC_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_0:
 		/* pm8026 rev 1.0 */
 		coeff_a = 0;
 		coeff_b = 0;
@@ -578,7 +628,8 @@
 		temp_var = div64_s64(temp_var * sys_gain_coeff, 1000000);
 		*result = div64_s64(*result * 1000, temp_var);
 	}
-	pr_debug("%lld compensated into %lld\n", old, *result);
+	pr_debug("%lld compensated into %lld, a: %d, b: %d, sys_gain: %lld\n",
+			old, *result, coeff_a, coeff_b, sys_gain_coeff);
 
 	return 0;
 }
@@ -844,9 +895,10 @@
 							bool batfet_closed)
 {
 	uint8_t rslt_lsb, rslt_msb;
-	int32_t rc = 0;
+	int32_t rc = 0, version = 0;
 	uint16_t raw_data;
 	uint32_t mode_sel = 0;
+	bool iadc_offset_ch_batfet_check;
 
 	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
@@ -868,13 +920,22 @@
 	iadc->adc->calib.gain_raw = raw_data;
 
 	/*
-	 * there is a features in the BMS where if the batfet is opened
-	 * the BMS reads from INTERNAL_RSENSE (channel 0) actually go to
+	 * there is a feature on PM8941 in the BMS where, if the batfet is
+	 * opened, BMS reads from INTERNAL_RSENSE (channel 0) actually go to
 	 * OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence if batfet is opened
 	 * we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN even for
 	 * internal rsense.
 	 */
-	if (!batfet_closed || iadc->external_rsense) {
+	version = qpnp_adc_get_revid_version(iadc->dev);
+	if ((version == QPNP_REV_ID_8941_3_1) ||
+			(version == QPNP_REV_ID_8941_3_0) ||
+			(version == QPNP_REV_ID_8941_2_0))
+		iadc_offset_ch_batfet_check = true;
+	else
+		iadc_offset_ch_batfet_check = false;
+
+	if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
+						(iadc->external_rsense)) {
 		/* external offset calculation */
 		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
 						&raw_data, mode_sel);
@@ -1014,7 +1075,7 @@
 
 int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
 {
-	uint8_t	rslt_rsense;
+	uint8_t	rslt_rsense = 0;
 	int32_t	rc = 0, sign_bit = 0;
 
 	if (qpnp_iadc_is_valid(iadc) < 0)
@@ -1022,36 +1083,37 @@
 
 	if (iadc->external_rsense) {
 		*rsense = iadc->rsense;
-		return rc;
-	}
-
-	if (iadc->default_internal_rsense) {
+	} else if (iadc->default_internal_rsense) {
 		*rsense = iadc->rsense_workaround_value;
-		return rc;
-	}
+	} else {
 
-	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
-	if (rc < 0) {
-		pr_err("qpnp adc rsense read failed with %d\n", rc);
-		return rc;
-	}
+		rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE,
+							&rslt_rsense);
+		if (rc < 0) {
+			pr_err("qpnp adc rsense read failed with %d\n", rc);
+			return rc;
+		}
 
-	pr_debug("rsense:0%x\n", rslt_rsense);
+		pr_debug("rsense:0%x\n", rslt_rsense);
 
-	if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
-		sign_bit = 1;
+		if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
+			sign_bit = 1;
 
-	rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;
+		rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;
 
-	if (sign_bit)
-		*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
+		if (sign_bit)
+			*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
 			(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
-	else
-		*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
+		else
+			*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
 			(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
-
+	}
 	pr_debug("rsense value is %d\n", *rsense);
 
+	if (*rsense == 0)
+		pr_err("incorrect rsens value:%d rslt_rsense:%d\n",
+				*rsense, rslt_rsense);
+
 	return rc;
 }
 EXPORT_SYMBOL(qpnp_iadc_get_rsense);
@@ -1140,10 +1202,12 @@
 		result->result_uv = -result->result_uv;
 		result_current = -result_current;
 	}
+	result_current *= -1;
 	rc = qpnp_iadc_comp_result(iadc, &result_current);
 	if (rc < 0)
 		pr_err("Error during compensating the IADC\n");
 	rc = 0;
+	result_current *= -1;
 
 	result->result_ua = (int32_t) result_current;
 fail:
@@ -1215,6 +1279,11 @@
 	if (qpnp_iadc_is_valid(iadc) < 0)
 		return -EPROBE_DEFER;
 
+	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+		pr_err("raw offset errors! run iadc calibration again\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iadc->adc->adc_lock);
 
 	if (iadc->iadc_poll_eoc) {
@@ -1251,6 +1320,11 @@
 	result_current = i_result->result_uv;
 	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
 	/* Intentional fall through. Process the result w/o comp */
+	if (!rsense_u_ohms) {
+		pr_err("rsense error=%d\n", rsense_u_ohms);
+		goto fail_release_vadc;
+	}
+
 	do_div(result_current, rsense_u_ohms);
 
 	if (sign) {
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index d462fb3..346a72d 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -512,63 +512,52 @@
 #define QPNP_VBAT_COEFF_13	102640000
 #define QPNP_VBAT_COEFF_14	22220000
 #define QPNP_VBAT_COEFF_15	83060000
-
-#define QPNP_VADC_REV_ID_8941_3_1	1
-#define QPNP_VADC_REV_ID_8026_1_0	2
-#define QPNP_VADC_REV_ID_8026_2_0	3
-
-static void qpnp_temp_comp_version_check(struct qpnp_vadc_chip *vadc,
-							int32_t *version)
-{
-	if (vadc->revision_dig_major == 3 &&
-			vadc->revision_ana_minor == 2)
-		*version = QPNP_VADC_REV_ID_8941_3_1;
-	else if (vadc->revision_dig_major == 1 &&
-			vadc->revision_ana_minor == 2)
-		*version = QPNP_VADC_REV_ID_8026_1_0;
-	else if (vadc->revision_dig_major == 2 &&
-			vadc->revision_ana_minor == 2)
-		*version = QPNP_VADC_REV_ID_8026_2_0;
-	else
-		*version = -EINVAL;
-
-	return;
-}
+#define QPNP_VBAT_COEFF_16	2810
+#define QPNP_VBAT_COEFF_17	5260
+#define QPNP_VBAT_COEFF_18	8027
+#define QPNP_VBAT_COEFF_19	2347
+#define QPNP_VBAT_COEFF_20	6043
+#define QPNP_VBAT_COEFF_21	1914
+#define QPNP_VBAT_OFFSET_SMIC	9446
+#define QPNP_VBAT_OFFSET_GF	9441
+#define QPNP_OCV_OFFSET_SMIC	4596
+#define QPNP_OCV_OFFSET_GF	5896
+#define QPNP_VBAT_COEFF_22	6800
+#define QPNP_VBAT_COEFF_23	3500
+#define QPNP_VBAT_COEFF_24	4360
+#define QPNP_VBAT_COEFF_25	8060
 
 static int32_t qpnp_ocv_comp(int64_t *result,
 			struct qpnp_vadc_chip *vadc, int64_t die_temp)
 {
 	int64_t temp_var = 0;
 	int64_t old = *result;
-	int32_t version;
+	int version;
 
-	qpnp_temp_comp_version_check(vadc, &version);
+	version = qpnp_adc_get_revid_version(vadc->dev);
 	if (version == -EINVAL)
 		return 0;
 
-	if (die_temp < 25000)
-		return 0;
-
-	if (die_temp > 60000)
-		die_temp = 60000;
+	if (version == QPNP_REV_ID_8026_2_2) {
+		if (die_temp > 25000)
+			return 0;
+	}
 
 	switch (version) {
-	case QPNP_VADC_REV_ID_8941_3_1:
+	case QPNP_REV_ID_8941_3_1:
 		switch (vadc->id) {
 		case COMP_ID_TSMC:
-			temp_var = (((die_temp *
-			(-QPNP_VBAT_COEFF_4))
-			+ QPNP_VBAT_COEFF_5));
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_4));
 			break;
 		default:
 		case COMP_ID_GF:
-			temp_var = (((die_temp *
-			(-QPNP_VBAT_COEFF_1))
-			+ QPNP_VBAT_COEFF_2));
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_1));
 			break;
 		}
 		break;
-	case QPNP_VADC_REV_ID_8026_1_0:
+	case QPNP_REV_ID_8026_1_0:
 		switch (vadc->id) {
 		case COMP_ID_TSMC:
 			temp_var = (((die_temp *
@@ -583,19 +572,55 @@
 			break;
 		}
 		break;
-	case QPNP_VADC_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_1:
 		switch (vadc->id) {
 		case COMP_ID_TSMC:
-			temp_var = ((die_temp - 2500) *
+			temp_var = ((die_temp - 25000) *
 			(-QPNP_VBAT_COEFF_10));
 			break;
 		default:
 		case COMP_ID_GF:
-			temp_var = ((die_temp - 2500) *
+			temp_var = ((die_temp - 25000) *
 			(-QPNP_VBAT_COEFF_8));
 			break;
 		}
 		break;
+	case QPNP_REV_ID_8026_2_2:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			*result -= QPNP_VBAT_COEFF_22;
+			temp_var = (die_temp - 25000) *
+					QPNP_VBAT_COEFF_24;
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_VBAT_COEFF_22;
+			temp_var = (die_temp - 25000) *
+					QPNP_VBAT_COEFF_25;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8110_2_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			*result -= QPNP_OCV_OFFSET_SMIC;
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_18;
+			else
+				temp_var = QPNP_VBAT_COEFF_19;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_OCV_OFFSET_GF;
+			if (die_temp < 25000)
+				temp_var = QPNP_VBAT_COEFF_20;
+			else
+				temp_var = QPNP_VBAT_COEFF_21;
+			temp_var = (die_temp - 25000) * temp_var;
+			break;
+		}
+		break;
 	default:
 		temp_var = 0;
 		break;
@@ -618,35 +643,36 @@
 {
 	int64_t temp_var = 0;
 	int64_t old = *result;
-	int32_t version;
+	int version;
 
-	qpnp_temp_comp_version_check(vadc, &version);
+	version = qpnp_adc_get_revid_version(vadc->dev);
 	if (version == -EINVAL)
 		return 0;
 
-	if (die_temp < 25000)
-		return 0;
-
-	/* min(die_temp_c, 60_degC) */
-	if (die_temp > 60000)
-		die_temp = 60000;
+	if (version != QPNP_REV_ID_8941_3_1) {
+		/* min(die_temp_c, 60_degC) */
+		if (die_temp > 60000)
+			die_temp = 60000;
+	}
 
 	switch (version) {
-	case QPNP_VADC_REV_ID_8941_3_1:
+	case QPNP_REV_ID_8941_3_1:
 		switch (vadc->id) {
 		case COMP_ID_TSMC:
-			temp_var = (die_temp *
+			temp_var = ((die_temp - 25000) *
 			(-QPNP_VBAT_COEFF_1));
 			break;
 		default:
 		case COMP_ID_GF:
-			temp_var = (((die_temp *
-			(-QPNP_VBAT_COEFF_6))
-			+ QPNP_VBAT_COEFF_7));
+			/* min(die_temp_c, 60_degC) */
+			if (die_temp > 60000)
+				die_temp = 60000;
+			temp_var = ((die_temp - 25000) *
+			(-QPNP_VBAT_COEFF_1));
 			break;
 		}
 		break;
-	case QPNP_VADC_REV_ID_8026_1_0:
+	case QPNP_REV_ID_8026_1_0:
 		switch (vadc->id) {
 		case COMP_ID_TSMC:
 			temp_var = (((die_temp *
@@ -661,19 +687,47 @@
 			break;
 		}
 		break;
-	case QPNP_VADC_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_0:
+	case QPNP_REV_ID_8026_2_1:
 		switch (vadc->id) {
 		case COMP_ID_TSMC:
-			temp_var = ((die_temp - 2500) *
+			temp_var = ((die_temp - 25000) *
 			(-QPNP_VBAT_COEFF_11));
 			break;
 		default:
 		case COMP_ID_GF:
-			temp_var = ((die_temp - 2500) *
+			temp_var = ((die_temp - 25000) *
 			(-QPNP_VBAT_COEFF_9));
 			break;
 		}
 		break;
+	case QPNP_REV_ID_8026_2_2:
+		switch (vadc->id) {
+		case COMP_ID_TSMC:
+			*result -= QPNP_VBAT_COEFF_23;
+			temp_var = 0;
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_VBAT_COEFF_23;
+			temp_var = 0;
+			break;
+		}
+		break;
+	case QPNP_REV_ID_8110_2_0:
+		switch (vadc->id) {
+		case COMP_ID_SMIC:
+			*result -= QPNP_VBAT_OFFSET_SMIC;
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_17));
+			break;
+		default:
+		case COMP_ID_GF:
+			*result -= QPNP_VBAT_OFFSET_GF;
+			temp_var = ((die_temp - 25000) *
+			(QPNP_VBAT_COEFF_16));
+			break;
+		}
+		break;
 	default:
 		temp_var = 0;
 		break;
@@ -692,7 +746,7 @@
 }
 
 int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *vadc,
-						int64_t *result)
+					int64_t *result, bool is_pon_ocv)
 {
 	struct qpnp_vadc_result die_temp_result;
 	int rc = 0;
@@ -708,7 +762,12 @@
 		return rc;
 	}
 
-	rc = qpnp_ocv_comp(result, vadc, die_temp_result.physical);
+	if (is_pon_ocv)
+		rc = qpnp_ocv_comp(result, vadc, die_temp_result.physical);
+	else
+		rc = qpnp_vbat_sns_comp(result, vadc,
+				die_temp_result.physical);
+
 	if (rc < 0)
 		pr_err("Error with vbat compensation\n");
 
diff --git a/drivers/input/misc/mma8x5x.c b/drivers/input/misc/mma8x5x.c
index d708d94..d576752 100644
--- a/drivers/input/misc/mma8x5x.c
+++ b/drivers/input/misc/mma8x5x.c
@@ -26,8 +26,8 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/input-polldev.h>
 #include <linux/sensors.h>
+#include <linux/input.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of_gpio.h>
 #include <linux/irq.h>
@@ -44,8 +44,6 @@
 #define POLL_INTERVAL_MAX	10000
 #define POLL_INTERVAL		100 /* msecs */
 
-/* if sensor is standby ,set POLL_STOP_TIME to slow down the poll */
-#define POLL_STOP_TIME		10000
 #define INPUT_FUZZ			32
 #define INPUT_FLAT			32
 #define INPUT_DATA_DIVIDER	16
@@ -81,6 +79,7 @@
 #define MMA_INT_ROUTING_CFG	0x01
 
 #define MMA_POWER_CFG_MASK	0xFE
+#define MMA_ODR_MASK		0x38
 
 struct sensor_regulator {
 	struct regulator *vreg;
@@ -189,7 +188,7 @@
 };
 struct mma8x5x_data {
 	struct i2c_client *client;
-	struct input_polled_dev *poll_dev;
+	struct delayed_work dwork;
 	struct input_dev *idev;
 	struct mutex data_lock;
 	struct sensors_classdev cdev;
@@ -229,7 +228,6 @@
 	{{ 0,  1,  0}, { 1,  0,	0}, {0, 0,  -1} },
 	{{ 1,  0,  0}, { 0, -1,	0}, {0, 0,  -1} },
 };
-static struct mma8x5x_data *drv_data;
 static int mma8x5x_config_regulator(struct i2c_client *client, bool on)
 {
 	int rc = 0, i;
@@ -408,7 +406,7 @@
 	if (result < 0)
 		goto out;
 
-	val = (u8)result | val;
+	val = ((u8)result & ~MMA_ODR_MASK) | val;
 	result = i2c_smbus_write_byte_data(client, MMA8X5X_CTRL_REG1,
 					   (val & MMA_POWER_CFG_MASK));
 	if (result < 0)
@@ -487,32 +485,30 @@
 
 static void mma8x5x_report_data(struct mma8x5x_data *pdata)
 {
-	struct input_polled_dev *poll_dev = pdata->poll_dev;
 	struct mma8x5x_data_axis data;
+
 	mutex_lock(&pdata->data_lock);
-	if ((pdata->active & MMA_STATE_MASK) == MMA_STANDBY) {
-		poll_dev->poll_interval = POLL_STOP_TIME;
-		/* if standby ,set as 10s to slow the poll. */
-		goto out;
-	} else {
-		if (poll_dev->poll_interval == POLL_STOP_TIME)
-			poll_dev->poll_interval = pdata->poll_delay;
-	}
 	if (mma8x5x_read_data(pdata->client, &data) != 0)
 		goto out;
 	mma8x5x_data_convert(pdata, &data);
-	input_report_abs(poll_dev->input, ABS_X, data.x);
-	input_report_abs(poll_dev->input, ABS_Y, data.y);
-	input_report_abs(poll_dev->input, ABS_Z, data.z);
-	input_sync(poll_dev->input);
+	input_report_abs(pdata->idev, ABS_X, data.x);
+	input_report_abs(pdata->idev, ABS_Y, data.y);
+	input_report_abs(pdata->idev, ABS_Z, data.z);
+	input_sync(pdata->idev);
 out:
 	mutex_unlock(&pdata->data_lock);
 }
 
-static void mma8x5x_dev_poll(struct input_polled_dev *dev)
+static void mma8x5x_dev_poll(struct work_struct *work)
 {
-	struct mma8x5x_data *pdata = (struct mma8x5x_data *)dev->private;
-	mma8x5x_report_data(pdata);
+	struct mma8x5x_data *pdata = container_of((struct delayed_work *)work,
+				struct mma8x5x_data, dwork);
+
+	if ((pdata->active & MMA_STATE_MASK) == MMA_ACTIVED) {
+		mma8x5x_report_data(pdata);
+		schedule_delayed_work(&pdata->dwork,
+					msecs_to_jiffies(pdata->poll_delay));
+	}
 }
 
 static irqreturn_t mma8x5x_interrupt(int vec, void *data)
@@ -577,6 +573,10 @@
 				dev_err(&client->dev, "change device state failed!");
 				goto err_failed;
 			}
+			if (!pdata->use_int)
+				schedule_delayed_work(&pdata->dwork,
+					msecs_to_jiffies(pdata->poll_delay));
+
 			pdata->active = MMA_ACTIVED;
 			dev_dbg(&client->dev, "%s:mma enable setting active.\n",
 					__func__);
@@ -597,7 +597,10 @@
 				dev_err(&client->dev, "change device state failed!");
 				goto err_failed;
 			}
-
+			/*
+			 * Set standby state; the polling work will stop
+			 * on its next scheduled run.
+			 */
 			pdata->active = MMA_STANDBY;
 			dev_dbg(&client->dev, "%s:mma enable setting inactive.\n",
 					__func__);
@@ -616,7 +619,7 @@
 static ssize_t mma8x5x_enable_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	struct i2c_client *client;
 	u8 val;
 	int enable;
@@ -641,7 +644,7 @@
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	struct i2c_client *client;
 	int ret;
 	unsigned long enable;
@@ -663,7 +666,7 @@
 static ssize_t mma8x5x_position_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	int position = 0;
 
 	if (!pdata) {
@@ -680,7 +683,7 @@
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	int position;
 	int ret;
 
@@ -714,7 +717,6 @@
 	} else {
 		mutex_lock(&pdata->data_lock);
 		pdata->poll_delay = delay_ms;
-		pdata->poll_dev->poll_interval = pdata->poll_delay;
 		mutex_unlock(&pdata->data_lock);
 	}
 
@@ -724,7 +726,7 @@
 static ssize_t mma8x5x_poll_delay_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 
 	if (!pdata) {
 		dev_err(dev, "Invalid driver private data!");
@@ -738,7 +740,7 @@
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	int delay;
 	int ret;
 
@@ -825,7 +827,6 @@
 	struct input_dev *idev;
 	struct mma8x5x_data *pdata;
 	struct i2c_adapter *adapter;
-	struct input_polled_dev *poll_dev;
 	adapter = to_i2c_adapter(client->dev.parent);
 	/* power on the device */
 	result = mma8x5x_config_regulator(client, 1);
@@ -836,7 +837,7 @@
 					 I2C_FUNC_SMBUS_BYTE |
 					 I2C_FUNC_SMBUS_BYTE_DATA);
 	if (!result)
-		goto err_out;
+		goto err_check_id;
 
 	chip_id = i2c_smbus_read_byte_data(client, MMA8X5X_WHO_AM_I);
 
@@ -846,14 +847,14 @@
 			chip_id, MMA8451_ID, MMA8452_ID, MMA8453_ID,
 			MMA8652_ID, MMA8653_ID);
 		result = -EINVAL;
-		goto err_out;
+		goto err_check_id;
 	}
 	/* set the private data */
 	pdata = kzalloc(sizeof(struct mma8x5x_data), GFP_KERNEL);
 	if (!pdata) {
 		result = -ENOMEM;
 		dev_err(&client->dev, "alloc data memory error!\n");
-		goto err_out;
+		goto err_check_id;
 	}
 
 	if (client->dev.of_node) {
@@ -867,12 +868,10 @@
 	}
 
 	/* Initialize the MMA8X5X chip */
-	drv_data = pdata;
 	pdata->client = client;
 	pdata->chip_id = chip_id;
 	pdata->mode = MODE_2G;
-	pdata->poll_delay = POLL_STOP_TIME;
-	pdata->poll_dev = NULL;
+	pdata->poll_delay = POLL_INTERVAL;
 
 	mutex_init(&pdata->data_lock);
 	i2c_set_clientdata(client, pdata);
@@ -894,29 +893,9 @@
 			if (result) {
 				dev_err(&client->dev,
 					"set_direction for irq gpio failed\n");
-				goto err_set_direction;
+				goto err_set_gpio_direction;
 			}
 		}
-		idev = input_allocate_device();
-		if (!idev) {
-			result = -ENOMEM;
-			dev_err(&client->dev, "alloc input device failed!\n");
-			goto err_alloc_poll_device;
-		}
-		input_set_drvdata(idev, pdata);
-		idev->name = ACCEL_INPUT_DEV_NAME;
-		idev->uniq = mma8x5x_id2name(pdata->chip_id);
-		idev->id.bustype = BUS_I2C;
-		idev->evbit[0] = BIT_MASK(EV_ABS);
-		input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
-		result = input_register_device(idev);
-		if (result) {
-			dev_err(&client->dev, "register input device failed!\n");
-			goto err_register_device;
-		}
-		pdata->idev = idev;
 		device_init_wakeup(&client->dev, true);
 		enable_irq_wake(client->irq);
 		result = request_threaded_irq(client->irq, NULL,
@@ -930,34 +909,29 @@
 		}
 		mma8x5x_device_int_init(client);
 	} else {
-		/* create the input poll device */
-		poll_dev = input_allocate_polled_device();
-		if (!poll_dev) {
-			result = -ENOMEM;
-			dev_err(&client->dev, "alloc poll device failed!\n");
-			goto err_alloc_poll_device;
-		}
-		pdata->poll_dev = poll_dev;
-		pdata->idev = NULL;
-		poll_dev->poll = mma8x5x_dev_poll;
-		poll_dev->poll_interval = POLL_STOP_TIME;
-		poll_dev->poll_interval_min = POLL_INTERVAL_MIN;
-		poll_dev->poll_interval_max = POLL_INTERVAL_MAX;
-		poll_dev->private = pdata;
-		idev = poll_dev->input;
-		idev->name = ACCEL_INPUT_DEV_NAME;
-		idev->uniq = mma8x5x_id2name(pdata->chip_id);
-		idev->id.bustype = BUS_I2C;
-		idev->evbit[0] = BIT_MASK(EV_ABS);
-		input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
-		result = input_register_polled_device(pdata->poll_dev);
-		if (result) {
-			dev_err(&client->dev, "register poll device failed!\n");
-			goto err_register_device;
-		}
+		INIT_DELAYED_WORK(&pdata->dwork, mma8x5x_dev_poll);
 	}
+	idev = input_allocate_device();
+	if (!idev) {
+		result = -ENOMEM;
+		dev_err(&client->dev, "alloc input device failed!\n");
+		goto err_alloc_poll_device;
+	}
+	input_set_drvdata(idev, pdata);
+	idev->name = ACCEL_INPUT_DEV_NAME;
+	idev->uniq = mma8x5x_id2name(pdata->chip_id);
+	idev->id.bustype = BUS_I2C;
+	idev->evbit[0] = BIT_MASK(EV_ABS);
+	input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
+	input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
+	input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
+	result = input_register_device(idev);
+	if (result) {
+		dev_err(&client->dev, "register input device failed!\n");
+		goto err_register_device;
+	}
+	pdata->idev = idev;
+
 	result = sysfs_create_group(&idev->dev.kobj, &mma8x5x_attr_group);
 	if (result) {
 		dev_err(&client->dev, "create device file failed!\n");
@@ -983,23 +957,20 @@
 err_create_class_sysfs:
 	sysfs_remove_group(&idev->dev.kobj, &mma8x5x_attr_group);
 err_create_sysfs:
-	input_unregister_polled_device(pdata->poll_dev);
+	input_unregister_device(idev);
+err_register_device:
+	input_free_device(idev);
+err_alloc_poll_device:
 err_register_irq:
 	if (pdata->use_int)
 		device_init_wakeup(&client->dev, false);
-err_register_device:
-	if (pdata->use_int)
-		input_free_device(idev);
-	else
-		input_free_polled_device(pdata->poll_dev);
-err_alloc_poll_device:
-err_set_direction:
+err_set_gpio_direction:
 	if (gpio_is_valid(pdata->int_pin) && pdata->use_int)
 		gpio_free(pdata->int_pin);
 err_request_gpio:
 err_parse_dt:
 	kfree(pdata);
-err_out:
+err_check_id:
 	mma8x5x_config_regulator(client, 0);
 err_power_on:
 	return result;
@@ -1007,14 +978,22 @@
 static int __devexit mma8x5x_remove(struct i2c_client *client)
 {
 	struct mma8x5x_data *pdata = i2c_get_clientdata(client);
-	struct input_polled_dev *poll_dev;
+	struct input_dev *idev;
+
 	mma8x5x_device_stop(client);
 	if (pdata) {
-		poll_dev = pdata->poll_dev;
-		input_unregister_polled_device(poll_dev);
-		input_free_polled_device(poll_dev);
+		idev = pdata->idev;
+		sysfs_remove_group(&idev->dev.kobj, &mma8x5x_attr_group);
+		if (pdata->use_int) {
+			device_init_wakeup(&client->dev, false);
+			if (gpio_is_valid(pdata->int_pin))
+				gpio_free(pdata->int_pin);
+		}
+		input_unregister_device(idev);
+		input_free_device(idev);
 		kfree(pdata);
 	}
+	mma8x5x_config_regulator(client, 0);
 	return 0;
 }
 
@@ -1026,8 +1005,10 @@
 
 	if (pdata->use_int && pdata->active == MMA_ACTIVED)
 		return 0;
-	if (pdata->active == MMA_ACTIVED)
+	if (pdata->active == MMA_ACTIVED) {
 		mma8x5x_device_stop(client);
+		cancel_delayed_work_sync(&pdata->dwork);
+	}
 	if (pdata->active & MMA_SHUTTEDDOWN)
 		return 0;
 	if (!mma8x5x_config_regulator(client, 0))
@@ -1058,6 +1039,8 @@
 	if (pdata->active == MMA_ACTIVED) {
 		val = i2c_smbus_read_byte_data(client, MMA8X5X_CTRL_REG1);
 		i2c_smbus_write_byte_data(client, MMA8X5X_CTRL_REG1, val|0x01);
+		schedule_delayed_work(&pdata->dwork,
+				msecs_to_jiffies(pdata->poll_delay));
 	}
 
 	return 0;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index cd6989c..be7c3c6 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1983,6 +1983,7 @@
 				    const char *buf, size_t count)
 {
 	struct mxt_data *data = dev_get_drvdata(dev);
+	struct device *adapter = data->client->adapter->dev.parent;
 	unsigned long value;
 	int err = 0;
 
@@ -2000,7 +2001,7 @@
 		if (atomic_read(&data->st_enabled) == 0)
 			break;
 
-		pm_runtime_put(data->client->adapter->dev.parent);
+		pm_runtime_put(adapter);
 		atomic_set(&data->st_enabled, 0);
 		mxt_secure_touch_notify(data);
 		mxt_interrupt(data->client->irq, data);
@@ -2012,7 +2013,7 @@
 			break;
 		}
 
-		if (pm_runtime_get(data->client->adapter->dev.parent) < 0) {
+		if (pm_runtime_get_sync(adapter) < 0) {
 			dev_err(&data->client->dev, "pm_runtime_get failed\n");
 			err = -EIO;
 			break;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8e70129..cb1dbd4 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -87,6 +87,16 @@
          section mappings and TLB misses should be quite infrequent.
          Most people can probably say Y here.
 
+config MSM_IOMMU_VBIF_CHECK
+	bool "Enable support for VBIF check when IOMMU gets stuck"
+	depends on MSM_IOMMU
+	help
+	  Enables an extra check in the IOMMU driver that logs debugging
+	  information when a TLB sync or IOMMU halt operation fails to
+	  complete. This helps in debugging such issues.
+
+	  If unsure, say N here.
+
 config IOMMU_NON_SECURE
 	bool "Turns on programming of secure SMMU by kernel"
 	depends on MSM_IOMMU
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index 84f81bf..911a9e7 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/errno.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -143,13 +144,141 @@
 	.iommu_lock_release = _iommu_lock_release,
 };
 
-void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
+#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK
+
+#define VBIF_XIN_HALT_CTRL0 0x200
+#define VBIF_XIN_HALT_CTRL1 0x204
+#define VBIF_AXI_HALT_CTRL0 0x208
+#define VBIF_AXI_HALT_CTRL1 0x20C
+
+static void __halt_vbif_xin(void __iomem *vbif_base)
+{
+	pr_err("Halting VBIF_XIN\n");
+	writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
+}
+
+static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
+{
+	unsigned int reg_val;
+
+	reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
+	pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
+
+	reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
+	pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
+	reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
+	pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
+	reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
+	pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
+	reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
+	pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
+}
+
+static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
+{
+	phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
+			   - (phys_addr_t) 0x4000);
+	void __iomem *base = ioremap(addr, 0x1000);
+	int ret = 0;
+
+	if (base) {
+		__dump_vbif_state(drvdata->base, base);
+		__halt_vbif_xin(base);
+		__dump_vbif_state(drvdata->base, base);
+		iounmap(base);
+	} else {
+		pr_err("%s: Unable to ioremap\n", __func__);
+		ret = -ENOMEM;
+	}
+	return ret;
+}
+
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+	int res;
+	unsigned int val;
+	void __iomem *base = drvdata->base;
+	char const *name = drvdata->name;
+
+	pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
+	res = __check_vbif_state(drvdata);
+	if (res)
+		BUG();
+
+	pr_err("Checking if IOMMU halt completed for %s\n", name);
+
+	res = readl_tight_poll_timeout(
+		GLB_REG(MICRO_MMU_CTRL, base), val,
+			(val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000);
+
+	if (res) {
+		pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
+			name);
+	} else {
+		pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
+	}
+	BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+				int ctx)
+{
+	int res;
+	unsigned int val;
+	void __iomem *base = drvdata->base;
+	char const *name = drvdata->name;
+
+	pr_err("Timed out waiting for TLB SYNC to complete for %s\n", name);
+	res = __check_vbif_state(drvdata);
+	if (res)
+		BUG();
+
+	pr_err("Checking if TLB sync completed for %s\n", name);
+
+	res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+				(val & CB_TLBSTATUS_SACTIVE) == 0, 5000000);
+	if (res) {
+		pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
+			name);
+	} else {
+		pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
+	}
+	BUG();
+}
+
+#else
+
+/*
+ * For targets without a VBIF, or with the VBIF check disabled,
+ * simply crash (BUG) so the issue is captured immediately
+ */
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+	BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+				int ctx)
+{
+	BUG();
+}
+
+#endif
+
+void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
 {
 	if (iommu_drvdata->halt_enabled) {
-		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 1);
+		unsigned int val;
+		void __iomem *base = iommu_drvdata->base;
+		int res;
 
-		while (GET_MICRO_MMU_CTRL_IDLE(iommu_drvdata->base) == 0)
-			cpu_relax();
+		SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
+		res = readl_tight_poll_timeout(
+			GLB_REG(MICRO_MMU_CTRL, base), val,
+			     (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000);
+
+		if (res)
+			check_halt_state(iommu_drvdata);
 		/* Ensure device is idle before continuing */
 		mb();
 	}
@@ -173,15 +302,19 @@
 	}
 }
 
-static void __sync_tlb(void __iomem *base, int ctx)
+static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
 {
+	unsigned int val;
+	unsigned int res;
+	void __iomem *base = iommu_drvdata->base;
+
 	SET_TLBSYNC(base, ctx, 0);
-
-	/* No barrier needed due to register proximity */
-	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
-		cpu_relax();
-
 	/* No barrier needed due to read dependency */
+
+	res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+				(val & CB_TLBSTATUS_SACTIVE) == 0, 5000000);
+	if (res)
+		check_tlb_sync_state(iommu_drvdata, ctx);
 }
 
 static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
@@ -205,7 +338,7 @@
 		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
 			   ctx_drvdata->asid | (va & CB_TLBIVA_VA));
 		mb();
-		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num);
 		__disable_clocks(iommu_drvdata);
 	}
 fail:
@@ -232,7 +365,7 @@
 		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
 			     ctx_drvdata->asid);
 		mb();
-		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num);
 		__disable_clocks(iommu_drvdata);
 	}
 
diff --git a/drivers/iommu/msm_iommu_dev-v1.c b/drivers/iommu/msm_iommu_dev-v1.c
index a9d164e..ff6b58c 100644
--- a/drivers/iommu/msm_iommu_dev-v1.c
+++ b/drivers/iommu/msm_iommu_dev-v1.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -299,6 +299,7 @@
 	if (!drvdata->base)
 		return -ENOMEM;
 
+	drvdata->phys_base = r->start;
 	drvdata->glb_base = drvdata->base;
 
 	if (of_get_property(pdev->dev.of_node, "vdd-supply", NULL)) {
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index d87520f..eba5ca8 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -691,7 +691,7 @@
 {
 	int rc;
 	u8 val;
-	int duty_us;
+	int duty_us, duty_ns, period_us;
 
 	if (led->cdev.brightness) {
 		if (led->cdev.brightness < led->mpp_cfg->min_brightness) {
@@ -710,13 +710,23 @@
 			}
 		}
 		if (led->mpp_cfg->pwm_mode == PWM_MODE) {
-			pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev);
-			duty_us = (led->mpp_cfg->pwm_cfg->pwm_period_us *
-					led->cdev.brightness) / LED_FULL;
 			/*config pwm for brightness scaling*/
-			rc = pwm_config_us(led->mpp_cfg->pwm_cfg->pwm_dev,
+			period_us = led->mpp_cfg->pwm_cfg->pwm_period_us;
+			if (period_us > INT_MAX / NSEC_PER_USEC) {
+				duty_us = (period_us * led->cdev.brightness) /
+					LED_FULL;
+				rc = pwm_config_us(
+					led->mpp_cfg->pwm_cfg->pwm_dev,
 					duty_us,
-					led->mpp_cfg->pwm_cfg->pwm_period_us);
+					period_us);
+			} else {
+				duty_ns = ((period_us * NSEC_PER_USEC) /
+					LED_FULL) * led->cdev.brightness;
+				rc = pwm_config(
+					led->mpp_cfg->pwm_cfg->pwm_dev,
+					duty_ns,
+					period_us * NSEC_PER_USEC);
+			}
 			if (rc < 0) {
 				dev_err(&led->spmi_dev->dev, "Failed to " \
 					"configure pwm for new values\n");
@@ -1219,8 +1229,8 @@
 
 static int qpnp_kpdbl_set(struct qpnp_led_data *led)
 {
-	int duty_us;
 	int rc;
+	int duty_us, duty_ns, period_us;
 
 	if (led->cdev.brightness) {
 		if (!led->kpdbl_cfg->pwm_cfg->blinking)
@@ -1237,11 +1247,22 @@
 		}
 
 		if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
-			duty_us = (led->kpdbl_cfg->pwm_cfg->pwm_period_us *
-				led->cdev.brightness) / KPDBL_MAX_LEVEL;
-			rc = pwm_config_us(led->kpdbl_cfg->pwm_cfg->pwm_dev,
+			period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
+			if (period_us > INT_MAX / NSEC_PER_USEC) {
+				duty_us = (period_us * led->cdev.brightness) /
+					KPDBL_MAX_LEVEL;
+				rc = pwm_config_us(
+					led->kpdbl_cfg->pwm_cfg->pwm_dev,
 					duty_us,
-					led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+					period_us);
+			} else {
+				duty_ns = ((period_us * NSEC_PER_USEC) /
+					KPDBL_MAX_LEVEL) * led->cdev.brightness;
+				rc = pwm_config(
+					led->kpdbl_cfg->pwm_cfg->pwm_dev,
+					duty_ns,
+					period_us * NSEC_PER_USEC);
+			}
 			if (rc < 0) {
 				dev_err(&led->spmi_dev->dev, "pwm config failed\n");
 				return rc;
@@ -1262,7 +1283,7 @@
 
 		if (led->kpdbl_cfg->always_on) {
 			rc = pwm_config_us(led->kpdbl_cfg->pwm_cfg->pwm_dev, 0,
-					led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+				led->kpdbl_cfg->pwm_cfg->pwm_period_us);
 			if (rc < 0) {
 				dev_err(&led->spmi_dev->dev,
 						"pwm config failed\n");
@@ -1300,19 +1321,30 @@
 
 static int qpnp_rgb_set(struct qpnp_led_data *led)
 {
-	int duty_us;
 	int rc;
+	int duty_us, duty_ns, period_us;
 
 	if (led->cdev.brightness) {
 		if (!led->rgb_cfg->pwm_cfg->blinking)
 			led->rgb_cfg->pwm_cfg->mode =
 				led->rgb_cfg->pwm_cfg->default_mode;
 		if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
-			duty_us = (led->rgb_cfg->pwm_cfg->pwm_period_us *
-				led->cdev.brightness) / LED_FULL;
-			rc = pwm_config_us(led->rgb_cfg->pwm_cfg->pwm_dev,
+			period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
+			if (period_us > INT_MAX / NSEC_PER_USEC) {
+				duty_us = (period_us * led->cdev.brightness) /
+					LED_FULL;
+				rc = pwm_config_us(
+					led->rgb_cfg->pwm_cfg->pwm_dev,
 					duty_us,
-					led->rgb_cfg->pwm_cfg->pwm_period_us);
+					period_us);
+			} else {
+				duty_ns = ((period_us * NSEC_PER_USEC) /
+					LED_FULL) * led->cdev.brightness;
+				rc = pwm_config(
+					led->rgb_cfg->pwm_cfg->pwm_dev,
+					duty_ns,
+					period_us * NSEC_PER_USEC);
+			}
 			if (rc < 0) {
 				dev_err(&led->spmi_dev->dev,
 					"pwm config failed\n");
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index 16141b5..ab21404 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -41,7 +41,7 @@
 #define MAX_ENCRYPTION_BUFFERS 1
 #define MIN_IOS 16
 #define MIN_POOL_PAGES 32
-#define KEY_SIZE_XTS 32
+#define KEY_SIZE_XTS 64
 #define AES_XTS_IV_LEN 16
 
 #define DM_REQ_CRYPT_ERROR -1
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 044f6f1..eb05015 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -176,15 +176,15 @@
 	uint32_t irq_status0, uint32_t irq_status1,
 	struct msm_isp_timestamp *ts)
 {
+	uint32_t cnt;
 	if (!(irq_status0 & 0x1F))
 		return;
 
 	if (irq_status0 & BIT(0)) {
 		ISP_DBG("%s: SOF IRQ\n", __func__);
-		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
-			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
-			pix_stream_count == 0) {
-			msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+		cnt = vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count;
+		if (cnt > 0) {
+			msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
 			if (vfe_dev->axi_data.stream_update)
 				msm_isp_axi_stream_update(vfe_dev);
 			msm_isp_update_framedrop_reg(vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d53d7f6..c3fb176 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,7 @@
 #include <linux/module.h>
 #include <mach/iommu.h>
 #include <linux/ratelimit.h>
-
+#include <asm/div64.h>
 #include "msm_isp40.h"
 #include "msm_isp_util.h"
 #include "msm_isp_axi_util.h"
@@ -36,8 +36,8 @@
 #define VFE40_8x26_VERSION 0x20000013
 #define VFE40_8x26V2_VERSION 0x20010014
 
-#define VFE40_BURST_LEN 3
-#define VFE40_STATS_BURST_LEN 2
+#define VFE40_BURST_LEN 1
+#define VFE40_STATS_BURST_LEN 1
 #define VFE40_UB_SIZE 1536
 #define VFE40_EQUAL_SLICE_UB 228
 #define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
@@ -370,15 +370,16 @@
 	uint32_t irq_status0, uint32_t irq_status1,
 	struct msm_isp_timestamp *ts)
 {
+	int cnt;
+
 	if (!(irq_status0 & 0xF))
 		return;
 
 	if (irq_status0 & (1 << 0)) {
 		ISP_DBG("%s: SOF IRQ\n", __func__);
-		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
-			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
-			pix_stream_count == 0) {
-			msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+		cnt = vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count;
+		if (cnt > 0) {
+			msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
 			if (vfe_dev->axi_data.stream_update)
 				msm_isp_axi_stream_update(vfe_dev);
 			msm_isp_update_framedrop_reg(vfe_dev);
@@ -1077,7 +1078,6 @@
 	uint8_t num_used_wms = 0;
 	uint32_t prop_size = 0;
 	uint32_t wm_ub_size;
-	uint32_t delta;
 
 	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
 		if (axi_data->free_wm[i] > 0) {
@@ -1089,9 +1089,11 @@
 		axi_data->hw_info->min_wm_ub * num_used_wms;
 	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
 		if (axi_data->free_wm[i]) {
-			delta =
-				(axi_data->wm_image_size[i] *
-					prop_size)/total_image_size;
+			uint64_t delta = 0;
+			uint64_t temp = (uint64_t)axi_data->wm_image_size[i] *
+					(uint64_t)prop_size;
+			do_div(temp, total_image_size);
+			delta = temp;
 			wm_ub_size = axi_data->hw_info->min_wm_ub + delta;
 			msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
 				vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
@@ -1118,7 +1120,7 @@
 static void msm_vfe40_cfg_axi_ub(struct vfe_device *vfe_dev)
 {
 	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
-	axi_data->wm_ub_cfg_policy = MSM_WM_UB_EQUAL_SLICING;
+	axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
 	if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
 		msm_vfe40_cfg_axi_ub_equal_slicing(vfe_dev);
 	else
@@ -1407,8 +1409,8 @@
 }
 
 static struct msm_vfe_axi_hardware_info msm_vfe40_axi_hw_info = {
-	.num_wm = 5,
-	.num_comp_mask = 2,
+	.num_wm = 7,
+	.num_comp_mask = 3,
 	.num_rdi = 3,
 	.num_rdi_master = 3,
 	.min_wm_ub = 64,
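
Illustration only (not part of the patch): the UB-sizing hunk above widens the wm_image_size * prop_size product to 64 bits and uses do_div() so the proportional share cannot overflow 32-bit arithmetic. A hedged sketch of that computation in plain C, with ordinary 64-bit division standing in for do_div() and made-up sizes:

#include <stdint.h>
#include <stdio.h>

/* Proportional share of prop_size for one write master, computed in 64 bits. */
static uint32_t wm_ub_delta(uint32_t image_size, uint32_t prop_size,
			    uint32_t total_image_size)
{
	uint64_t temp = (uint64_t)image_size * (uint64_t)prop_size;

	temp /= total_image_size;        /* do_div(temp, total_image_size) */
	return (uint32_t)temp;
}

int main(void)
{
	/* Hypothetical sizes: the 32-bit product of these would overflow. */
	printf("delta=%u\n", wm_ub_delta(8294400u, 1088u, 12441600u));
	return 0;
}
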
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 01106d2..a60fa09 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -81,6 +81,10 @@
 		return rc;
 
 	switch (stream_cfg_cmd->output_format) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
 	case V4L2_PIX_FMT_SBGGR8:
 	case V4L2_PIX_FMT_SGBRG8:
 	case V4L2_PIX_FMT_SGRBG8:
@@ -167,6 +171,10 @@
 	uint32_t size = 0;
 	struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
 	switch (stream_info->output_format) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
 	case V4L2_PIX_FMT_SBGGR8:
 	case V4L2_PIX_FMT_SGBRG8:
 	case V4L2_PIX_FMT_SGRBG8:
@@ -444,7 +452,7 @@
 	sof_event.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
 	sof_event.timestamp = ts->event_time;
 	sof_event.mono_timestamp = ts->buf_time;
-	msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
+	msm_isp_send_event(vfe_dev, ISP_EVENT_SOF + frame_src, &sof_event);
 }
 
 void msm_isp_calculate_framedrop(
@@ -1215,6 +1223,8 @@
 			msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
 			stream_info->state = ACTIVE;
 		}
+		vfe_dev->axi_data.src_info[
+			SRC_TO_INTF(stream_info->stream_src)].frame_id = 0;
 	}
 	msm_isp_update_stream_bandwidth(vfe_dev);
 	vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
@@ -1228,16 +1238,6 @@
 			update_camif_state(vfe_dev, camif_update);
 	}
 
-	if (vfe_dev->axi_data.src_info[VFE_RAW_0].raw_stream_count > 0) {
-		vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id = 0;
-	}
-	else if (vfe_dev->axi_data.src_info[VFE_RAW_1].raw_stream_count > 0) {
-		vfe_dev->axi_data.src_info[VFE_RAW_1].frame_id = 0;
-	}
-	else if (vfe_dev->axi_data.src_info[VFE_RAW_2].raw_stream_count > 0) {
-		vfe_dev->axi_data.src_info[VFE_RAW_2].frame_id = 0;
-	}
-
 	if (wait_for_complete)
 		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
 
@@ -1294,7 +1294,15 @@
 		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
 		if (rc < 0) {
 			pr_err("%s: wait for config done failed\n", __func__);
-			return rc;
+			for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+				stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(
+					stream_cfg_cmd->stream_handle[i])];
+				stream_info->state = STOP_PENDING;
+				msm_isp_axi_stream_enable_cfg(
+					vfe_dev, stream_info);
+				stream_info->state = INACTIVE;
+			}
 		}
 	}
 	msm_isp_update_stream_bandwidth(vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 191a2ec..cb46e9c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 #include <media/v4l2-subdev.h>
 #include <linux/ratelimit.h>
+#include <asm/div64.h>
 
 #include "msm.h"
 #include "msm_isp_util.h"
@@ -24,11 +25,10 @@
 static DEFINE_MUTEX(bandwidth_mgr_mutex);
 static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
 
-#define MSM_ISP_MIN_AB 300000000
-#define MSM_ISP_MIN_IB 450000000
+#define MSM_ISP_MIN_AB 450000000
+#define MSM_ISP_MIN_IB 900000000
 
 #define VFE40_8974V2_VERSION 0x1001001A
-
 static struct msm_bus_vectors msm_isp_init_vectors[] = {
 	{
 		.src = MSM_BUS_MASTER_VFE,
@@ -217,7 +217,8 @@
 {
 	uint32_t avtimer_msw_1st = 0, avtimer_lsw = 0;
 	uint32_t avtimer_msw_2nd = 0;
-	uint8_t iter = 0;
+	uint64_t av_timer_tick = 0;
+
 	if (!vfe_dev->p_avtimer_msw || !vfe_dev->p_avtimer_lsw) {
 		pr_err("%s: ioremap failed\n", __func__);
 		return;
@@ -226,15 +227,10 @@
 		avtimer_msw_1st = msm_camera_io_r(vfe_dev->p_avtimer_msw);
 		avtimer_lsw = msm_camera_io_r(vfe_dev->p_avtimer_lsw);
 		avtimer_msw_2nd = msm_camera_io_r(vfe_dev->p_avtimer_msw);
-	} while ((avtimer_msw_1st != avtimer_msw_2nd)
-		&& (iter++ < AVTIMER_ITERATION_CTR));
-	/*Just return if the MSW TimeStamps don't converge after
-	a few iterations Application needs to handle the zero TS values*/
-	if (iter >= AVTIMER_ITERATION_CTR) {
-		pr_err("%s: AVTimer MSW TS did not converge !!!\n", __func__);
-		return;
-	}
-	time_stamp->vt_time.tv_sec = avtimer_msw_1st;
+	} while (avtimer_msw_1st != avtimer_msw_2nd);
+	av_timer_tick = ((uint64_t)avtimer_msw_1st << 32) | avtimer_lsw;
+	avtimer_lsw = do_div(av_timer_tick, USEC_PER_SEC);
+	time_stamp->vt_time.tv_sec = (uint32_t)(av_timer_tick);
 	time_stamp->vt_time.tv_usec = avtimer_lsw;
 }
 
@@ -806,6 +802,12 @@
 	case V4L2_PIX_FMT_NV61:
 		val = CAL_WORD(pixel_per_line, 1, 8);
 		break;
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+		val = CAL_WORD(pixel_per_line, 2, 8);
+		break;
 		/*TD: Add more image format*/
 	default:
 		msm_isp_print_fourcc_error(__func__, output_format);
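
Illustration only (not part of the patch): the msm_isp_util.c AV-timer hunk above composes a 64-bit microsecond tick from the MSW/LSW register pair and splits it with do_div() into a sec/usec timestamp. A hedged sketch of that split in plain C, using ordinary 64-bit division and a made-up sample value in place of real register reads:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

/* Combine the two 32-bit AV-timer halves and split into sec/usec. */
static void avtimer_to_timestamp(uint32_t msw, uint32_t lsw,
				 uint32_t *sec, uint32_t *usec)
{
	uint64_t tick = ((uint64_t)msw << 32) | lsw;  /* microsecond tick count */

	*usec = (uint32_t)(tick % USEC_PER_SEC);      /* do_div() remainder */
	*sec  = (uint32_t)(tick / USEC_PER_SEC);      /* do_div() quotient */
}

int main(void)
{
	uint32_t sec, usec;

	/* Hypothetical sample: 0x1_2A05F200 ticks = 5,000,000,000 us. */
	avtimer_to_timestamp(0x1, 0x2A05F200, &sec, &usec);
	printf("sec=%u usec=%u\n", sec, usec);   /* prints sec=5000 usec=0 */
	return 0;
}
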
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index 981c210..8662657 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -457,9 +457,8 @@
 			break;
 		}
 		for (i = 0; i < csid_params.lut_params.num_cid; i++) {
-			vc_cfg = kzalloc(csid_params.lut_params.num_cid *
-				sizeof(struct msm_camera_csid_vc_cfg),
-				GFP_KERNEL);
+			vc_cfg = kzalloc(sizeof(struct msm_camera_csid_vc_cfg),
+			    GFP_KERNEL);
 			if (!vc_cfg) {
 				pr_err("%s: %d failed\n", __func__, __LINE__);
 				for (i--; i >= 0; i--)
@@ -469,8 +468,7 @@
 			}
 			if (copy_from_user(vc_cfg,
 				(void *)csid_params.lut_params.vc_cfg[i],
-				(csid_params.lut_params.num_cid *
-				sizeof(struct msm_camera_csid_vc_cfg)))) {
+				sizeof(struct msm_camera_csid_vc_cfg))) {
 				pr_err("%s: %d failed\n", __func__, __LINE__);
 				kfree(vc_cfg);
 				for (i--; i >= 0; i--)
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 4b7a3be..cdc649f 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1479,10 +1479,18 @@
 		pr_err("MARK LTR\n");
 		break;
 	}
-	case HAL_PARAM_VENC_HIER_P_NUM_FRAMES:
+	case HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS:
 	{
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_P_NUM_ENH_LAYER;
+			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER;
+		pkt->rg_property_data[1] = *(u32 *)pdata;
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
+	case HAL_CONFIG_VENC_HIER_P_NUM_FRAMES:
+	{
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER;
 		pkt->rg_property_data[1] = *(u32 *)pdata;
 		pkt->size += sizeof(u32) * 2;
 		break;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index ea7d670..66d6878 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -693,11 +693,20 @@
 	int rc = 0;
 	int i;
 	struct hal_buffer_requirements *buff_req_buffer;
+
 	if (!inst || !f || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR,
 			"Invalid input, inst = %p, format = %p\n", inst, f);
 		return -EINVAL;
 	}
+
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_ERR, "Getting buffer requirements failed: %d\n",
+				rc);
+		return rc;
+	}
+
 	hdev = inst->core->device;
 	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
 		fmt = inst->fmts[CAPTURE_PORT];
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index d8b608437..030aa29 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -116,6 +116,7 @@
 	"Extradata input crop",
 	"Extradata digital zoom",
 	"Extradata aspect ratio",
+	"Extradata LTR",
 	"Extradata macroblock metadata",
 };
 
@@ -1000,11 +1001,53 @@
 	return rc;
 }
 
+static int msm_venc_enable_hier_p(struct msm_vidc_inst *inst)
+{
+	int num_enh_layers = 0;
+	u32 property_id = 0;
+	struct hfi_device *hdev = NULL;
+	int rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_VP8)
+		return 0;
+
+	num_enh_layers = inst->capability.hier_p.max - 1;
+	if (!num_enh_layers)
+		return 0;
+
+	hdev = inst->core->device;
+	property_id = HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS;
+
+	rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, property_id,
+			(void *)&num_enh_layers);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed with error = %d\n", __func__, rc);
+	}
+	return rc;
+}
+
 static inline int start_streaming(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
 	struct vb2_buf_entry *temp;
 	struct list_head *ptr, *next;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_venc_enable_hier_p(inst);
+	if (rc)
+		return rc;
+
 	if (inst->capability.pixelprocess_capabilities &
 		HAL_VIDEO_ENCODER_SCALING_CAPABILITY)
 		rc = msm_comm_check_scaling_supported(inst);
@@ -2101,7 +2144,7 @@
 		pdata = &markltr;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS:
-		property_id = HAL_PARAM_VENC_HIER_P_NUM_FRAMES;
+		property_id = HAL_CONFIG_VENC_HIER_P_NUM_FRAMES;
 		hier_p_layers = ctrl->val;
 		if (hier_p_layers > (inst->capability.hier_p.max - 1)) {
 			dprintk(VIDC_ERR,
@@ -2614,6 +2657,14 @@
 			"Invalid input, inst = %p, format = %p\n", inst, f);
 		return -EINVAL;
 	}
+
+	rc = msm_comm_try_get_bufreqs(inst);
+	if (rc) {
+		dprintk(VIDC_WARN, "Getting new buffer requirements failed: %d\n",
+				rc);
+		return rc;
+	}
+
 	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		fmt = inst->fmts[CAPTURE_PORT];
 		height = inst->prop.height[CAPTURE_PORT];
@@ -2643,8 +2694,16 @@
 				buff_req_buffer->buffer_size : 0;
 		}
 		for (i = 0; i < fmt->num_planes; ++i) {
-			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
-			f->fmt.pix_mp.plane_fmt[i].sizeimage;
+			if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				inst->bufq[OUTPUT_PORT].vb2_bufq.
+				plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+			} else if (f->type ==
+				V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+				inst->bufq[CAPTURE_PORT].vb2_bufq.
+				plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+			}
 		}
 	} else {
 		dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index f0721c3..9dbecfb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1343,7 +1343,6 @@
 			mutex_lock(&inst->lock);
 		}
 		mutex_unlock(&inst->lock);
-		msm_smem_delete_client(inst->mem_client);
 		debugfs_remove_recursive(inst->debugfs_root);
 	}
 }
@@ -1401,7 +1400,9 @@
 	for (i = 0; i < MAX_PORT_NUM; i++)
 		vb2_queue_release(&inst->bufq[i].vb2_bufq);
 
+	msm_smem_delete_client(inst->mem_client);
 	pr_info(VIDC_DBG_TAG "Closed video instance: %p\n", VIDC_INFO, inst);
 	kfree(inst);
+
 	return 0;
 }
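
The msm_vidc change above reorders teardown: msm_smem_delete_client() now runs in the close path only after every vb2 queue has been released, presumably because the queues can still reference buffers backed by that memory client until vb2_queue_release() returns. The resulting close sequence, in outline:

    for (i = 0; i < MAX_PORT_NUM; i++)
            vb2_queue_release(&inst->bufq[i].vb2_bufq);  /* may still touch smem-backed buffers */
    msm_smem_delete_client(inst->mem_client);            /* destroy the memory client last */
    kfree(inst);
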
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 70114de..051f171 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1196,6 +1196,8 @@
 			vb->v4l2_buf.flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME)
 			vb->v4l2_buf.flags |= V4L2_QCOM_BUF_DROP_FRAME;
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_MBAFF)
+			vb->v4l2_buf.flags |= V4L2_MSM_BUF_FLAG_MBAFF;
 		switch (fill_buf_done->picture_type) {
 		case HAL_PICTURE_IDR:
 			vb->v4l2_buf.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 4fcd20e..008407d 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -527,6 +527,25 @@
 	return rc;
 }
 
+static void venus_hfi_set_registers(struct venus_hfi_device *device)
+{
+	struct reg_set *reg_set;
+	int i;
+
+	if (!device->res) {
+		dprintk(VIDC_ERR,
+			"device resources null, cannot set registers\n");
+		return;
+	}
+
+	reg_set = &device->res->reg_set;
+	for (i = 0; i < reg_set->count; i++) {
+		venus_hfi_write_register(device,
+				reg_set->reg_tbl[i].reg,
+				reg_set->reg_tbl[i].value, 0);
+	}
+}
+
 static int venus_hfi_core_start_cpu(struct venus_hfi_device *device)
 {
 	u32 ctrl_status = 0, count = 0, rc = 0;
@@ -962,6 +981,7 @@
 
 	device->power_enabled = 0;
 	--device->pwr_cnt;
+	dprintk(VIDC_INFO, "entering power collapse\n");
 already_disabled:
 	return rc;
 }
@@ -1001,18 +1021,45 @@
 		goto err_enable_clk;
 	}
 
+
+	/*
+	 * Re-program all of the registers that get reset as a result of
+	 * regulator_disable() and _enable()
+	 */
+	venus_hfi_set_registers(device);
+
+	venus_hfi_write_register(device, VIDC_UC_REGION_ADDR,
+			(u32)device->iface_q_table.align_device_addr, 0);
+	venus_hfi_write_register(device,
+			VIDC_UC_REGION_SIZE, SHARED_QSIZE, 0);
+	venus_hfi_write_register(device, VIDC_CPU_CS_SCIACMDARG2,
+		(u32)device->iface_q_table.align_device_addr,
+		device->iface_q_table.align_virtual_addr);
+
+	if (!IS_ERR_OR_NULL(device->sfr.align_device_addr))
+		venus_hfi_write_register(device, VIDC_SFR_ADDR,
+				(u32)device->sfr.align_device_addr, 0);
+	if (!IS_ERR_OR_NULL(device->qdss.align_device_addr))
+		venus_hfi_write_register(device, VIDC_MMAP_ADDR,
+				(u32)device->qdss.align_device_addr, 0);
+
+	/* Reboot the firmware */
 	rc = venus_hfi_tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to resume video core %d\n", rc);
 		goto err_set_video_state;
 	}
+
+	/* Wait for boot completion */
 	rc = venus_hfi_reset_core(device);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to reset venus core");
 		goto err_reset_core;
 	}
+
 	device->power_enabled = 1;
 	++device->pwr_cnt;
+	dprintk(VIDC_INFO, "resuming from power collapse\n");
 	return rc;
 err_reset_core:
 	venus_hfi_tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
@@ -1497,25 +1544,6 @@
 	return -ENOMEM;
 }
 
-static void venus_hfi_set_registers(struct venus_hfi_device *device)
-{
-	struct reg_set *reg_set;
-	int i;
-
-	if (!device->res) {
-		dprintk(VIDC_ERR,
-			"device resources null, cannot set registers\n");
-		return;
-	}
-
-	reg_set = &device->res->reg_set;
-	for (i = 0; i < reg_set->count; i++) {
-		venus_hfi_write_register(device,
-				reg_set->reg_tbl[i].reg,
-				reg_set->reg_tbl[i].value, 0);
-	}
-}
-
 static int venus_hfi_sys_set_debug(struct venus_hfi_device *device, int debug)
 {
 	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
@@ -1561,9 +1589,6 @@
 
 	dev->intr_status = 0;
 	INIT_LIST_HEAD(&dev->sess_head);
-	mutex_init(&dev->read_lock);
-	mutex_init(&dev->write_lock);
-	mutex_init(&dev->session_lock);
 	venus_hfi_set_registers(dev);
 
 	if (!dev->hal_client) {
@@ -2028,6 +2053,10 @@
 
 	new_session = (struct hal_session *)
 		kzalloc(sizeof(struct hal_session), GFP_KERNEL);
+	if (!new_session) {
+		dprintk(VIDC_ERR, "new session fail: Out of memory\n");
+		return NULL;
+	}
 	new_session->session_id = (u32) session_id;
 	if (session_type == 1)
 		new_session->is_decoder = 0;
@@ -3301,7 +3330,6 @@
 			__func__, device);
 		return -EINVAL;
 	}
-	mutex_init(&device->clk_pwr_lock);
 	device->clk_gating_level = VCODEC_CLK;
 	rc = venus_hfi_iommu_attach(device);
 	if (rc) {
@@ -3574,6 +3602,11 @@
 		goto error_createq_pm;
 	}
 
+	mutex_init(&hdevice->read_lock);
+	mutex_init(&hdevice->write_lock);
+	mutex_init(&hdevice->session_lock);
+	mutex_init(&hdevice->clk_pwr_lock);
+
 	if (hal_ctxt.dev_count == 0)
 		INIT_LIST_HEAD(&hal_ctxt.dev_head);
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index c764758..d7350b6 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -44,6 +44,7 @@
 #define HAL_BUFFERFLAG_READONLY         0x00000200
 #define HAL_BUFFERFLAG_ENDOFSUBFRAME    0x00000400
 #define HAL_BUFFERFLAG_EOSEQ            0x00200000
+#define HAL_BUFFERFLAG_MBAFF            0x08000000
 #define HAL_BUFFERFLAG_DROP_FRAME       0x20000000
 
 
@@ -185,7 +186,8 @@
 	HAL_CONFIG_VENC_MARKLTRFRAME,
 	HAL_CONFIG_VENC_USELTRFRAME,
 	HAL_CONFIG_VENC_LTRPERIOD,
-	HAL_PARAM_VENC_HIER_P_NUM_FRAMES,
+	HAL_CONFIG_VENC_HIER_P_NUM_FRAMES,
+	HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS,
 };
 
 enum hal_domain {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 5117266..7f4dd04 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -305,8 +305,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x018)
 #define HFI_PROPERTY_PARAM_VENC_MULTIREF_P				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x019)
-#define HFI_PROPERTY_PARAM_VENC_HIER_P_NUM_ENH_LAYER	\
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01A)
 #define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT		\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
 #define HFI_PROPERTY_PARAM_VENC_LTRMODE		\
@@ -319,6 +317,8 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01F)
 #define  HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x026)
 #define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
 #define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE				\
@@ -342,6 +342,8 @@
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x009)
 #define  HFI_PROPERTY_CONFIG_VENC_USELTRFRAME			\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00A)
+#define  HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER		\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00B)
 #define  HFI_PROPERTY_CONFIG_VENC_LTRPERIOD			\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00C)
 #define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.c b/drivers/media/platform/msm/wfd/vsg-subdev.c
index 433468e..960e45c 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.c
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.c
@@ -337,6 +337,7 @@
 static int vsg_start(struct v4l2_subdev *sd)
 {
 	struct vsg_context *context = NULL;
+	int rc = 0;
 
 	if (!sd) {
 		WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
@@ -345,18 +346,24 @@
 
 	context = (struct vsg_context *)sd->dev_priv;
 
+	mutex_lock(&context->mutex);
 	if (context->state == VSG_STATE_STARTED) {
 		WFD_MSG_ERR("VSG not stopped, start not allowed\n");
-		return -EINPROGRESS;
+		rc = -EINPROGRESS;
+		goto err_bad_state;
 	} else if (context->state == VSG_STATE_ERROR) {
 		WFD_MSG_ERR("VSG in error state, not allowed to restart\n");
-		return -ENOTRECOVERABLE;
+		rc = -ENOTRECOVERABLE;
+		goto err_bad_state;
 	}
 
 	context->state = VSG_STATE_STARTED;
 	hrtimer_start(&context->threshold_timer, ns_to_ktime(context->
 			max_frame_interval), HRTIMER_MODE_REL);
-	return 0;
+
+err_bad_state:
+	mutex_unlock(&context->mutex);
+	return rc;
 }
 
 static int vsg_stop(struct v4l2_subdev *sd)
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 9f18508..02441ec 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1892,13 +1892,6 @@
 		return;
 	}
 
-	if (radio->mode == FM_RECV_TURNING_ON) {
-		radio->mode = FM_RECV;
-		iris_q_event(radio, IRIS_EVT_RADIO_READY);
-	} else if (radio->mode == FM_TRANS_TURNING_ON) {
-		radio->mode = FM_TRANS;
-		iris_q_event(radio, IRIS_EVT_RADIO_READY);
-	}
 	radio_hci_req_complete(hdev, rsp->status);
 }
 
@@ -3777,7 +3770,19 @@
 				radio->mode = FM_OFF;
 				goto END;
 			} else {
-				initialise_recv(radio);
+				retval = initialise_recv(radio);
+				if (retval < 0) {
+					FMDERR("Error while initialising"\
+						" radio %d\n", retval);

+					hci_cmd(HCI_FM_DISABLE_RECV_CMD,
+							radio->fm_hdev);
+					radio->mode = FM_OFF;
+					goto END;
+				}
+			}
+			if (radio->mode == FM_RECV_TURNING_ON) {
+				radio->mode = FM_RECV;
+				iris_q_event(radio, IRIS_EVT_RADIO_READY);
 			}
 			break;
 		case FM_TRANS:
@@ -3794,7 +3799,19 @@
 				radio->mode = FM_OFF;
 				goto END;
 			} else {
-				initialise_trans(radio);
+				retval = initialise_trans(radio);
+				if (retval < 0) {
+					FMDERR("Error while initialising"\
+							" radio %d\n", retval);
+					hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
+								radio->fm_hdev);
+					radio->mode = FM_OFF;
+					goto END;
+				}
+			}
+			if (radio->mode == FM_TRANS_TURNING_ON) {
+				radio->mode = FM_TRANS;
+				iris_q_event(radio, IRIS_EVT_RADIO_READY);
 			}
 			break;
 		case FM_OFF:
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 668cc73..e5311ce 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -1104,9 +1104,13 @@
 	 * consistent after getting driver's lock back.
 	 */
 	if (q->memory == V4L2_MEMORY_USERPTR) {
-		mmap_sem = &current->active_mm->mmap_sem;
+		bool mm_exists = !!current->mm;
+
+		mmap_sem = mm_exists ? &current->mm->mmap_sem : NULL;
 		call_qop(q, wait_prepare, q);
-		down_read(mmap_sem);
+		/* kthreads have no userspace, hence no pages to lock */
+		if (mmap_sem)
+			down_read(mmap_sem);
 		call_qop(q, wait_finish, q);
 	}
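
The videobuf2 change above stops reaching for current->active_mm, which a kernel thread only borrows from whatever task ran before it, and takes mmap_sem only when current->mm is non-NULL, i.e. when the caller is a real userspace process with USERPTR pages to pin. A hedged sketch of the guard; the matching up_read() later in the function needs the same NULL check:

    struct rw_semaphore *sem = current->mm ? &current->mm->mmap_sem : NULL;

    if (sem)
            down_read(sem);   /* userspace caller: lock its address space */
    /* ... look up / fault in the USERPTR pages here ... */
    if (sem)
            up_read(sem);     /* kthread caller: nothing was locked */
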
 
diff --git a/drivers/misc/qfp_fuse.c b/drivers/misc/qfp_fuse.c
index 3a088dc..f271f96 100644
--- a/drivers/misc/qfp_fuse.c
+++ b/drivers/misc/qfp_fuse.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,7 @@
 /*
  * Time QFPROM requires to reliably burn a fuse.
  */
-#define QFPROM_BLOW_TIMEOUT_US      10
+#define QFPROM_BLOW_TIMEOUT_US      20
 #define QFPROM_BLOW_TIMER_OFFSET    0x2038
 /*
  * Denotes number of cycles required to blow the fuse.
@@ -42,6 +42,10 @@
 #define QFP_FUSE_READY              0x01
 #define QFP_FUSE_OFF                0x00
 
+#define QFP_FUSE_BUF_SIZE           64
+#define UINT32_MAX                  (0xFFFFFFFFU)
+
+
 struct qfp_priv_t {
 	uint32_t base;
 	uint32_t end;
@@ -53,6 +57,23 @@
 /* We need only one instance of this for the driver */
 static struct qfp_priv_t *qfp_priv;
 
+static inline bool is_usr_req_valid(const struct qfp_fuse_req *req)
+{
+	uint32_t size = qfp_priv->end - qfp_priv->base;
+	uint32_t req_size;
+
+	if (req->size >= (UINT32_MAX / sizeof(uint32_t)))
+		return false;
+	req_size = req->size * sizeof(uint32_t);
+	if ((req_size == 0) || (req_size > size))
+		return false;
+	if (req->offset >= size)
+		return false;
+	if ((req->offset + req_size) > size)
+		return false;
+
+	return true;
+}
 
 static int qfp_fuse_open(struct inode *inode, struct file *filp)
 {
@@ -177,7 +198,9 @@
 {
 	int err = 0;
 	struct qfp_fuse_req req;
-	u32 *buf = NULL;
+	u32 fuse_buf[QFP_FUSE_BUF_SIZE];
+	u32 *buf = fuse_buf;
+	u32 *ptr = NULL;
 	int i;
 
 	/* Verify user arguments. */
@@ -199,25 +222,21 @@
 		}
 
 		/* Check for limits */
-		if (!req.size) {
-			pr_err("Request size zero.\n");
-			err = -EFAULT;
+		if (is_usr_req_valid(&req) == false) {
+			pr_err("Invalid request\n");
+			err = -EINVAL;
 			break;
 		}
 
-		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
-				qfp_priv->end) {
-			pr_err("Req size exceeds QFPROM addr space\n");
-			err = -EFAULT;
-			break;
-		}
-
-		/* Allocate memory for buffer */
-		buf = kzalloc(req.size * 4, GFP_KERNEL);
-		if (buf == NULL) {
-			pr_alert("No memory for data\n");
-			err = -ENOMEM;
-			break;
+		if (req.size > QFP_FUSE_BUF_SIZE) {
+			/* Allocate memory for buffer */
+			ptr = kzalloc(req.size * 4, GFP_KERNEL);
+			if (ptr == NULL) {
+				pr_alert("No memory for data\n");
+				err = -ENOMEM;
+				break;
+			}
+			buf = ptr;
 		}
 
 		if (mutex_lock_interruptible(&qfp_priv->lock)) {
@@ -251,24 +270,21 @@
 			break;
 		}
 		/* Check for limits */
-		if (!req.size) {
-			pr_err("Request size zero.\n");
-			err = -EFAULT;
-			break;
-		}
-		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
-				qfp_priv->end) {
-			pr_err("Req size exceeds QFPROM space\n");
-			err = -EFAULT;
+		if (is_usr_req_valid(&req) == false) {
+			pr_err("Invalid request\n");
+			err = -EINVAL;
 			break;
 		}
 
-		/* Allocate memory for buffer */
-		buf = kzalloc(4 * (req.size), GFP_KERNEL);
-		if (buf == NULL) {
-			pr_alert("No memory for data\n");
-			err = -ENOMEM;
-			break;
+		if (req.size > QFP_FUSE_BUF_SIZE) {
+			/* Allocate memory for buffer */
+			ptr = kzalloc(req.size * 4, GFP_KERNEL);
+			if (ptr == NULL) {
+				pr_alert("No memory for data\n");
+				err = -ENOMEM;
+				break;
+			}
+			buf = ptr;
 		}
 
 		/* Copy user data to local buffer */
@@ -296,7 +312,7 @@
 		pr_err("Invalid ioctl command.\n");
 		return -ENOTTY;
 	}
-	kfree(buf);
+	kfree(ptr);
 	return err;
 }
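
The qfp_fuse rework above routes both the read and write ioctls through is_usr_req_valid(), which bounds req->size before multiplying so a huge word count cannot wrap around to a small byte count, then range-checks offset and offset + size against the fuse region. The same logic in isolation — a sketch with illustrative names, and with the final comparison reordered so the addition itself cannot wrap:

    static bool req_in_range(uint32_t offset, uint32_t nwords, uint32_t region_size)
    {
            uint32_t nbytes;

            if (nwords >= UINT32_MAX / sizeof(uint32_t))
                    return false;                   /* nwords * 4 would overflow 32 bits */
            nbytes = nwords * sizeof(uint32_t);
            if (nbytes == 0 || nbytes > region_size)
                    return false;
            if (offset >= region_size)
                    return false;
            return nbytes <= region_size - offset;  /* safe: offset < region_size */
    }

The companion change keeps requests of up to QFP_FUSE_BUF_SIZE words in a stack buffer and falls back to kzalloc() only for larger ones, which is why the final kfree() now targets the heap pointer ptr rather than buf.
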
 
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index df3e5d1..7ad51e6 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -287,7 +287,7 @@
 	/* Get the handle of the shared fd */
 	svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
 					listener->ifd_data_fd);
-	if (svc->ihandle == NULL) {
+	if (IS_ERR_OR_NULL(svc->ihandle)) {
 		pr_err("Ion client could not retrieve the handle\n");
 		return -ENOMEM;
 	}
@@ -503,26 +503,31 @@
 	return;
 }
 
-static void __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
 {
 	struct qseecom_clk *qclk;
+	int ret = 0;
 	mutex_lock(&clk_access_lock);
 	if (ce == CLK_QSEE)
 		qclk = &qseecom.qsee;
 	else
 		qclk = &qseecom.ce_drv;
 
-	if (qclk->clk_access_cnt == 0) {
-		mutex_unlock(&clk_access_lock);
-		return;
+	if (qclk->clk_access_cnt > 2) {
+		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+		ret = -EINVAL;
+		goto err_dec_ref_cnt;
 	}
-	qclk->clk_access_cnt--;
+	if (qclk->clk_access_cnt == 2)
+		qclk->clk_access_cnt--;
+
+err_dec_ref_cnt:
 	mutex_unlock(&clk_access_lock);
-	return;
+	return ret;
 }
 
 
-static int qseecom_scale_bus_bandwidth_timer(uint32_t mode, uint32_t duration)
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
 {
 	int32_t ret = 0;
 	int32_t request_mode = INACTIVE;
@@ -537,11 +542,23 @@
 		request_mode = mode;
 	}
 
-	__qseecom_set_msm_bus_request(request_mode);
-	if (qseecom.timer_running) {
-		__qseecom_decrease_clk_ref_count(CLK_QSEE);
-		del_timer_sync(&(qseecom.bw_scale_down_timer));
+	ret = __qseecom_set_msm_bus_request(request_mode);
+	if (ret) {
+		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+			ret, request_mode);
+		goto err_scale_timer;
 	}
+
+	if (qseecom.timer_running) {
+		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+		if (ret) {
+			pr_err("Failed to decrease clk ref count.\n");
+			goto err_scale_timer;
+		}
+		del_timer_sync(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = false;
+	}
+err_scale_timer:
 	mutex_unlock(&qsee_bw_mutex);
 	return ret;
 }
@@ -598,18 +615,23 @@
 	return ret;
 }
 
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+	mutex_lock(&qsee_bw_mutex);
+	qseecom.bw_scale_down_timer.expires = jiffies +
+		msecs_to_jiffies(duration);
+	add_timer(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = true;
+	mutex_unlock(&qsee_bw_mutex);
+}
+
 static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
 {
 	if (!qseecom.support_bus_scaling)
 		qsee_disable_clock_vote(data, CLK_SFPB);
-	else {
-		mutex_lock(&qsee_bw_mutex);
-		qseecom.bw_scale_down_timer.expires = jiffies +
-			msecs_to_jiffies(QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
-		add_timer(&(qseecom.bw_scale_down_timer));
-		qseecom.timer_running = true;
-		mutex_unlock(&qsee_bw_mutex);
-	}
+	else
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
 	return;
 }
 
@@ -617,8 +639,9 @@
 {
 	int ret = 0;
 	if (qseecom.support_bus_scaling) {
-		qseecom_scale_bus_bandwidth_timer(
-			MEDIUM, QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+		ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+		if (ret)
+			pr_err("Failed to set bw MEDIUM.\n");
 	} else {
 		ret = qsee_vote_for_clock(data, CLK_SFPB);
 		if (ret)
@@ -1089,16 +1112,13 @@
 		return -EINVAL;
 	}
 
-	if (((uint32_t)req_ptr->cmd_req_buf <
-			data_ptr->client.user_virt_sb_base)
-			|| ((uint32_t)req_ptr->cmd_req_buf >=
-			(data_ptr->client.user_virt_sb_base +
-			data_ptr->client.sb_length))) {
-		pr_err("cmd buffer address not within shared bufffer\n");
+	/* Clients need to ensure req_buf is at base offset of shared buffer */
+	if ((uint32_t)req_ptr->cmd_req_buf !=
+			data_ptr->client.user_virt_sb_base) {
+		pr_err("cmd buf not pointing to base offset of shared buffer\n");
 		return -EINVAL;
 	}
 
-
 	if (((uint32_t)req_ptr->resp_buf < data_ptr->client.user_virt_sb_base)
 			|| ((uint32_t)req_ptr->resp_buf >=
 			(data_ptr->client.user_virt_sb_base +
@@ -1117,8 +1137,6 @@
 					(uint32_t)req_ptr->resp_buf));
 	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
 
-	pr_debug("CMD ID (%x), KEY_TYPE (%d)\n", send_svc_ireq_ptr->qsee_cmd_id,
-	((struct qseecom_rpmb_provision_key *)req_ptr->cmd_req_buf)->key_type);
 	return ret;
 }
 
@@ -1143,6 +1161,21 @@
 		return -EINVAL;
 	}
 
+	if (data->client.sb_virt == NULL) {
+		pr_err("sb_virt null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base == 0) {
+		pr_err("user_virt_sb_base is null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_length == 0) {
+		pr_err("sb_length is 0\n");
+		return -EINVAL;
+	}
+
 	data->type = QSEECOM_SECURE_SERVICE;
 
 	switch (req.cmd_id) {
@@ -1158,10 +1191,9 @@
 	}
 
 	if (qseecom.support_bus_scaling) {
-		qseecom_scale_bus_bandwidth_timer(HIGH,
-					QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
 		if (ret) {
-			pr_err("Fail to set bw HIGH%d\n", ret);
+			pr_err("Fail to set bw HIGH\n");
 			return ret;
 		}
 	} else {
@@ -1193,15 +1225,9 @@
 			qsee_disable_clock_vote(data, CLK_DFAB);
 			qsee_disable_clock_vote(data, CLK_SFPB);
 		} else {
-			mutex_lock(&qsee_bw_mutex);
-			qseecom.bw_scale_down_timer.expires = jiffies +
-				msecs_to_jiffies(
+			__qseecom_add_bw_scale_down_timer(
 				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-			add_timer(&(qseecom.bw_scale_down_timer));
-			qseecom.timer_running = true;
-			mutex_unlock(&qsee_bw_mutex);
 		}
-
 		goto exit;
 	}
 
@@ -1229,12 +1255,8 @@
 		qsee_disable_clock_vote(data, CLK_DFAB);
 		qsee_disable_clock_vote(data, CLK_SFPB);
 	} else {
-		mutex_lock(&qsee_bw_mutex);
-		qseecom.bw_scale_down_timer.expires = jiffies +
-			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-		add_timer(&(qseecom.bw_scale_down_timer));
-		qseecom.timer_running = true;
-		mutex_unlock(&qsee_bw_mutex);
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
 	}
 
 exit:
@@ -1550,7 +1572,7 @@
 		if (wait_event_freezable(this_lstnr->rcv_req_wq,
 				__qseecom_listener_has_rcvd_req(data,
 				this_lstnr))) {
-			pr_warning("Interrupted: exiting Listener Service = %d\n",
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
 						(uint32_t)data->listener.id);
 			/* woken up for different reason */
 			return -ERESTARTSYS;
@@ -2069,18 +2091,19 @@
 
 	mutex_lock(&app_access_lock);
 	atomic_inc(&data->ioctl_count);
-	if (qseecom.support_bus_scaling)
-		qseecom_scale_bus_bandwidth_timer(INACTIVE,
-					QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-	ret = __qseecom_send_cmd(data, &req);
 	if (qseecom.support_bus_scaling) {
-		mutex_lock(&qsee_bw_mutex);
-		qseecom.bw_scale_down_timer.expires = jiffies +
-			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-		add_timer(&(qseecom.bw_scale_down_timer));
-		qseecom.timer_running = true;
-		mutex_unlock(&qsee_bw_mutex);
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw.\n");
+			atomic_dec(&data->ioctl_count);
+			mutex_unlock(&app_access_lock);
+			return ret;
+		}
 	}
+	ret = __qseecom_send_cmd(data, &req);
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
 
 	atomic_dec(&data->ioctl_count);
 	mutex_unlock(&app_access_lock);
@@ -3200,20 +3223,20 @@
 		}
 		/* Only one client allowed here at a time */
 		mutex_lock(&app_access_lock);
-		if (qseecom.support_bus_scaling)
-			qseecom_scale_bus_bandwidth_timer(INACTIVE,
-					QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (qseecom.support_bus_scaling) {
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				ret = -EINVAL;
+				mutex_unlock(&app_access_lock);
+				break;
+			}
+		}
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_send_cmd(data, argp);
-		if (qseecom.support_bus_scaling) {
-			mutex_lock(&qsee_bw_mutex);
-			qseecom.bw_scale_down_timer.expires = jiffies +
-				msecs_to_jiffies(
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
 				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-			add_timer(&(qseecom.bw_scale_down_timer));
-			qseecom.timer_running = true;
-			mutex_unlock(&qsee_bw_mutex);
-		}
 		atomic_dec(&data->ioctl_count);
 		wake_up_all(&data->abort_wq);
 		mutex_unlock(&app_access_lock);
@@ -3232,20 +3255,21 @@
 		}
 		/* Only one client allowed here at a time */
 		mutex_lock(&app_access_lock);
-		if (qseecom.support_bus_scaling)
-			qseecom_scale_bus_bandwidth_timer(INACTIVE,
-					QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (qseecom.support_bus_scaling) {
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+		}
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_send_modfd_cmd(data, argp);
-		if (qseecom.support_bus_scaling) {
-			mutex_lock(&qsee_bw_mutex);
-			qseecom.bw_scale_down_timer.expires = jiffies +
-				msecs_to_jiffies(
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
 				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
-			add_timer(&(qseecom.bw_scale_down_timer));
-			qseecom.timer_running = true;
-			mutex_unlock(&qsee_bw_mutex);
-		}		atomic_dec(&data->ioctl_count);
+		atomic_dec(&data->ioctl_count);
 		wake_up_all(&data->abort_wq);
 		mutex_unlock(&app_access_lock);
 		if (ret)
@@ -3264,7 +3288,7 @@
 		ret = qseecom_receive_req(data);
 		atomic_dec(&data->ioctl_count);
 		wake_up_all(&data->abort_wq);
-		if (ret)
+		if (ret && (ret != -ERESTARTSYS))
 			pr_err("failed qseecom_receive_req: %d\n", ret);
 		break;
 	}
@@ -4097,7 +4121,8 @@
 	qclk = &qseecom.qsee;
 
 	if (qseecom.cumulative_mode != INACTIVE) {
-		ret = __qseecom_set_msm_bus_request(INACTIVE);
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, INACTIVE);
 		if (ret)
 			pr_err("Fail to scale down bus\n");
 	}
@@ -4109,6 +4134,10 @@
 			clk_disable_unprepare(qclk->ce_core_clk);
 		if (qclk->ce_bus_clk != NULL)
 			clk_disable_unprepare(qclk->ce_bus_clk);
+		if (qseecom.timer_running) {
+			del_timer_sync(&(qseecom.bw_scale_down_timer));
+			qseecom.timer_running = false;
+		}
 	}
 	mutex_unlock(&clk_access_lock);
 	return 0;
@@ -4127,9 +4156,11 @@
 		mode = qseecom.cumulative_mode;
 
 	if (qseecom.cumulative_mode != INACTIVE) {
-		ret = __qseecom_set_msm_bus_request(mode);
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, qseecom.cumulative_mode);
 		if (ret)
-			pr_err("Fail to scale down bus\n");
+			pr_err("Fail to scale up bus to %d\n",
+				qseecom.cumulative_mode);
 	}
 
 	mutex_lock(&clk_access_lock);
@@ -4155,6 +4186,11 @@
 			qclk->clk_access_cnt = 0;
 			goto ce_bus_clk_err;
 		}
+		qseecom.bw_scale_down_timer.expires = jiffies +
+			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		add_timer(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = true;
+
 	}
 	mutex_unlock(&clk_access_lock);
 	return 0;
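
The qseecom changes above make qseecom_scale_bus_bandwidth_timer() propagate failures from the bus request and from the clock ref-count decrement, and they fold the repeated expires/add_timer/timer_running sequence into __qseecom_add_bw_scale_down_timer(), taken under qsee_bw_mutex. Every send-command path now follows one shape: apply the bandwidth request first, bail out if that fails, and re-arm the scale-down timer only after the command has been issued. A caller-side sketch with illustrative names:

    ret = scale_bus_bandwidth(ctx, INACTIVE);  /* also cancels a pending scale-down timer */
    if (ret)
            return ret;                        /* never issue the command on failure */
    ret = send_command(ctx, req);
    arm_scale_down_timer(ctx, CMD_CRYPTO_TIMEOUT_MS);  /* re-arm only afterwards */
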
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9c6bef6..6de1cde 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2638,15 +2638,13 @@
 	struct mmc_host *host = card->host;
 	unsigned long flags;
 
+	if (req && !mq->mqrq_prev->req) {
+		mmc_rpm_hold(host, &card->dev);
 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	if (mmc_bus_needs_resume(card->host)) {
 		mmc_resume_bus(card->host);
-		mmc_blk_set_blksize(md, card);
 	}
 #endif
-
-	if (req && !mq->mqrq_prev->req) {
-		mmc_rpm_hold(host, &card->dev);
 		/* claim host only for the first request */
 		mmc_claim_host(card->host);
 		if (card->ext_csd.bkops_en)
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index b36faff..667da01 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -421,6 +421,12 @@
 	else if (!mmc_card_sdio(card) && mmc_use_core_runtime_pm(card->host))
 		pm_runtime_enable(&card->dev);
 
+	if (mmc_card_sdio(card)) {
+		ret = device_init_wakeup(&card->dev, true);
+		if (ret)
+			pr_err("%s: %s: failed to init wakeup: %d\n",
+			       mmc_hostname(card->host), __func__, ret);
+	}
 	ret = device_add(&card->dev);
 	if (ret)
 		return ret;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c496077..1feb26b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -983,6 +983,10 @@
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+#endif
 	__mmc_start_req(host, mrq);
 	mmc_wait_for_req_done(host, mrq);
 }
@@ -2011,9 +2015,6 @@
 		host->bus_ops->resume(host);
 	}
 
-	if (host->bus_ops->detect && !host->bus_dead)
-		host->bus_ops->detect(host);
-
 	mmc_bus_put(host);
 	printk("%s: Deferred resume completed\n", mmc_hostname(host));
 	return 0;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c7fa876..c082f77 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -77,16 +77,40 @@
 {
 	struct mmc_host *host = cls_dev_to_mmc_host(dev);
 	int ret = 0;
+	unsigned long flags;
 
 	if (!mmc_use_core_pm(host))
 		return 0;
 
+	spin_lock_irqsave(&host->clk_lock, flags);
+	/*
+	 * let the driver know that suspend is in progress and must
+	 * be aborted on receiving an SDIO card interrupt
+	 */
+	host->dev_status = DEV_SUSPENDING;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
 	if (!pm_runtime_suspended(dev)) {
 		ret = mmc_suspend_host(host);
 		if (ret < 0)
 			pr_err("%s: %s: failed: ret: %d\n", mmc_hostname(host),
 			       __func__, ret);
 	}
+	/*
+	 * If the SDIO function driver doesn't want to power off the card,
+	 * at least turn off clocks to allow deep sleep.
+	 */
+	if (!ret && host->card && mmc_card_sdio(host->card) &&
+	    host->ios.clock) {
+		spin_lock_irqsave(&host->clk_lock, flags);
+		host->clk_old = host->ios.clock;
+		host->ios.clock = 0;
+		host->clk_gated = true;
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_set_ios(host);
+	}
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->dev_status = DEV_SUSPENDED;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
 	return ret;
 }
 
@@ -104,6 +128,7 @@
 			pr_err("%s: %s: failed: ret: %d\n", mmc_hostname(host),
 			       __func__, ret);
 	}
+	host->dev_status = DEV_RESUMED;
 	return ret;
 }
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 885d0d2..63952e7 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -340,6 +340,8 @@
 	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
 	mmc_select_card_type(card);
 
+	card->ext_csd.raw_drive_strength = ext_csd[EXT_CSD_DRIVE_STRENGTH];
+
 	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
 	card->ext_csd.raw_erase_timeout_mult =
 		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 4407d91..d517205 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -50,6 +50,14 @@
 #define SDIO_DEVICE_ID_MSM_QCA_AR6003_2	0x301
 #endif
 
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_1	0x400
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_2	0x401
+#endif
+
 /*
  * This hook just adds a quirk for all sdio devices
  */
@@ -78,6 +86,12 @@
 	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_2,
 		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
 
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_1,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_2,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
 	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
 		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
 
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 3d8ceb4..2b48f77 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -85,6 +85,7 @@
 	struct sched_param param = { .sched_priority = 1 };
 	unsigned long period, idle_period;
 	int ret;
+	bool ws;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
@@ -118,6 +119,17 @@
 		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
 		if (ret)
 			break;
+		ws = false;
+		/*
+		 * prevent suspend if it has started when scheduled;
+		 * 100 msec (approx. value) should be enough for the system to
+		 * resume and attend to the card's request
+		 */
+		if ((host->dev_status == DEV_SUSPENDING) ||
+		    (host->dev_status == DEV_SUSPENDED)) {
+			pm_wakeup_event(&host->card->dev, 100);
+			ws = true;
+		}
 		ret = process_sdio_pending_irqs(host);
 		host->sdio_irq_pending = false;
 		mmc_release_host(host);
@@ -154,6 +166,12 @@
 			host->ops->enable_sdio_irq(host, 1);
 			mmc_host_clk_release(host);
 		}
+		/*
+		 * function drivers would have processed the event from card
+		 * unless suspended, hence release wake source
+		 */
+		if (ws && (host->dev_status == DEV_RESUMED))
+			pm_relax(&host->card->dev);
 		if (!kthread_should_stop())
 			schedule_timeout(period);
 		set_current_state(TASK_RUNNING);
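
The sdio_irq change above pairs with the device_init_wakeup(&card->dev, true) call added in bus.c: when ksdioirqd gets to run while the host is suspending or already suspended, it takes a timed wakeup source on the card device so the system stays awake long enough (roughly 100 ms) to service the interrupt, and drops it with pm_relax() once the function drivers have run and the host is back in the resumed state. The handshake, sketched with illustrative names:

    bool held = false;

    if (host->dev_status == DEV_SUSPENDING || host->dev_status == DEV_SUSPENDED) {
            pm_wakeup_event(&host->card->dev, 100);  /* hold off suspend for ~100 ms */
            held = true;
    }
    process_sdio_pending_irqs(host);
    if (held && host->dev_status == DEV_RESUMED)
            pm_relax(&host->card->dev);              /* handled; allow suspend again */
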
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 739a237..d3a0e9e 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
 #include <linux/mmc/mmc.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
@@ -38,10 +39,18 @@
 #include <linux/dma-mapping.h>
 #include <mach/gpio.h>
 #include <mach/msm_bus.h>
+#include <mach/mpm.h>
 #include <linux/iopoll.h>
 
 #include "sdhci-pltfm.h"
 
+enum sdc_mpm_pin_state {
+	SDC_DAT1_DISABLE,
+	SDC_DAT1_ENABLE,
+	SDC_DAT1_ENWAKE,
+	SDC_DAT1_DISWAKE,
+};
+
 #define SDHCI_VER_100		0x2B
 #define CORE_HC_MODE		0x78
 #define HC_MODE_EN		0x1
@@ -156,6 +165,9 @@
 
 #define INVALID_TUNING_PHASE	-1
 
+#define sdhci_is_valid_mpm_wakeup_int(_h) ((_h)->pdata->mpm_sdiowakeup_int >= 0)
+#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+
 static const u32 tuning_block_64[] = {
 	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
 	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
@@ -283,6 +295,8 @@
 	struct sdhci_msm_bus_voting_data *voting_data;
 	u32 *sup_clk_table;
 	unsigned char sup_clk_cnt;
+	int mpm_sdiowakeup_int;
+	int sdiowakeup_irq;
 };
 
 struct sdhci_msm_bus_vote {
@@ -318,6 +332,7 @@
 	bool calibration_done;
 	u8 saved_tuning_phase;
 	atomic_t controller_clock;
+	bool is_sdiowakeup_enabled;
 };
 
 enum vdd_io_level {
@@ -1338,7 +1353,7 @@
 	struct device_node *np = dev->of_node;
 	u32 bus_width = 0;
 	u32 cpu_dma_latency;
-	int len, i;
+	int len, i, mpm_int;
 	int clk_table_len;
 	u32 *clk_table = NULL;
 	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
@@ -1433,6 +1448,12 @@
 	if (of_get_property(np, "qcom,nonremovable", NULL))
 		pdata->nonremovable = true;
 
+	if (!of_property_read_u32(np, "qcom,dat1-mpm-int",
+				  &mpm_int))
+		pdata->mpm_sdiowakeup_int = mpm_int;
+	else
+		pdata->mpm_sdiowakeup_int = -1;
+
 	return pdata;
 out:
 	return NULL;
@@ -1933,6 +1954,39 @@
 	return ret;
 }
 
+/*
+ * Acquire spin-lock host->lock before calling this function
+ */
+static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
+					      bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (enable && !msm_host->is_sdiowakeup_enabled)
+		enable_irq(msm_host->pdata->sdiowakeup_irq);
+	else if (!enable && msm_host->is_sdiowakeup_enabled)
+		disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
+	else
+		dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
+			__func__, enable, msm_host->is_sdiowakeup_enabled);
+	msm_host->is_sdiowakeup_enabled = enable;
+}
+
+static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
+{
+	struct sdhci_host *host = (struct sdhci_host *)data;
+	unsigned long flags;
+
+	pr_debug("%s: irq (%d) received\n", __func__, irq);
+
+	spin_lock_irqsave(&host->lock, flags);
+	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
 {
 	struct sdhci_host *host = (struct sdhci_host *)data;
@@ -2609,6 +2663,38 @@
 	.enable_controller_clock = sdhci_msm_enable_controller_clock,
 };
 
+static int sdhci_msm_cfg_mpm_pin_wakeup(struct sdhci_host *host, unsigned mode)
+{
+	int ret = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned int pin = msm_host->pdata->mpm_sdiowakeup_int;
+
+	if (!pin)
+		return 0;
+
+	switch (mode) {
+	case SDC_DAT1_DISABLE:
+		ret = msm_mpm_enable_pin(pin, 0);
+		break;
+	case SDC_DAT1_ENABLE:
+		ret = msm_mpm_set_pin_type(pin, IRQ_TYPE_LEVEL_LOW);
+		if (!ret)
+			ret = msm_mpm_enable_pin(pin, 1);
+		break;
+	case SDC_DAT1_ENWAKE:
+		ret = msm_mpm_set_pin_wake(pin, 1);
+		break;
+	case SDC_DAT1_DISWAKE:
+		ret = msm_mpm_set_pin_wake(pin, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
 static int __devinit sdhci_msm_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
@@ -2619,6 +2705,7 @@
 	u32 vdd_max_current;
 	u16 host_version;
 	u32 pwr, irq_status, irq_ctl;
+	unsigned long flags;
 
 	pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
 	msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
@@ -2837,6 +2924,8 @@
 		host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
 	}
 
+	host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
+
 	/* Setup PWRCTL irq */
 	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
 	if (msm_host->pwr_irq < 0) {
@@ -2891,7 +2980,7 @@
 	msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
 	msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
 	msm_host->mmc->caps2 |= MMC_CAP2_CORE_PM;
-	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
+	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
 
 	if (msm_host->pdata->nonremovable)
 		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
@@ -2917,6 +3006,27 @@
 		dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
 	}
 
+	msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
+							  "sdiowakeup_irq");
+	if (msm_host->pdata->sdiowakeup_irq >= 0) {
+		msm_host->is_sdiowakeup_enabled = true;
+		ret = request_irq(msm_host->pdata->sdiowakeup_irq,
+				  sdhci_msm_sdiowakeup_irq,
+				  IRQF_SHARED | IRQF_TRIGGER_LOW,
+				  "sdhci-msm sdiowakeup", host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
+				__func__, msm_host->pdata->sdiowakeup_irq, ret);
+			msm_host->pdata->sdiowakeup_irq = -1;
+			msm_host->is_sdiowakeup_enabled = false;
+			goto free_cd_gpio;
+		} else {
+			spin_lock_irqsave(&host->lock, flags);
+			sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+			spin_unlock_irqrestore(&host->lock, flags);
+		}
+	}
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
@@ -2950,6 +3060,16 @@
 	else if (mmc_use_core_runtime_pm(host->mmc))
 		pm_runtime_enable(&pdev->dev);
 
+	if (msm_host->pdata->mpm_sdiowakeup_int != -1) {
+		ret = sdhci_msm_cfg_mpm_pin_wakeup(host, SDC_DAT1_ENABLE);
+		if (ret) {
+			pr_err("%s: enabling wakeup: failed: ret: %d\n",
+			       mmc_hostname(host->mmc), ret);
+			ret = 0;
+			msm_host->pdata->mpm_sdiowakeup_int = -1;
+		}
+	}
+
 	/* Successful initialization */
 	goto out;
 
@@ -2961,6 +3081,8 @@
 free_cd_gpio:
 	if (gpio_is_valid(msm_host->pdata->status_gpio))
 		mmc_cd_gpio_free(msm_host->mmc);
+	if (sdhci_is_valid_gpio_wakeup_int(msm_host))
+		free_irq(msm_host->pdata->sdiowakeup_irq, host);
 vreg_deinit:
 	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
 bus_unregister:
@@ -3006,6 +3128,12 @@
 	pm_runtime_disable(&pdev->dev);
 	sdhci_pltfm_free(pdev);
 
+	if (sdhci_is_valid_mpm_wakeup_int(msm_host))
+		sdhci_msm_cfg_mpm_pin_wakeup(host, SDC_DAT1_DISABLE);
+
+	if (sdhci_is_valid_gpio_wakeup_int(msm_host))
+		free_irq(msm_host->pdata->sdiowakeup_irq, host);
+
 	if (gpio_is_valid(msm_host->pdata->status_gpio))
 		mmc_cd_gpio_free(msm_host->mmc);
 
@@ -3021,13 +3149,75 @@
 	return 0;
 }
 
+static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+	      (sdhci_is_valid_mpm_wakeup_int(msm_host) ||
+	      sdhci_is_valid_gpio_wakeup_int(msm_host)) &&
+	      mmc_card_wake_sdio_irq(host->mmc))) {
+		return 1;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (enable) {
+		/* configure DAT1 gpio if applicable */
+		if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+			ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+			if (!ret)
+				sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
+			goto out;
+		} else {
+			ret = sdhci_msm_cfg_mpm_pin_wakeup(host,
+							   SDC_DAT1_ENWAKE);
+			if (ret)
+				goto out;
+			ret = enable_irq_wake(host->irq);
+			if (ret)
+				sdhci_msm_cfg_mpm_pin_wakeup(host,
+							     SDC_DAT1_DISWAKE);
+		}
+	} else {
+		if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+			ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+			sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+		} else {
+			ret = sdhci_msm_cfg_mpm_pin_wakeup(host,
+							   SDC_DAT1_DISWAKE);
+			if (ret)
+				goto out;
+			ret = disable_irq_wake(host->irq);
+		}
+	}
+out:
+	if (ret)
+		pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d mpm: %d\n",
+		       mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
+		       ret, msm_host->pdata->sdiowakeup_irq,
+		       msm_host->pdata->mpm_sdiowakeup_int);
+	spin_unlock_irqrestore(&host->lock, flags);
+	return ret;
+}
+
 static int sdhci_msm_runtime_suspend(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret;
+
+	ret = sdhci_msm_cfg_sdio_wakeup(host, true);
+	/* pwr_irq is not monitored by mpm on suspend, hence disable it */
+	if (!ret)
+		goto skip_disable_host_irq;
 
 	disable_irq(host->irq);
+
+skip_disable_host_irq:
 	disable_irq(msm_host->pwr_irq);
 
 	/*
@@ -3048,10 +3238,17 @@
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret;
 
-	enable_irq(msm_host->pwr_irq);
+	ret = sdhci_msm_cfg_sdio_wakeup(host, false);
+	if (!ret)
+		goto skip_enable_host_irq;
+
 	enable_irq(host->irq);
 
+skip_enable_host_irq:
+	enable_irq(msm_host->pwr_irq);
+
 	return 0;
 }
 
@@ -3103,6 +3300,26 @@
 out:
 	return ret;
 }
+
+static int sdhci_msm_suspend_noirq(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+
+	/*
+	 * ksdioirqd may get scheduled after sdhc suspend, hence retry
+	 * suspend in case the clocks are ON
+	 */
+	if (atomic_read(&msm_host->clks_on)) {
+		pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
+			mmc_hostname(host->mmc), __func__);
+		ret = -EAGAIN;
+	}
+
+	return ret;
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -3110,6 +3327,7 @@
 	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
 	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
 			   NULL)
+	.suspend_noirq = sdhci_msm_suspend_noirq,
 };
 
 #define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
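
sdhci_msm_suspend_noirq() above closes a race with ksdioirqd: the IRQ thread can turn the controller clocks back on after the regular ->suspend() callback has run, so the noirq stage checks clks_on and returns -EAGAIN, which makes the PM core abort this suspend attempt so it can be retried once the card interrupt has been serviced. The shape of that late check, sketched with an illustrative host structure:

    static int my_suspend_noirq(struct device *dev)
    {
            struct my_host *host = dev_get_drvdata(dev);

            /* clocks back on means a card interrupt is being handled;
             * abort this suspend attempt and let it be retried later
             */
            if (atomic_read(&host->clks_on))
                    return -EAGAIN;
            return 0;
    }
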
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 32f5220..32ff175 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -32,6 +32,7 @@
 #include "sdhci.h"
 
 #define DRIVER_NAME "sdhci"
+#define SDHCI_SUSPEND_TIMEOUT 300 /* 300 ms */
 
 #define DBG(f, x...) \
 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
@@ -68,6 +69,12 @@
 }
 #endif
 
+static inline int sdhci_get_async_int_status(struct sdhci_host *host)
+{
+	return (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
+		 SDHCI_CTRL_ASYNC_INT_ENABLE) >> 14;
+}
+
 static void sdhci_dump_state(struct sdhci_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
@@ -1503,7 +1510,8 @@
 		mrq->cmd->error = -EIO;
 		if (mrq->data)
 			mrq->data->error = -EIO;
-		tasklet_schedule(&host->finish_tasklet);
+		mmc_request_done(host->mmc, mrq);
+		sdhci_runtime_pm_put(host);
 		return;
 	}
 
@@ -1578,6 +1586,33 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void sdhci_cfg_async_intr(struct sdhci_host *host, bool enable)
+{
+	if (!host->async_int_supp)
+		return;
+
+	if (enable)
+		sdhci_writew(host,
+			     sdhci_readw(host, SDHCI_HOST_CONTROL2) |
+			     SDHCI_CTRL_ASYNC_INT_ENABLE,
+			     SDHCI_HOST_CONTROL2);
+	else
+		sdhci_writew(host, sdhci_readw(host, SDHCI_HOST_CONTROL2) &
+			     ~SDHCI_CTRL_ASYNC_INT_ENABLE,
+			     SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_cfg_irq(struct sdhci_host *host, bool enable)
+{
+	if (enable && !host->irq_enabled) {
+		enable_irq(host->irq);
+		host->irq_enabled = true;
+	} else if (!enable && host->irq_enabled) {
+		disable_irq_nosync(host->irq);
+		host->irq_enabled = false;
+	}
+}
+
 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
 {
 	unsigned long flags;
@@ -1593,9 +1628,26 @@
 		return;
 	}
 
-	if (ios->clock)
+	spin_lock_irqsave(&host->lock, flags);
+	/* lock is being released intermittently below, hence disable irq */
+	sdhci_cfg_irq(host, false);
+	spin_unlock_irqrestore(&host->lock, flags);
+	if (ios->clock) {
 		sdhci_set_clock(host, ios->clock);
-
+		if (host->async_int_supp && sdhci_get_async_int_status(host)) {
+			if (host->disable_sdio_irq_deferred) {
+				pr_debug("%s: %s: disable sdio irq\n",
+					 mmc_hostname(host->mmc), __func__);
+				host->mmc->ops->enable_sdio_irq(host->mmc, 0);
+				host->disable_sdio_irq_deferred = false;
+			}
+			spin_lock_irqsave(&host->lock, flags);
+			sdhci_cfg_async_intr(host, false);
+			spin_unlock_irqrestore(&host->lock, flags);
+			pr_debug("%s: %s: unconfig async intr\n",
+				 mmc_hostname(host->mmc), __func__);
+		}
+	}
 	/*
 	 * The controller clocks may be off during power-up and we may end up
 	 * enabling card clock before giving power to the card. Hence, during
@@ -1621,6 +1673,7 @@
 	}
 	spin_lock_irqsave(&host->lock, flags);
 	if (!host->clock) {
+		sdhci_cfg_irq(host, true);
 		spin_unlock_irqrestore(&host->lock, flags);
 		mutex_unlock(&host->ios_mutex);
 		return;
@@ -1780,9 +1833,18 @@
 		if (host->vmmc && vdd_bit != -1)
 			mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
 	}
-	if (!ios->clock)
+	if (!ios->clock) {
+		if (host->async_int_supp && host->mmc->card &&
+		    mmc_card_sdio(host->mmc->card)) {
+			sdhci_cfg_async_intr(host, true);
+			pr_debug("%s: %s: config async intr\n",
+				mmc_hostname(host->mmc), __func__);
+		}
 		sdhci_set_clock(host, ios->clock);
-
+	}
+	spin_lock_irqsave(&host->lock, flags);
+	sdhci_cfg_irq(host, true);
+	spin_unlock_irqrestore(&host->lock, flags);
 	mmiowb();
 	mutex_unlock(&host->ios_mutex);
 }
@@ -1862,6 +1924,14 @@
 	if (host->flags & SDHCI_DEVICE_DEAD)
 		goto out;
 
+	if (!enable && !host->clock) {
+		pr_debug("%s: %s: deferred disabling card intr\n",
+			 host->mmc ? mmc_hostname(host->mmc) : "null",
+			 __func__);
+		host->disable_sdio_irq_deferred = true;
+		return;
+	}
+
 	if (enable)
 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
 	else
@@ -2741,6 +2811,23 @@
 		return IRQ_HANDLED;
 	}
 
+	if (!host->clock && host->mmc->card &&
+	    mmc_card_sdio(host->mmc->card)) {
+		/* SDIO async. interrupt is level-sensitive */
+		sdhci_cfg_irq(host, false);
+		pr_debug("%s: got async-irq: clocks: %d gated: %d host-irq[en:1/dis:0]: %d\n",
+			mmc_hostname(host->mmc), host->clock,
+			host->mmc->clk_gated, host->irq_enabled);
+		spin_unlock(&host->lock);
+		/* prevent suspend till the ksdioirqd runs or resume happens */
+		if ((host->mmc->dev_status == DEV_SUSPENDING) ||
+		    (host->mmc->dev_status == DEV_SUSPENDED))
+			pm_wakeup_event(&host->mmc->card->dev,
+					SDHCI_SUSPEND_TIMEOUT);
+		else
+			mmc_signal_sdio_irq(host->mmc);
+		return IRQ_HANDLED;
+	}
 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
 
 	if (!intmask || intmask == 0xffffffff) {
@@ -2836,9 +2923,13 @@
 	/*
 	 * We have to delay this as it calls back into the driver.
 	 */
-	if (cardint)
+	if (cardint) {
+		/* clks are on, but suspend may be in progress */
+		if (host->mmc->dev_status == DEV_SUSPENDING)
+			pm_wakeup_event(&host->mmc->card->dev,
+					SDHCI_SUSPEND_TIMEOUT);
 		mmc_signal_sdio_irq(host->mmc);
-
+	}
 	return result;
 }
 
@@ -3488,6 +3579,7 @@
 	if (ret)
 		goto untasklet;
 
+	host->irq_enabled = true;
 	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
 	if (IS_ERR(host->vmmc)) {
 		pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
@@ -3531,8 +3623,12 @@
 					mmc_hostname(mmc), ret);
 	}
 
+	if (caps[0] & SDHCI_ASYNC_INTR)
+		host->async_int_supp = true;
 	mmc_add_host(mmc);
 
+	if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR)
+		sdhci_clear_set_irqs(host, SDHCI_INT_DATA_END_BIT, 0);
 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
 		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index db4806d..8c2320b 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -175,6 +175,7 @@
 #define   SDHCI_CTRL_DRV_TYPE_D		0x0030
 #define  SDHCI_CTRL_EXEC_TUNING		0x0040
 #define  SDHCI_CTRL_TUNED_CLK		0x0080
+#define  SDHCI_CTRL_ASYNC_INT_ENABLE	0x4000
 #define  SDHCI_CTRL_PRESET_VAL_ENABLE	0x8000
 
 #define SDHCI_CAPABILITIES	0x40
@@ -195,6 +196,7 @@
 #define  SDHCI_CAN_VDD_300	0x02000000
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
+#define  SDHCI_ASYNC_INTR	0x20000000
 
 #define  SDHCI_SUPPORT_SDR50	0x00000001
 #define  SDHCI_SUPPORT_SDR104	0x00000002
diff --git a/drivers/net/usb/rmnet_usb_data.c b/drivers/net/usb/rmnet_usb_data.c
index b0db01e..84b3324 100644
--- a/drivers/net/usb/rmnet_usb_data.c
+++ b/drivers/net/usb/rmnet_usb_data.c
@@ -550,8 +550,7 @@
 		break;
 
 	default:
-		dev_err(&unet->intf->dev, "[%s] error: "
-			"rmnet_ioct called for unsupported cmd[%d]",
+		dev_dbg(&unet->intf->dev, "[%s] error: rmnet_ioctl called for unsupported cmd[0x%x]\n",
 			dev->name, cmd);
 		return -EINVAL;
 	}
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 38c70a3..d3d8d0e 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -36,9 +36,11 @@
 #include <linux/mfd/pm8xxx/misc.h>
 #include <linux/qpnp/qpnp-adc.h>
 
+#include <mach/board.h>
 #include <mach/msm_smd.h>
 #include <mach/msm_iomap.h>
 #include <mach/subsystem_restart.h>
+#include <mach/subsystem_notif.h>
 
 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
 #include "wcnss_prealloc.h"
@@ -144,6 +146,21 @@
 #define MSM_PRONTO_PLL_BASE				0xfb21b1c0
 #define PRONTO_PLL_STATUS_OFFSET		0x1c
 
+#define MSM_PRONTO_MCU_BASE			0xfb080c00
+#define MCU_APB2PHY_STATUS_OFFSET		0xec
+#define MCU_CBR_CCAHB_ERR_OFFSET		0x380
+#define MCU_CBR_CAHB_ERR_OFFSET			0x384
+#define MCU_CBR_CCAHB_TIMEOUT_OFFSET		0x388
+#define MCU_CBR_CAHB_TIMEOUT_OFFSET		0x38c
+#define MCU_DBR_CDAHB_ERR_OFFSET		0x390
+#define MCU_DBR_DAHB_ERR_OFFSET			0x394
+#define MCU_DBR_CDAHB_TIMEOUT_OFFSET		0x398
+#define MCU_DBR_DAHB_TIMEOUT_OFFSET		0x39c
+#define MCU_FDBR_CDAHB_ERR_OFFSET		0x3a0
+#define MCU_FDBR_FDAHB_ERR_OFFSET		0x3a4
+#define MCU_FDBR_CDAHB_TIMEOUT_OFFSET		0x3a8
+#define MCU_FDBR_FDAHB_TIMEOUT_OFFSET		0x3ac
+
 #define MSM_PRONTO_TXP_STATUS           0xfb08040c
 #define MSM_PRONTO_TXP_PHY_ABORT        0xfb080488
 #define MSM_PRONTO_BRDG_ERR_SRC         0xfb080fb0
@@ -188,6 +205,7 @@
 
 /* max 20mhz channel count */
 #define WCNSS_MAX_CH_NUM			45
+#define WCNSS_MAX_PIL_RETRY			3
 
 #define VALID_VERSION(version) \
 	((strncmp(version, "INVALID", WCNSS_VERSION_LEN)) ? 1 : 0)
@@ -360,6 +378,7 @@
 	void __iomem *pronto_ccpu_base;
 	void __iomem *pronto_saw2_base;
 	void __iomem *pronto_pll_base;
+	void __iomem *pronto_mcu_base;
 	void __iomem *wlan_tx_status;
 	void __iomem *wlan_tx_phy_aborts;
 	void __iomem *wlan_brdg_err_source;
@@ -389,6 +408,7 @@
 	struct mutex vbat_monitor_mutex;
 	u16 unsafe_ch_count;
 	u16 unsafe_ch_list[WCNSS_MAX_CH_NUM];
+	void *wcnss_notif_hdle;
 } *penv = NULL;
 
 static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
@@ -561,45 +581,47 @@
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SPARE_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_SPARE %08x\n", __func__, reg);
+	pr_err("PRONTO_PMU_SPARE %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CPU_CBCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_COM_CPU_CBCR %08x\n",
-						__func__, reg);
+	pr_err("PRONTO_PMU_COM_CPU_CBCR %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_AHB_CBCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_COM_AHB_CBCR %08x\n",
-						__func__, reg);
+	pr_err("PRONTO_PMU_COM_AHB_CBCR %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CFG_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_CFG %08x\n", __func__, reg);
+	pr_err("PRONTO_PMU_CFG %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CSR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_COM_CSR %08x\n",
-						__func__, reg);
+	pr_err("PRONTO_PMU_COM_CSR %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SOFT_RESET_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_SOFT_RESET %08x\n",
-						__func__, reg);
+	pr_err("PRONTO_PMU_SOFT_RESET %08x\n", reg);
 
 	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: PRONTO_SAW2_SPM_STS %08x\n", __func__, reg);
+	pr_err("PRONTO_SAW2_SPM_STS %08x\n", reg);
+
+	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PLL_STATUS %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
+	reg4 = readl_relaxed(reg_addr);
+	pr_err("PMU_CPU_CMD_RCGR %08x\n", reg4);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PRONTO_PMU_COM_GDSCR %08x\n",
-						__func__, reg);
+	pr_err("PRONTO_PMU_COM_GDSCR %08x\n", reg);
 	reg >>= 31;
 
 	if (!reg) {
-		pr_info_ratelimited("%s:  Cannot log, Pronto common SS is power collapsed\n",
-				__func__);
+		pr_err("Cannot log, Pronto common SS is power collapsed\n");
 		return;
 	}
 	reg &= ~(PRONTO_PMU_COM_GDSCR_SW_COLLAPSE
@@ -613,35 +635,31 @@
 
 	reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: A2XB_CFG_OFFSET %08x\n", __func__, reg);
+	pr_err("A2XB_CFG_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: A2XB_INT_SRC_OFFSET %08x\n", __func__, reg);
+	pr_err("A2XB_INT_SRC_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: A2XB_ERR_INFO_OFFSET %08x\n", __func__, reg);
+	pr_err("A2XB_ERR_INFO_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
+	pr_err("CCU_CCPU_INVALID_ADDR %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
+	pr_err("CCU_CCPU_LAST_ADDR0 %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
+	pr_err("CCU_CCPU_LAST_ADDR1 %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
-
-	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
-	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: PRONTO_PLL_STATUS %08x\n", __func__, reg);
+	pr_err("CCU_CCPU_LAST_ADDR2 %08x\n", reg);
 
 	tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
 	tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
@@ -651,24 +669,21 @@
 	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_RDFIFO;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  Read data FIFO testbus %08x\n",
-					__func__, reg);
+	pr_err("Read data FIFO testbus %08x\n", reg);
 
 	/*  command FIFO */
 	reg = 0;
 	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CMDFIFO;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  Command FIFO testbus %08x\n",
-					__func__, reg);
+	pr_err("Command FIFO testbus %08x\n", reg);
 
 	/*  write data FIFO */
 	reg = 0;
 	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_WRFIFO;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  Rrite data FIFO testbus %08x\n",
-					__func__, reg);
+	pr_err("Rrite data FIFO testbus %08x\n", reg);
 
 	/*   AXIM SEL CFG0 */
 	reg = 0;
@@ -676,8 +691,7 @@
 				WCNSS_TSTBUS_CTRL_AXIM_CFG0;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  AXIM SEL CFG0 testbus %08x\n",
-					__func__, reg);
+	pr_err("AXIM SEL CFG0 testbus %08x\n", reg);
 
 	/*   AXIM SEL CFG1 */
 	reg = 0;
@@ -685,8 +699,7 @@
 				WCNSS_TSTBUS_CTRL_AXIM_CFG1;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  AXIM SEL CFG1 testbus %08x\n",
-					__func__, reg);
+	pr_err("AXIM SEL CFG1 testbus %08x\n", reg);
 
 	/*   CTRL SEL CFG0 */
 	reg = 0;
@@ -694,8 +707,7 @@
 		WCNSS_TSTBUS_CTRL_CTRL_CFG0;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  CTRL SEL CFG0 testbus %08x\n",
-					__func__, reg);
+	pr_err("CTRL SEL CFG0 testbus %08x\n", reg);
 
 	/*   CTRL SEL CFG1 */
 	reg = 0;
@@ -703,7 +715,7 @@
 		WCNSS_TSTBUS_CTRL_CTRL_CFG1;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_info_ratelimited("%s:  CTRL SEL CFG1 testbus %08x\n", __func__, reg);
+	pr_err("CTRL SEL CFG1 testbus %08x\n", reg);
 
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_BCR_OFFSET;
@@ -714,30 +726,79 @@
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_AHB_CBCR_OFFSET;
 	reg3 = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PMU_WLAN_AHB_CBCR %08x\n", __func__, reg3);
-
-	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
-	reg4 = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PMU_CPU_CMD_RCGR %08x\n", __func__, reg4);
+	pr_err("PMU_WLAN_AHB_CBCR %08x\n", reg3);
 
 	if ((reg & PRONTO_PMU_WLAN_BCR_BLK_ARES) ||
 		(reg2 & PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE) ||
 		(!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) ||
 		(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF) ||
 		(!(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN))) {
-		pr_info_ratelimited("%s:  Cannot log, wlan domain is power collapsed\n",
-				__func__);
+		pr_err("Cannot log, wlan domain is power collapsed\n");
 		return;
 	}
 
+	msleep(50);
+
 	reg = readl_relaxed(penv->wlan_tx_phy_aborts);
-	pr_info_ratelimited("%s: WLAN_TX_PHY_ABORTS %08x\n", __func__, reg);
+	pr_err("WLAN_TX_PHY_ABORTS %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_APB2PHY_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_APB2PHY_STATUS %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CCAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CCAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_CDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_DAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_CDAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_DAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_CDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_FDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_CDAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_FDAHB_TIMEOUT %08x\n", reg);
 
 	reg = readl_relaxed(penv->wlan_brdg_err_source);
-	pr_info_ratelimited("%s: WLAN_BRDG_ERR_SOURCE %08x\n", __func__, reg);
+	pr_err("WLAN_BRDG_ERR_SOURCE %08x\n", reg);
 
 	reg = readl_relaxed(penv->wlan_tx_status);
-	pr_info_ratelimited("%s: WLAN_TX_STATUS %08x\n", __func__, reg);
+	pr_err("WLAN_TXP_STATUS %08x\n", reg);
 
 	reg = readl_relaxed(penv->alarms_txctl);
 	pr_err("ALARMS_TXCTL %08x\n", reg);
@@ -748,6 +809,22 @@
 EXPORT_SYMBOL(wcnss_pronto_log_debug_regs);
 
 #ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
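+/*
+ * Dump a fixed set of IRIS RF registers; used as a fallback when the Pronto
+ * PMU clock is off and the PMU/A2XB registers cannot be read.
+ */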
+static void wcnss_log_iris_regs(void)
+{
+	int i;
+	u32 reg_val;
+	u32 regs_array[] = {
+		0x04, 0x05, 0x11, 0x1e, 0x40, 0x48,
+		0x49, 0x4b, 0x00, 0x01, 0x4d};
+
+	pr_info("IRIS Registers [address] : value\n");
+
+	for (i = 0; i < ARRAY_SIZE(regs_array); i++) {
+		reg_val = wcnss_rf_read_reg(regs_array[i]);
+		pr_info("[0x%08x] : 0x%08x\n", regs_array[i], reg_val);
+	}
+}
+
 void wcnss_log_debug_regs_on_bite(void)
 {
 	struct platform_device *pdev = wcnss_get_platform_device();
@@ -768,10 +845,12 @@
 		clk_rate = clk_get_rate(measure);
 		pr_debug("wcnss: clock frequency is: %luHz\n", clk_rate);
 
-		if (clk_rate)
+		if (clk_rate) {
 			wcnss_pronto_log_debug_regs();
-		else
+		} else {
 			pr_err("clock frequency is zero, cannot access PMU or other registers\n");
+			wcnss_log_iris_regs();
+		}
 	}
 }
 #endif
@@ -2049,6 +2128,7 @@
 	unsigned long wcnss_phys_addr;
 	int size = 0;
 	struct resource *res;
+	int pil_retry = 0;
 	int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
 									"qcom,has-pronto-hw");
 
@@ -2230,6 +2310,13 @@
 			pr_err("%s: ioremap alarms TACTL failed\n", __func__);
 			goto fail_ioremap11;
 		}
+		penv->pronto_mcu_base = ioremap(MSM_PRONTO_MCU_BASE, SZ_1K);
+		if (!penv->pronto_mcu_base) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap wcnss physical(mcu) failed\n",
+				__func__);
+			goto fail_ioremap12;
+		}
 	}
 	penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
 	if (IS_ERR(penv->adc_tm_dev)) {
@@ -2240,12 +2327,17 @@
 		penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
 	}
 
-	/* trigger initialization of the WCNSS */
-	penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
-	if (IS_ERR(penv->pil)) {
-		dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
-		ret = PTR_ERR(penv->pil);
-		wcnss_pronto_log_debug_regs();
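+	/*
+	 * Retry the PIL load a bounded number of times, dumping the Pronto
+	 * debug registers after each failed attempt.
+	 */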
+	do {
+		/* trigger initialization of the WCNSS */
+		penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
+		if (IS_ERR(penv->pil)) {
+			dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+			ret = PTR_ERR(penv->pil);
+			wcnss_pronto_log_debug_regs();
+		}
+	} while (pil_retry++ < WCNSS_MAX_PIL_RETRY && IS_ERR(penv->pil));
+
+	if (pil_retry >= WCNSS_MAX_PIL_RETRY) {
 		penv->pil = NULL;
 		goto fail_pil;
 	}
@@ -2255,6 +2347,9 @@
 fail_pil:
 	if (penv->riva_ccu_base)
 		iounmap(penv->riva_ccu_base);
+	if (penv->pronto_mcu_base)
+		iounmap(penv->pronto_mcu_base);
+fail_ioremap12:
 	if (penv->alarms_tactl)
 		iounmap(penv->alarms_tactl);
 fail_ioremap11:
@@ -2421,6 +2516,22 @@
 }
 
 
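+/*
+ * Subsystem restart notifier callback: dump the Pronto debug registers when
+ * WCNSS power-up fails.
+ */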
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+				void *ss_handle)
+{
+	pr_debug("%s: wcnss notification event: %lu\n", __func__, code);
+
+	if (SUBSYS_POWERUP_FAILURE == code)
+		wcnss_pronto_log_debug_regs();
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wnb = {
+	.notifier_call = wcnss_notif_cb,
+};
+
+
 static const struct file_operations wcnss_node_fops = {
 	.owner = THIS_MODULE,
 	.open = wcnss_node_open,
@@ -2460,6 +2571,13 @@
 		return -ENOENT;
 	}
 
+	/* register wcnss event notification */
+	penv->wcnss_notif_hdle = subsys_notif_register_notifier("wcnss", &wnb);
+	if (IS_ERR(penv->wcnss_notif_hdle)) {
+		pr_err("wcnss: register event notification failed!\n");
+		return PTR_ERR(penv->wcnss_notif_hdle);
+	}
+
 	mutex_init(&penv->dev_lock);
 	mutex_init(&penv->ctrl_lock);
 	mutex_init(&penv->vbat_monitor_mutex);
@@ -2484,6 +2602,8 @@
 static int __devexit
 wcnss_wlan_remove(struct platform_device *pdev)
 {
+	if (penv->wcnss_notif_hdle)
+		subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
 	wcnss_remove_sysfs(&pdev->dev);
 	penv = NULL;
 	return 0;
diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
index c6192ed..8cd4bd1 100644
--- a/drivers/nfc/nfc-nci.c
+++ b/drivers/nfc/nfc-nci.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/fs.h>
+#include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/irq.h>
@@ -47,7 +48,7 @@
 MODULE_DEVICE_TABLE(of, msm_match_table);
 
 #define MAX_BUFFER_SIZE			(780)
-#define PACKET_MAX_LENGTH			(258)
+#define PACKET_MAX_LENGTH		(258)
 /* Read data */
 #define PACKET_HEADER_SIZE_NCI	(4)
 #define PACKET_TYPE_NCI			(16)
@@ -58,7 +59,7 @@
 #define NTF_TIMEOUT				(10000)
 #define	CORE_RESET_RSP_GID		(0x60)
 #define	CORE_RESET_OID			(0x00)
-#define CORE_RST_NTF_LENGTH			(0x02)
+#define CORE_RST_NTF_LENGTH		(0x02)
 
 static void clk_req_update(struct work_struct *work);
 
@@ -115,6 +116,9 @@
 static int					ftm_werr_code;
 
 
+unsigned int	disable_ctrl;
+bool		region2_sent;
+
 static void qca199x_init_stat(struct qca199x_dev *qca199x_dev)
 {
 	qca199x_dev->count_irq = 0;
@@ -491,6 +495,12 @@
 	}
 	mutex_unlock(&qca199x_dev->read_mutex);
 
+	/* If we detect a Region2 command prior to power-down */
+	if ((tmp[0] == 0x2F) && (tmp[1] == 0x01) && (tmp[2] == 0x02) &&
+		(tmp[3] == 0x08) && (tmp[4] == 0x00)) {
+		region2_sent = true;
+	}
+
 	return ret;
 }
 
@@ -606,7 +616,7 @@
 		if (r < 0)
 			goto err_req;
 		/*
-			Also, set flag for initial NCI write following resetas
+			Also, set flag for initial NCI write following reset as
 			may wish to do some house keeping. Ensure no pending
 			messages in NFCC buffers which may be wrongly
 			construed as response to initial message
@@ -718,7 +728,7 @@
 	int r = 0;
 	unsigned short	slave_addr	=	0xE;
 	unsigned short	curr_addr;
-
+	unsigned char	raw_nci_wake[]			= {0x10, 0x0F};
 	unsigned char raw_chip_version_addr		= 0x00;
 	unsigned char raw_chip_rev_id_addr		= 0x9C;
 	unsigned char raw_chip_version			= 0xFF;
@@ -728,9 +738,22 @@
 
 	platform_data = qca199x_dev->client->dev.platform_data;
 
+	/*
+	 * Always wake up chip when reading 0x9C, otherwise this
+	 * register is not updated
+	 */
+	curr_addr = qca199x_dev->client->addr;
+	qca199x_dev->client->addr = slave_addr;
+	r = nfc_i2c_write(qca199x_dev->client, &raw_nci_wake[0],
+						sizeof(raw_nci_wake));
+	r = sizeof(raw_nci_wake);
+	if (r != sizeof(raw_nci_wake))
+		goto invalid_wake_up;
+	qca199x_dev->state = NFCC_STATE_NORMAL_WAKE;
+
+	/* sleep to ensure the NFCC has time to wake up */
+	usleep(100);
 	if (arg == 0) {
-		curr_addr = qca199x_dev->client->addr;
-		qca199x_dev->client->addr = slave_addr;
 		r = nfc_i2c_write(qca199x_dev->client,
 				&raw_chip_version_addr, 1);
 		if (r < 0)
@@ -739,21 +762,19 @@
 		r = i2c_master_recv(qca199x_dev->client, &raw_chip_version, 1);
 		/* Restore original NFCC slave I2C address */
 		qca199x_dev->client->addr = curr_addr;
-	}
-	if (arg == 1) {
-		curr_addr = qca199x_dev->client->addr;
-		qca199x_dev->client->addr = slave_addr;
+	} else if (arg == 1) {
 		r = nfc_i2c_write(qca199x_dev->client,
 				&raw_chip_rev_id_addr, 1);
 		if (r < 0)
 			goto invalid_wr;
-		usleep(10);
+		usleep(20);
 		r = i2c_master_recv(qca199x_dev->client, &raw_chip_version, 1);
 		/* Restore original NFCC slave I2C address */
 		qca199x_dev->client->addr = curr_addr;
 	}
-
 	return raw_chip_version;
+invalid_wake_up:
+	raw_chip_version = 0xFE;
 invalid_wr:
 	raw_chip_version = 0xFF;
 	dev_err(&qca199x_dev->client->dev,
@@ -929,121 +950,117 @@
 	if (r < 0)
 		goto err_init;
 
-	if (0x10 != (0x10 & buf)) {
-		RAW(s73, 0x02);
+	RAW(s73, 0x02);
 
-		r = nfc_i2c_write(client, &raw_s73[0], sizeof(raw_s73));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-		RAW(1p8_CONTROL_011, XTAL_CLOCK | 0x01);
-
-		r = nfc_i2c_write(client, &raw_1p8_CONTROL_011[0],
-						sizeof(raw_1p8_CONTROL_011));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-		RAW(1P8_CONTROL_010, (0x8));
-		r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
-						sizeof(raw_1P8_CONTROL_010));
-		if (r < 0)
-			goto err_init;
-
-		usleep(10000);  /* 10ms wait */
-		RAW(1P8_CONTROL_010, (0xC));
-		r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
-					sizeof(raw_1P8_CONTROL_010));
-		if (r < 0)
-			goto err_init;
-
-		usleep(100);  /* 100uS wait */
-		RAW(1P8_X0_0B0, (FREQ_SEL_19));
-		r = nfc_i2c_write(client, &raw_1P8_X0_0B0[0],
-						sizeof(raw_1P8_X0_0B0));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-
-		/* PWR_EN = 1 */
-		RAW(1P8_CONTROL_010, (0xd));
-		r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
-						sizeof(raw_1P8_CONTROL_010));
-		if (r < 0)
-			goto err_init;
-
-
-		usleep(20000);  /* 20ms wait */
-		/* LS_EN = 1 */
-		RAW(1P8_CONTROL_010, 0xF);
-		r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
-						sizeof(raw_1P8_CONTROL_010));
-		if (r < 0)
-			goto err_init;
-
-		usleep(20000);  /* 20ms wait */
-
-		/* Enable the PMIC clock */
-		RAW(1P8_PAD_CFG_CLK_REQ, (0x1));
-		r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_CLK_REQ[0],
-					  sizeof(raw_1P8_PAD_CFG_CLK_REQ));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-
-		RAW(1P8_PAD_CFG_PWR_REQ, (0x1));
-		r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_PWR_REQ[0],
-					  sizeof(raw_1P8_PAD_CFG_PWR_REQ));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-
-		RAW(slave2, 0x10);
-		r = nfc_i2c_write(client, &raw_slave2[0], sizeof(raw_slave2));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-
-		RAW(slave1, NCI_I2C_SLAVE);
-		r = nfc_i2c_write(client, &raw_slave1[0], sizeof(raw_slave1));
-		if (r < 0)
-			goto err_init;
-
-		usleep(1000);
-
-		/* QCA199x NFCC CPU should now boot... */
-		r = i2c_master_recv(client, &raw_slave1_rd, 1);
-		/* Talk on NCI slave address NCI_I2C_SLAVE 0x2C*/
-		client->addr = NCI_I2C_SLAVE;
-
-		/*
-			Start with small delay and then we will poll until we
-			get a core reset notification - This is time for chip
-			& NFCC controller to come-up.
-		*/
-		usleep(1000); /* 1 ms */
-
-		do {
-			ret = i2c_master_recv(client, rsp, 5);
-			/* Found core reset notification */
-			if (((rsp[0] == CORE_RESET_RSP_GID) &&
-				(rsp[1] == CORE_RESET_OID) &&
-				(rsp[2] == CORE_RST_NTF_LENGTH))
-					|| time_taken == NTF_TIMEOUT) {
-				core_reset_completed = true;
-			}
-			usleep(10);  /* 10us sleep before retry */
-			time_taken++;
-		} while (!core_reset_completed);
-		r = 0;
-	} else {
+	r = nfc_i2c_write(client, &raw_s73[0], sizeof(raw_s73));
+	if (r < 0)
 		goto err_init;
-	}
+
+	usleep(1000);
+	RAW(1p8_CONTROL_011, XTAL_CLOCK | 0x01);
+
+	r = nfc_i2c_write(client, &raw_1p8_CONTROL_011[0],
+					sizeof(raw_1p8_CONTROL_011));
+	if (r < 0)
+		goto err_init;
+
+	usleep(1000);
+	RAW(1P8_CONTROL_010, (0x8));
+	r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+					sizeof(raw_1P8_CONTROL_010));
+	if (r < 0)
+		goto err_init;
+
+	usleep(10000);  /* 10ms wait */
+	RAW(1P8_CONTROL_010, (0xC));
+	r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+				sizeof(raw_1P8_CONTROL_010));
+	if (r < 0)
+		goto err_init;
+
+	usleep(100);  /* 100uS wait */
+	RAW(1P8_X0_0B0, (FREQ_SEL_19));
+	r = nfc_i2c_write(client, &raw_1P8_X0_0B0[0],
+					sizeof(raw_1P8_X0_0B0));
+	if (r < 0)
+		goto err_init;
+
+	usleep(1000);
+
+	/* PWR_EN = 1 */
+	RAW(1P8_CONTROL_010, (0xd));
+	r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+					sizeof(raw_1P8_CONTROL_010));
+	if (r < 0)
+		goto err_init;
+
+
+	usleep(20000);  /* 20ms wait */
+	/* LS_EN = 1 */
+	RAW(1P8_CONTROL_010, 0xF);
+	r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+					sizeof(raw_1P8_CONTROL_010));
+	if (r < 0)
+		goto err_init;
+
+	usleep(20000);  /* 20ms wait */
+
+	/* Enable the PMIC clock */
+	RAW(1P8_PAD_CFG_CLK_REQ, (0x1));
+	r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_CLK_REQ[0],
+				  sizeof(raw_1P8_PAD_CFG_CLK_REQ));
+	if (r < 0)
+		goto err_init;
+
+	usleep(1000);
+
+	RAW(1P8_PAD_CFG_PWR_REQ, (0x1));
+	r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_PWR_REQ[0],
+				  sizeof(raw_1P8_PAD_CFG_PWR_REQ));
+	if (r < 0)
+		goto err_init;
+
+	usleep(1000);
+
+	RAW(slave2, 0x10);
+	r = nfc_i2c_write(client, &raw_slave2[0], sizeof(raw_slave2));
+	if (r < 0)
+		goto err_init;
+
+	usleep(1000);
+
+	RAW(slave1, NCI_I2C_SLAVE);
+	r = nfc_i2c_write(client, &raw_slave1[0], sizeof(raw_slave1));
+	if (r < 0)
+		goto err_init;
+
+	usleep(1000);
+
+	/* QCA199x NFCC CPU should now boot... */
+	r = i2c_master_recv(client, &raw_slave1_rd, 1);
+	/* Talk on NCI slave address NCI_I2C_SLAVE 0x2C*/
+	client->addr = NCI_I2C_SLAVE;
+
+	/*
+		Start with small delay and then we will poll until we
+		get a core reset notification - This is time for chip
+		& NFCC controller to come-up.
+	*/
+	usleep(1000); /* 1 ms */
+
+	do {
+		ret = i2c_master_recv(client, rsp, 5);
+		/* Found core reset notification */
+		if (((rsp[0] == CORE_RESET_RSP_GID) &&
+			(rsp[1] == CORE_RESET_OID) &&
+			(rsp[2] == CORE_RST_NTF_LENGTH))
+				|| time_taken == NTF_TIMEOUT) {
+			core_reset_completed = true;
+		}
+		usleep(10);  /* 10us sleep before retry */
+		time_taken++;
+	} while (!core_reset_completed);
+	r = 0;
 	return r;
 err_init:
 	r = 1;
@@ -1143,6 +1160,7 @@
 	pdata->dis_gpio = of_get_named_gpio(np, "qcom,dis-gpio", 0);
 	if ((!gpio_is_valid(pdata->dis_gpio)))
 		return -EINVAL;
+	disable_ctrl = pdata->dis_gpio;
 
 	pdata->irq_gpio = of_get_named_gpio(np, "qcom,irq-gpio", 0);
 	if ((!gpio_is_valid(pdata->irq_gpio)))
@@ -1246,8 +1264,12 @@
 		goto err_free_dev;
 	}
 
-	/* Put device in ULPM */
-	gpio_set_value(platform_data->dis_gpio, 0);
+	/* Guarantee that the NFCC starts in a clean state. */
+	gpio_set_value(platform_data->dis_gpio, 1); /* HPD */
+	usleep(200);
+	gpio_set_value(platform_data->dis_gpio, 0); /* ULPM */
+	usleep(200);
+
 	r = nfcc_hw_check(client, platform_data->reg);
 	if (r) {
 		/* We don't think there is hardware but just in case HPD */
@@ -1397,18 +1419,6 @@
 		gpio_set_value(platform_data->dis_gpio, 1);
 		goto err_nfcc_not_present;
 	}
-	regulators.regulator = regulator_get(&client->dev, regulators.name);
-	if (IS_ERR(regulators.regulator)) {
-		r = PTR_ERR(regulators.regulator);
-		pr_err("regulator get of %s failed (%d)\n", regulators.name, r);
-	} else {
-		/* Enable the regulator */
-		r = regulator_enable(regulators.regulator);
-		if (r) {
-			pr_err("vreg %s enable failed (%d)\n",
-				regulators.name, r);
-		}
-	}
 
 	logging_level = 0;
 	/* request irq.  The irq is set whenever the chip has data available
@@ -1451,6 +1461,10 @@
 	}
 	i2c_set_clientdata(client, qca199x_dev);
 	gpio_set_value(platform_data->dis_gpio, 1);
+
+	/* Track whether a Region2 command has been sent to the controller */
+	region2_sent = false;
+
 	dev_dbg(&client->dev,
 	"nfc-nci probe: %s, probing qca1990 exited successfully\n",
 		 __func__);
@@ -1525,17 +1539,52 @@
 	},
 };
 
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+		      void *v)
+{
+	/*
+	 * Set DISABLE=1 *only* if the NFC service has been disabled.
+	 * This puts the NFCC into HPD (Hard Power Down) state to save
+	 * power when the handset powers down (low battery or power off).
+	 * If the user requires NFC and CE mode while powered down, the
+	 * middleware puts the NFCC into Region2 before power-down. In
+	 * that case we do NOT HPD the chip, as doing so would trash
+	 * Region2 and CE support while the handset is powered down.
+	 */
+	if (region2_sent == false) {
+		/* HPD the NFCC */
+		gpio_set_value(disable_ctrl, 1);
+	}
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block nfcc_notifier = {
+	.notifier_call	= nfcc_reboot,
+	.next		= NULL,
+	.priority	= 0
+};
+
 /*
  * module load/unload record keeping
  */
 static int __init qca199x_dev_init(void)
 {
+	int ret;
+
+	ret = register_reboot_notifier(&nfcc_notifier);
+	if (ret) {
+		pr_err("cannot register reboot notifier (err=%d)\n", ret);
+		return ret;
+	}
 	return i2c_add_driver(&qca199x);
 }
 module_init(qca199x_dev_init);
 
 static void __exit qca199x_dev_exit(void)
 {
+	unregister_reboot_notifier(&nfcc_notifier);
 	i2c_del_driver(&qca199x);
 }
 module_exit(qca199x_dev_exit);
diff --git a/drivers/nfc/nfc-nci.h b/drivers/nfc/nfc-nci.h
index 9bfb77d..297c152 100644
--- a/drivers/nfc/nfc-nci.h
+++ b/drivers/nfc/nfc-nci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -223,9 +223,3 @@
 	unsigned int	reg;
 };
 #endif
-/* enable LDO */
-struct vregs_info {
-	const char * const name;
-	struct regulator *regulator;
-};
-struct vregs_info regulators = {"vlogic", NULL};
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 20603f5..c93ec5f 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -544,6 +544,13 @@
 			retval = -EFAULT;
 			break;
 		}
+
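+		/*
+		 * Reject user-supplied num_tx_props larger than
+		 * IPA_NUM_PROPS_MAX before it is used to size the
+		 * payload allocation below.
+		 */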
+		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+
 		pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
 				header)->num_tx_props *
 			sizeof(struct ipa_ioc_tx_intf_prop);
@@ -572,6 +579,13 @@
 			retval = -EFAULT;
 			break;
 		}
+
+		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+
 		pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
 				header)->num_rx_props *
 			sizeof(struct ipa_ioc_rx_intf_prop);
diff --git a/drivers/platform/msm/ipa/ipa_intf.c b/drivers/platform/msm/ipa/ipa_intf.c
index 5ee1929..ea5c97f 100644
--- a/drivers/platform/msm/ipa/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_intf.c
@@ -60,6 +60,18 @@
 		return -EINVAL;
 	}
 
+	if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
 	len = sizeof(struct ipa_intf);
 	intf = kzalloc(len, GFP_KERNEL);
 	if (intf == NULL) {
diff --git a/drivers/platform/msm/qpnp-power-on.c b/drivers/platform/msm/qpnp-power-on.c
index 0ef2639..f489566 100644
--- a/drivers/platform/msm/qpnp-power-on.c
+++ b/drivers/platform/msm/qpnp-power-on.c
@@ -36,6 +36,7 @@
 #define QPNP_PON_REASON1(base)			(base + 0x8)
 #define QPNP_PON_WARM_RESET_REASON1(base)	(base + 0xA)
 #define QPNP_PON_WARM_RESET_REASON2(base)	(base + 0xB)
+#define QPNP_POFF_REASON1(base)			(base + 0xC)
 #define QPNP_PON_KPDPWR_S1_TIMER(base)		(base + 0x40)
 #define QPNP_PON_KPDPWR_S2_TIMER(base)		(base + 0x41)
 #define QPNP_PON_KPDPWR_S2_CNTL(base)		(base + 0x42)
@@ -93,7 +94,8 @@
 #define QPNP_PON_S3_DBC_DELAY_MASK		0x07
 #define QPNP_PON_RESET_TYPE_MAX			0xF
 #define PON_S1_COUNT_MAX			0xF
-#define PON_REASON_MAX				8
+#define QPNP_PON_MIN_DBC_US			(USEC_PER_SEC / 64)
+#define QPNP_PON_MAX_DBC_US			(USEC_PER_SEC * 2)
 
 #define QPNP_KEY_STATUS_DELAY			msecs_to_jiffies(250)
 #define QPNP_PON_REV_B				0x01
@@ -147,6 +149,26 @@
 	[7] = "Triggered from KPD (power key press)",
 };
 
+static const char * const qpnp_poff_reason[] = {
+	[0] = "Triggered from SOFT (Software)",
+	[1] = "Triggered from PS_HOLD (PS_HOLD/MSM controlled shutdown)",
+	[2] = "Triggered from PMIC_WD (PMIC watchdog)",
+	[3] = "Triggered from GP1 (Keypad_Reset1)",
+	[4] = "Triggered from GP2 (Keypad_Reset2)",
+	[5] = "Triggered from KPDPWR_AND_RESIN"
+		"(Simultaneous power key and reset line)",
+	[6] = "Triggered from RESIN_N (Reset line/Volume Down Key)",
+	[7] = "Triggered from KPDPWR_N (Long Power Key hold)",
+	[8] = "N/A",
+	[9] = "N/A",
+	[10] = "N/A",
+	[11] = "Triggered from CHARGER (Charger ENUM_TIMER, BOOT_DONE)",
+	[12] = "Triggered from TFT (Thermal Fault Tolerance)",
+	[13] = "Triggered from UVLO (Under Voltage Lock Out)",
+	[14] = "Triggered from OTST3 (Overtemp)",
+	[15] = "Triggered from STAGE3 (Stage 3 reset)",
+};
+
 static int
 qpnp_pon_masked_write(struct qpnp_pon *pon, u16 addr, u8 mask, u8 val)
 {
@@ -1035,9 +1057,10 @@
 	struct device_node *itr = NULL;
 	u32 delay = 0, s3_debounce = 0;
 	int rc, sys_reset, index;
-	u8 pon_sts = 0;
+	u8 pon_sts = 0, buf[2];
 	const char *s3_src;
 	u8 s3_src_reg;
+	u16 poff_sts = 0;
 
 	pon = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_pon),
 							GFP_KERNEL);
@@ -1085,14 +1108,38 @@
 		dev_err(&pon->spmi->dev, "Unable to read PON_RESASON1 reg\n");
 		return rc;
 	}
-	index = ffs(pon_sts);
-	if ((index > PON_REASON_MAX) || (index < 0))
-		index = 0;
 
+	index = ffs(pon_sts) - 1;
 	cold_boot = !qpnp_pon_is_warm_reset();
-	pr_info("PMIC@SID%d Power-on reason: %s and '%s' boot\n",
-		pon->spmi->sid, index ? qpnp_pon_reason[index - 1] :
-		"Unknown", cold_boot ? "cold" : "warm");
+	if (index >= ARRAY_SIZE(qpnp_pon_reason) || index < 0)
+		dev_info(&pon->spmi->dev,
+			"PMIC@SID%d Power-on reason: Unknown and '%s' boot\n",
+			pon->spmi->sid, cold_boot ? "cold" : "warm");
+	else
+		dev_info(&pon->spmi->dev,
+			"PMIC@SID%d Power-on reason: %s and '%s' boot\n",
+			pon->spmi->sid, qpnp_pon_reason[index],
+			cold_boot ? "cold" : "warm");
+
+	/* POFF reason */
+	rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_POFF_REASON1(pon->base),
+				buf, 2);
+	if (rc) {
+		dev_err(&pon->spmi->dev, "Unable to read POFF_RESASON regs\n");
+		return rc;
+	}
+	poff_sts = buf[0] | (buf[1] << 8);
+	index = ffs(poff_sts) - 1;
+	if (index >= ARRAY_SIZE(qpnp_poff_reason) || index < 0)
+		dev_info(&pon->spmi->dev,
+				"PMIC@SID%d: Unknown power-off reason\n",
+				pon->spmi->sid);
+	else
+		dev_info(&pon->spmi->dev,
+				"PMIC@SID%d: Power-off reason: %s\n",
+				pon->spmi->sid,
+				qpnp_poff_reason[index]);
 
 	rc = of_property_read_u32(pon->spmi->dev.of_node,
 				"qcom,pon-dbc-delay", &delay);
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index a0d9a24..ed0279e 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -491,7 +491,7 @@
 }
 
 static inline int convert_vbatt_raw_to_uv(struct qpnp_bms_chip *chip,
-					uint16_t reading)
+					uint16_t reading, bool is_pon_ocv)
 {
 	int64_t uv;
 	int rc;
@@ -500,7 +500,7 @@
 	pr_debug("%u raw converted into %lld uv\n", reading, uv);
 	uv = adjust_vbatt_reading(chip, uv);
 	pr_debug("adjusted into %lld uv\n", uv);
-	rc = qpnp_vbat_sns_comp_result(chip->vadc_dev, &uv);
+	rc = qpnp_vbat_sns_comp_result(chip->vadc_dev, &uv, is_pon_ocv);
 	if (rc)
 		pr_debug("could not compensate vbatt\n");
 	pr_debug("compensated into %lld uv\n", uv);
@@ -699,7 +699,7 @@
 
 static void convert_and_store_ocv(struct qpnp_bms_chip *chip,
 				struct raw_soc_params *raw,
-				int batt_temp)
+				int batt_temp, bool is_pon_ocv)
 {
 	int rc;
 
@@ -711,7 +711,7 @@
 		pr_err("Vadc reference voltage read failed, rc = %d\n", rc);
 	chip->prev_last_good_ocv_raw = raw->last_good_ocv_raw;
 	raw->last_good_ocv_uv = convert_vbatt_raw_to_uv(chip,
-					raw->last_good_ocv_raw);
+					raw->last_good_ocv_raw, is_pon_ocv);
 	chip->last_ocv_uv = raw->last_good_ocv_uv;
 	chip->last_ocv_temp = batt_temp;
 	chip->software_cc_uah = 0;
@@ -1042,7 +1042,7 @@
 	mutex_unlock(&chip->bms_output_lock);
 
 	if (chip->prev_last_good_ocv_raw == OCV_RAW_UNINITIALIZED) {
-		convert_and_store_ocv(chip, raw, batt_temp);
+		convert_and_store_ocv(chip, raw, batt_temp, true);
 		pr_debug("PON_OCV_UV = %d, cc = %llx\n",
 				chip->last_ocv_uv, raw->cc);
 		warm_reset = qpnp_pon_is_warm_reset();
@@ -1078,7 +1078,7 @@
 		pr_debug("EOC Battery full ocv_reading = 0x%x\n",
 				chip->ocv_reading_at_100);
 	} else if (chip->prev_last_good_ocv_raw != raw->last_good_ocv_raw) {
-		convert_and_store_ocv(chip, raw, batt_temp);
+		convert_and_store_ocv(chip, raw, batt_temp, false);
 		/* forget the old cc value upon ocv */
 		chip->last_cc_uah = INT_MIN;
 	} else {
@@ -2205,7 +2205,12 @@
 		pr_err("adc vbat failed err = %d\n", rc);
 		return soc;
 	}
-	if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
+
+	/* only clamp when discharging */
+	if (is_battery_charging(chip))
+		return soc;
+
+	if (soc <= 0 && vbat_uv > chip->v_cutoff_uv) {
 		pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
 						vbat_uv, chip->v_cutoff_uv);
 		return 1;
@@ -2418,8 +2423,13 @@
 	}
 	mutex_unlock(&chip->soc_invalidation_mutex);
 
-	pr_debug("SOC before adjustment = %d\n", soc);
-	new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+	if (chip->first_time_calc_soc && !chip->shutdown_soc_invalid) {
+		pr_debug("Skip adjustment when shutdown SOC has been forced\n");
+		new_calculated_soc = soc;
+	} else {
+		pr_debug("SOC before adjustment = %d\n", soc);
+		new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+	}
 
 	/* always clamp soc due to BMS hw/sw immaturities */
 	new_calculated_soc = clamp_soc_based_on_voltage(chip,
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 2dc77e6..6301724 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -650,24 +650,23 @@
 	return (batfet_closed_rt_sts & BAT_FET_ON_IRQ) ? 1 : 0;
 }
 
-#define USB_VALID_BIT	BIT(7)
 static int
 qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
 {
-	u8 usbin_valid_rt_sts;
+	u8 usb_chgpth_rt_sts;
 	int rc;
 
-	rc = qpnp_chg_read(chip, &usbin_valid_rt_sts,
-				 chip->usb_chgpth_base + CHGR_STATUS , 1);
+	rc = qpnp_chg_read(chip, &usb_chgpth_rt_sts,
+				 INT_RT_STS(chip->usb_chgpth_base), 1);
 
 	if (rc) {
 		pr_err("spmi read failed: addr=%03X, rc=%d\n",
-				chip->usb_chgpth_base + CHGR_STATUS, rc);
+				INT_RT_STS(chip->usb_chgpth_base), rc);
 		return rc;
 	}
-	pr_debug("chgr usb sts 0x%x\n", usbin_valid_rt_sts);
+	pr_debug("chgr usb sts 0x%x\n", usb_chgpth_rt_sts);
 
-	return (usbin_valid_rt_sts & USB_VALID_BIT) ? 1 : 0;
+	return (usb_chgpth_rt_sts & USBIN_VALID_IRQ) ? 1 : 0;
 }
 
 static bool
@@ -686,10 +685,10 @@
 	return !!(buck_sts & IBAT_LOOP_IRQ);
 }
 
-#define USB_VALID_MASK 0xC0
-#define USB_COARSE_DET 0x10
-#define USB_VALID_UVP_VALUE    0x00
-#define USB_VALID_OVP_VALUE    0x40
+#define USB_VALID_MASK		0xC0
+#define USB_VALID_IN_MASK	BIT(7)
+#define USB_COARSE_DET		0x10
+#define USB_VALID_OVP_VALUE	0x40
 static int
 qpnp_chg_check_usb_coarse_det(struct qpnp_chg_chip *chip)
 {
@@ -708,7 +707,8 @@
 static int
 qpnp_chg_check_usbin_health(struct qpnp_chg_chip *chip)
 {
-	u8 usbin_chg_rt_sts, usbin_health = 0;
+	u8 usbin_chg_rt_sts, usb_chgpth_rt_sts;
+	u8 usbin_health = 0;
 	int rc;
 
 	rc = qpnp_chg_read(chip, &usbin_chg_rt_sts,
@@ -720,13 +720,23 @@
 		return rc;
 	}
 
-	pr_debug("chgr usb sts 0x%x\n", usbin_chg_rt_sts);
+	rc = qpnp_chg_read(chip, &usb_chgpth_rt_sts,
+		INT_RT_STS(chip->usb_chgpth_base), 1);
+
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+		INT_RT_STS(chip->usb_chgpth_base), rc);
+		return rc;
+	}
+
+	pr_debug("chgr usb sts 0x%x, chgpth rt sts 0x%x\n",
+				usbin_chg_rt_sts, usb_chgpth_rt_sts);
 	if ((usbin_chg_rt_sts & USB_COARSE_DET) == USB_COARSE_DET) {
 		if ((usbin_chg_rt_sts & USB_VALID_MASK)
 			 == USB_VALID_OVP_VALUE) {
 			usbin_health = USBIN_OVP;
 			pr_err("Over voltage charger inserted\n");
-		} else if ((usbin_chg_rt_sts & USB_VALID_BIT) != 0) {
+		} else if ((usb_chgpth_rt_sts & USBIN_VALID_IRQ) != 0) {
 			usbin_health = USBIN_OK;
 			pr_debug("Valid charger inserted\n");
 		}
@@ -1067,7 +1077,8 @@
 		return rc;
 	}
 
-	rc = override_dcin_ilimit(chip, 0);
+	if (enable)
+		rc = override_dcin_ilimit(chip, 0);
 	return rc;
 }
 
@@ -3558,7 +3569,7 @@
 			state == ADC_TM_WARM_STATE ? "warm" : "cool");
 
 	if (state == ADC_TM_WARM_STATE) {
-		if (temp > chip->warm_bat_decidegc) {
+		if (temp >= chip->warm_bat_decidegc) {
 			/* Normal to warm */
 			bat_warm = true;
 			bat_cool = false;
@@ -3566,7 +3577,7 @@
 				chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC;
 			chip->adc_param.state_request =
 				ADC_TM_COOL_THR_ENABLE;
-		} else if (temp >
+		} else if (temp >=
 				chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC){
 			/* Cool to normal */
 			bat_warm = false;
@@ -3578,7 +3589,7 @@
 					ADC_TM_HIGH_LOW_THR_ENABLE;
 		}
 	} else {
-		if (temp < chip->cool_bat_decidegc) {
+		if (temp <= chip->cool_bat_decidegc) {
 			/* Normal to cool */
 			bat_warm = false;
 			bat_cool = true;
@@ -3586,7 +3597,7 @@
 				chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC;
 			chip->adc_param.state_request =
 				ADC_TM_WARM_THR_ENABLE;
-		} else if (temp <
+		} else if (temp <=
 				chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC){
 			/* Warm to normal */
 			bat_warm = false;
@@ -5120,6 +5131,14 @@
 	qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
 	qpnp_chg_set_appropriate_vddmax(chip);
 
+	if (chip->parallel_ovp_mode) {
+		rc = override_dcin_ilimit(chip, 1);
+		if (rc) {
+			pr_err("Override DCIN LLIMIT %d\n", rc);
+			goto unregister_dc_psy;
+		}
+	}
+
 	rc = qpnp_chg_request_irqs(chip);
 	if (rc) {
 		pr_err("failed to request interrupts %d\n", rc);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 450c4fb..047bbc4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1989,8 +1989,8 @@
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
 {
 	struct regulator_dev *rdev = regulator->rdev;
-	int prev_min_uV, prev_max_uV;
 	int ret = 0;
+	int old_min_uV, old_max_uV;
 
 	mutex_lock(&rdev->mutex);
 
@@ -2013,24 +2013,28 @@
 	if (ret < 0)
 		goto out;
 
-	prev_min_uV = regulator->min_uV;
-	prev_max_uV = regulator->max_uV;
-
+	/* restore original values in case of error */
+	old_min_uV = regulator->min_uV;
+	old_max_uV = regulator->max_uV;
 	regulator->min_uV = min_uV;
 	regulator->max_uV = max_uV;
 
 	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
-	if (ret < 0) {
-		regulator->min_uV = prev_min_uV;
-		regulator->max_uV = prev_max_uV;
-		goto out;
-	}
+	if (ret < 0)
+		goto out2;
 
 	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+	if (ret < 0)
+		goto out2;
 
 out:
 	mutex_unlock(&rdev->mutex);
 	return ret;
+out2:
+	regulator->min_uV = old_min_uV;
+	regulator->max_uV = old_max_uV;
+	mutex_unlock(&rdev->mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_voltage);
 
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
index 51e176f..fb97329 100644
--- a/drivers/rtc/alarm.c
+++ b/drivers/rtc/alarm.c
@@ -71,9 +71,11 @@
 static bool suspended;
 static long power_on_alarm;
 
+static void alarm_shutdown(struct platform_device *dev);
 void set_power_on_alarm(long secs)
 {
 	power_on_alarm = secs;
+	alarm_shutdown(NULL);
 }
 
 
@@ -520,8 +522,11 @@
 
 	spin_lock_irqsave(&alarm_slock, flags);
 
-	if (!power_on_alarm)
+	if (!power_on_alarm) {
+		spin_unlock_irqrestore(&alarm_slock, flags);
 		goto disable_alarm;
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
 
 	rtc_read_time(alarm_rtc_dev, &rtc_time);
 	getnstimeofday(&wall_time);
@@ -549,11 +554,9 @@
 		pr_alarm(FLOW, "Power-on alarm set to %lu\n",
 				alarm_time);
 
-	spin_unlock_irqrestore(&alarm_slock, flags);
 	return;
 
 disable_alarm:
-	spin_unlock_irqrestore(&alarm_slock, flags);
 	rtc_alarm_irq_enable(alarm_rtc_dev, 0);
 }
 
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index f858822..aa61ab9 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -27,7 +27,6 @@
 #include <linux/timer.h>
 #include <mach/sps.h>
 #include "slim-msm.h"
-#include <mach/qdsp6v2/apr.h>
 
 #define NGD_SLIM_NAME	"ngd_msm_ctrl"
 #define SLIM_LA_MGR	0xFF
@@ -265,9 +264,13 @@
 	u8 txn_mt;
 	u16 txn_mc = txn->mc;
 	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	bool report_sat = false;
 
+	if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
+		txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+		report_sat = true;
 	if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP &&
-			txn->mc != SLIM_USR_MC_REPORT_SATELLITE) {
+			report_sat == false) {
 		/*
 		 * Counter-part of system-suspend when runtime-pm is not enabled
 		 * This way, resume can be left empty and device will be put in
@@ -295,7 +298,7 @@
 		return 0;
 	}
 	/* If txn is tried when controller is down, wait for ADSP to boot */
-	if (txn->mc != SLIM_USR_MC_REPORT_SATELLITE) {
+	if (!report_sat) {
 		if (dev->state == MSM_CTRL_DOWN) {
 			u8 mc = (u8)txn->mc;
 			int timeout;
@@ -358,8 +361,7 @@
 	}
 	mutex_lock(&dev->tx_lock);
 
-	if (txn->mc != SLIM_USR_MC_REPORT_SATELLITE &&
-		(dev->state != MSM_CTRL_AWAKE)) {
+	if (report_sat == false && dev->state != MSM_CTRL_AWAKE) {
 		dev_err(dev->dev, "controller not ready");
 		mutex_unlock(&dev->tx_lock);
 		msm_slim_put_ctrl(dev);
@@ -436,11 +438,13 @@
 		puc = ((u8 *)pbuf) + 2;
 	if (txn->rbuf)
 		*(puc++) = txn->tid;
-	if ((txn->mt == SLIM_MSG_MT_CORE) &&
+	if (((txn->mt == SLIM_MSG_MT_CORE) &&
 		((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
 		txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
 		(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
-		 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
+		 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) ||
+		(txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+		txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER)) {
 		*(puc++) = (txn->ec & 0xFF);
 		*(puc++) = (txn->ec >> 8)&0xFF;
 	}
@@ -540,11 +544,49 @@
 	}
 ngd_xfer_err:
 	mutex_unlock(&dev->tx_lock);
-	if (txn_mc != SLIM_USR_MC_REPORT_SATELLITE)
+	if (!report_sat)
 		msm_slim_put_ctrl(dev);
 	return ret ? ret : dev->err;
 }
 
+static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_msg_txn txn;
+
+	if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
+		mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
+		return -EPROTONOSUPPORT;
+	}
+	if (len > SLIM_MAX_VE_SLC_BYTES ||
+		msg->start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
+		return -EINVAL;
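+	/*
+	 * Encode the slice size in the low bits of the element code:
+	 * 1-4 byte transfers map directly, 6/8 byte and 12/16 byte
+	 * transfers use the larger slice codes.
+	 */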
+	if (len <= 4) {
+		txn.ec = len - 1;
+	} else if (len <= 8) {
+		if (len & 0x1)
+			return -EINVAL;
+		txn.ec = ((len >> 1) + 1);
+	} else {
+		if (len & 0x3)
+			return -EINVAL;
+		txn.ec = ((len >> 2) + 3);
+	}
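+	/* Fold the 12-bit start offset (plus flag bit 0x8) into the element code. */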
+	txn.ec |= (0x8 | ((msg->start_offset & 0xF) << 4));
+	txn.ec |= ((msg->start_offset & 0xFF0) << 4);
+
+	txn.la = la;
+	txn.mt = mt;
+	txn.mc = mc;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.len = len;
+	txn.rl = len + 6;
+	txn.wbuf = buf;
+	txn.rbuf = NULL;
+	txn.comp = msg->comp;
+	return ngd_xfer_msg(ctrl, &txn);
+}
+
 static int ngd_xferandwait_ack(struct slim_controller *ctrl,
 				struct slim_msg_txn *txn)
 {
@@ -814,7 +856,7 @@
 					prev_state);
 			/* ADSP SSR, send device_up notifications */
 			if (prev_state == MSM_CTRL_DOWN)
-				schedule_work(&dev->slave_notify);
+				complete(&dev->qmi.slave_notify);
 		} else if (ret == -EIO) {
 			pr_info("capability message NACKed, retrying");
 			if (retries < INIT_MX_RETRIES) {
@@ -1032,11 +1074,7 @@
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		ret = wait_for_completion_interruptible(notify);
-		if (ret) {
-			dev_err(dev->dev, "rx thread wait err:%d", ret);
-			continue;
-		}
+		wait_for_completion(notify);
 		/* 1 irq notification per message */
 		if (dev->use_rx_msgqs != MSM_MSGQ_ENABLED) {
 			msm_slim_rx_dequeue(dev, (u8 *)buffer);
@@ -1065,31 +1103,49 @@
 	return 0;
 }
 
-static void ngd_laddr_lookup(struct work_struct *work)
+static int ngd_notify_slaves(void *data)
 {
-	struct msm_slim_ctrl *dev =
-		container_of(work, struct msm_slim_ctrl, slave_notify);
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
 	struct slim_controller *ctrl = &dev->ctrl;
 	struct slim_device *sbdev;
 	struct list_head *pos, *next;
-	int i;
-	slim_framer_booted(ctrl);
-	mutex_lock(&ctrl->m_ctrl);
-	list_for_each_safe(pos, next, &ctrl->devs) {
-		int ret = 0;
-		sbdev = list_entry(pos, struct slim_device, dev_list);
-		mutex_unlock(&ctrl->m_ctrl);
-		for (i = 0; i < LADDR_RETRY; i++) {
-			ret = slim_get_logical_addr(sbdev, sbdev->e_addr,
-					6, &sbdev->laddr);
-			if (!ret)
-				break;
-			else /* time for ADSP to assign LA */
-				msleep(20);
+	int ret, i = 0;
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		wait_for_completion(&dev->qmi.slave_notify);
+		/* Probe devices for first notification */
+		if (!i) {
+			dev->err = 0;
+			if (dev->dev->of_node)
+				of_register_slim_devices(&dev->ctrl);
+
+			/*
+			 * Add devices registered with board-info now that
+			 * controller is up
+			 */
+			slim_ctrl_add_boarddevs(&dev->ctrl);
+		} else {
+			slim_framer_booted(ctrl);
 		}
+		i++;
 		mutex_lock(&ctrl->m_ctrl);
+		list_for_each_safe(pos, next, &ctrl->devs) {
+			sbdev = list_entry(pos, struct slim_device, dev_list);
+			mutex_unlock(&ctrl->m_ctrl);
+			for (i = 0; i < LADDR_RETRY; i++) {
+				ret = slim_get_logical_addr(sbdev,
+						sbdev->e_addr,
+						6, &sbdev->laddr);
+				if (!ret)
+					break;
+				else /* time for ADSP to assign LA */
+					msleep(20);
+			}
+			mutex_lock(&ctrl->m_ctrl);
+		}
+		mutex_unlock(&ctrl->m_ctrl);
 	}
-	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
 }
 
 static void ngd_adsp_down(struct work_struct *work)
@@ -1130,18 +1186,9 @@
 	struct resource		*bam_mem;
 	struct resource		*slim_mem;
 	struct resource		*irq, *bam_irq;
-	enum apr_subsys_state q6_state;
 	bool			rxreg_access = false;
 	bool			slim_mdm = false;
 
-	q6_state = apr_get_q6_state();
-	if (q6_state == APR_SUBSYS_DOWN) {
-		dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
-			q6_state);
-		return -EPROBE_DEFER;
-	} else
-		dev_dbg(&pdev->dev, "adsp is ready\n");
-
 	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"slimbus_physical");
 	if (!slim_mem) {
@@ -1222,6 +1269,7 @@
 	dev->ctrl.get_laddr = ngd_get_laddr;
 	dev->ctrl.allocbw = ngd_allocbw;
 	dev->ctrl.xfer_msg = ngd_xfer_msg;
+	dev->ctrl.xfer_user_msg = ngd_user_msg;
 	dev->ctrl.wakeup =  ngd_clk_pause_wakeup;
 	dev->ctrl.alloc_port = msm_alloc_port;
 	dev->ctrl.dealloc_port = msm_dealloc_port;
@@ -1246,6 +1294,7 @@
 	dev->use_tx_msgqs = MSM_MSGQ_RESET;
 
 	init_completion(&dev->rx_msgq_notify);
+	init_completion(&dev->qmi.slave_notify);
 
 	/* Register with framework */
 	ret = slim_add_numbered_controller(&dev->ctrl);
@@ -1267,6 +1316,7 @@
 	}
 
 	init_completion(&dev->qmi.qmi_comp);
+	dev->err = -EPROBE_DEFER;
 	pm_runtime_use_autosuspend(dev->dev);
 	pm_runtime_set_autosuspend_delay(dev->dev, MSM_SLIM_AUTOSUSPEND);
 	pm_runtime_set_suspended(dev->dev);
@@ -1282,7 +1332,6 @@
 				dev->mdm.ssr);
 	}
 
-	INIT_WORK(&dev->slave_notify, ngd_laddr_lookup);
 	INIT_WORK(&dev->qmi.ssr_down, ngd_adsp_down);
 	INIT_WORK(&dev->qmi.ssr_up, ngd_adsp_up);
 	dev->qmi.nb.notifier_call = ngd_qmi_available;
@@ -1298,23 +1347,27 @@
 
 	/* Fire up the Rx message queue thread */
 	dev->rx_msgq_thread = kthread_run(ngd_slim_rx_msgq_thread, dev,
-					NGD_SLIM_NAME "_ngd_msgq_thread");
+					"ngd_rx_thread%d", dev->ctrl.nr);
 	if (IS_ERR(dev->rx_msgq_thread)) {
 		ret = PTR_ERR(dev->rx_msgq_thread);
-		dev_err(dev->dev, "Failed to start Rx message queue thread\n");
-		goto err_thread_create_failed;
+		dev_err(dev->dev, "Failed to start Rx thread:%d\n", ret);
+		goto err_rx_thread_create_failed;
 	}
 
-	if (pdev->dev.of_node)
-		of_register_slim_devices(&dev->ctrl);
-
-	/* Add devices registered with board-info now that controller is up */
-	slim_ctrl_add_boarddevs(&dev->ctrl);
-
+	/* Start thread to probe, and notify slaves */
+	dev->qmi.slave_thread = kthread_run(ngd_notify_slaves, dev,
+					"ngd_notify_sl%d", dev->ctrl.nr);
+	if (IS_ERR(dev->qmi.slave_thread)) {
+		ret = PTR_ERR(dev->qmi.slave_thread);
+		dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
+		goto err_notify_thread_create_failed;
+	}
 	dev_dbg(dev->dev, "NGD SB controller is up!\n");
 	return 0;
 
-err_thread_create_failed:
+err_notify_thread_create_failed:
+	kthread_stop(dev->rx_msgq_thread);
+err_rx_thread_create_failed:
 	qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
 				SLIMBUS_QMI_SVC_V1,
 				SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index 5d30e54..8589b9f 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -305,7 +305,7 @@
 }
 
 enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
-				u8 pn, u8 **done_buf, u32 *done_len)
+				u8 pn, phys_addr_t *done_buf, u32 *done_len)
 {
 	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
 	struct sps_iovec sio;
@@ -313,7 +313,7 @@
 	if (done_len)
 		*done_len = 0;
 	if (done_buf)
-		*done_buf = NULL;
+		*done_buf = 0;
 	if (!dev->pipes[pn].connected)
 		return SLIM_P_DISCONNECT;
 	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
@@ -321,7 +321,7 @@
 		if (done_len)
 			*done_len = sio.size;
 		if (done_buf)
-			*done_buf = (u8 *)sio.addr;
+			*done_buf = (phys_addr_t)sio.addr;
 	}
 	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
 	return SLIM_P_INPROGRESS;
@@ -346,7 +346,7 @@
 		complete(comp);
 }
 
-int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
 			u32 len, struct completion *comp)
 {
 	struct sps_register_event sreg;
@@ -366,7 +366,7 @@
 		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
 		return ret;
 	}
-	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, comp,
+	ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
 				SPS_IOVEC_FLAG_INT);
 	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
 	if (!ret) {
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 5ffa300..63178cc 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -37,6 +37,10 @@
 #define SLIM_USR_MC_CONNECT_SINK	0x2D
 #define SLIM_USR_MC_DISCONNECT_PORT	0x2E
 
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE	0x0
+#define MSM_SLIM_VE_MAX_MAP_ADDR	0xFFF
+#define SLIM_MAX_VE_SLC_BYTES		16
+
 #define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC
 
 /*
@@ -199,6 +203,8 @@
 struct msm_slim_qmi {
 	struct qmi_handle		*handle;
 	struct task_struct		*task;
+	struct task_struct		*slave_thread;
+	struct completion		slave_notify;
 	struct kthread_work		kwork;
 	struct kthread_worker		kworker;
 	struct completion		qmi_comp;
@@ -257,7 +263,6 @@
 	struct completion	ctrl_up;
 	int			nsats;
 	u32			ver;
-	struct work_struct	slave_notify;
 	struct msm_slim_qmi	qmi;
 	struct msm_slim_pdata	pdata;
 	struct msm_slim_mdm	mdm;
@@ -306,8 +311,8 @@
 void msm_dealloc_port(struct slim_controller *ctrl, u8 pn);
 int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn);
 enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
-				u8 pn, u8 **done_buf, u32 *done_len);
-int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len);
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
 			u32 len, struct completion *comp);
 int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg);
 u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len);
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index caf7a87..fecf5ec 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1107,6 +1107,28 @@
 EXPORT_SYMBOL_GPL(slim_xfer_msg);
 
 /*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
+		return -EINVAL;
+	if (!sb->ctrl->xfer_user_msg)
+		return -EPROTONOSUPPORT;
+	return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
+}
+EXPORT_SYMBOL(slim_user_msg);
+
+/*
  * slim_alloc_mgrports: Allocate port on manager side.
  * @sb: device/client handle.
  * @req: Port request type.
@@ -1462,7 +1484,7 @@
  * Client will call slim_port_get_xfer_status to get error and/or number of
  * bytes transferred if used asynchronously.
  */
-int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
+int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf, u32 len,
 				struct completion *comp)
 {
 	struct slim_controller *ctrl = sb->ctrl;
@@ -1492,7 +1514,7 @@
  * processed from the multiple transfers.
  */
 enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
-			u8 **done_buf, u32 *done_len)
+			phys_addr_t *done_buf, u32 *done_len)
 {
 	struct slim_controller *ctrl = sb->ctrl;
 	u8 pn = SLIM_HDL_TO_PORT(ph);
@@ -1505,7 +1527,7 @@
 	 */
 	if (la != SLIM_LA_MANAGER) {
 		if (done_buf)
-			*done_buf = NULL;
+			*done_buf = 0;
 		if (done_len)
 			*done_len = 0;
 		return SLIM_P_NOT_OWNED;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 39e81fa..d3e4612 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1980,14 +1980,17 @@
 	if (dd->use_rlock)
 		remote_mutex_lock(&dd->r_lock);
 
-	if (!msm_spi_is_valid_state(dd)) {
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = 1;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	if (dd->suspended || !msm_spi_is_valid_state(dd)) {
 		dev_err(dd->dev, "%s: SPI operational state not valid\n",
 			__func__);
 		status_error = 1;
 	}
-
 	spin_lock_irqsave(&dd->queue_lock, flags);
-	dd->transfer_pending = 1;
+
 	while (!list_empty(&dd->queue)) {
 		dd->cur_msg = list_entry(dd->queue.next,
 					 struct spi_message, queue);
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index 3e14333..9fc7299 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -184,6 +184,22 @@
 	return 0;
 }
 
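+/* Acknowledge a PMIC interrupt by clearing its latched status bit. */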
+static void qpnpint_irq_ack(struct irq_data *d)
+{
+	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
+	int rc;
+
+	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
+
+	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
+				&irq_d->mask_shift, 1);
+	if (rc) {
+		pr_err_ratelimited("spmi write failure on irq %d, rc=%d\n",
+				d->irq, rc);
+		return;
+	}
+}
+
 static void qpnpint_irq_mask(struct irq_data *d)
 {
 	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
@@ -223,44 +239,10 @@
 
 static void qpnpint_irq_mask_ack(struct irq_data *d)
 {
-	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
-	struct q_chip_data *chip_d = irq_d->chip_d;
-	struct q_perip_data *per_d = irq_d->per_d;
-	int rc;
-	uint8_t prev_int_en = per_d->int_en;
-
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
-	if (!chip_d->cb) {
-		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
-				chip_d->bus_nr, irq_d->spmi_slave,
-				irq_d->spmi_offset);
-		return;
-	}
-
-	per_d->int_en &= ~irq_d->mask_shift;
-
-	if (prev_int_en && !(per_d->int_en)) {
-		/*
-		 * no interrupt on this peripheral is enabled
-		 * ask the arbiter to ignore this peripheral
-		 */
-		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
-	}
-
-	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
-							&irq_d->mask_shift, 1);
-	if (rc) {
-		pr_err("spmi failure on irq %d\n", d->irq);
-		return;
-	}
-
-	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
-							&irq_d->mask_shift, 1);
-	if (rc) {
-		pr_err("spmi failure on irq %d\n", d->irq);
-		return;
-	}
+	qpnpint_irq_mask(d);
+	qpnpint_irq_ack(d);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
@@ -269,6 +251,7 @@
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
 	int rc;
+	uint8_t buf[2];
 	uint8_t prev_int_en = per_d->int_en;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
@@ -289,12 +272,29 @@
 		 */
 		qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
 	}
-	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
-					&irq_d->mask_shift, 1);
+
+	/* Check the current state of the interrupt enable bit. */
+	rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_EN_SET, buf, 1);
 	if (rc) {
-		pr_err("spmi failure on irq %d\n", d->irq);
+		pr_err("SPMI read failure for IRQ %d, rc=%d\n", d->irq, rc);
 		return;
 	}
+
+	if (!(buf[0] & irq_d->mask_shift)) {
+		/*
+		 * Since the interrupt is currently disabled, write to both the
+		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+		 * cannot be triggered when the interrupt is enabled.
+		 */
+		buf[0] = irq_d->mask_shift;
+		buf[1] = irq_d->mask_shift;
+		rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR, buf, 2);
+		if (rc) {
+			pr_err("SPMI write failure for IRQ %d, rc=%d\n", d->irq,
+				rc);
+			return;
+		}
+	}
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -336,6 +336,11 @@
 		return rc;
 	}
 
+	if (flow_type & IRQ_TYPE_EDGE_BOTH)
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+	else
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+
 	return 0;
 }
 
@@ -363,6 +368,7 @@
 
 static struct irq_chip qpnpint_chip = {
 	.name		= "qpnp-int",
+	.irq_ack	= qpnpint_irq_ack,
 	.irq_mask	= qpnpint_irq_mask,
 	.irq_mask_ack	= qpnpint_irq_mask_ack,
 	.irq_unmask	= qpnpint_irq_unmask,
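
For reference, a minimal userspace sketch (with hypothetical fake_* names, separate from the patch above) of the refactor in qpnp-int.c: irq_mask_ack is now composed from the existing mask helper and the new ack helper instead of duplicating their SPMI writes.

/* Illustrative only: mask_ack reuses the mask and ack helpers. */
#include <stdio.h>

struct fake_irq_chip {
	void (*irq_mask)(int hwirq);
	void (*irq_ack)(int hwirq);
	void (*irq_mask_ack)(int hwirq);
};

static void fake_mask(int hwirq)
{
	printf("mask: clear enable bit for hwirq %d\n", hwirq);
}

static void fake_ack(int hwirq)
{
	printf("ack: clear latched status for hwirq %d\n", hwirq);
}

/* Composed from the two helpers, mirroring the qpnp-int change. */
static void fake_mask_ack(int hwirq)
{
	fake_mask(hwirq);
	fake_ack(hwirq);
}

int main(void)
{
	struct fake_irq_chip chip = {
		.irq_mask	= fake_mask,
		.irq_ack	= fake_ack,
		.irq_mask_ack	= fake_mask_ack,
	};

	chip.irq_mask_ack(3);
	return 0;
}
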
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7ca247a..41ebc1c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -365,7 +365,9 @@
 	if (!sc->nr_to_scan)
 		return lru_count;
 
-	mutex_lock(&ashmem_mutex);
+	if (!mutex_trylock(&ashmem_mutex))
+		return -1;
+
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
 		struct inode *inode = range->asma->file->f_dentry->d_inode;
 		loff_t start = range->pgstart * PAGE_SIZE;
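
The ashmem change above switches the shrinker to mutex_trylock() so it backs off instead of blocking when the lock is already held. A minimal userspace sketch of that pattern, using pthreads and hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the number of objects freed, or -1 if the lock was contended. */
static int shrink_cache(int nr_to_scan)
{
	int freed = 0;

	if (pthread_mutex_trylock(&cache_lock))
		return -1;	/* contended: tell the caller to back off */

	while (nr_to_scan-- > 0)
		freed++;	/* stand-in for freeing one LRU entry */

	pthread_mutex_unlock(&cache_lock);
	return freed;
}

int main(void)
{
	printf("freed %d objects\n", shrink_cache(4));
	return 0;
}
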
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 21863e8..8cd70ac 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -92,13 +92,43 @@
 }
 EXPORT_SYMBOL(sensor_get_id);
 
+static void init_sensor_trip(struct sensor_info *sensor)
+{
+	int ret = 0, i = 0;
+	enum thermal_trip_type type;
+
+	for (i = 0; ((sensor->max_idx == -1) ||
+		(sensor->min_idx == -1)) &&
+		(sensor->tz->ops->get_trip_type) &&
+		(i < sensor->tz->trips); i++) {
+
+		sensor->tz->ops->get_trip_type(sensor->tz, i, &type);
+		if (type == THERMAL_TRIP_CONFIGURABLE_HI)
+			sensor->max_idx = i;
+		if (type == THERMAL_TRIP_CONFIGURABLE_LOW)
+			sensor->min_idx = i;
+		type = 0;
+	}
+
+	ret = sensor->tz->ops->get_trip_temp(sensor->tz,
+		sensor->min_idx, &sensor->threshold_min);
+	if (ret)
+		pr_err("Unable to get MIN trip temp. sensor:%d err:%d\n",
+				sensor->sensor_id, ret);
+
+	ret = sensor->tz->ops->get_trip_temp(sensor->tz,
+		sensor->max_idx, &sensor->threshold_max);
+	if (ret)
+		pr_err("Unable to get MAX trip temp. sensor:%d err:%d\n",
+				sensor->sensor_id, ret);
+}
+
 static int __update_sensor_thresholds(struct sensor_info *sensor)
 {
 	long max_of_low_thresh = LONG_MIN;
 	long min_of_high_thresh = LONG_MAX;
 	struct sensor_threshold *pos, *var;
-	enum thermal_trip_type type;
-	int i, ret = 0;
+	int ret = 0;
 
 	if (!sensor->tz->ops->set_trip_temp ||
 		!sensor->tz->ops->activate_trip_type ||
@@ -108,19 +138,8 @@
 		goto update_done;
 	}
 
-	for (i = 0; ((sensor->max_idx == -1) || (sensor->min_idx == -1)) &&
-		(sensor->tz->ops->get_trip_type) && (i < sensor->tz->trips);
-		i++) {
-		sensor->tz->ops->get_trip_type(sensor->tz, i, &type);
-		if (type == THERMAL_TRIP_CONFIGURABLE_HI)
-			sensor->max_idx = i;
-		if (type == THERMAL_TRIP_CONFIGURABLE_LOW)
-			sensor->min_idx = i;
-		sensor->tz->ops->get_trip_temp(sensor->tz,
-			THERMAL_TRIP_CONFIGURABLE_LOW, &sensor->threshold_min);
-		sensor->tz->ops->get_trip_temp(sensor->tz,
-			THERMAL_TRIP_CONFIGURABLE_HI, &sensor->threshold_max);
-	}
+	if ((sensor->max_idx == -1) || (sensor->min_idx == -1))
+		init_sensor_trip(sensor);
 
 	list_for_each_entry_safe(pos, var, &sensor->threshold_list, list) {
 		if (!pos->active)
@@ -368,7 +387,7 @@
 
 	sensor->sensor_id = tz->id;
 	sensor->tz = tz;
-	sensor->threshold_min = 0;
+	sensor->threshold_min = LONG_MIN;
 	sensor->threshold_max = LONG_MAX;
 	sensor->max_idx = -1;
 	sensor->min_idx = -1;
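
The thermal change above seeds threshold_min with LONG_MIN instead of 0. One plausible reason, shown in this small self-contained sketch, is that a running max over possibly negative trip temperatures only behaves correctly when seeded with the identity element:

#include <limits.h>
#include <stdio.h>

static long max_of_lows(const long *lows, int n)
{
	long max = LONG_MIN;	/* seed with the identity for max() */
	int i;

	for (i = 0; i < n; i++)
		if (lows[i] > max)
			max = lows[i];
	return max;
}

int main(void)
{
	long lows[] = { -30000, -10000, -25000 };	/* millidegree C */

	/* A seed of 0 would wrongly report 0 here. */
	printf("highest low threshold: %ld\n", max_of_lows(lows, 3));
	return 0;
}
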
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 4058bec..a701ec8 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -251,7 +251,6 @@
 	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
 	struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
 	struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
-	bool tty_flush_receive;
 	enum uart_core_type uart_type;
 	u32 bam_handle;
 	resource_size_t bam_mem;
@@ -319,6 +318,7 @@
 		[UART_DM_TXFS] = 0x4c,
 		[UART_DM_RXFS] = 0x50,
 		[UART_DM_RX_TRANS_CTRL] = 0xcc,
+		[UART_DM_BCR] = 0xc8,
 };
 
 static struct of_device_id msm_hs_match_table[] = {
@@ -1838,12 +1838,13 @@
 	 * Do the work buffer related work in BAM
 	 * mode that is equivalent to legacy mode
 	 */
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
 
-	if (!msm_uport->tty_flush_receive)
+	if (!uart_circ_empty(tx_buf))
 		tx_buf->tail = (tx_buf->tail +
 		tx->tx_count) & ~UART_XMIT_SIZE;
 	else
-		msm_uport->tty_flush_receive = false;
+		MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
 
 	tx->dma_in_flight = 0;
 
@@ -1860,7 +1861,6 @@
 	if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
 		uart_write_wakeup(uport);
 
-	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
 	if (msm_uport->tx.flush == FLUSH_STOP) {
 		msm_uport->tx.flush = FLUSH_SHUTDOWN;
 		wake_up(&msm_uport->tx.wait);
@@ -2036,14 +2036,6 @@
 
 }
 
-static void msm_hs_flush_buffer(struct uart_port *uport)
-{
-	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
-	if (msm_uport->tx.dma_in_flight)
-		msm_uport->tty_flush_receive = true;
-}
-
 /*
  *  Standard API, Break Signal
  *
@@ -2281,11 +2273,9 @@
 		/* Do not update tx_buf.tail if uart_flush_buffer already
 		 * called in serial core
 		 */
-		if (!msm_uport->tty_flush_receive)
+		if (!uart_circ_empty(tx_buf))
 			tx_buf->tail = (tx_buf->tail +
 					tx->tx_count) & ~UART_XMIT_SIZE;
-		else
-			msm_uport->tty_flush_receive = false;
 
 		tx->dma_in_flight = 0;
 
@@ -2665,7 +2655,11 @@
 		}
 	}
 
-	msm_hs_write(uport, UARTDM_BCR_ADDR, 0x003F);
+	data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
+		UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
+		UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
+	msm_hs_write(uport, UART_DM_BCR, data);
+
 	/* Set auto RFR Level */
 	data = msm_hs_read(uport, UART_DM_MR1);
 	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
@@ -2708,8 +2702,6 @@
 	tx->tx_ready_int_en = 0;
 	tx->dma_in_flight = 0;
 	rx->rx_cmd_exec = false;
-	msm_uport->tty_flush_receive = false;
-	MSM_HS_DBG("%s: Setting tty_flush_receive to false\n", __func__);
 
 	if (!is_blsp_uart(msm_uport)) {
 		tx->xfer.complete_func = msm_hs_dmov_tx_callback;
@@ -3751,7 +3743,7 @@
 	.config_port = msm_hs_config_port,
 	.release_port = msm_hs_release_port,
 	.request_port = msm_hs_request_port,
-	.flush_buffer = msm_hs_flush_buffer,
+	.flush_buffer = NULL,
 	.ioctl = msm_hs_ioctl,
 };
 
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
index d912b9f..064bbda 100644
--- a/drivers/tty/serial/msm_serial_hs_hwreg.h
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -81,6 +81,7 @@
 	UART_DM_TXFS,
 	UART_DM_RXFS,
 	UART_DM_RX_TRANS_CTRL,
+	UART_DM_BCR,
 	UART_DM_LAST,
 };
 
@@ -94,7 +95,11 @@
  * UARTDM Core v1.4 STALE_IRQ_EMPTY bit defination
  * Stale interrupt will fire if bit is set when RX-FIFO is empty
  */
+#define UARTDM_BCR_TX_BREAK_DISABLE	0x1
 #define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
+#define UARTDM_BCR_RX_DMRX_LOW_EN	0x4
+#define UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL	0x10
+#define UARTDM_BCR_RX_DMRX_1BYTE_RES_EN	0x20
 
 /* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
 #define UARTDM_RX_TRANS_CTRL_ADDR      0xcc
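
As a quick arithmetic check, the BCR value composed from the named bits above is 0x37, whereas the magic constant it replaces in msm_serial_hs.c was 0x3F; the named form makes it visible that bit 3 (0x08) is no longer set. A small program to verify:

#include <stdio.h>

#define UARTDM_BCR_TX_BREAK_DISABLE	0x1
#define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
#define UARTDM_BCR_RX_DMRX_LOW_EN	0x4
#define UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL	0x10
#define UARTDM_BCR_RX_DMRX_1BYTE_RES_EN	0x20

int main(void)
{
	unsigned int data = UARTDM_BCR_TX_BREAK_DISABLE |
			    UARTDM_BCR_STALE_IRQ_EMPTY |
			    UARTDM_BCR_RX_DMRX_LOW_EN |
			    UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
			    UARTDM_BCR_RX_DMRX_1BYTE_RES_EN;

	printf("UART_DM_BCR value: 0x%02x\n", data);	/* prints 0x37 */
	return 0;
}
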
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index b1ec3fc..2a66c4c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_USB_IMX21_HCD)	+= host/
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
 obj-$(CONFIG_USB_PEHCI_HCD)	+= host/
+obj-$(CONFIG_USB_ICE40_HCD)	+= host/
 
 obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
 
@@ -37,6 +38,7 @@
 obj-$(CONFIG_USB_PRINTER)	+= class/
 obj-$(CONFIG_USB_WDM)		+= class/
 obj-$(CONFIG_USB_TMC)		+= class/
+obj-$(CONFIG_USB_CCID_BRIDGE)	+= class/
 
 obj-$(CONFIG_USB_STORAGE)	+= storage/
 obj-$(CONFIG_USB)		+= storage/
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index f1e4220..66bb317 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -345,6 +345,11 @@
 	return 0;
 }
 
+void ci13xxx_msm_shutdown(struct platform_device *pdev)
+{
+	ci13xxx_pullup(&_udc->gadget, 0);
+}
+
 void msm_hw_bam_disable(bool bam_disable)
 {
 	u32 val;
@@ -364,6 +369,7 @@
 		.name = "msm_hsusb",
 	},
 	.remove = ci13xxx_msm_remove,
+	.shutdown = ci13xxx_msm_shutdown,
 };
 MODULE_ALIAS("platform:msm_hsusb");
 
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
index 37f229b..33b645e 100644
--- a/drivers/usb/gadget/f_audio_source.c
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -327,15 +327,22 @@
 	s64 msecs;
 	s64 frames;
 	ktime_t now;
+	unsigned long flags;
 
+	spin_lock_irqsave(&audio->lock, flags);
 	/* audio->substream will be null if we have been closed */
-	if (!audio->substream)
+	if (!audio->substream) {
+		spin_unlock_irqrestore(&audio->lock, flags);
 		return;
+	}
 	/* audio->buffer_pos will be null if we have been stopped */
-	if (!audio->buffer_pos)
+	if (!audio->buffer_pos) {
+		spin_unlock_irqrestore(&audio->lock, flags);
 		return;
+	}
 
 	runtime = audio->substream->runtime;
+	spin_unlock_irqrestore(&audio->lock, flags);
 
 	/* compute number of frames to send */
 	now = ktime_get();
@@ -359,8 +366,21 @@
 
 	while (frames > 0) {
 		req = audio_req_get(audio);
-		if (!req)
+		spin_lock_irqsave(&audio->lock, flags);
+		/* audio->substream will be null if we have been closed */
+		if (!audio->substream) {
+			spin_unlock_irqrestore(&audio->lock, flags);
+			return;
+		}
+		/* audio->buffer_pos will be null if we have been stopped */
+		if (!audio->buffer_pos) {
+			spin_unlock_irqrestore(&audio->lock, flags);
+			return;
+		}
+		if (!req) {
+			spin_unlock_irqrestore(&audio->lock, flags);
 			break;
+		}
 
 		length = frames_to_bytes(runtime, frames);
 		if (length > IN_EP_MAX_PACKET_SIZE)
@@ -386,6 +406,7 @@
 		}
 
 		req->length = length;
+		spin_unlock_irqrestore(&audio->lock, flags);
 		ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
 		if (ret < 0) {
 			pr_err("usb_ep_queue failed ret: %d\n", ret);
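
The f_audio_source change above tests audio->substream and audio->buffer_pos only while holding audio->lock, so a concurrent close/stop cannot clear them between the check and the use. A minimal userspace sketch of that check-under-lock structure, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *substream;		/* cleared by a concurrent "close" path */

static void send_audio_data(void)
{
	int *stream;

	pthread_mutex_lock(&lock);
	if (!substream) {		/* already closed: nothing to do */
		pthread_mutex_unlock(&lock);
		return;
	}
	stream = substream;		/* sample while still protected */
	pthread_mutex_unlock(&lock);

	printf("sending, stream value %d\n", *stream);
}

int main(void)
{
	int s = 42;

	substream = &s;
	send_audio_data();
	return 0;
}
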
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index e218130..16f961e 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -26,6 +26,7 @@
 #include <linux/usb/gadget.h>
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
+#include <linux/kmemleak.h>
 
 static DEFINE_SPINLOCK(ch_lock);
 static LIST_HEAD(usb_diag_ch_list);
@@ -381,6 +382,7 @@
 		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
 		if (!req)
 			goto fail;
+		kmemleak_not_leak(req);
 		req->complete = diag_write_complete;
 		list_add_tail(&req->list, &ctxt->write_pool);
 	}
@@ -389,6 +391,7 @@
 		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
 		if (!req)
 			goto fail;
+		kmemleak_not_leak(req);
 		req->complete = diag_read_complete;
 		list_add_tail(&req->list, &ctxt->read_pool);
 	}
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 2fa8c63..ebcec96 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -47,7 +47,7 @@
 
 	/* control info */
 	struct list_head		cpkt_resp_q;
-	atomic_t			notify_count;
+	unsigned long			notify_count;
 	unsigned long			cpkts_len;
 };
 
@@ -605,7 +605,7 @@
 		list_del(&cpkt->list);
 		rmnet_free_ctrl_pkt(cpkt);
 	}
-	atomic_set(&dev->notify_count, 0);
+	dev->notify_count = 0;
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
@@ -619,6 +619,7 @@
 		__func__, xport_to_str(dxport),
 		dev, dev->port_num);
 
+	usb_ep_fifo_flush(dev->notify);
 	frmnet_purge_responses(dev);
 
 	port_num = rmnet_ports[dev->port_num].data_xport_num;
@@ -754,7 +755,7 @@
 		return;
 	}
 
-	if (atomic_inc_return(&dev->notify_count) != 1) {
+	if (++dev->notify_count != 1) {
 		spin_unlock_irqrestore(&dev->lock, flags);
 		return;
 	}
@@ -772,7 +773,14 @@
 	if (ret) {
 		spin_lock_irqsave(&dev->lock, flags);
 		if (!list_empty(&dev->cpkt_resp_q)) {
-			atomic_dec(&dev->notify_count);
+			if (dev->notify_count > 0)
+				dev->notify_count--;
+			else {
+				pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					 __func__, dev->notify_count);
+				spin_unlock_irqrestore(&dev->lock, flags);
+				return;
+			}
 			cpkt = list_first_entry(&dev->cpkt_resp_q,
 					struct rmnet_ctrl_pkt, list);
 			list_del(&cpkt->list);
@@ -911,7 +919,9 @@
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 		/* connection gone */
-		atomic_set(&dev->notify_count, 0);
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->notify_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
 		break;
 	default:
 		pr_err("rmnet notify ep error %d\n", status);
@@ -920,14 +930,34 @@
 		if (!atomic_read(&dev->ctrl_online))
 			break;
 
-		if (atomic_dec_and_test(&dev->notify_count))
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->notify_count > 0) {
+			dev->notify_count--;
+			if (dev->notify_count == 0) {
+				spin_unlock_irqrestore(&dev->lock, flags);
+				break;
+			}
+		} else {
+			pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					__func__, dev->notify_count);
+			spin_unlock_irqrestore(&dev->lock, flags);
 			break;
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
 
 		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
 		if (status) {
 			spin_lock_irqsave(&dev->lock, flags);
 			if (!list_empty(&dev->cpkt_resp_q)) {
-				atomic_dec(&dev->notify_count);
+				if (dev->notify_count > 0)
+					dev->notify_count--;
+				else {
+					pr_err("%s: Invalid notify_count=%lu to decrement\n",
+						__func__, dev->notify_count);
+					spin_unlock_irqrestore(&dev->lock,
+								flags);
+					break;
+				}
 				cpkt = list_first_entry(&dev->cpkt_resp_q,
 						struct rmnet_ctrl_pkt, list);
 				list_del(&cpkt->list);
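
The f_rmnet change above replaces the atomic notify_count with a plain counter guarded by dev->lock, and every decrement first checks for zero so the counter cannot underflow. A minimal userspace sketch of that guarded decrement, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long notify_count;

/* Returns 0 on success, -1 if there was nothing to decrement. */
static int notify_count_dec(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (notify_count > 0)
		notify_count--;
	else
		ret = -1;	/* would have underflowed */
	pthread_mutex_unlock(&lock);

	return ret;
}

int main(void)
{
	int first, second;

	notify_count = 1;
	first = notify_count_dec();
	second = notify_count_dec();
	printf("first dec: %d, second dec: %d\n", first, second);
	return 0;
}
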
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4357867..2a24bec 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -693,3 +693,16 @@
 config USB_OCTEON2_COMMON
 	bool
 	default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_ICE40_HCD
+	tristate "ICE40 FPGA based SPI to Inter-Chip USB host controller"
+	depends on USB && SPI
+	help
+	  A driver for ICE40 FPGA based SPI to Inter-Chip USB host
+	  controller. This driver registers as an SPI protocol driver,
+	  interacting with the SPI subsystem on one side and with the
+	  USB core on the other. Control and Bulk transfers are
+	  supported.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called "ice40-hcd".
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 7d35f5b..7c5b452 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -42,3 +42,4 @@
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= fsl-mph-dr-of.o
 obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
 obj-$(CONFIG_MIPS_ALCHEMY)	+= alchemy-common.o
+obj-$(CONFIG_USB_ICE40_HCD)	+= ice40-hcd.o
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 7ae0a54..5b08db6 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -46,6 +46,10 @@
 
 #define PDEV_NAME_LEN 20
 
+static bool uicc_card_present;
+module_param(uicc_card_present, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uicc_card_present, "UICC card inserted");
+
 struct msm_hcd {
 	struct ehci_hcd				ehci;
 	spinlock_t				wakeup_lock;
@@ -295,6 +299,11 @@
 static void msm_ehci_vbus_power(struct msm_hcd *mhcd, bool on)
 {
 	int ret;
+	const struct msm_usb_host_platform_data *pdata;
+
+	pdata = mhcd->dev->platform_data;
+	if (pdata && pdata->is_uicc)
+		return;
 
 	if (!mhcd->vbus) {
 		pr_err("vbus is NULL.");
@@ -352,6 +361,10 @@
 
 	pdata = mhcd->dev->platform_data;
 
+	/* For uicc card connection, external vbus is not required */
+	if (pdata && pdata->is_uicc)
+		return 0;
+
 	if (!init) {
 		if (pdata && pdata->dock_connect_irq)
 			free_irq(pdata->dock_connect_irq, mhcd);
@@ -1323,6 +1336,8 @@
 	pdata->resume_gpio = of_get_named_gpio(node, "qcom,resume-gpio", 0);
 	if (pdata->resume_gpio < 0)
 		pdata->resume_gpio = 0;
+	pdata->is_uicc = of_property_read_bool(node,
+					"qcom,usb2-enable-uicc");
 
 	return pdata;
 }
@@ -1339,6 +1354,14 @@
 
 	dev_dbg(&pdev->dev, "ehci_msm2 probe\n");
 
+	/*
+	 * Fail probe in case of uicc till userspace activates driver through
+	 * sysfs entry.
+	 */
+	if (!uicc_card_present && pdev->dev.of_node && of_property_read_bool(
+				pdev->dev.of_node, "qcom,usb2-enable-uicc"))
+		return -ENODEV;
+
 	if (pdev->dev.of_node) {
 		dev_dbg(&pdev->dev, "device tree enabled\n");
 		pdev->dev.platform_data = ehci_msm2_dt_to_pdata(pdev);
@@ -1612,6 +1635,10 @@
 	if (mhcd->resume_gpio)
 		gpio_free(mhcd->resume_gpio);
 
+	/* If the device was removed no need to call pm_runtime_disable */
+	if (pdev->dev.power.power_state.event != PM_EVENT_INVALID)
+		pm_runtime_disable(&pdev->dev);
+
 	device_init_wakeup(&pdev->dev, 0);
 	pm_runtime_set_suspended(&pdev->dev);
 
diff --git a/drivers/usb/host/ice40-hcd.c b/drivers/usb/host/ice40-hcd.c
new file mode 100644
index 0000000..4d62a3e
--- /dev/null
+++ b/drivers/usb/host/ice40-hcd.c
@@ -0,0 +1,2092 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2001-2004 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Root HUB management and Asynchronous scheduling traversal
+ * Based on ehci-hub.c and ehci-q.c
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/spinlock.h>
+#include <linux/firmware.h>
+#include <linux/spi/spi.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/ch11.h>
+
+#include <asm/unaligned.h>
+#include <mach/gpiomux.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ice40.h>
+
+#define FADDR_REG 0x00 /* R/W: Device address */
+#define HCMD_REG 0x01 /* R/W: Host transfer command */
+#define XFRST_REG 0x02 /* R: Transfer status */
+#define IRQ_REG 0x03 /* R/C: IRQ status */
+#define IEN_REG 0x04 /* R/W: IRQ enable */
+#define CTRL0_REG 0x05 /* R/W: Host control command */
+#define CTRL1_REG 0x06 /* R/W: Host control command */
+#define WBUF0_REG 0x10 /* W: Tx fifo 0 */
+#define WBUF1_REG 0x11 /* W: Tx fifo 1 */
+#define SUBUF_REG 0x12 /* W: SETUP fifo */
+#define WBLEN_REG 0x13 /* W: Tx fifo size */
+#define RBUF0_REG 0x18 /* R: Rx fifo 0 */
+#define RBUF1_REG 0x19 /* R: Rx fifo 1 */
+#define RBLEN_REG 0x1B /* R: Rx fifo size */
+
+#define WRITE_CMD(addr) ((addr << 3) | 1)
+#define READ_CMD(addr) ((addr << 3) | 0)
+
+/* Host controller command register definitions */
+#define HCMD_EP(ep) (ep & 0xF)
+#define HCMD_BSEL(sel) (sel << 4)
+#define HCMD_TOGV(toggle) (toggle << 5)
+#define HCMD_PT(token) (token << 6)
+
+/* Transfer status register definitions */
+#define XFR_MASK(xfr) (xfr & 0xF)
+#define XFR_SUCCESS 0x0
+#define XFR_BUSY 0x1
+#define XFR_PKTERR 0x2
+#define XFR_PIDERR 0x3
+#define XFR_NAK 0x4
+#define XFR_STALL 0x5
+#define XFR_WRONGPID 0x6
+#define XFR_CRCERR 0x7
+#define XFR_TOGERR 0x8
+#define XFR_BADLEN 0x9
+#define XFR_TIMEOUT 0xA
+
+#define LINE_STATE(xfr) ((xfr & 0x30) >> 4) /* D+, D- */
+#define DPST	BIT(5)
+#define DMST	BIT(4)
+#define PLLOK	BIT(6)
+#define R64B	BIT(7)
+
+/* Interrupt enable/status register definitions */
+#define RESET_IRQ BIT(0)
+#define RESUME_IRQ BIT(1)
+#define SUSP_IRQ BIT(3)
+#define DISCONNECT_IRQ BIT(4)
+#define CONNECT_IRQ BIT(5)
+#define FRAME_IRQ BIT(6)
+#define XFR_IRQ BIT(7)
+
+/* Control 0 register definitions */
+#define RESET_CTRL BIT(0)
+#define FRAME_RESET_CTRL BIT(1)
+#define DET_BUS_CTRL BIT(2)
+#define RESUME_CTRL BIT(3)
+#define SOFEN_CTRL BIT(4)
+#define DM_PD_CTRL BIT(6)
+#define DP_PD_CTRL BIT(7)
+#define HRST_CTRL  BIT(5)
+
+/* Control 1 register definitions */
+#define INT_EN_CTRL BIT(0)
+
+enum ice40_xfr_type {
+	FIRMWARE_XFR,
+	REG_WRITE_XFR,
+	REG_READ_XFR,
+	SETUP_XFR,
+	DATA_IN_XFR,
+	DATA_OUT_XFR,
+};
+
+enum ice40_ep_phase {
+	SETUP_PHASE = 1,
+	DATA_PHASE,
+	STATUS_PHASE,
+};
+
+struct ice40_ep {
+	u8 xcat_err;
+	bool unlinking;
+	bool halted;
+	struct usb_host_endpoint *ep;
+	struct list_head ep_list;
+};
+
+struct ice40_hcd {
+	spinlock_t lock;
+
+	struct mutex wlock;
+	struct mutex rlock;
+
+	u8 devnum;
+	u32 port_flags;
+	u8 ctrl0;
+	u8 wblen0;
+
+	enum ice40_ep_phase ep0_state;
+	struct usb_hcd *hcd;
+
+	struct list_head async_list;
+	struct workqueue_struct *wq;
+	struct work_struct async_work;
+
+	int reset_gpio;
+	int slave_select_gpio;
+	int config_done_gpio;
+	int vcc_en_gpio;
+	int clk_en_gpio;
+
+	struct regulator *core_vcc;
+	struct regulator *spi_vcc;
+	struct regulator *gpio_vcc;
+	bool powered;
+
+	struct dentry *dbg_root;
+	bool pcd_pending;
+
+	/* SPI stuff later */
+	struct spi_device *spi;
+
+	struct spi_message *fmsg;
+	struct spi_transfer *fmsg_xfr; /* size 1 */
+
+	struct spi_message *wmsg;
+	struct spi_transfer *wmsg_xfr; /* size 1 */
+	u8 *w_tx_buf;
+	u8 *w_rx_buf;
+
+	struct spi_message *rmsg;
+	struct spi_transfer *rmsg_xfr; /* size 1 */
+	u8 *r_tx_buf;
+	u8 *r_rx_buf;
+
+	struct spi_message *setup_msg;
+	struct spi_transfer *setup_xfr; /* size 2 */
+	u8 *setup_buf; /* size 1 for SUBUF */
+
+	struct spi_message *in_msg;
+	struct spi_transfer *in_xfr; /* size 2 */
+	u8 *in_buf; /* size 2 for reading from RBUF0 */
+
+	struct spi_message *out_msg;
+	struct spi_transfer *out_xfr; /* size 2 */
+	u8 *out_buf; /* size 1 for writing WBUF0 */
+};
+
+static char fw_name[16] = "ice40.bin";
+module_param_string(fw, fw_name, sizeof(fw_name), S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fw, "firmware blob file name");
+
+static bool debugger;
+module_param(debugger, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debugger, "true to use the debug port");
+
+static inline struct ice40_hcd *hcd_to_ihcd(struct usb_hcd *hcd)
+{
+	return *((struct ice40_hcd **) hcd->hcd_priv);
+}
+
+static void ice40_spi_reg_write(struct ice40_hcd *ihcd, u8 val, u8 addr)
+{
+	int ret;
+
+	/*
+	 * Register Write Pattern:
+	 * TX: 1st byte is CMD (register + write), 2nd byte is value
+	 * RX: Ignore
+	 *
+	 * The mutex protects against concurrent register writes, as
+	 * we have only one SPI message struct.
+	 */
+
+	mutex_lock(&ihcd->wlock);
+
+	ihcd->w_tx_buf[0] = WRITE_CMD(addr);
+	ihcd->w_tx_buf[1] = val;
+	ret = spi_sync(ihcd->spi, ihcd->wmsg);
+	if (ret < 0) /* should not happen */
+		pr_err("failed. val = %d addr = %d\n", val, addr);
+
+	trace_ice40_reg_write(addr, val, ihcd->w_tx_buf[0],
+			ihcd->w_tx_buf[1], ret);
+
+	mutex_unlock(&ihcd->wlock);
+}
+
+static int ice40_spi_reg_read(struct ice40_hcd *ihcd, u8 addr)
+{
+	int ret;
+
+	/*
+	 * Register Read Pattern:
+	 * TX: 1st byte is CMD (register + read)
+	 * RX: 1st, 2nd byte Ignore, 3rd byte value.
+	 *
+	 * The mutex protects against concurrent register reads, as
+	 * we have only one SPI message struct.
+	 */
+
+	mutex_lock(&ihcd->rlock);
+
+	ihcd->r_tx_buf[0] = READ_CMD(addr);
+	ret = spi_sync(ihcd->spi, ihcd->rmsg);
+	if (ret < 0)
+		pr_err("failed. addr = %d\n", addr);
+	else
+		ret = ihcd->r_rx_buf[2];
+
+	trace_ice40_reg_read(addr, ihcd->r_tx_buf[0], ret);
+
+	mutex_unlock(&ihcd->rlock);
+
+	return ret;
+}
+
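
For reference, a minimal userspace sketch (separate from the driver code above) of the SPI register framing described in the comments: a write is two bytes (command, value) and a read returns the register value in the third received byte.

#include <stdint.h>
#include <stdio.h>

#define WRITE_CMD(addr)	(((addr) << 3) | 1)
#define READ_CMD(addr)	(((addr) << 3) | 0)

static void build_write_frame(uint8_t addr, uint8_t val, uint8_t tx[2])
{
	tx[0] = WRITE_CMD(addr);	/* command byte */
	tx[1] = val;			/* value byte */
}

static uint8_t parse_read_frame(const uint8_t rx[3])
{
	return rx[2];			/* value arrives in the 3rd byte */
}

int main(void)
{
	uint8_t tx[2];
	uint8_t rx[3] = { 0x00, 0x00, 0x5a };	/* pretend SPI response */

	build_write_frame(0x05 /* CTRL0_REG */, 0x10, tx);
	printf("write frame: %02x %02x\n", tx[0], tx[1]);
	printf("read value:  %02x\n", parse_read_frame(rx));
	return 0;
}
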
+static int ice40_poll_xfer(struct ice40_hcd *ihcd, int usecs)
+{
+	ktime_t start = ktime_get();
+	u8 val, retry = 0;
+	u8 ret = ~0; /* time out */
+
+again:
+
+	/*
+	 * The SPI transaction may take tens of usec. Use ktime
+	 * based checks rather than loop count.
+	 */
+	do {
+		val = ice40_spi_reg_read(ihcd, XFRST_REG);
+
+		if (XFR_MASK(val) != XFR_BUSY)
+			return val;
+
+	} while (ktime_us_delta(ktime_get(), start) < usecs);
+
+	/*
+	 * The SPI transaction involves a context switch. If, for any
+	 * reason, we are scheduled out for more than usecs after the
+	 * first read, this extra read will help.
+	 */
+	if (!retry) {
+		retry = 1;
+		goto again;
+	}
+
+	return ret;
+}
+
+static int
+ice40_handshake(struct ice40_hcd *ihcd, u8 reg, u8 mask, u8 done, int usecs)
+{
+	ktime_t start = ktime_get();
+	u8 val, retry = 0;
+
+again:
+	do {
+		val = ice40_spi_reg_read(ihcd, reg);
+		val &= mask;
+
+		if (val == done)
+			return 0;
+
+	} while (ktime_us_delta(ktime_get(), start) < usecs);
+
+	if (!retry) {
+		retry = 1;
+		goto again;
+	}
+
+	return -ETIMEDOUT;
+}
+
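
For reference, a minimal userspace sketch (separate from the driver code above) of the deadline-based polling used by ice40_poll_xfer() and ice40_handshake(): bound the wait by elapsed time rather than iteration count, and retry once in case the poller was scheduled out past the deadline.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long us_elapsed(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000000L +
	       (now.tv_nsec - start->tv_nsec) / 1000L;
}

/* reg_read() stands in for a slow bus access; returns the "status". */
static int poll_until_done(int (*reg_read)(void), int done, long usecs)
{
	struct timespec start;
	bool retried = false;
	int val;

	clock_gettime(CLOCK_MONOTONIC, &start);
again:
	do {
		val = reg_read();
		if (val == done)
			return 0;
	} while (us_elapsed(&start) < usecs);

	if (!retried) {		/* tolerate one long scheduling gap */
		retried = true;
		goto again;
	}
	return -1;		/* timed out */
}

static int fake_read(void)
{
	return 1;		/* always "done" in this demo */
}

int main(void)
{
	printf("poll result: %d\n", poll_until_done(fake_read, 1, 1000));
	return 0;
}
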
+
+static const char hcd_name[] = "ice40-hcd";
+
+static int ice40_reset(struct usb_hcd *hcd)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+	u8 ctrl, status;
+	int ret = 0;
+
+	/*
+	 * Program the default address 0. The device address is
+	 * re-programmed after SET_ADDRESS in the URB handling path.
+	 */
+	ihcd->devnum = 0;
+	ice40_spi_reg_write(ihcd, 0, FADDR_REG);
+
+	ihcd->wblen0 = ~0;
+	/*
+	 * Read the line state. This driver is loaded after the UICC
+	 * card is inserted, so the line state should indicate that a
+	 * Full-speed device is connected. Return an error if no
+	 * device is connected.
+	 *
+	 * During debugging there may be no device connected. A debugfs
+	 * file is provided to sample the bus lines and update the
+	 * port flags accordingly.
+	 */
+
+	if (debugger)
+		goto out;
+
+	ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
+	ice40_spi_reg_write(ihcd, ctrl | DET_BUS_CTRL, CTRL0_REG);
+
+	ret = ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
+	if (ret) {
+		pr_err("bus detection failed\n");
+		goto out;
+	}
+
+	status = ice40_spi_reg_read(ihcd, XFRST_REG);
+	pr_debug("line state (D+, D-) is %d\n", LINE_STATE(status));
+
+	if (status & DPST) {
+		pr_debug("Full speed device connected\n");
+		ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
+	} else {
+		pr_err("No device connected\n");
+		ret = -ENODEV;
+	}
+out:
+	return ret;
+}
+
+static int ice40_run(struct usb_hcd *hcd)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+	/*
+	 * The HCD_FLAG_POLL_RH flag is not set by us, so the core will
+	 * not poll for the port status periodically. The uses_new_polling
+	 * flag tells the core that this hcd will call usb_hcd_poll_rh_status
+	 * upon port change.
+	 */
+	hcd->uses_new_polling = 1;
+
+	/*
+	 * Cache the ctrl0 register to avoid multiple reads. This register
+	 * is written during reset and resume.
+	 */
+	ihcd->ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
+	ihcd->ctrl0 |= SOFEN_CTRL;
+	ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
+
+	return 0;
+}
+
+static void ice40_stop(struct usb_hcd *hcd)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+	cancel_work_sync(&ihcd->async_work);
+}
+
+/*
+ * The _Error suffix looks odd, but it is very helpful when
+ * searching the logs for errors.
+ */
+static char __maybe_unused *xfr_status_string(int status)
+{
+	switch (XFR_MASK(status)) {
+	case XFR_SUCCESS:
+		return "Ack";
+	case XFR_BUSY:
+		return "Busy_Error";
+	case XFR_PKTERR:
+		return "Pkt_Error";
+	case XFR_PIDERR:
+		return "PID_Error";
+	case XFR_NAK:
+		return "Nak";
+	case XFR_STALL:
+		return "Stall_Error";
+	case XFR_WRONGPID:
+		return "WrongPID_Error";
+	case XFR_CRCERR:
+		return "CRC_Error";
+	case XFR_TOGERR:
+		return "Togg_Error";
+	case XFR_BADLEN:
+		return "BadLen_Error";
+	case XFR_TIMEOUT:
+		return "Timeout_Error";
+	default:
+		return "Unknown_Error";
+	}
+}
+
+static int ice40_xfer_setup(struct ice40_hcd *ihcd, struct urb *urb)
+{
+	struct usb_host_endpoint *ep = urb->ep;
+	struct ice40_ep *iep = ep->hcpriv;
+	void *buf = urb->setup_packet;
+	int ret, status;
+	u8 cmd;
+
+	/*
+	 * SETUP transaction Handling:
+	 * - copy the setup buffer to SUBUF fifo
+	 * - Program HCMD register to initiate the SETUP transaction.
+	 * - poll for completion by reading XFRST register.
+	 * - Interpret the result.
+	 */
+
+	ihcd->setup_buf[0] = WRITE_CMD(SUBUF_REG);
+	ihcd->setup_xfr[1].tx_buf = buf;
+	ihcd->setup_xfr[1].len = sizeof(struct usb_ctrlrequest);
+
+	ret = spi_sync(ihcd->spi, ihcd->setup_msg);
+	if (ret < 0) {
+		pr_err("SPI transfer failed\n");
+		status = ret = -EIO;
+		goto out;
+	}
+
+	cmd = HCMD_PT(2) | HCMD_TOGV(0) | HCMD_BSEL(0) | HCMD_EP(0);
+	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
+
+	status = ice40_poll_xfer(ihcd, 1000);
+	switch (XFR_MASK(status)) {
+	case XFR_SUCCESS:
+		iep->xcat_err = 0;
+		ret = 0;
+		break;
+	case XFR_NAK: /* Device should not return Nak for SETUP */
+	case XFR_STALL:
+		iep->xcat_err = 0;
+		ret = -EPIPE;
+		break;
+	case XFR_PKTERR:
+	case XFR_PIDERR:
+	case XFR_WRONGPID:
+	case XFR_CRCERR:
+	case XFR_TIMEOUT:
+		if (++iep->xcat_err < 8)
+			ret = -EINPROGRESS;
+		else
+			ret = -EPROTO;
+		break;
+	default:
+		pr_err("transaction timed out\n");
+		ret = -EIO;
+	}
+
+out:
+	trace_ice40_setup(xfr_status_string(status), ret);
+	return ret;
+}
+
+static int ice40_xfer_in(struct ice40_hcd *ihcd, struct urb *urb)
+{
+	struct usb_host_endpoint *ep = urb->ep;
+	struct usb_device *udev = urb->dev;
+	u32 total_len = urb->transfer_buffer_length;
+	u16 maxpacket = usb_endpoint_maxp(&ep->desc);
+	u8 epnum = usb_pipeendpoint(urb->pipe);
+	bool is_out = usb_pipeout(urb->pipe);
+	struct ice40_ep *iep = ep->hcpriv;
+	u8 cmd, status, len = 0, t, expected_len;
+	void *buf;
+	int ret;
+	bool short_packet = true;
+
+	if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
+		expected_len = 0;
+		buf = NULL;
+		t = 1; /* STATUS PHASE is always DATA1 */
+	} else {
+		expected_len = min_t(u32, maxpacket,
+				total_len - urb->actual_length);
+		buf = urb->transfer_buffer + urb->actual_length;
+		t = usb_gettoggle(udev, epnum, is_out);
+	}
+
+	/*
+	 * IN transaction Handling:
+	 * - Program HCMD register to initiate the IN transaction.
+	 * - poll for completion by reading XFRST register.
+	 * - Interpret the result.
+	 * - If ACK is received and we expect some data, read RBLEN
+	 * - Read the data from RBUF
+	 */
+
+	cmd = HCMD_PT(0) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
+	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
+
+	status = ice40_poll_xfer(ihcd, 1000);
+	switch (XFR_MASK(status)) {
+	case XFR_SUCCESS:
+		usb_dotoggle(udev, epnum, is_out);
+		iep->xcat_err = 0;
+		ret = 0;
+		if ((expected_len == 64) && (status & R64B))
+			short_packet = false;
+		break;
+	case XFR_NAK:
+		iep->xcat_err = 0;
+		ret = -EINPROGRESS;
+		break;
+	case XFR_TOGERR:
+		/*
+		 * The peripheral missed the previous Ack and sent the
+		 * same packet again. The Ack is sent by the hardware.
+		 * As the data has already been received, ignore this
+		 * event.
+		 */
+		ret = -EINPROGRESS;
+		break;
+	case XFR_PKTERR:
+	case XFR_PIDERR:
+	case XFR_WRONGPID:
+	case XFR_CRCERR:
+	case XFR_TIMEOUT:
+		if (++iep->xcat_err < 8)
+			ret = -EINPROGRESS;
+		else
+			ret = -EPROTO;
+		break;
+	case XFR_STALL:
+		ret = -EPIPE;
+		break;
+	case XFR_BADLEN:
+		ret = -EOVERFLOW;
+		break;
+	default:
+		pr_err("transaction timed out\n");
+		ret = -EIO;
+	}
+
+	/*
+	 * Proceed further only if Ack is received and
+	 * we are expecting some data.
+	 */
+	if (ret || !expected_len)
+		goto out;
+
+	if (short_packet)
+		len = ice40_spi_reg_read(ihcd, RBLEN_REG);
+	else
+		len = 64;
+
+	/* babble condition */
+	if (len > expected_len) {
+		pr_err("overflow condition\n");
+		ret = -EOVERFLOW;
+		goto out;
+	}
+
+	/*
+	 * Zero-length packet received. Nothing to read from
+	 * the FIFO.
+	 */
+	if (len == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	ihcd->in_buf[0] = READ_CMD(RBUF0_REG);
+
+	ihcd->in_xfr[1].rx_buf = buf;
+	ihcd->in_xfr[1].len = len;
+
+	ret = spi_sync(ihcd->spi, ihcd->in_msg);
+	if (ret < 0) {
+		pr_err("SPI transfer failed\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	urb->actual_length += len;
+	if ((urb->actual_length == total_len) ||
+			(len < expected_len))
+		ret = 0; /* URB completed */
+	else
+		ret = -EINPROGRESS; /* still pending */
+out:
+	trace_ice40_in(epnum, xfr_status_string(status), len,
+			expected_len, ret);
+	return ret;
+}
+
+static int ice40_xfer_out(struct ice40_hcd *ihcd, struct urb *urb)
+{
+	struct usb_host_endpoint *ep = urb->ep;
+	struct usb_device *udev = urb->dev;
+	u32 total_len = urb->transfer_buffer_length;
+	u16 maxpacket = usb_endpoint_maxp(&ep->desc);
+	u8 epnum = usb_pipeendpoint(urb->pipe);
+	bool is_out = usb_pipeout(urb->pipe);
+	struct ice40_ep *iep = ep->hcpriv;
+	u8 cmd, status, len, t;
+	void *buf;
+	int ret;
+
+	if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
+		len = 0;
+		buf = NULL;
+		t = 1; /* STATUS PHASE is always DATA1 */
+	} else {
+		len = min_t(u32, maxpacket, total_len - urb->actual_length);
+		buf = urb->transfer_buffer + urb->actual_length;
+		t = usb_gettoggle(udev, epnum, is_out);
+	}
+
+	/*
+	 * OUT transaction Handling:
+	 * - If we need to send data, write the data to WBUF Fifo
+	 * - Program the WBLEN register
+	 * - Program HCMD register to initiate the OUT transaction.
+	 * - poll for completion by reading XFRST register.
+	 * - Interpret the result.
+	 */
+
+
+	if (!len)
+		goto no_data;
+
+	ihcd->out_buf[0] = WRITE_CMD(WBUF0_REG);
+
+	ihcd->out_xfr[1].tx_buf = buf;
+	ihcd->out_xfr[1].len = len;
+
+	ret = spi_sync(ihcd->spi, ihcd->out_msg);
+	if (ret < 0) {
+		pr_err("SPI transaction failed\n");
+		status = ret = -EIO;
+		goto out;
+	}
+
+no_data:
+	/*
+	 * Cache the WBLEN register and update it only if it
+	 * is changed from the previous value.
+	 */
+	if (len != ihcd->wblen0) {
+		ice40_spi_reg_write(ihcd, len, WBLEN_REG);
+		ihcd->wblen0 = len;
+	}
+
+	cmd = HCMD_PT(1) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
+	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
+
+	status = ice40_poll_xfer(ihcd, 1000);
+	switch (XFR_MASK(status)) {
+	case XFR_SUCCESS:
+		usb_dotoggle(udev, epnum, is_out);
+		urb->actual_length += len;
+		iep->xcat_err = 0;
+		if (!len || (urb->actual_length == total_len))
+			ret = 0; /* URB completed */
+		else
+			ret = -EINPROGRESS; /* pending */
+		break;
+	case XFR_NAK:
+		iep->xcat_err = 0;
+		ret = -EINPROGRESS;
+		break;
+	case XFR_PKTERR:
+	case XFR_PIDERR:
+	case XFR_WRONGPID:
+	case XFR_CRCERR:
+	case XFR_TIMEOUT:
+		if (++iep->xcat_err < 8)
+			ret = -EINPROGRESS;
+		else
+			ret = -EPROTO;
+		break;
+	case XFR_STALL:
+		ret = -EPIPE;
+		break;
+	case XFR_BADLEN:
+		ret = -EOVERFLOW;
+		break;
+	default:
+		pr_err("transaction timed out\n");
+		ret = -EIO;
+	}
+
+out:
+	trace_ice40_out(epnum, xfr_status_string(status), len, ret);
+	return ret;
+}
+
+static int ice40_process_urb(struct ice40_hcd *ihcd, struct urb *urb)
+{
+	struct usb_device *udev = urb->dev;
+	u8 devnum = usb_pipedevice(urb->pipe);
+	bool is_out = usb_pipeout(urb->pipe);
+	u32 total_len = urb->transfer_buffer_length;
+	int ret = 0;
+
+	/*
+	 * The USB device address can be reset to 0 by the core temporarily
+	 * during reset recovery. Don't assume anything about the device
+	 * address. The device address is programmed as 0 by default. If
+	 * the device address differs from the previously cached value,
+	 * re-program it here before proceeding. The device
+	 * address register (FADDR) holds the value across multiple
+	 * transactions and we support only one device.
+	 */
+	if (ihcd->devnum != devnum) {
+		ice40_spi_reg_write(ihcd, devnum, FADDR_REG);
+		ihcd->devnum = devnum;
+	}
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		switch (ihcd->ep0_state) {
+		case SETUP_PHASE:
+			trace_ice40_ep0("SETUP");
+			ret = ice40_xfer_setup(ihcd, urb);
+			if (ret)
+				break;
+			if (total_len) {
+				ihcd->ep0_state = DATA_PHASE;
+				/*
+				 * Data stage always begins with
+				 * DATA1 PID.
+				 */
+				usb_settoggle(udev, 0, is_out, 1);
+			} else {
+				ihcd->ep0_state = STATUS_PHASE;
+				goto do_status;
+			}
+			/* fall through */
+		case DATA_PHASE:
+			trace_ice40_ep0("DATA");
+			if (is_out)
+				ret = ice40_xfer_out(ihcd, urb);
+			else
+				ret = ice40_xfer_in(ihcd, urb);
+			if (ret)
+				break;
+			/* DATA Phase is completed successfully */
+			ihcd->ep0_state = STATUS_PHASE;
+			/* fall through */
+		case STATUS_PHASE:
+do_status:
+			trace_ice40_ep0("STATUS");
+			/* zero len DATA transfers have IN status */
+			if (!total_len || is_out)
+				ret = ice40_xfer_in(ihcd, urb);
+			else
+				ret = ice40_xfer_out(ihcd, urb);
+			if (ret)
+				break;
+			ihcd->ep0_state = SETUP_PHASE;
+			break;
+		default:
+			pr_err("unknown stage for a control transfer\n");
+			break;
+		}
+		break;
+	case PIPE_BULK:
+		if (is_out)
+			ret = ice40_xfer_out(ihcd, urb);
+		else
+			ret = ice40_xfer_in(ihcd, urb);
+		/*
+		 * We may have to support zero len packet terminations
+		 * for URB_ZERO_PACKET URBs.
+		 */
+		break;
+	default:
+		pr_err("INT/ISO transfers not supported\n");
+		break;
+	}
+
+	return ret;
+}
+
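
For reference, a minimal sketch (separate from the driver code above) of the control-transfer phase rules implemented in ice40_process_urb(): the DATA phase is skipped for zero-length requests, and the STATUS phase is IN when there is no data or the data stage was OUT.

#include <stdbool.h>
#include <stdio.h>

static bool has_data_phase(unsigned int wLength)
{
	return wLength != 0;
}

static bool status_stage_is_in(unsigned int wLength, bool data_is_out)
{
	return wLength == 0 || data_is_out;
}

int main(void)
{
	/* e.g. GET_DESCRIPTOR (18 bytes IN) vs SET_ADDRESS (no data) */
	printf("GET_DESCRIPTOR: data phase %d, status IN %d\n",
	       has_data_phase(18), status_stage_is_in(18, false));
	printf("SET_ADDRESS:    data phase %d, status IN %d\n",
	       has_data_phase(0), status_stage_is_in(0, false));
	return 0;
}
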
+/* Must be called with spin lock and interrupts disabled */
+static void ice40_complete_urb(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+	struct usb_host_endpoint *ep = urb->ep;
+	struct ice40_ep *iep = ep->hcpriv;
+	struct urb *first_urb;
+	bool needs_update = false;
+	bool control = usb_pipecontrol(urb->pipe);
+
+	/*
+	 * If the active URB, i.e. the first URB in the ep list, is being
+	 * removed, clear the transaction error count. If it is a control
+	 * URB, ep0_state needs to be reset to SETUP_PHASE.
+	 */
+	first_urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+	if (urb == first_urb)
+		needs_update = true;
+
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock(&ihcd->lock);
+	trace_ice40_urb_done(urb, status);
+	usb_hcd_giveback_urb(ihcd->hcd, urb, status);
+	spin_lock(&ihcd->lock);
+
+	if (needs_update) {
+		iep->xcat_err = 0;
+		if (control)
+			ihcd->ep0_state = SETUP_PHASE;
+	}
+}
+
+static void ice40_async_work(struct work_struct *work)
+{
+	struct ice40_hcd *ihcd = container_of(work,
+			struct ice40_hcd, async_work);
+	struct usb_hcd *hcd = ihcd->hcd;
+	struct list_head *tmp, *uent, *utmp;
+	struct ice40_ep *iep;
+	struct usb_host_endpoint *ep;
+	struct urb *urb;
+	unsigned long flags;
+	int status;
+
+	/*
+	 * Traverse the active endpoints circularly and process URBs.
+	 * If any endpoint is marked for unlinking, the URBs are
+	 * completed here. The endpoint is removed from active list
+	 * if a URB is retired with -EPIPE/-EPROTO errors.
+	 */
+
+	spin_lock_irqsave(&ihcd->lock, flags);
+
+	if (list_empty(&ihcd->async_list))
+		goto out;
+
+	iep = list_first_entry(&ihcd->async_list, struct ice40_ep, ep_list);
+	while (1) {
+		ep = iep->ep;
+
+		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+		if (urb->unlinked) {
+			status = urb->unlinked;
+		} else {
+			spin_unlock_irqrestore(&ihcd->lock, flags);
+			status = ice40_process_urb(ihcd, urb);
+			spin_lock_irqsave(&ihcd->lock, flags);
+		}
+
+		if ((status == -EPIPE) || (status == -EPROTO))
+			iep->halted = true;
+
+		if (status != -EINPROGRESS)
+			ice40_complete_urb(hcd, urb, status);
+
+		if (iep->unlinking) {
+			list_for_each_safe(uent, utmp, &ep->urb_list) {
+				urb = list_entry(uent, struct urb, urb_list);
+				if (urb->unlinked)
+					ice40_complete_urb(hcd, urb, 0);
+			}
+			iep->unlinking = false;
+		}
+
+		tmp = iep->ep_list.next;
+		if (list_empty(&ep->urb_list) || iep->halted) {
+			list_del_init(&iep->ep_list);
+
+			if (list_empty(&ihcd->async_list))
+				break;
+		}
+
+		if (tmp == &ihcd->async_list)
+			tmp = tmp->next;
+		iep = list_entry(tmp, struct ice40_ep, ep_list);
+	}
+out:
+	spin_unlock_irqrestore(&ihcd->lock, flags);
+}
+
+static int
+ice40_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+	struct usb_device *udev = urb->dev;
+	struct usb_host_endpoint *ep = urb->ep;
+	bool is_out = usb_pipeout(urb->pipe);
+	u8 epnum = usb_pipeendpoint(urb->pipe);
+	struct ice40_ep *iep;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * This bridge chip supports only Full-speed. So ISO is not
+	 * supported. Interrupt support is not implemented as there
+	 * is no use case.
+	 */
+	if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
+		pr_debug("iso and int xfers not supported\n");
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	spin_lock_irqsave(&ihcd->lock, flags);
+
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret)
+		goto rel_lock;
+
+	trace_ice40_urb_enqueue(urb);
+
+	iep = ep->hcpriv;
+	if (!iep) {
+		iep = kzalloc(sizeof(struct ice40_ep), GFP_ATOMIC);
+		if (!iep) {
+			pr_debug("fail to allocate iep\n");
+			ret = -ENOMEM;
+			goto unlink;
+		}
+		ep->hcpriv = iep;
+		INIT_LIST_HEAD(&iep->ep_list);
+		iep->ep = ep;
+		usb_settoggle(udev, epnum, is_out, 0);
+		if (usb_pipecontrol(urb->pipe))
+			ihcd->ep0_state = SETUP_PHASE;
+	}
+
+	/*
+	 * We expect the interface driver to clear the stall condition
+	 * before queueing another URB. For example, a mass storage
+	 * device may STALL a bulk endpoint for an unsupported command.
+	 * The storage driver clears the STALL condition before queueing
+	 * another URB.
+	 */
+	iep->halted = false;
+	if (list_empty(&iep->ep_list))
+		list_add_tail(&iep->ep_list, &ihcd->async_list);
+
+	queue_work(ihcd->wq, &ihcd->async_work);
+
+	spin_unlock_irqrestore(&ihcd->lock, flags);
+
+	return 0;
+unlink:
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+rel_lock:
+	spin_unlock_irqrestore(&ihcd->lock, flags);
+out:
+	return ret;
+}
+
+static int
+ice40_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+	struct usb_host_endpoint *ep = urb->ep;
+	struct ice40_ep *iep;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ihcd->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret)
+		goto rel_lock;
+
+	trace_ice40_urb_dequeue(urb);
+	iep = ep->hcpriv;
+
+	/*
+	 * If the endpoint is not in asynchronous schedule, complete
+	 * the URB immediately. Otherwise mark it as being unlinked.
+	 * The asynchronous schedule work will take care of completing
+	 * the URB when this endpoint is encountered during traversal.
+	 */
+	if (list_empty(&iep->ep_list))
+		ice40_complete_urb(hcd, urb, status);
+	else
+		iep->unlinking = true;
+
+rel_lock:
+	spin_unlock_irqrestore(&ihcd->lock, flags);
+	return ret;
+}
+
+static void
+ice40_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct ice40_ep	*iep = ep->hcpriv;
+
+	/*
+	 * If there has been no I/O on this endpoint, ep->hcpriv
+	 * will be NULL. Nothing to do in this case.
+	 */
+	if (!iep)
+		return;
+
+	if (!list_empty(&ep->urb_list))
+		pr_err("trying to disable a non-empty endpoint\n");
+
+	kfree(iep);
+	ep->hcpriv = NULL;
+}
+
+
+static int ice40_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+	int ret = 0;
+
+	/*
+	 * The core calls hub_status_data during suspend/resume.
+	 * Return 0 if there is no port change. pcd_pending
+	 * is set to true when a device is connected and the line
+	 * state is sampled via a debugfs command. Clear this
+	 * flag after returning the port change status.
+	 */
+	if (ihcd->pcd_pending) {
+		*buf = (1 << 1);
+		ret = 1;
+		ihcd->pcd_pending = false;
+	}
+
+	return ret;
+}
+
+static void ice40_hub_descriptor(struct usb_hub_descriptor *desc)
+{
+	/* There is nothing special about us!! */
+	desc->bDescLength = 9;
+	desc->bDescriptorType = 0x29;
+	desc->bNbrPorts = 1;
+	desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM |
+				HUB_CHAR_NO_OCPM);
+	desc->bPwrOn2PwrGood = 0;
+	desc->bHubContrCurrent = 0;
+	desc->u.hs.DeviceRemovable[0] = 0;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+}
+
+static int
+ice40_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+			u16 wIndex, char *buf, u16 wLength)
+{
+	int ret = 0;
+	u8 ctrl;
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+	/*
+	 * We have only 1 port. No special locking is required while
+	 * handling root hub commands. The bridge chip does not maintain
+	 * any port states. Maintain different port states in software.
+	 */
+	switch (typeReq) {
+	case ClearPortFeature:
+		if (wIndex != 1 || wLength != 0)
+			goto error;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			/*
+			 * resume to simplify the resume sequence, so
+			 * we may simply return from here. If the device is
+			 * resumed before the root hub is suspended, this
+			 * flag will be cleared here.
+			 * flags will be cleared here.
+			 */
+			if (!(ihcd->port_flags & USB_PORT_STAT_SUSPEND))
+				break;
+			ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
+			break;
+		case USB_PORT_FEAT_ENABLE:
+			ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
+			break;
+		case USB_PORT_FEAT_POWER:
+			ihcd->port_flags &= ~USB_PORT_STAT_POWER;
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			ihcd->port_flags &= ~(USB_PORT_STAT_C_CONNECTION << 16);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_SUSPEND:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_RESET:
+			/* nothing special here */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case GetHubDescriptor:
+		ice40_hub_descriptor((struct usb_hub_descriptor *) buf);
+		break;
+	case GetHubStatus:
+		put_unaligned_le32(0, buf);
+		break;
+	case GetPortStatus:
+		if (wIndex != 1)
+			goto error;
+
+		/*
+		 * Core resets the device and requests port status to
+		 * stop the reset signaling. If there is a reset in
+		 * progress, finish it here.
+		 */
+		ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
+		if (!(ctrl & RESET_CTRL))
+			ihcd->port_flags &= ~USB_PORT_STAT_RESET;
+
+		put_unaligned_le32(ihcd->port_flags, buf);
+		break;
+	case SetPortFeature:
+		if (wIndex != 1 || wLength != 0)
+			goto error;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if (ihcd->port_flags & USB_PORT_STAT_RESET)
+				goto error;
+			if (!(ihcd->port_flags & USB_PORT_STAT_ENABLE))
+				goto error;
+			/* SOFs will be stopped during root hub suspend */
+			ihcd->port_flags |= USB_PORT_STAT_SUSPEND;
+			break;
+		case USB_PORT_FEAT_POWER:
+			ihcd->port_flags |= USB_PORT_STAT_POWER;
+			break;
+		case USB_PORT_FEAT_RESET:
+			/* Good time to enable the port */
+			ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
+					RESET_CTRL, CTRL0_REG);
+			ihcd->port_flags |= USB_PORT_STAT_RESET;
+			ihcd->port_flags |= USB_PORT_STAT_ENABLE;
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+error:
+		/* "protocol stall" on error */
+		ret = -EPIPE;
+	}
+
+	trace_ice40_hub_control(typeReq, wValue, wIndex, wLength, ret);
+	return ret;
+}
+
+static void ice40_spi_power_off(struct ice40_hcd *ihcd);
+static int ice40_bus_suspend(struct usb_hcd *hcd)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+	trace_ice40_bus_suspend(0); /* start */
+
+	/* This happens only during debugging */
+	if (!ihcd->devnum) {
+		pr_debug("device still not connected. abort suspend\n");
+		trace_ice40_bus_suspend(2); /* failure */
+		return -EAGAIN;
+	}
+	/*
+	 * Stop sending the SOFs on the downstream port. The device
+	 * finds the bus idle and enters suspend. The device
+	 * takes ~3 msec to enter suspend.
+	 */
+	ihcd->ctrl0 &= ~SOFEN_CTRL;
+	ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
+	usleep_range(4500, 5000);
+
+	/*
+	 * Power collapse the bridge chip to avoid the leakage
+	 * current.
+	 */
+	ice40_spi_power_off(ihcd);
+
+	trace_ice40_bus_suspend(1); /* successful */
+	pm_relax(&ihcd->spi->dev);
+	return 0;
+}
+
+static int ice40_spi_load_fw(struct ice40_hcd *ihcd);
+static int ice40_bus_resume(struct usb_hcd *hcd)
+{
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+	u8 ctrl0;
+	int ret;
+
+	pm_stay_awake(&ihcd->spi->dev);
+	trace_ice40_bus_resume(0); /* start */
+	/*
+	 * Power up the bridge chip and load the configuration file.
+	 * Re-program the previous settings. For now we need to
+	 * update the device address only.
+	 */
+	ice40_spi_load_fw(ihcd);
+	ice40_spi_reg_write(ihcd, ihcd->devnum, FADDR_REG);
+	ihcd->wblen0 = ~0;
+
+	/*
+	 * Program the bridge chip to drive resume signaling. The SOFs
+	 * are automatically transmitted after resume completion. It
+	 * will take ~20 msec for resume completion.
+	 */
+	ice40_spi_reg_write(ihcd, ihcd->ctrl0 | RESUME_CTRL, CTRL0_REG);
+	usleep_range(20000, 21000);
+	ret = ice40_handshake(ihcd, CTRL0_REG, RESUME_CTRL, 0, 5000);
+	if (ret) {
+		pr_err("resume failed\n");
+		trace_ice40_bus_resume(2); /* failure */
+		return -ENODEV;
+	}
+
+	ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
+	if (!(ctrl0 & SOFEN_CTRL)) {
+		pr_err("SOFs are not transmitted after resume\n");
+		trace_ice40_bus_resume(3); /* failure */
+		return -ENODEV;
+	}
+
+	ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
+	ihcd->ctrl0 |= SOFEN_CTRL;
+
+	trace_ice40_bus_resume(1); /* success */
+	return 0;
+}
+
+static void ice40_set_autosuspend_delay(struct usb_device *dev)
+{
+	/*
+	 * Immediate suspend for root hub and 500 msec auto-suspend
+	 * timeout for the card.
+	 */
+	if (!dev->parent)
+		pm_runtime_set_autosuspend_delay(&dev->dev, 0);
+	else
+		pm_runtime_set_autosuspend_delay(&dev->dev, 500);
+}
+
+static const struct hc_driver ice40_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "ICE40 SPI Host Controller",
+	.hcd_priv_size = sizeof(struct ice40_hcd *),
+	.flags = HCD_USB11,
+
+	/* setup and clean up */
+	.reset = ice40_reset,
+	.start = ice40_run,
+	.stop = ice40_stop,
+
+	/* endpoint and I/O routines */
+	.urb_enqueue = ice40_urb_enqueue,
+	.urb_dequeue = ice40_urb_dequeue,
+	.endpoint_disable = ice40_endpoint_disable,
+
+	/* Root hub operations */
+	.hub_status_data = ice40_hub_status_data,
+	.hub_control = ice40_hub_control,
+	.bus_suspend = ice40_bus_suspend,
+	.bus_resume = ice40_bus_resume,
+
+	.set_autosuspend_delay = ice40_set_autosuspend_delay,
+};
+
+static int ice40_spi_parse_dt(struct ice40_hcd *ihcd)
+{
+	struct device_node *node = ihcd->spi->dev.of_node;
+	int ret = 0;
+
+	if (!node) {
+		pr_err("device specific info missing\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ihcd->reset_gpio = of_get_named_gpio(node, "lattice,reset-gpio", 0);
+	if (ihcd->reset_gpio < 0) {
+		pr_err("reset gpio is missing\n");
+		ret = ihcd->reset_gpio;
+		goto out;
+	}
+
+	ihcd->slave_select_gpio = of_get_named_gpio(node,
+				"lattice,slave-select-gpio", 0);
+	if (ihcd->slave_select_gpio < 0) {
+		pr_err("slave select gpio is missing\n");
+		ret = ihcd->slave_select_gpio;
+		goto out;
+	}
+
+	ihcd->config_done_gpio = of_get_named_gpio(node,
+				"lattice,config-done-gpio", 0);
+	if (ihcd->config_done_gpio < 0) {
+		pr_err("config done gpio is missing\n");
+		ret = ihcd->config_done_gpio;
+		goto out;
+	}
+
+	ihcd->vcc_en_gpio = of_get_named_gpio(node, "lattice,vcc-en-gpio", 0);
+	if (ihcd->vcc_en_gpio < 0) {
+		pr_err("vcc enable gpio is missing\n");
+		ret = ihcd->vcc_en_gpio;
+		goto out;
+	}
+
+	/*
+	 * When clk-en-gpio is present, it is used to enable the 19.2 MHz
+	 * clock from the MSM to the bridge chip. Otherwise the
+	 * on-board clock is used.
+	 */
+	ihcd->clk_en_gpio = of_get_named_gpio(node, "lattice,clk-en-gpio", 0);
+	if (ihcd->clk_en_gpio < 0)
+		ihcd->clk_en_gpio = 0;
+out:
+	return ret;
+}
+
+static void ice40_spi_power_off(struct ice40_hcd *ihcd)
+{
+	if (!ihcd->powered)
+		return;
+
+	gpio_direction_output(ihcd->vcc_en_gpio, 0);
+	regulator_disable(ihcd->core_vcc);
+	regulator_disable(ihcd->spi_vcc);
+	if (ihcd->gpio_vcc)
+		regulator_disable(ihcd->gpio_vcc);
+	if (ihcd->clk_en_gpio)
+		gpio_direction_output(ihcd->clk_en_gpio, 0);
+
+	ihcd->powered = false;
+}
+
+static int ice40_spi_power_up(struct ice40_hcd *ihcd)
+{
+	int ret;
+
+	if (ihcd->clk_en_gpio) {
+		ret = gpio_direction_output(ihcd->clk_en_gpio, 1);
+		if (ret < 0) {
+			pr_err("fail to enable clk %d\n", ret);
+			goto out;
+		}
+	}
+
+	if (ihcd->gpio_vcc) {
+		ret = regulator_enable(ihcd->gpio_vcc); /* 1.8 V */
+		if (ret < 0) {
+			pr_err("fail to enable gpio vcc\n");
+			goto disable_clk;
+		}
+	}
+
+	ret = regulator_enable(ihcd->spi_vcc); /* 1.8 V */
+	if (ret < 0) {
+		pr_err("fail to enable spi vcc\n");
+		goto disable_gpio_vcc;
+	}
+
+	ret = regulator_enable(ihcd->core_vcc); /* 1.2 V */
+	if (ret < 0) {
+		pr_err("fail to enable core vcc\n");
+		goto disable_spi_vcc;
+	}
+
+	ret = gpio_direction_output(ihcd->vcc_en_gpio, 1);
+	if (ret < 0) {
+		pr_err("fail to assert vcc gpio\n");
+		goto disable_core_vcc;
+	}
+
+	ihcd->powered = true;
+
+	return 0;
+
+disable_core_vcc:
+	regulator_disable(ihcd->core_vcc);
+disable_spi_vcc:
+	regulator_disable(ihcd->spi_vcc);
+disable_gpio_vcc:
+	if (ihcd->gpio_vcc)
+		regulator_disable(ihcd->gpio_vcc);
+disable_clk:
+	if (ihcd->clk_en_gpio)
+		gpio_direction_output(ihcd->clk_en_gpio, 0);
+out:
+	return ret;
+}
+
+static struct gpiomux_setting slave_select_setting = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_NONE,
+	.dir = GPIOMUX_OUT_LOW,
+};
+
+static int ice40_spi_cache_fw(struct ice40_hcd *ihcd)
+{
+	const struct firmware *fw;
+	void *buf;
+	size_t buf_len;
+	int ret;
+
+	ret = request_firmware(&fw, fw_name, &ihcd->spi->dev);
+	if (ret < 0) {
+		pr_err("fail to get the firmware\n");
+		goto out;
+	}
+
+	pr_debug("received firmware size = %zu\n", fw->size);
+
+	/*
+	 * The bridge expects additional clock cycles after
+	 * receiving the configuration data. We don't have
+	 * direct control over the SPI clock, so add extra bytes
+	 * to the configuration data.
+	 */
+	buf_len = fw->size + 16;
+	buf = devm_kzalloc(&ihcd->spi->dev, buf_len, GFP_KERNEL);
+	if (!buf) {
+		pr_err("fail to allocate firmware buffer\n");
+		ret = -ENOMEM;
+		goto release;
+	}
+
+	/*
+	 * The firmware buffer cannot be used for DMA as it
+	 * is not physically contiguous. Copy the data into a
+	 * locally allocated buffer. This buffer will be freed only
+	 * during unbind or rmmod.
+	 */
+	memcpy(buf, fw->data, fw->size);
+	release_firmware(fw);
+
+	/*
+	 * The bridge supports only 25 MHz during configuration
+	 * file loading.
+	 */
+	ihcd->fmsg_xfr[0].tx_buf = buf;
+	ihcd->fmsg_xfr[0].len = buf_len;
+	ihcd->fmsg_xfr[0].speed_hz = 25000000;
+
+	return 0;
+
+release:
+	release_firmware(fw);
+out:
+	return ret;
+}
+
+static int ice40_spi_load_fw(struct ice40_hcd *ihcd)
+{
+	int ret, i;
+	struct gpiomux_setting old_setting;
+
+	ret = gpio_direction_output(ihcd->reset_gpio, 0);
+	if (ret  < 0) {
+		pr_err("fail to assert reset %d\n", ret);
+		goto out;
+	}
+
+	ret = gpio_direction_output(ihcd->vcc_en_gpio, 0);
+	if (ret < 0) {
+		pr_err("fail to de-assert vcc_en gpio %d\n", ret);
+		goto out;
+	}
+
+	/*
+	 * The bridge chip samples the chip select signal during
+	 * power-up. If it is low, it enters SPI slave mode and
+	 * accepts the configuration data from us. The chip
+	 * select signal is managed by the SPI controller driver.
+	 * We temporarily override the chip select config to
+	 * drive it low. The SPI bus needs to be locked down during
+	 * this period to avoid other slave data going to our
+	 * bridge chip.
+	 *
+	 */
+	spi_bus_lock(ihcd->spi->master);
+
+	ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
+			&slave_select_setting, &old_setting);
+	if (ret < 0) {
+		pr_err("fail to select the slave %d\n", ret);
+		goto out;
+	}
+
+	ret = ice40_spi_power_up(ihcd);
+	if (ret < 0) {
+		pr_err("fail to power up the chip\n");
+		goto out;
+	}
+
+
+	/*
+	 * The databook says 1200 usec is required before the
+	 * chip becomes ready for the SPI transfer.
+	 */
+	usleep_range(1200, 1250);
+
+	ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
+			&old_setting, NULL);
+	if (ret < 0) {
+		pr_err("fail to de-select the slave %d\n", ret);
+		goto power_off;
+	}
+
+	ret = spi_sync_locked(ihcd->spi, ihcd->fmsg);
+
+	spi_bus_unlock(ihcd->spi->master);
+
+	if (ret < 0) {
+		pr_err("spi write failed\n");
+		goto power_off;
+	}
+
+	for (i = 0; i < 1000; i++) {
+		ret = gpio_get_value(ihcd->config_done_gpio);
+		if (ret) {
+			pr_debug("config done asserted %d\n", i);
+			break;
+		}
+		udelay(1);
+	}
+
+	if (ret <= 0) {
+		pr_err("config done not asserted\n");
+		ret = -ENODEV;
+		goto power_off;
+	}
+
+	ret = gpio_direction_output(ihcd->reset_gpio, 1);
+	if (ret  < 0) {
+		pr_err("fail to assert reset %d\n", ret);
+		goto power_off;
+	}
+	udelay(50);
+
+	ret = ice40_spi_reg_read(ihcd, XFRST_REG);
+	pr_debug("XFRST val is %x\n", ret);
+	if (!(ret & PLLOK)) {
+		pr_err("The PLL2 is not synchronized\n");
+		goto power_off;
+	}
+
+	pr_info("Firmware load success\n");
+
+	return 0;
+
+power_off:
+	ice40_spi_power_off(ihcd);
+out:
+	return ret;
+}
+
+static int ice40_spi_init_regulators(struct ice40_hcd *ihcd)
+{
+	int ret;
+
+	ihcd->spi_vcc = devm_regulator_get(&ihcd->spi->dev, "spi-vcc");
+	if (IS_ERR(ihcd->spi_vcc)) {
+		ret = PTR_ERR(ihcd->spi_vcc);
+		if (ret != -EPROBE_DEFER)
+			pr_err("fail to get spi-vcc %d\n", ret);
+		goto out;
+	}
+
+	ret = regulator_set_voltage(ihcd->spi_vcc, 1800000, 1800000);
+	if (ret < 0) {
+		pr_err("fail to set spi-vcc %d\n", ret);
+		goto out;
+	}
+
+	ihcd->core_vcc = devm_regulator_get(&ihcd->spi->dev, "core-vcc");
+	if (IS_ERR(ihcd->core_vcc)) {
+		ret = PTR_ERR(ihcd->core_vcc);
+		if (ret != -EPROBE_DEFER)
+			pr_err("fail to get core-vcc %d\n", ret);
+		goto out;
+	}
+
+	ret = regulator_set_voltage(ihcd->core_vcc, 1200000, 1200000);
+	if (ret < 0) {
+		pr_err("fail to set core-vcc %d\n", ret);
+		goto out;
+	}
+
+	if (!of_get_property(ihcd->spi->dev.of_node, "gpio-supply", NULL))
+		goto out;
+
+	ihcd->gpio_vcc = devm_regulator_get(&ihcd->spi->dev, "gpio");
+	if (IS_ERR(ihcd->gpio_vcc)) {
+		ret = PTR_ERR(ihcd->gpio_vcc);
+		if (ret != -EPROBE_DEFER)
+			pr_err("fail to get gpio_vcc %d\n", ret);
+		goto out;
+	}
+
+	ret = regulator_set_voltage(ihcd->gpio_vcc, 1800000, 1800000);
+	if (ret < 0) {
+		pr_err("fail to set gpio_vcc %d\n", ret);
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static int ice40_spi_request_gpios(struct ice40_hcd *ihcd)
+{
+	int ret;
+
+	ret = devm_gpio_request(&ihcd->spi->dev, ihcd->reset_gpio,
+				"ice40_reset");
+	if (ret < 0) {
+		pr_err("fail to request reset gpio\n");
+		goto out;
+	}
+
+	ret = devm_gpio_request(&ihcd->spi->dev, ihcd->config_done_gpio,
+				"ice40_config_done");
+	if (ret < 0) {
+		pr_err("fail to request config_done gpio\n");
+		goto out;
+	}
+
+	ret = devm_gpio_request(&ihcd->spi->dev, ihcd->vcc_en_gpio,
+				"ice40_vcc_en");
+	if (ret < 0) {
+		pr_err("fail to request vcc_en gpio\n");
+		goto out;
+	}
+
+	if (ihcd->clk_en_gpio) {
+		ret = devm_gpio_request(&ihcd->spi->dev, ihcd->clk_en_gpio,
+					"ice40_clk_en");
+		if (ret < 0)
+			pr_err("fail to request clk_en gpio\n");
+	}
+
+out:
+	return ret;
+}
+
+static int
+ice40_spi_init_one_xfr(struct ice40_hcd *ihcd, enum ice40_xfr_type type)
+{
+	struct spi_message **m;
+	struct spi_transfer **t;
+	int n;
+
+	switch (type) {
+	case FIRMWARE_XFR:
+		m = &ihcd->fmsg;
+		t = &ihcd->fmsg_xfr;
+		n = 1;
+		break;
+	case REG_WRITE_XFR:
+		m = &ihcd->wmsg;
+		t = &ihcd->wmsg_xfr;
+		n = 1;
+		break;
+	case REG_READ_XFR:
+		m = &ihcd->rmsg;
+		t = &ihcd->rmsg_xfr;
+		n = 1;
+		break;
+	case SETUP_XFR:
+		m = &ihcd->setup_msg;
+		t = &ihcd->setup_xfr;
+		n = 2;
+		break;
+	case DATA_IN_XFR:
+		m = &ihcd->in_msg;
+		t = &ihcd->in_xfr;
+		n = 2;
+		break;
+	case DATA_OUT_XFR:
+		m = &ihcd->out_msg;
+		t = &ihcd->out_xfr;
+		n = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*m = devm_kzalloc(&ihcd->spi->dev, sizeof(**m), GFP_KERNEL);
+	if (*m == NULL)
+		goto out;
+
+	*t = devm_kzalloc(&ihcd->spi->dev, n * sizeof(**t), GFP_KERNEL);
+	if (*t == NULL)
+		goto out;
+
+	spi_message_init_with_transfers(*m, *t, n);
+
+	return 0;
+out:
+	return -ENOMEM;
+}
+
+static int ice40_spi_init_xfrs(struct ice40_hcd *ihcd)
+{
+	int ret = -ENOMEM;
+
+	ret = ice40_spi_init_one_xfr(ihcd, FIRMWARE_XFR);
+	if (ret < 0)
+		goto out;
+
+	ret = ice40_spi_init_one_xfr(ihcd, REG_WRITE_XFR);
+	if (ret < 0)
+		goto out;
+
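+	/*
+	 * The register access buffers are allocated once here and
+	 * reused for every transfer: 2 bytes for register writes
+	 * and 3 bytes for register reads.
+	 */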
+	ihcd->w_tx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
+	if (!ihcd->w_tx_buf)
+		goto out;
+
+	ihcd->w_rx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
+	if (!ihcd->w_rx_buf)
+		goto out;
+
+	ihcd->wmsg_xfr[0].tx_buf = ihcd->w_tx_buf;
+	ihcd->wmsg_xfr[0].rx_buf = ihcd->w_rx_buf;
+	ihcd->wmsg_xfr[0].len = 2;
+
+	ret = ice40_spi_init_one_xfr(ihcd, REG_READ_XFR);
+	if (ret < 0)
+		goto out;
+
+	ihcd->r_tx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
+	if (!ihcd->r_tx_buf)
+		goto out;
+
+	ihcd->r_rx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
+	if (!ihcd->r_rx_buf)
+		goto out;
+
+	ihcd->rmsg_xfr[0].tx_buf = ihcd->r_tx_buf;
+	ihcd->rmsg_xfr[0].rx_buf = ihcd->r_rx_buf;
+	ihcd->rmsg_xfr[0].len = 3;
+
+	ret = ice40_spi_init_one_xfr(ihcd, SETUP_XFR);
+	if (ret < 0)
+		goto out;
+
+	ihcd->setup_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
+	if (!ihcd->setup_buf)
+		goto out;
+	ihcd->setup_xfr[0].tx_buf = ihcd->setup_buf;
+	ihcd->setup_xfr[0].len = 1;
+
+	ret = ice40_spi_init_one_xfr(ihcd, DATA_IN_XFR);
+	if (ret < 0)
+		goto out;
+	ihcd->in_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
+	if (!ihcd->in_buf)
+		goto out;
+	ihcd->in_xfr[0].tx_buf = ihcd->in_buf;
+	ihcd->in_xfr[0].len = 2;
+
+	ret = ice40_spi_init_one_xfr(ihcd, DATA_OUT_XFR);
+	if (ret < 0)
+		goto out;
+	ihcd->out_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
+	if (!ihcd->out_buf)
+		goto out;
+	ihcd->out_xfr[0].tx_buf = ihcd->out_buf;
+	ihcd->out_xfr[0].len = 1;
+
+	return 0;
+
+out:
+	return -ENOMEM;
+}
+
+static int ice40_dbg_cmd_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, inode->i_private);
+}
+
+static ssize_t ice40_dbg_cmd_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct ice40_hcd *ihcd = s->private;
+	char buf[32];
+	int ret;
+	u8 status, addr;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (!strcmp(buf, "poll")) {
+		if (!HCD_RH_RUNNING(ihcd->hcd)) {
+			ret = -EAGAIN;
+			goto out;
+		}
+		/*
+		 * The bridge chip supports interrupts for device
+		 * connect and disconnect, but we don't have a real
+		 * use case for them. This debugfs interface
+		 * provides a way to enumerate the attached device.
+		 */
+		ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
+				DET_BUS_CTRL, CTRL0_REG);
+		ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
+		status = ice40_spi_reg_read(ihcd, XFRST_REG);
+		if ((status & DPST)) {
+			ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
+			ihcd->port_flags |= USB_PORT_STAT_C_CONNECTION << 16;
+			ihcd->pcd_pending = true;
+			usb_hcd_poll_rh_status(ihcd->hcd);
+		} else if (ihcd->port_flags & USB_PORT_STAT_CONNECTION) {
+			ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
+			ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
+			ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
+			ihcd->pcd_pending = true;
+			usb_hcd_poll_rh_status(ihcd->hcd);
+		}
+	} else if (!strcmp(buf, "rwtest")) {
+		ihcd->devnum = 1;
+		ice40_spi_reg_write(ihcd, 0x1, FADDR_REG);
+		addr = ice40_spi_reg_read(ihcd, FADDR_REG);
+		pr_info("addr written was 0x1 read as %x\n", addr);
+	} else if (!strcmp(buf, "force_disconnect")) {
+		if (!HCD_RH_RUNNING(ihcd->hcd)) {
+			ret = -EAGAIN;
+			goto out;
+		}
+		/*
+		 * Forcefully disconnect the device. This is required
+		 * to simulate a disconnect on a USB port that
+		 * does not have pull-down resistors.
+		 */
+		ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
+		ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
+		ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
+		ihcd->pcd_pending = true;
+		usb_hcd_poll_rh_status(ihcd->hcd);
+	} else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = count;
+out:
+	return ret;
+}
+
+const struct file_operations ice40_dbg_cmd_ops = {
+	.open = ice40_dbg_cmd_open,
+	.write = ice40_dbg_cmd_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int ice40_debugfs_init(struct ice40_hcd *ihcd)
+{
+	struct dentry *dir;
+	int ret = 0;
+
+	dir = debugfs_create_dir("ice40_hcd", NULL);
+
+	if (!dir || IS_ERR(dir)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ihcd->dbg_root = dir;
+
+	dir = debugfs_create_file("command", S_IWUSR, ihcd->dbg_root, ihcd,
+			&ice40_dbg_cmd_ops);
+
+	if (!dir) {
+		debugfs_remove_recursive(ihcd->dbg_root);
+		ihcd->dbg_root = NULL;
+		ret = -ENODEV;
+	}
+
+out:
+	return ret;
+}
+
+static int ice40_spi_probe(struct spi_device *spi)
+{
+	struct ice40_hcd *ihcd;
+	int ret;
+
+	ihcd = devm_kzalloc(&spi->dev, sizeof(*ihcd), GFP_KERNEL);
+	if (!ihcd) {
+		pr_err("fail to allocate ihcd\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	ihcd->spi = spi;
+
+	ret = ice40_spi_parse_dt(ihcd);
+	if (ret) {
+		pr_err("fail to parse dt node\n");
+		goto out;
+	}
+
+	ret = ice40_spi_init_regulators(ihcd);
+	if (ret) {
+		pr_err("fail to init regulators\n");
+		goto out;
+	}
+
+	ret = ice40_spi_request_gpios(ihcd);
+	if (ret) {
+		pr_err("fail to request gpios\n");
+		goto out;
+	}
+
+	spin_lock_init(&ihcd->lock);
+	INIT_LIST_HEAD(&ihcd->async_list);
+	INIT_WORK(&ihcd->async_work, ice40_async_work);
+	mutex_init(&ihcd->wlock);
+	mutex_init(&ihcd->rlock);
+
+	/*
+	 * Enable all our trace points. Useful in debugging card
+	 * enumeration issues.
+	 */
+	ret = trace_set_clr_event(__stringify(TRACE_SYSTEM), NULL, 1);
+	if (ret < 0)
+		pr_err("fail to enable trace points with %d\n", ret);
+
+	ihcd->wq = create_singlethread_workqueue("ice40_wq");
+	if (!ihcd->wq) {
+		pr_err("fail to create workqueue\n");
+		ret = -ENOMEM;
+		goto destroy_mutex;
+	}
+
+	ret = ice40_spi_init_xfrs(ihcd);
+	if (ret) {
+		pr_err("fail to init spi xfrs %d\n", ret);
+		goto destroy_wq;
+	}
+
+	ret = ice40_spi_cache_fw(ihcd);
+	if (ret) {
+		pr_err("fail to cache fw %d\n", ret);
+		goto destroy_wq;
+	}
+
+	ret = ice40_spi_load_fw(ihcd);
+	if (ret) {
+		pr_err("fail to load fw %d\n", ret);
+		goto destroy_wq;
+	}
+
+	ihcd->hcd = usb_create_hcd(&ice40_hc_driver, &spi->dev, "ice40");
+	if (!ihcd->hcd) {
+		pr_err("fail to alloc hcd\n");
+		ret = -ENOMEM;
+		goto power_off;
+	}
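+	/* Stash our context in hcd_priv so hcd_to_ihcd() can retrieve it */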
+	*((struct ice40_hcd **) ihcd->hcd->hcd_priv) = ihcd;
+
+	ret = usb_add_hcd(ihcd->hcd, 0, 0);
+
+	if (ret < 0) {
+		pr_err("fail to add HCD\n");
+		goto put_hcd;
+	}
+
+	ice40_debugfs_init(ihcd);
+
+	/*
+	 * We manage the power states of the bridge chip
+	 * as part of root hub suspend/resume. We don't
+	 * need to implement any additional runtime PM
+	 * methods.
+	 */
+	pm_runtime_no_callbacks(&spi->dev);
+	pm_runtime_set_active(&spi->dev);
+	pm_runtime_enable(&spi->dev);
+
+	/*
+	 * This does not mean the bridge chip can wake up the
+	 * system from sleep. Its activity can prevent
+	 * or abort system sleep. device_init_wakeup()
+	 * creates the wakeup source for us, which we will
+	 * use to control system sleep.
+	 */
+	device_init_wakeup(&spi->dev, 1);
+	pm_stay_awake(&spi->dev);
+
+	pr_debug("success\n");
+
+	return 0;
+
+put_hcd:
+	usb_put_hcd(ihcd->hcd);
+power_off:
+	ice40_spi_power_off(ihcd);
+destroy_wq:
+	destroy_workqueue(ihcd->wq);
+destroy_mutex:
+	mutex_destroy(&ihcd->rlock);
+	mutex_destroy(&ihcd->wlock);
+out:
+	pr_info("ice40_spi_probe failed\n");
+	return ret;
+}
+
+static int ice40_spi_remove(struct spi_device *spi)
+{
+	struct usb_hcd *hcd = spi_get_drvdata(spi);
+	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+	debugfs_remove_recursive(ihcd->dbg_root);
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+	destroy_workqueue(ihcd->wq);
+	ice40_spi_power_off(ihcd);
+
+	pm_runtime_disable(&spi->dev);
+	pm_relax(&spi->dev);
+
+	return 0;
+}
+
+static struct of_device_id ice40_spi_of_match_table[] = {
+	{ .compatible = "lattice,ice40-spi-usb", },
+	{},
+};
+
+static struct spi_driver ice40_spi_driver = {
+	.driver = {
+		.name =		"ice40_spi",
+		.owner =	THIS_MODULE,
+		.of_match_table = ice40_spi_of_match_table,
+	},
+	.probe =	ice40_spi_probe,
+	.remove =	ice40_spi_remove,
+};
+
+module_spi_driver(ice40_spi_driver);
+
+MODULE_DESCRIPTION("ICE40 FPGA based SPI-USB bridge HCD");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad09139..0a82e58 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1013,9 +1013,6 @@
 	}
 
 	xhci = hcd_to_xhci(hcd);
-	if (xhci->xhc_state & XHCI_STATE_HALTED)
-		return -ENODEV;
-
 	if (check_virt_dev) {
 		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
 			printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1031,6 +1028,9 @@
 		}
 	}
 
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
+		return -ENODEV;
+
 	return 1;
 }
 
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
index ca7f199..e62cc59 100644
--- a/drivers/video/msm/mdss/dsi_host_v2.c
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -203,6 +203,13 @@
 	struct mdss_dsi_ctrl_pdata *ctrl =
 		(struct mdss_dsi_ctrl_pdata *)ptr;
 
+	spin_lock(&ctrl->mdp_lock);
+
+	if (ctrl->dsi_irq_mask == 0) {
+		spin_unlock(&ctrl->mdp_lock);
+		return IRQ_HANDLED;
+	}
+
 	isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
 	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
 
@@ -213,22 +220,20 @@
 		msm_dsi_error(dsi_host_private->dsi_base);
 	}
 
-	spin_lock(&ctrl->mdp_lock);
-
 	if (isr & DSI_INTR_VIDEO_DONE)
 		complete(&ctrl->video_comp);
 
 	if (isr & DSI_INTR_CMD_DMA_DONE)
 		complete(&ctrl->dma_comp);
 
-	spin_unlock(&ctrl->mdp_lock);
-
 	if (isr & DSI_INTR_BTA_DONE)
 		complete(&ctrl->bta_comp);
 
 	if (isr & DSI_INTR_CMD_MDP_DONE)
 		complete(&ctrl->mdp_comp);
 
+	spin_unlock(&ctrl->mdp_lock);
+
 	return IRQ_HANDLED;
 }
 
@@ -236,6 +241,13 @@
 			struct mdss_dsi_ctrl_pdata *ctrl)
 {
 	int ret;
+	u32 isr;
+
+	msm_dsi_ahb_ctrl(1);
+	isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+	isr &= ~DSI_INTR_ALL_MASK;
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+	msm_dsi_ahb_ctrl(0);
 
 	ret = devm_request_irq(dev, irq_no, msm_dsi_isr_handler,
 				IRQF_DISABLED, "DSI", ctrl);
@@ -1216,6 +1228,13 @@
 		mutex_unlock(&ctrl_pdata->mutex);
 		return ret;
 	}
+	pinfo->panel_power_on = 1;
+	ret = mdss_dsi_panel_reset(pdata, 1);
+	if (ret) {
+		pr_err("%s: Panel reset failed\n", __func__);
+		mutex_unlock(&ctrl_pdata->mutex);
+		return ret;
+	}
 
 	msm_dsi_ahb_ctrl(1);
 	msm_dsi_prepare_clocks();
@@ -1503,14 +1522,6 @@
 							__func__, __LINE__);
 		rc = -ENODEV;
 		goto error_irq_resource;
-	} else {
-		rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start,
-					ctrl_pdata);
-		if (rc) {
-			dev_err(&pdev->dev, "%s: failed to init irq, rc=%d\n",
-								__func__, rc);
-			goto error_irq_resource;
-		}
 	}
 
 	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
@@ -1573,6 +1584,14 @@
 
 	msm_dsi_ctrl_init(ctrl_pdata);
 
+	rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start,
+					   ctrl_pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to init irq, rc=%d\n",
+			__func__, rc);
+		goto error_device_register;
+	}
+
 	rc = dsi_panel_device_register_v2(pdev, ctrl_pdata);
 	if (rc) {
 		pr_err("%s: dsi panel dev reg failed\n", __func__);
diff --git a/drivers/video/msm/mdss/dsi_host_v2.h b/drivers/video/msm/mdss/dsi_host_v2.h
index b297452..0f3ea8d 100644
--- a/drivers/video/msm/mdss/dsi_host_v2.h
+++ b/drivers/video/msm/mdss/dsi_host_v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
 #define DSI_INTR_CMD_MDP_DONE			BIT(8)
 #define DSI_INTR_CMD_DMA_DONE_MASK		BIT(1)
 #define DSI_INTR_CMD_DMA_DONE			BIT(0)
+#define DSI_INTR_ALL_MASK			0x2220202
 
 #define DSI_BTA_TERM				BIT(1)
 
diff --git a/drivers/video/msm/mdss/dsi_status_v2.c b/drivers/video/msm/mdss/dsi_status_v2.c
index d62ddf3..565401d 100644
--- a/drivers/video/msm/mdss/dsi_status_v2.c
+++ b/drivers/video/msm/mdss/dsi_status_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -82,7 +82,19 @@
 	mdp3_session = pdsi_status->mfd->mdp.private1;
 	mutex_lock(&mdp3_session->lock);
 
-	ret = ctrl_pdata->check_status(ctrl_pdata);
+	if (!mdp3_session->status) {
+		pr_info("display off already\n");
+		mutex_unlock(&mdp3_session->lock);
+		return;
+	}
+
+	if (mdp3_session->wait_for_dma_done)
+		ret = mdp3_session->wait_for_dma_done(mdp3_session);
+
+	if (!ret)
+		ret = ctrl_pdata->check_status(ctrl_pdata);
+	else
+		pr_err("wait_for_dma_done error\n");
 
 	mutex_unlock(&mdp3_session->lock);
 
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
index ccde545..bc76fd0 100644
--- a/drivers/video/msm/mdss/dsi_v2.c
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,7 @@
 	if (enable) {
 		dsi_ctrl_gpio_request(ctrl_pdata);
 		mdss_dsi_panel_reset(pdata, 1);
+		pdata->panel_info.panel_power_on = 1;
 		rc = ctrl_pdata->on(pdata);
 		if (rc)
 			pr_err("dsi_panel_handler panel on failed %d\n", rc);
@@ -73,6 +74,7 @@
 		if (dsi_intf.op_mode_config)
 			dsi_intf.op_mode_config(DSI_CMD_MODE, pdata);
 		rc = ctrl_pdata->off(pdata);
+		pdata->panel_info.panel_power_on = 0;
 		mdss_dsi_panel_reset(pdata, 0);
 		dsi_ctrl_gpio_free(ctrl_pdata);
 	}
@@ -202,75 +204,23 @@
 {
 	int rc = 0;
 
-	if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
-		rc = gpio_request(ctrl_pdata->disp_en_gpio, "disp_enable");
-		if (rc)
-			goto gpio_request_err4;
-
-		ctrl_pdata->disp_en_gpio_requested = 1;
-	}
-
-	if (gpio_is_valid(ctrl_pdata->rst_gpio)) {
-		rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
-		if (rc)
-			goto gpio_request_err3;
-
-		ctrl_pdata->rst_gpio_requested = 1;
-	}
-
 	if (gpio_is_valid(ctrl_pdata->disp_te_gpio)) {
 		rc = gpio_request(ctrl_pdata->disp_te_gpio, "disp_te");
 		if (rc)
-			goto gpio_request_err2;
-
-		ctrl_pdata->disp_te_gpio_requested = 1;
+			ctrl_pdata->disp_te_gpio_requested = 0;
+		else
+			ctrl_pdata->disp_te_gpio_requested = 1;
 	}
 
-	if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
-		rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
-		if (rc)
-			goto gpio_request_err1;
-
-		ctrl_pdata->mode_gpio_requested = 1;
-	}
-
-	return rc;
-
-gpio_request_err1:
-	if (gpio_is_valid(ctrl_pdata->disp_te_gpio))
-		gpio_free(ctrl_pdata->disp_te_gpio);
-gpio_request_err2:
-	if (gpio_is_valid(ctrl_pdata->rst_gpio))
-		gpio_free(ctrl_pdata->rst_gpio);
-gpio_request_err3:
-	if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
-		gpio_free(ctrl_pdata->disp_en_gpio);
-gpio_request_err4:
-	ctrl_pdata->disp_en_gpio_requested = 0;
-	ctrl_pdata->rst_gpio_requested = 0;
-	ctrl_pdata->disp_te_gpio_requested = 0;
-	ctrl_pdata->mode_gpio_requested = 0;
 	return rc;
 }
 
 void dsi_ctrl_gpio_free(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
-	if (ctrl_pdata->disp_en_gpio_requested) {
-		gpio_free(ctrl_pdata->disp_en_gpio);
-		ctrl_pdata->disp_en_gpio_requested = 0;
-	}
-	if (ctrl_pdata->rst_gpio_requested) {
-		gpio_free(ctrl_pdata->rst_gpio);
-		ctrl_pdata->rst_gpio_requested = 0;
-	}
 	if (ctrl_pdata->disp_te_gpio_requested) {
 		gpio_free(ctrl_pdata->disp_te_gpio);
 		ctrl_pdata->disp_te_gpio_requested = 0;
 	}
-	if (ctrl_pdata->mode_gpio_requested) {
-		gpio_free(ctrl_pdata->mode_gpio);
-		ctrl_pdata->mode_gpio_requested = 0;
-	}
 }
 
 static int dsi_parse_vreg(struct device *dev, struct dss_module_power *mp)
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index b324130..095a387 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -157,6 +157,7 @@
 {
 	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
 	schedule_work(&session->dma_done_work);
+	complete(&session->dma_completion);
 }
 
 void vsync_count_down(void *arg)
@@ -798,7 +799,7 @@
 
 	rc = mdp3_dma->stop(mdp3_dma, mdp3_session->intf);
 	if (rc) {
-		pr_err("fail to stop the MDP3 dma\n");
+		pr_err("fail to stop the MDP3 dma %d\n", rc);
 		goto reset_error;
 	}
 
@@ -897,9 +898,7 @@
 			dma->source_config.stride = stride;
 			dma->output_config.pack_pattern =
 				mdp3_ctrl_get_pack_pattern(req->src.format);
-			mdp3_clk_enable(1, 0);
-			mdp3_session->dma->dma_config_source(dma);
-			mdp3_clk_enable(0, 0);
+			dma->update_src_cfg = true;
 		}
 		mdp3_session->overlay.id = 1;
 		req->id = 1;
@@ -923,14 +922,6 @@
 	mutex_lock(&mdp3_session->lock);
 
 	if (mdp3_session->overlay.id == ndx && ndx == 1) {
-		struct mdp3_dma *dma = mdp3_session->dma;
-		dma->source_config.format = format;
-		dma->source_config.stride = fix->line_length;
-		dma->output_config.pack_pattern =
-			mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
-		mdp3_clk_enable(1, 0);
-		mdp3_session->dma->dma_config_source(dma);
-		mdp3_clk_enable(0, 0);
 		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
 		mdp3_bufq_deinit(&mdp3_session->bufq_in);
 	} else {
@@ -1011,7 +1002,11 @@
 	panel = mdp3_session->panel;
 	if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
 		pr_debug("continuous splash screen, IOMMU not attached\n");
-		mdp3_ctrl_reset(mfd);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			return -EINVAL;
+		}
 		reset_done = true;
 	}
 
@@ -1042,7 +1037,8 @@
 					MDP_NOTIFY_FRAME_DONE);
 			}
 		}
-
+		mdp3_session->dma_active = 1;
+		init_completion(&mdp3_session->dma_completion);
 		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
 		mdp3_bufq_push(&mdp3_session->bufq_out, data);
 	}
@@ -1092,7 +1088,11 @@
 
 	if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
 		pr_debug("continuous splash screen, IOMMU not attached\n");
-		mdp3_ctrl_reset(mfd);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			return;
+		}
 	}
 
 	mutex_lock(&mdp3_session->lock);
@@ -1132,6 +1132,8 @@
 					MDP_NOTIFY_FRAME_DONE);
 			}
 		}
+		mdp3_session->dma_active = 1;
+		init_completion(&mdp3_session->dma_completion);
 		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
 	} else {
 		pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
@@ -1753,6 +1755,23 @@
 	return rc;
 }
 
+int mdp3_wait_for_dma_done(struct mdp3_session_data *session)
+{
+	int rc = 0;
+
+	if (session->dma_active) {
+		rc = wait_for_completion_timeout(&session->dma_completion,
+			KOFF_TIMEOUT);
+		if (rc > 0) {
+			session->dma_active = 0;
+			rc = 0;
+		} else if (rc == 0) {
+			rc = -ETIME;
+		}
+	}
+	return rc;
+}
+
 int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
 {
 	struct device *dev = mfd->fbi->dev;
@@ -1827,6 +1846,9 @@
 	mdp3_session->vsync_timer.data = (u32)mdp3_session;
 	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
 	mfd->mdp.private1 = mdp3_session;
+	init_completion(&mdp3_session->dma_completion);
+	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
 
 	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
 	if (rc) {
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.h b/drivers/video/msm/mdss/mdp3_ctrl.h
index cfad1d3..416b7c2 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.h
+++ b/drivers/video/msm/mdss/mdp3_ctrl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,6 +61,10 @@
 
 	int vsync_enabled;
 	atomic_t vsync_countdown; /* Used to count down  */
+
+	bool dma_active;
+	struct completion dma_completion;
+	int (*wait_for_dma_done)(struct mdp3_session_data *session);
 };
 
 int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
index 800c4b3..8a13de2 100644
--- a/drivers/video/msm/mdss/mdp3_dma.c
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -18,7 +18,7 @@
 #include "mdp3_hwio.h"
 
 #define DMA_STOP_POLL_SLEEP_US 1000
-#define DMA_STOP_POLL_TIMEOUT_US 32000
+#define DMA_STOP_POLL_TIMEOUT_US 200000
 #define DMA_HISTO_RESET_TIMEOUT_MS 40
 #define DMA_LUT_CONFIG_MASK 0xfffffbe8
 #define DMA_CCS_CONFIG_MASK 0xfffffc17
@@ -605,6 +605,13 @@
 			}
 		}
 	}
+	if (dma->update_src_cfg) {
+		if (dma->output_config.out_sel ==
+				 MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active)
+			pr_err("configuring dma source while dma is active\n");
+		dma->dma_config_source(dma);
+		dma->update_src_cfg = false;
+	}
 	spin_lock_irqsave(&dma->dma_lock, flag);
 	MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)buf);
 	dma->source_config.buf = buf;
@@ -961,6 +968,7 @@
 	dma->vsync_client.handler = NULL;
 	dma->vsync_client.arg = NULL;
 	dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+	dma->update_src_cfg = false;
 
 	memset(&dma->cursor, 0, sizeof(dma->cursor));
 	memset(&dma->ccs_config, 0, sizeof(dma->ccs_config));
diff --git a/drivers/video/msm/mdss/mdp3_dma.h b/drivers/video/msm/mdss/mdp3_dma.h
index 207168f..80ebb9b 100644
--- a/drivers/video/msm/mdss/mdp3_dma.h
+++ b/drivers/video/msm/mdss/mdp3_dma.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -259,6 +259,7 @@
 	int histo_state;
 	struct mdp3_dma_histogram_data histo_data;
 	unsigned int vsync_status;
+	bool update_src_cfg;
 
 	int (*dma_config)(struct mdp3_dma *dma,
 			struct mdp3_dma_source *source_config,
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index ada1281..7e6faa8 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -192,6 +192,7 @@
 
 	int handoff_pending;
 	struct mdss_prefill_data prefill_data;
+	bool ulps;
 };
 extern struct mdss_data_type *mdss_res;
 
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index ece867c..6de0df6 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -72,13 +72,25 @@
 			goto error;
 		}
 
-		if (pdata->panel_info.panel_power_on == 0)
-			mdss_dsi_panel_reset(pdata, 1);
-
+		if (!pdata->panel_info.mipi.lp11_init) {
+			ret = mdss_dsi_panel_reset(pdata, 1);
+			if (ret) {
+				pr_err("%s: Panel reset failed. rc=%d\n",
+						__func__, ret);
+				if (msm_dss_enable_vreg(
+				ctrl_pdata->power_data.vreg_config,
+				ctrl_pdata->power_data.num_vreg, 0))
+					pr_err("Disable vregs failed\n");
+				goto error;
+			}
+		}
 	} else {
-
-		mdss_dsi_panel_reset(pdata, 0);
-
+		ret = mdss_dsi_panel_reset(pdata, 0);
+		if (ret) {
+			pr_err("%s: Panel reset failed. rc=%d\n",
+					__func__, ret);
+			goto error;
+		}
 		ret = msm_dss_enable_vreg(
 			ctrl_pdata->power_data.vreg_config,
 			ctrl_pdata->power_data.num_vreg, 0);
@@ -296,7 +308,7 @@
 
 	if (!pdata->panel_info.panel_power_on) {
 		pr_warn("%s:%d Panel already off.\n", __func__, __LINE__);
-		return -EPERM;
+		return 0;
 	}
 
 	pdata->panel_info.panel_power_on = 0;
@@ -309,7 +321,7 @@
 				ctrl_pdata, ctrl_pdata->ndx);
 
 	if (pdata->panel_info.type == MIPI_CMD_PANEL)
-		mdss_dsi_clk_ctrl(ctrl_pdata, 1);
+		mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
 
 	/* disable DSI controller */
 	mdss_dsi_controller_cfg(0, pdata);
@@ -317,7 +329,7 @@
 	/* disable DSI phy */
 	mdss_dsi_phy_disable(ctrl_pdata);
 
-	mdss_dsi_clk_ctrl(ctrl_pdata, 0);
+	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
 
 	ret = mdss_dsi_panel_power_on(pdata, 0);
 	if (ret) {
@@ -335,63 +347,22 @@
 	return ret;
 }
 
-int mdss_dsi_on(struct mdss_panel_data *pdata)
+static void __mdss_dsi_ctrl_setup(struct mdss_panel_data *pdata)
 {
-	int ret = 0;
-	u32 clk_rate;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 	struct mdss_panel_info *pinfo;
 	struct mipi_panel_info *mipi;
+	u32 clk_rate;
 	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
 	u32 ystride, bpp, data, dst_bpp;
 	u32 dummy_xres, dummy_yres;
-	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 	u32 hsync_period, vsync_period;
 
-	if (pdata == NULL) {
-		pr_err("%s: Invalid input data\n", __func__);
-		return -EINVAL;
-	}
-
-	if (pdata->panel_info.panel_power_on) {
-		pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
-		return 0;
-	}
-
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 
-	pr_debug("%s+: ctrl=%p ndx=%d\n",
-				__func__, ctrl_pdata, ctrl_pdata->ndx);
-
 	pinfo = &pdata->panel_info;
 
-	ret = msm_dss_enable_vreg(ctrl_pdata->power_data.vreg_config,
-				ctrl_pdata->power_data.num_vreg, 1);
-	if (ret) {
-		pr_err("%s:Failed to enable vregs. rc=%d\n", __func__, ret);
-		return ret;
-	}
-
-	pdata->panel_info.panel_power_on = 1;
-
-	if (!pdata->panel_info.mipi.lp11_init)
-		mdss_dsi_panel_reset(pdata, 1);
-
-	ret = mdss_dsi_bus_clk_start(ctrl_pdata);
-	if (ret) {
-		pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
-			ret);
-		mdss_dsi_panel_power_on(pdata, 0);
-		pdata->panel_info.panel_power_on = 0;
-		return ret;
-	}
-
-	mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
-	mdss_dsi_phy_init(pdata);
-	mdss_dsi_bus_clk_stop(ctrl_pdata);
-
-	mdss_dsi_clk_ctrl(ctrl_pdata, 1);
-
 	clk_rate = pdata->panel_info.clk_rate;
 	clk_rate = min(clk_rate, pdata->panel_info.clk_max);
 
@@ -421,7 +392,7 @@
 	vsync_period = vspw + vbp + height + dummy_yres + vfp;
 	hsync_period = hspw + hbp + width + dummy_xres + hfp;
 
-	mipi  = &pdata->panel_info.mipi;
+	mipi = &pdata->panel_info.mipi;
 	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
 		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x24,
 			((hspw + hbp + width + dummy_xres) << 16 |
@@ -459,19 +430,247 @@
 		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, data);
 		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, data);
 	}
+}
 
+static inline bool __mdss_dsi_ulps_feature_enabled(
+	struct mdss_panel_data *pdata)
+{
+	return pdata->panel_info.ulps_feature_enabled;
+}
+
+static int mdss_dsi_ulps_config_sub(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+	int enable)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata = NULL;
+	u32 lane_status = 0;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl_pdata->panel_data;
+
+	if (!__mdss_dsi_ulps_feature_enabled(pdata)) {
+		pr_debug("%s: ULPS feature not supported. enable=%d\n",
+			__func__, enable);
+		return -ENOTSUPP;
+	}
+
+	if (enable && !ctrl_pdata->ulps) {
+		/* No need to configure ULPS mode when entering suspend state */
+		if (!pdata->panel_info.panel_power_on) {
+			pr_err("%s: panel off. returning\n", __func__);
+			goto error;
+		}
+
+		if (__mdss_dsi_clk_enabled(ctrl_pdata, DSI_LINK_CLKS)) {
+			pr_err("%s: cannot enter ulps mode if dsi clocks are on\n",
+				__func__);
+			ret = -EPERM;
+			goto error;
+		}
+
+		ret = mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
+		if (ret) {
+			pr_err("%s: Failed to enable clocks. rc=%d\n",
+				__func__, ret);
+			goto error;
+		}
+
+		/*
+		 * ULPS Entry Request.
+		 * Wait for a short duration to ensure that the lanes
+		 * enter ULP state.
+		 */
+		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x01F);
+		usleep(100);
+		lane_status = MIPI_INP(ctrl_pdata->ctrl_base + 0xA8);
+		if (lane_status & 0x1F00) {
+			pr_err("%s: ULPS entry req failed. Lane status=0x%08x\n",
+				__func__, lane_status);
+			ret = -EINVAL;
+			mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
+			goto error;
+		}
+
+		/* Enable MMSS DSI Clamps */
+		MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x3FF);
+		MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x83FF);
+
+		wmb();
+
+		MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x108, 0x1);
+		/* disable DSI controller */
+		mdss_dsi_controller_cfg(0, pdata);
+
+		mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
+		ctrl_pdata->ulps = true;
+	} else if (ctrl_pdata->ulps) {
+		ret = mdss_dsi_clk_ctrl(ctrl_pdata, DSI_BUS_CLKS, 1);
+		if (ret) {
+			pr_err("%s: Failed to enable bus clocks. rc=%d\n",
+				__func__, ret);
+			goto error;
+		}
+
+		MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x108, 0x0);
+		mdss_dsi_phy_init(pdata);
+
+		__mdss_dsi_ctrl_setup(pdata);
+		mdss_dsi_sw_reset(pdata);
+		mdss_dsi_host_init(pdata);
+		mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
+			pdata);
+
+		/*
+		 * ULPS Entry Request. This is needed because, after power
+		 * collapse and reset, the DSI controller resets back to
+		 * idle state and not ULPS.
+		 * Wait for a short duration to ensure that the lanes
+		 * enter ULP state.
+		 */
+		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x01F);
+		usleep(100);
+
+		/* Disable MMSS DSI Clamps */
+		MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x3FF);
+		MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x0);
+
+		ret = mdss_dsi_clk_ctrl(ctrl_pdata, DSI_LINK_CLKS, 1);
+		if (ret) {
+			pr_err("%s: Failed to enable link clocks. rc=%d\n",
+				__func__, ret);
+			mdss_dsi_clk_ctrl(ctrl_pdata, DSI_BUS_CLKS, 0);
+			goto error;
+		}
+
+		/*
+		 * ULPS Exit Request
+		 * Hardware requirement is to wait for at least 1ms
+		 */
+		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x1F00);
+		usleep(1000);
+		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x0);
+
+		/*
+		 * Wait for a short duration before enabling
+		 * data transmission
+		 */
+		usleep(100);
+
+		lane_status = MIPI_INP(ctrl_pdata->ctrl_base + 0xA8);
+		mdss_dsi_clk_ctrl(ctrl_pdata, DSI_LINK_CLKS, 0);
+		mdss_dsi_clk_ctrl(ctrl_pdata, DSI_BUS_CLKS, 0);
+		ctrl_pdata->ulps = false;
+	}
+
+	pr_debug("%s: DSI lane status = 0x%08x. Ulps %s\n", __func__,
+		lane_status, enable ? "enabled" : "disabled");
+
+error:
+	return ret;
+}
+
+static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
+	int enable)
+{
+	int rc;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+	if (ctrl->mmss_misc_io.base == NULL) {
+		pr_err("%s: mmss_misc_io is NULL. ULPS not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (mdss_dsi_is_slave_ctrl(ctrl)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (!mctrl) {
+			pr_err("%s: Unable to get master control\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	if (mctrl) {
+		pr_debug("%s: configuring ulps (%s) for master ctrl%d\n",
+			__func__, (enable ? "on" : "off"), ctrl->ndx);
+		rc = mdss_dsi_ulps_config_sub(mctrl, enable);
+		if (rc)
+			return rc;
+	}
+
+	pr_debug("%s: configuring ulps (%s) for ctrl%d\n",
+		__func__, (enable ? "on" : "off"), ctrl->ndx);
+	return mdss_dsi_ulps_config_sub(ctrl, enable);
+}
+
+int mdss_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pdata->panel_info.panel_power_on) {
+		pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
+		return 0;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s+: ctrl=%p ndx=%d\n",
+				__func__, ctrl_pdata, ctrl_pdata->ndx);
+
+	pinfo = &pdata->panel_info;
+	mipi = &pdata->panel_info.mipi;
+
+	ret = mdss_dsi_panel_power_on(pdata, 1);
+	if (ret) {
+		pr_err("%s:Panel power on failed. rc=%d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = mdss_dsi_clk_ctrl(ctrl_pdata, DSI_BUS_CLKS, 1);
+	if (ret) {
+		pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
+			ret);
+		ret = mdss_dsi_panel_power_on(pdata, 0);
+		if (ret) {
+			pr_err("%s: Panel reset failed. rc=%d\n",
+					__func__, ret);
+			return ret;
+		}
+		pdata->panel_info.panel_power_on = 0;
+		return ret;
+	}
+	pdata->panel_info.panel_power_on = 1;
+
+	mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
+	mdss_dsi_phy_init(pdata);
+	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_BUS_CLKS, 0);
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
+
+	__mdss_dsi_ctrl_setup(pdata);
 	mdss_dsi_sw_reset(pdata);
-	mdss_dsi_host_init(mipi, pdata);
+	mdss_dsi_host_init(pdata);
 
 	/*
 	 * Issue hardware reset line after enabling the DSI clocks and data
 	 * data lanes for LP11 init
 	 */
-	if (pdata->panel_info.mipi.lp11_init)
+	if (mipi->lp11_init)
 		mdss_dsi_panel_reset(pdata, 1);
 
-	if (pdata->panel_info.mipi.init_delay)
-		usleep(pdata->panel_info.mipi.init_delay);
+	if (mipi->init_delay)
+		usleep(mipi->init_delay);
 
 	if (mipi->force_clk_lane_hs) {
 		u32 tmp;
@@ -483,7 +682,7 @@
 	}
 
 	if (pdata->panel_info.type == MIPI_CMD_PANEL)
-		mdss_dsi_clk_ctrl(ctrl_pdata, 0);
+		mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
 
 	pr_debug("%s-:\n", __func__);
 	return 0;
@@ -545,6 +744,17 @@
 				panel_data);
 	mipi = &pdata->panel_info.mipi;
 
+	if (__mdss_dsi_ulps_feature_enabled(pdata) &&
+		(ctrl_pdata->ulps)) {
+		/* Disable ULPS mode before blanking the panel */
+		ret = mdss_dsi_ulps_config(ctrl_pdata, 0);
+		if (ret) {
+			pr_err("%s: failed to exit ULPS mode. rc=%d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
 	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
 
 	if (pdata->panel_info.type == MIPI_CMD_PANEL) {
@@ -591,7 +801,7 @@
 		"Incorrect Ctrl state=0x%x\n", ctrl_pdata->ctrl_state);
 
 	mdss_dsi_sw_reset(pdata);
-	mdss_dsi_host_init(mipi, pdata);
+	mdss_dsi_host_init(pdata);
 	mdss_dsi_op_mode_config(mipi->mode, pdata);
 
 	if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE) {
@@ -676,8 +886,8 @@
 				MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004,
 								dsi_ctrl);
 				mdss_dsi_controller_cfg(true, pdata);
-				mdss_dsi_clk_ctrl(ctrl_pdata, 0);
-				mdss_dsi_clk_ctrl(ctrl_pdata, 1);
+				mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
+				mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
 				dsi_ctrl |= 0x2;
 				MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004,
 								dsi_ctrl);
@@ -798,6 +1008,9 @@
 	case MDSS_EVENT_ENABLE_PARTIAL_UPDATE:
 		rc = mdss_dsi_ctl_partial_update(pdata);
 		break;
+	case MDSS_EVENT_DSI_ULPS_CTRL:
+		rc = mdss_dsi_ulps_config(ctrl_pdata, (int)arg);
+		break;
 	default:
 		pr_debug("%s: unhandled event=%d\n", __func__, event);
 		break;
@@ -1028,6 +1241,9 @@
 		pr_err("%s: failed to de-init vregs\n", __func__);
 	mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->power_data);
 	mfd = platform_get_drvdata(pdev);
+	msm_dss_iounmap(&ctrl_pdata->mmss_misc_io);
+	msm_dss_iounmap(&ctrl_pdata->phy_io);
+	msm_dss_iounmap(&ctrl_pdata->ctrl_io);
 	return 0;
 }
 
@@ -1038,7 +1254,6 @@
 {
 	int rc = 0;
 	u32 index;
-	struct resource *mdss_dsi_mres;
 
 	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
 	if (rc) {
@@ -1066,25 +1281,33 @@
 		return -EPERM;
 	}
 
-	mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mdss_dsi_mres) {
-		pr_err("%s:%d unable to get the DSI ctrl resources",
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->ctrl_io, "dsi_ctrl");
+	if (rc) {
+		pr_err("%s:%d unable to remap dsi ctrl resources",
 			       __func__, __LINE__);
-		return -ENOMEM;
+		return rc;
 	}
 
-	ctrl->ctrl_base = ioremap(mdss_dsi_mres->start,
-		resource_size(mdss_dsi_mres));
-	if (!(ctrl->ctrl_base)) {
-		pr_err("%s:%d unable to remap dsi resources",
+	ctrl->ctrl_base = ctrl->ctrl_io.base;
+	ctrl->reg_size = ctrl->ctrl_io.len;
+
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->phy_io, "dsi_phy");
+	if (rc) {
+		pr_err("%s:%d unable to remap dsi phy resources",
 			       __func__, __LINE__);
-		return -ENOMEM;
+		return rc;
 	}
 
-	ctrl->reg_size = resource_size(mdss_dsi_mres);
+	pr_info("%s: ctrl_base=%p ctrl_size=%x phy_base=%p phy_size=%x\n",
+		__func__, ctrl->ctrl_base, ctrl->reg_size, ctrl->phy_io.base,
+		ctrl->phy_io.len);
 
-	pr_info("%s: dsi base=%x size=%x\n",
-		__func__, (int)ctrl->ctrl_base, ctrl->reg_size);
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->mmss_misc_io,
+		"mmss_misc_phys");
+	if (rc) {
+		pr_debug("%s:%d mmss_misc IO remap failed\n",
+			__func__, __LINE__);
+	}
 
 	return 0;
 }
@@ -1223,18 +1446,9 @@
 	ctrl_pdata->disp_en_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
 		"qcom,platform-enable-gpio", 0);
 
-	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
 		pr_err("%s:%d, Disp_en gpio not specified\n",
 						__func__, __LINE__);
-	} else {
-		rc = gpio_request(ctrl_pdata->disp_en_gpio, "disp_enable");
-		if (rc) {
-			pr_err("request reset gpio failed, rc=%d\n",
-			       rc);
-			gpio_free(ctrl_pdata->disp_en_gpio);
-			return -ENODEV;
-		}
-	}
 
 	if (pinfo->type == MIPI_CMD_PANEL) {
 		ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
@@ -1251,7 +1465,6 @@
 		if (rc) {
 			pr_err("request TE gpio failed, rc=%d\n",
 			       rc);
-			gpio_free(ctrl_pdata->disp_te_gpio);
 			return -ENODEV;
 		}
 		rc = gpio_tlmm_config(GPIO_CFG(
@@ -1281,44 +1494,18 @@
 
 	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
 			 "qcom,platform-reset-gpio", 0);
-	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
 		pr_err("%s:%d, reset gpio not specified\n",
 						__func__, __LINE__);
-	} else {
-		rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
-		if (rc) {
-			pr_err("request reset gpio failed, rc=%d\n",
-				rc);
-			gpio_free(ctrl_pdata->rst_gpio);
-			if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
-				gpio_free(ctrl_pdata->disp_en_gpio);
-			return -ENODEV;
-		}
-	}
 
 	if (pinfo->mode_gpio_state != MODE_GPIO_NOT_VALID) {
 
 		ctrl_pdata->mode_gpio = of_get_named_gpio(
 					ctrl_pdev->dev.of_node,
 					"qcom,platform-mode-gpio", 0);
-		if (!gpio_is_valid(ctrl_pdata->mode_gpio)) {
+		if (!gpio_is_valid(ctrl_pdata->mode_gpio))
 			pr_info("%s:%d, mode gpio not specified\n",
 							__func__, __LINE__);
-		} else {
-			rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
-			if (rc) {
-				pr_err("request panel mode gpio failed,rc=%d\n",
-									rc);
-				gpio_free(ctrl_pdata->mode_gpio);
-				if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
-					gpio_free(ctrl_pdata->disp_en_gpio);
-				if (gpio_is_valid(ctrl_pdata->rst_gpio))
-					gpio_free(ctrl_pdata->rst_gpio);
-				if (gpio_is_valid(ctrl_pdata->disp_te_gpio))
-					gpio_free(ctrl_pdata->disp_te_gpio);
-				return -ENODEV;
-			}
-		}
 	}
 
 	if (mdss_dsi_clk_init(ctrl_pdev, ctrl_pdata)) {
@@ -1359,7 +1546,7 @@
 			return rc;
 		}
 
-		mdss_dsi_clk_ctrl(ctrl_pdata, 1);
+		mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
 		ctrl_pdata->ctrl_state |=
 			(CTRL_STATE_PANEL_INIT | CTRL_STATE_MDP_ACTIVE);
 	} else {
@@ -1369,10 +1556,6 @@
 	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
 	if (rc) {
 		pr_err("%s: unable to register MIPI DSI panel\n", __func__);
-		if (ctrl_pdata->rst_gpio)
-			gpio_free(ctrl_pdata->rst_gpio);
-		if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
-			gpio_free(ctrl_pdata->disp_en_gpio);
 		return rc;
 	}
 
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 2c9c37d..b0b884f 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -151,8 +151,8 @@
 #define DSI_CMD_TERM    BIT(0)
 
 extern struct device dsi_dev;
-extern int mdss_dsi_clk_on;
 extern u32 dsi_irq;
+extern struct mdss_dsi_ctrl_pdata *ctrl_list[];
 
 struct dsiphy_pll_divider_config {
 	u32 clk_rate;
@@ -223,12 +223,18 @@
 	DSI_CTRL_MAX,
 };
 
+/* DSI controller #0 is always treated as a master in broadcast mode */
+#define DSI_CTRL_MASTER		DSI_CTRL_0
+#define DSI_CTRL_SLAVE		DSI_CTRL_1
+
+#define DSI_BUS_CLKS	BIT(0)
+#define DSI_LINK_CLKS	BIT(1)
+#define DSI_ALL_CLKS	((DSI_BUS_CLKS) | (DSI_LINK_CLKS))
+
 #define DSI_EV_PLL_UNLOCKED		0x0001
 #define DSI_EV_MDP_FIFO_UNDERFLOW	0x0002
 #define DSI_EV_MDP_BUSY_RELEASE		0x80000000
 
-#define DSI_FLAG_CLOCK_MASTER		0x80000000
-
 struct mdss_dsi_ctrl_pdata {
 	int ndx;	/* panel_num */
 	int (*on) (struct mdss_panel_data *pdata);
@@ -238,28 +244,28 @@
 	int (*cmdlist_commit)(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
 	struct mdss_panel_data panel_data;
 	unsigned char *ctrl_base;
+	struct dss_io_data ctrl_io;
+	struct dss_io_data mmss_misc_io;
+	struct dss_io_data phy_io;
 	int reg_size;
-	u32 clk_cnt;
-	int clk_cnt_sub;
+	u32 bus_clk_cnt;
+	u32 link_clk_cnt;
 	u32 flags;
 	struct clk *mdp_core_clk;
 	struct clk *ahb_clk;
 	struct clk *axi_clk;
+	struct clk *mmss_misc_ahb_clk;
 	struct clk *byte_clk;
 	struct clk *esc_clk;
 	struct clk *pixel_clk;
 	u8 ctrl_state;
 	int panel_mode;
 	int irq_cnt;
-	int mdss_dsi_clk_on;
 	int rst_gpio;
 	int disp_en_gpio;
 	int disp_te_gpio;
 	int mode_gpio;
-	int rst_gpio_requested;
-	int disp_en_gpio_requested;
 	int disp_te_gpio_requested;
-	int mode_gpio_requested;
 	int bklt_ctrl;	/* backlight ctrl */
 	int pwm_period;
 	int pwm_pmic_gpio;
@@ -290,6 +296,8 @@
 	struct mutex mutex;
 	struct mutex cmd_mutex;
 
+	bool ulps;
+
 	struct dsi_buf tx_buf;
 	struct dsi_buf rx_buf;
 };
@@ -303,8 +311,7 @@
 int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
 			struct dsi_cmd_desc *cmds, int rlen);
 
-void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
-				struct mdss_panel_data *pdata);
+void mdss_dsi_host_init(struct mdss_panel_data *pdata);
 void mdss_dsi_op_mode_config(int mode,
 				struct mdss_panel_data *pdata);
 void mdss_dsi_cmd_mode_ctrl(int enable);
@@ -312,20 +319,14 @@
 void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
 void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
 void mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
-int mdss_dsi_link_clk_start(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_link_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl);
-int mdss_dsi_bus_clk_start(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_bus_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
+	u8 clk_type, int enable);
 void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
 				int enable);
 void mdss_dsi_controller_cfg(int enable,
 				struct mdss_panel_data *pdata);
 void mdss_dsi_sw_reset(struct mdss_panel_data *pdata);
 
-struct mdss_dsi_ctrl_pdata *mdss_dsi_ctrl_slave(
-				struct mdss_dsi_ctrl_pdata *ctrl);
-
 irqreturn_t mdss_dsi_isr(int irq, void *ptr);
 void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 
@@ -337,7 +338,7 @@
 void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
 void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl);
 void mdss_dsi_phy_init(struct mdss_panel_data *pdata);
 void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base);
@@ -351,8 +352,52 @@
 int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
 void mdss_dsi_cmdlist_kickoff(int intf);
 int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
+bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl, u8 clk_type);
 
 int mdss_dsi_panel_init(struct device_node *node,
 		struct mdss_dsi_ctrl_pdata *ctrl_pdata,
 		bool cmd_cfg_cont_splash);
+
+static inline bool mdss_dsi_broadcast_mode_enabled(void)
+{
+	return ctrl_list[DSI_CTRL_MASTER]->shared_pdata.broadcast_enable &&
+		ctrl_list[DSI_CTRL_SLAVE] &&
+		ctrl_list[DSI_CTRL_SLAVE]->shared_pdata.broadcast_enable;
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_master_ctrl(void)
+{
+	if (mdss_dsi_broadcast_mode_enabled())
+		return ctrl_list[DSI_CTRL_MASTER];
+	else
+		return NULL;
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_slave_ctrl(void)
+{
+	if (mdss_dsi_broadcast_mode_enabled())
+		return ctrl_list[DSI_CTRL_SLAVE];
+	else
+		return NULL;
+}
+
+static inline bool mdss_dsi_is_master_ctrl(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return mdss_dsi_broadcast_mode_enabled() &&
+		(ctrl->ndx == DSI_CTRL_MASTER);
+}
+
+static inline bool mdss_dsi_is_slave_ctrl(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return mdss_dsi_broadcast_mode_enabled() &&
+		(ctrl->ndx == DSI_CTRL_SLAVE);
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_by_index(int ndx)
+{
+	if (ndx >= DSI_CTRL_MAX)
+		return NULL;
+
+	return ctrl_list[ndx];
+}
 #endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index b4478ac..a570914 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -29,10 +29,7 @@
 
 #define VSYNC_PERIOD 17
 
-static struct mdss_dsi_ctrl_pdata *left_ctrl_pdata;
-
-static struct mdss_dsi_ctrl_pdata *ctrl_list[DSI_CTRL_MAX];
-
+struct mdss_dsi_ctrl_pdata *ctrl_list[DSI_CTRL_MAX];
 
 struct mdss_hw mdss_dsi0_hw = {
 	.hw_ndx = MDSS_HW_DSI0,
@@ -72,14 +69,6 @@
 
 void mdss_dsi_ctrl_init(struct mdss_dsi_ctrl_pdata *ctrl)
 {
-	if (ctrl->shared_pdata.broadcast_enable)
-		if (ctrl->panel_data.panel_info.pdest
-					== DISPLAY_1) {
-			pr_debug("%s: Broadcast mode enabled.\n",
-				 __func__);
-			left_ctrl_pdata = ctrl;
-		}
-
 	if (ctrl->panel_data.panel_info.pdest == DISPLAY_1) {
 		mdss_dsi0_hw.ptr = (void *)(ctrl);
 		ctrl->dsi_hw = &mdss_dsi0_hw;
@@ -90,11 +79,9 @@
 		ctrl->ndx = DSI_CTRL_1;
 	}
 
-	ctrl_list[ctrl->ndx] = ctrl;	/* keep it */
+	ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode;
 
-	if (ctrl->shared_pdata.broadcast_enable)
-		if (ctrl->ndx == DSI_CTRL_1)
-			ctrl->flags |= DSI_FLAG_CLOCK_MASTER;
+	ctrl_list[ctrl->ndx] = ctrl;	/* keep it */
 
 	if (mdss_register_irq(ctrl->dsi_hw))
 		pr_err("%s: mdss_register_irq failed.\n", __func__);
@@ -121,22 +108,6 @@
 	}
 }
 
-struct mdss_dsi_ctrl_pdata *mdss_dsi_ctrl_slave(
-				struct mdss_dsi_ctrl_pdata *ctrl)
-{
-	int ndx;
-	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
-
-	/* only two controllers */
-	ndx = ctrl->ndx;
-	ndx += 1;
-	ndx %= DSI_CTRL_MAX;
-	sctrl = ctrl_list[ndx];
-
-	return sctrl;
-
-}
-
 void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
 {
 	if (enable == 0) {
@@ -146,22 +117,26 @@
 		mutex_unlock(&ctrl->cmd_mutex);
 	}
 
-	mdss_dsi_clk_ctrl(ctrl, enable);
+	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, enable);
 }
 
 void mdss_dsi_pll_relock(struct mdss_dsi_ctrl_pdata *ctrl)
 {
 	int i, cnt;
 
-	cnt = ctrl->clk_cnt;
+	/*
+	 * todo: this code does not work very well with dual
+	 * dsi use cases. Need to fix this eventually.
+	 */
+	cnt = ctrl->link_clk_cnt;
 
 	/* disable dsi clk */
 	for (i = 0; i < cnt; i++)
-		mdss_dsi_clk_ctrl(ctrl, 0);
+		mdss_dsi_clk_ctrl(ctrl, DSI_LINK_CLKS, 0);
 
 	/* enable dsi clk */
 	for (i = 0; i < cnt; i++)
-		mdss_dsi_clk_ctrl(ctrl, 1);
+		mdss_dsi_clk_ctrl(ctrl, DSI_LINK_CLKS, 1);
 }
 
 void mdss_dsi_enable_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
@@ -250,12 +225,12 @@
 	MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x0);
 }
 
-void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
-				struct mdss_panel_data *pdata)
+void mdss_dsi_host_init(struct mdss_panel_data *pdata)
 {
 	u32 dsi_ctrl, intr_ctrl;
 	u32 data;
 	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mipi_panel_info *pinfo = NULL;
 
 	if (pdata == NULL) {
 		pr_err("%s: Invalid input data\n", __func__);
@@ -265,9 +240,9 @@
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 
-	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
+	pinfo = &pdata->panel_info.mipi;
 
-	ctrl_pdata->panel_mode = pinfo->mode;
+	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
 
 	if (pinfo->mode == DSI_VIDEO_MODE) {
 		data = 0;
@@ -339,7 +314,7 @@
 
 	/* from frame buffer, low power mode */
 	/* DSI_COMMAND_MODE_DMA_CTRL */
-	if (ctrl_pdata->shared_pdata.broadcast_enable)
+	if (mdss_dsi_broadcast_mode_enabled())
 		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x94000000);
 	else
 		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x14000000);
@@ -542,6 +517,7 @@
 {
 	u32 dsi_ctrl, intr_ctrl;
 	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
 
 	if (pdata == NULL) {
 		pr_err("%s: Invalid input data\n", __func__);
@@ -551,12 +527,15 @@
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 
-	if (ctrl_pdata->shared_pdata.broadcast_enable)
-		if (pdata->panel_info.pdest == DISPLAY_1) {
-			pr_debug("%s: Broadcast mode. 1st ctrl\n",
-				 __func__);
-			return;
-		}
+	/*
+	 * In broadcast mode, the configuration for master controller
+	 * would be done when the slave controller is configured
+	 */
+	if (mdss_dsi_is_master_ctrl(ctrl_pdata)) {
+		pr_debug("%s: Broadcast mode enabled. skipping config for ctrl%d\n",
+			__func__, ctrl_pdata->ndx);
+		return;
+	}
 
 	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
 	/*If Video enabled, Keep Video and Cmd mode ON */
@@ -577,17 +556,22 @@
 			DSI_INTR_CMD_MDP_DONE_MASK | DSI_INTR_BTA_DONE_MASK;
 	}
 
-	if (ctrl_pdata->shared_pdata.broadcast_enable)
-		if ((pdata->panel_info.pdest == DISPLAY_2)
-		  && (left_ctrl_pdata != NULL)) {
-			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110,
-				  intr_ctrl); /* DSI_INTL_CTRL */
-			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
-					dsi_ctrl);
+	/* Ensure that for slave controller, master is also configured */
+	if (mdss_dsi_is_slave_ctrl(ctrl_pdata)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (mctrl) {
+			pr_debug("%s: configuring ctrl%d\n", __func__,
+				mctrl->ndx);
+			MIPI_OUTP(mctrl->ctrl_base + 0x0110, intr_ctrl);
+			MIPI_OUTP(mctrl->ctrl_base + 0x0004, dsi_ctrl);
+		} else {
+			pr_warn("%s: Unable to get master control\n",
+				__func__);
 		}
+	}
 
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110,
-				intr_ctrl); /* DSI_INTL_CTRL */
+	pr_debug("%s: configuring ctrl%d\n", __func__, ctrl_pdata->ndx);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110, intr_ctrl);
 	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
 	wmb();
 }
@@ -638,7 +622,7 @@
 
 	pr_debug("%s: Checking BTA status\n", __func__);
 
-	mdss_dsi_clk_ctrl(ctrl_pdata, 1);
+	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 1);
 	spin_lock_irqsave(&ctrl_pdata->mdp_lock, flag);
 	INIT_COMPLETION(ctrl_pdata->bta_comp);
 	mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
@@ -653,7 +637,7 @@
 		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
 	}
 
-	mdss_dsi_clk_ctrl(ctrl_pdata, 0);
+	mdss_dsi_clk_ctrl(ctrl_pdata, DSI_ALL_CLKS, 0);
 	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);
 
 	return ret;
@@ -742,6 +726,40 @@
 	return tot;
 }
 
+/**
+ * __mdss_dsi_cmd_mode_config() - Enable/disable command mode engine
+ * @ctrl: pointer to the dsi controller structure
+ * @enable: true to enable command mode, false to disable command mode
+ *
+ * This function can be used to temporarily enable the command mode
+ * engine (even for video mode panels) so as to transfer any dma commands to
+ * the panel. It can also be used to disable the command mode engine
+ * when no longer needed.
+ *
+ * Return: true, if there was a mode switch to command mode for video mode
+ * panels.
+ */
+static inline bool __mdss_dsi_cmd_mode_config(
+	struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
+{
+	bool mode_changed = false;
+	u32 dsi_ctrl;
+
+	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
+	/* if currently in video mode, enable command mode */
+	if (enable) {
+		if ((dsi_ctrl) & BIT(1)) {
+			MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
+				dsi_ctrl | BIT(2));
+			mode_changed = true;
+		}
+	} else {
+		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, dsi_ctrl & ~BIT(2));
+	}
+
+	return mode_changed;
+}
+
 /*
  * mdss_dsi_cmds_tx:
  * thread context only
@@ -749,61 +767,49 @@
 int mdss_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
 		struct dsi_cmd_desc *cmds, int cnt)
 {
-	u32 dsi_ctrl, data;
-	int video_mode, ret = 0;
-	u32 left_dsi_ctrl = 0;
-	bool left_ctrl_restore = false;
+	int ret = 0;
+	bool ctrl_restore = false, mctrl_restore = false;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
 
-	if (ctrl->shared_pdata.broadcast_enable) {
-		if (ctrl->ndx == DSI_CTRL_0) {
-			pr_debug("%s: Broadcast mode. 1st ctrl\n",
-				 __func__);
-			return 0;
-		}
+	/*
+	 * In broadcast mode, the configuration for master controller
+	 * would be done when the slave controller is configured
+	 */
+	if (mdss_dsi_is_master_ctrl(ctrl)) {
+		pr_debug("%s: Broadcast mode enabled. skipping config for ctrl%d\n",
+			__func__, ctrl->ndx);
+		return 0;
 	}
 
-	if (ctrl->shared_pdata.broadcast_enable) {
-		if ((ctrl->ndx == DSI_CTRL_1)
-		  && (left_ctrl_pdata != NULL)) {
-			left_dsi_ctrl = MIPI_INP(left_ctrl_pdata->ctrl_base
-								+ 0x0004);
-			video_mode =
-				left_dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
-			if (video_mode) {
-				data = left_dsi_ctrl | 0x04; /* CMD_MODE_EN */
-				MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
-						data);
-				left_ctrl_restore = true;
-			}
-		}
+	/*
+	 * Turn on cmd mode in order to transmit the commands.
+	 * For video mode, do not send cmds more than one pixel line,
+	 * since they are only transmitted during BLLP.
+	 * Ensure that for slave controller, master is also configured
+	 */
+	if (mdss_dsi_is_slave_ctrl(ctrl)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (!mctrl)
+			pr_warn("%s: Unable to get master control\n",
+				__func__);
+		else
+			mctrl_restore = __mdss_dsi_cmd_mode_config(mctrl, 1);
 	}
 
-	/* turn on cmd mode
-	* for video mode, do not send cmds more than
-	* one pixel line, since it only transmit it
-	* during BLLP.
-	*/
-	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
-	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
-	if (video_mode) {
-		data = dsi_ctrl | 0x04; /* CMD_MODE_EN */
-		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, data);
-	}
+	ctrl_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
 
 	ret = mdss_dsi_cmds2buf_tx(ctrl, cmds, cnt);
 	if (IS_ERR_VALUE(ret)) {
-		pr_err("%s: failed to call\n",
-			__func__);
+		pr_err("%s: failed to call\n", __func__);
 		cnt = -EINVAL;
 	}
 
-	if (left_ctrl_restore)
-		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
-					left_dsi_ctrl); /*restore */
+	if (mctrl_restore)
+		__mdss_dsi_cmd_mode_config(mctrl, 0);
 
-	if (video_mode)
-		MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
-					dsi_ctrl); /* restore */
+	if (ctrl_restore)
+		__mdss_dsi_cmd_mode_config(ctrl, 0);
+
 	return cnt;
 }
 
@@ -836,45 +842,35 @@
 	int short_response, diff, pkt_size, ret = 0;
 	struct dsi_buf *tp, *rp;
 	char cmd;
-	u32 dsi_ctrl, data;
-	int video_mode;
-	u32 left_dsi_ctrl = 0;
-	bool left_ctrl_restore = false;
+	bool ctrl_restore = false, mctrl_restore = false;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
 
-	if (ctrl->shared_pdata.broadcast_enable) {
-		if (ctrl->ndx == DSI_CTRL_0) {
-			pr_debug("%s: Broadcast mode. 1st ctrl\n",
-				 __func__);
-			return 0;
-		}
+	/*
+	 * In broadcast mode, the configuration for master controller
+	 * would be done when the slave controller is configured
+	 */
+	if (mdss_dsi_is_master_ctrl(ctrl)) {
+	pr_debug("%s: Broadcast mode enabled. Skipping config for ctrl%d\n",
+			__func__, ctrl->ndx);
+		return 0;
 	}
 
-	if (ctrl->shared_pdata.broadcast_enable) {
-		if ((ctrl->ndx == DSI_CTRL_1)
-		  && (left_ctrl_pdata != NULL)) {
-			left_dsi_ctrl = MIPI_INP(left_ctrl_pdata->ctrl_base
-								+ 0x0004);
-			video_mode = left_dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
-			if (video_mode) {
-				data = left_dsi_ctrl | 0x04; /* CMD_MODE_EN */
-				MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
-						data);
-				left_ctrl_restore = true;
-			}
-		}
+	/*
+	 * Turn on cmd mode in order to transmit the commands.
+	 * For video mode, do not send cmds spanning more than one pixel
+	 * line, since they are only transmitted during BLLP.
+	 * Ensure that for slave controller, master is also configured
+	 */
+	if (mdss_dsi_is_slave_ctrl(ctrl)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (!mctrl)
+			pr_warn("%s: Unable to get master control\n",
+				__func__);
+		else
+			mctrl_restore = __mdss_dsi_cmd_mode_config(mctrl, 1);
 	}
 
-	/* turn on cmd mode
-	* for video mode, do not send cmds more than
-	* one pixel line, since it only transmit it
-	* during BLLP.
-	*/
-	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
-	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
-	if (video_mode) {
-		data = dsi_ctrl | 0x04; /* CMD_MODE_EN */
-		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, data);
-	}
+	ctrl_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
 
 	if (rlen == 0) {
 		short_response = 1;
@@ -1001,12 +997,11 @@
 		rp->len = 0;
 	}
 end:
-	if (left_ctrl_restore)
-		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
-					left_dsi_ctrl); /*restore */
-	if (video_mode)
-		MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
-					dsi_ctrl); /* restore */
+	if (mctrl_restore)
+		__mdss_dsi_cmd_mode_config(mctrl, 0);
+
+	if (ctrl_restore)
+		__mdss_dsi_cmd_mode_config(ctrl, 0);
 
 	return rp->len;
 }
@@ -1020,6 +1015,7 @@
 	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
 	char *bp;
 	unsigned long size, addr;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
 
 	bp = tp->data;
 
@@ -1041,24 +1037,27 @@
 
 	INIT_COMPLETION(ctrl->dma_comp);
 
-	if (ctrl->shared_pdata.broadcast_enable)
-		if ((ctrl->ndx == DSI_CTRL_1)
-		  && (left_ctrl_pdata != NULL)) {
-			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr);
-			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len);
+	/* Ensure that for slave controller, master is also configured */
+	if (mdss_dsi_is_slave_ctrl(ctrl)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (mctrl) {
+			MIPI_OUTP(mctrl->ctrl_base + 0x048, addr);
+			MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
+		} else {
+			pr_warn("%s: Unable to get master control\n",
+				__func__);
 		}
+	}
 
 	MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr);
 	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
 	wmb();
 
-	if (ctrl->shared_pdata.broadcast_enable)
-		if ((ctrl->ndx == DSI_CTRL_1)
-		  && (left_ctrl_pdata != NULL)) {
-			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01);
-		}
+	/* Trigger on master controller as well */
+	if (mctrl)
+		MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01);
 
-	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);	/* trigger */
+	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
 	wmb();
 
 	ret = wait_for_completion_timeout(&ctrl->dma_comp,
@@ -1208,13 +1207,11 @@
 		len = mdss_dsi_cmds_rx(ctrl, req->cmds, req->rlen);
 		memcpy(req->rbuf, rp->data, rp->len);
 		/*
-		 * For dual DSI cases, early return of controller - 0
+		 * For dual DSI cases, early return of master ctrl
 		 * is valid. Hence, for those cases the return value
 		 * is zero even though we don't send any commands.
-		 *
 		 */
-		if ((ctrl->shared_pdata.broadcast_enable &&
-			ctrl->ndx == DSI_CTRL_0) || (len != 0))
+		if (mdss_dsi_is_master_ctrl(ctrl) || (len != 0))
 			ret = 0;
 	} else {
 		pr_err("%s: No rx buffer provided\n", __func__);
@@ -1251,14 +1248,14 @@
 	mdss_bus_bandwidth_ctrl(1);
 
 	pr_debug("%s:  from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
-	mdss_dsi_clk_ctrl(ctrl, 1);
+	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
 
 	if (req->flags & CMD_REQ_RX)
 		ret = mdss_dsi_cmdlist_rx(ctrl, req);
 	else
 		ret = mdss_dsi_cmdlist_tx(ctrl, req);
 
-	mdss_dsi_clk_ctrl(ctrl, 0);
+	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
 	mdss_bus_bandwidth_ctrl(0);
 
 need_lock:
@@ -1463,6 +1460,7 @@
 	u32 isr;
 	struct mdss_dsi_ctrl_pdata *ctrl =
 			(struct mdss_dsi_ctrl_pdata *)ptr;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
 
 	if (!ctrl->ctrl_base)
 		pr_err("%s:%d DSI base adr no Initialized",
@@ -1471,14 +1469,19 @@
 	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
 	MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);
 
-	if (ctrl->shared_pdata.broadcast_enable)
-		if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
-		    && (left_ctrl_pdata != NULL)) {
+	if (mdss_dsi_is_slave_ctrl(ctrl)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (mctrl) {
 			u32 isr0;
-			isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
-						+ 0x0110);/* DSI_INTR_CTRL */
-			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
+			isr0 = MIPI_INP(mctrl->ctrl_base + 0x0110);
+			if (isr0 & DSI_INTR_CMD_DMA_DONE)
+				MIPI_OUTP(mctrl->ctrl_base + 0x0110,
+					DSI_INTR_CMD_DMA_DONE);
+		} else {
+			pr_warn("%s: Unable to get master control\n",
+				__func__);
 		}
+	}
 
 	pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
 
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index 7abb761..f450dec 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -153,15 +153,53 @@
 	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
 }
 
-void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc = 0;
+
+	if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		rc = gpio_request(ctrl_pdata->disp_en_gpio,
+						"disp_enable");
+		if (rc) {
+			pr_err("request disp_en gpio failed, rc=%d\n",
+				       rc);
+			goto disp_en_gpio_err;
+		}
+	}
+	rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+	if (rc) {
+		pr_err("request reset gpio failed, rc=%d\n",
+			rc);
+		goto rst_gpio_err;
+	}
+	if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
+		rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
+		if (rc) {
+			pr_err("request panel mode gpio failed, rc=%d\n",
+								rc);
+			goto mode_gpio_err;
+		}
+	}
+	return rc;
+
+mode_gpio_err:
+	gpio_free(ctrl_pdata->rst_gpio);
+rst_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+		gpio_free(ctrl_pdata->disp_en_gpio);
+disp_en_gpio_err:
+	return rc;
+}
+
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
 {
 	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 	struct mdss_panel_info *pinfo = NULL;
-	int i;
+	int i, rc = 0;
 
 	if (pdata == NULL) {
 		pr_err("%s: Invalid input data\n", __func__);
-		return;
+		return -EINVAL;
 	}
 
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
@@ -175,21 +213,28 @@
 	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
 		pr_debug("%s:%d, reset line not configured\n",
 			   __func__, __LINE__);
-		return;
+		return rc;
 	}
 
 	pr_debug("%s: enable = %d\n", __func__, enable);
 	pinfo = &(ctrl_pdata->panel_data.panel_info);
 
 	if (enable) {
-		if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
-			gpio_set_value((ctrl_pdata->disp_en_gpio), 1);
+		rc = mdss_dsi_request_gpios(ctrl_pdata);
+		if (rc) {
+			pr_err("gpio request failed\n");
+			return rc;
+		}
+		if (!pinfo->panel_power_on) {
+			if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+				gpio_set_value((ctrl_pdata->disp_en_gpio), 1);
 
-		for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
-			gpio_set_value((ctrl_pdata->rst_gpio),
-				pdata->panel_info.rst_seq[i]);
-			if (pdata->panel_info.rst_seq[++i])
-				usleep(pdata->panel_info.rst_seq[i] * 1000);
+			for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
+				gpio_set_value((ctrl_pdata->rst_gpio),
+					pdata->panel_info.rst_seq[i]);
+				if (pdata->panel_info.rst_seq[++i])
+					usleep(pinfo->rst_seq[i] * 1000);
+			}
 		}
 
 		if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
@@ -205,10 +250,16 @@
 			pr_debug("%s: Reset panel done\n", __func__);
 		}
 	} else {
-		gpio_set_value((ctrl_pdata->rst_gpio), 0);
-		if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+		if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
 			gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
+			gpio_free(ctrl_pdata->disp_en_gpio);
+		}
+		gpio_set_value((ctrl_pdata->rst_gpio), 0);
+		gpio_free(ctrl_pdata->rst_gpio);
+		if (gpio_is_valid(ctrl_pdata->mode_gpio))
+			gpio_free(ctrl_pdata->mode_gpio);
 	}
+	return rc;
 }
 
 static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00};	/* DTYPE_DCS_LWRITE */
@@ -297,6 +348,16 @@
 		break;
 	case BL_DCS_CMD:
 		mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+		if (mdss_dsi_is_master_ctrl(ctrl_pdata)) {
+			struct mdss_dsi_ctrl_pdata *sctrl =
+				mdss_dsi_get_slave_ctrl();
+			if (!sctrl) {
+				pr_err("%s: Invalid slave ctrl data\n",
+					__func__);
+				return;
+			}
+			mdss_dsi_panel_bklt_dcs(sctrl, bl_level);
+		}
 		break;
 	default:
 		pr_err("%s: Unknown bl_ctrl configuration\n",
@@ -632,6 +693,35 @@
 	return 0;
 }
 
+static int mdss_dsi_parse_panel_features(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!np || !ctrl) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl->panel_data.panel_info;
+
+	pinfo->cont_splash_enabled = of_property_read_bool(np,
+		"qcom,cont-splash-enabled");
+
+	pinfo->partial_update_enabled = of_property_read_bool(np,
+		"qcom,partial-update-enabled");
+	pr_info("%s:%d Partial update %s\n", __func__, __LINE__,
+		(pinfo->partial_update_enabled ? "enabled" : "disabled"));
+	if (pinfo->partial_update_enabled)
+		ctrl->partial_update_fnc = mdss_dsi_panel_partial_update;
+
+	pinfo->ulps_feature_enabled = of_property_read_bool(np,
+		"qcom,ulps-enabled");
+	pr_info("%s: ulps feature %s\n", __func__,
+		(pinfo->ulps_feature_enabled ? "enabled" : "disabled"));
+
+	return 0;
+}
 
 static int mdss_panel_parse_dt(struct device_node *np,
 			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
@@ -911,6 +1001,12 @@
 	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->off_cmds,
 		"qcom,mdss-dsi-off-command", "qcom,mdss-dsi-off-command-state");
 
+	rc = mdss_dsi_parse_panel_features(np, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse panel features\n", __func__);
+		goto error;
+	}
+
 	return 0;
 
 error:
@@ -923,14 +1019,15 @@
 {
 	int rc = 0;
 	static const char *panel_name;
-	bool cont_splash_enabled;
-	bool partial_update_enabled;
+	struct mdss_panel_info *pinfo;
 
-	if (!node) {
-		pr_err("%s: no panel node\n", __func__);
+	if (!node || !ctrl_pdata) {
+		pr_err("%s: Invalid arguments\n", __func__);
 		return -ENODEV;
 	}
 
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
 	pr_debug("%s:%d\n", __func__, __LINE__);
 	panel_name = of_get_property(node, "qcom,mdss-dsi-panel-name", NULL);
 	if (!panel_name)
@@ -945,33 +1042,10 @@
 		return rc;
 	}
 
-	if (cmd_cfg_cont_splash)
-		cont_splash_enabled = of_property_read_bool(node,
-				"qcom,cont-splash-enabled");
-	else
-		cont_splash_enabled = false;
-	if (!cont_splash_enabled) {
-		pr_info("%s:%d Continuous splash flag not found.\n",
-				__func__, __LINE__);
-		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 0;
-	} else {
-		pr_info("%s:%d Continuous splash flag enabled.\n",
-				__func__, __LINE__);
-
-		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 1;
-	}
-
-	partial_update_enabled = of_property_read_bool(node,
-						"qcom,partial-update-enabled");
-	if (partial_update_enabled) {
-		pr_info("%s:%d Partial update enabled.\n", __func__, __LINE__);
-		ctrl_pdata->panel_data.panel_info.partial_update_enabled = 1;
-		ctrl_pdata->partial_update_fnc = mdss_dsi_panel_partial_update;
-	} else {
-		pr_info("%s:%d Partial update disabled.\n", __func__, __LINE__);
-		ctrl_pdata->panel_data.panel_info.partial_update_enabled = 0;
-		ctrl_pdata->partial_update_fnc = NULL;
-	}
+	if (!cmd_cfg_cont_splash)
+		pinfo->cont_splash_enabled = false;
+	pr_info("%s: Continuous splash %s\n", __func__,
+		pinfo->cont_splash_enabled ? "enabled" : "disabled");
 
 	ctrl_pdata->on = mdss_dsi_panel_on;
 	ctrl_pdata->off = mdss_dsi_panel_off;
diff --git a/drivers/video/msm/mdss/mdss_dsi_status.c b/drivers/video/msm/mdss/mdss_dsi_status.c
index f0c4f4c..fd7f3fd 100644
--- a/drivers/video/msm/mdss/mdss_dsi_status.c
+++ b/drivers/video/msm/mdss/mdss_dsi_status.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -86,6 +86,15 @@
 		mutex_lock(ctl->shared_lock);
 	mutex_lock(&mdp5_data->ov_lock);
 
+	if (pdsi_status->mfd->shutdown_pending) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		pr_err("%s: DSI turning off, avoiding BTA status check\n",
+							__func__);
+		return;
+	}
+
 	/*
 	 * For the command mode panels, we return pan display
 	 * IOCTL on vsync interrupt. So, after vsync interrupt comes
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 3bace66..1aec58e 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -828,6 +828,8 @@
 		}
 		break;
 	}
+	/* Notify listeners */
+	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
 
 	return ret;
 }
@@ -1214,9 +1216,11 @@
 	struct mdss_fb_proc_info *pinfo = NULL;
 	int result;
 	int pid = current->tgid;
+	struct task_struct *task = current->group_leader;
 
 	if (mfd->shutdown_pending) {
-		pr_err("Shutdown pending. Aborting operation\n");
+		pr_err("Shutdown pending. Aborting operation. Request from pid:%d name=%s\n",
+				pid, task->comm);
 		return -EPERM;
 	}
 
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
index e56e9fa..2240941 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -1050,7 +1050,10 @@
 	}
 
 	if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
-		hdmi_hdcp_off(hdcp_ctrl);
+		mutex_lock(hdcp_ctrl->init_data.mutex);
+		hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+		mutex_unlock(hdcp_ctrl->init_data.mutex);
+
 		hdcp_ctrl->auth_retries = 0;
 		ret = -ERANGE;
 	}
@@ -1077,13 +1080,6 @@
 		return 0;
 	}
 
-	ret = hdmi_msm_if_abort_reauth(hdcp_ctrl);
-
-	if (ret) {
-		DEV_ERR("%s: abort reauthentication!\n", __func__);
-		return ret;
-	}
-
 	/*
 	 * Disable HPD circuitry.
 	 * This is needed to reset the HDCP cipher engine so that when we
@@ -1109,6 +1105,13 @@
 		DSS_REG_R(hdcp_ctrl->init_data.core_io,
 		HDMI_HPD_CTRL) | BIT(28));
 
+	ret = hdmi_msm_if_abort_reauth(hdcp_ctrl);
+
+	if (ret) {
+		DEV_ERR("%s: abort reauthentication!\n", __func__);
+		return ret;
+	}
+
 	/* Restart authentication attempt */
 	DEV_DBG("%s: %s: Scheduling work to start HDCP authentication",
 		__func__, HDCP_STATE_NAME);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index cd2f8e4..2b409f5 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1127,6 +1127,17 @@
 	DEV_DBG("%s: Got HPD interrupt\n", __func__);
 
 	if (hdmi_ctrl->hpd_state) {
+		/*
+		 * If a downstream device or bridge chip is attached to the
+		 * hdmi Tx core output, it is likely to power the hpd module
+		 * ON/OFF on cable connect/disconnect as it has its own
+		 * mechanism of detecting the cable. Flushing the power off
+		 * work is needed in case there is any race condition between
+		 * power off and on during fast cable plug in/out.
+		 */
+		if (hdmi_ctrl->ds_registered)
+			flush_work(&hdmi_ctrl->power_off_work);
+
 		if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, true)) {
 			DEV_ERR("%s: Failed to enable ddc power\n", __func__);
 			return;
@@ -1143,6 +1154,13 @@
 		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0, false);
 		hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
 
+		if (!hdmi_ctrl->panel_power_on) {
+			if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM,
+				false))
+				DEV_WARN("%s: Failed to disable ddc power\n",
+					__func__);
+		}
+
 		hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
 		DEV_INFO("%s: sense cable DISCONNECTED: state switch to %d\n",
 			__func__, hdmi_ctrl->sdev.state);
@@ -2344,6 +2362,8 @@
 	ops->set_mhl_max_pclk = hdmi_tx_set_mhl_max_pclk;
 	ops->set_upstream_hpd = hdmi_tx_set_mhl_hpd;
 
+	hdmi_ctrl->ds_registered = true;
+
 	return 0;
 }
 
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index 8233ba8..54d80dc 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -83,6 +83,7 @@
 	struct work_struct cable_notify_work;
 
 	bool hdcp_feature_on;
+	bool ds_registered;
 	u32 present_hdcp;
 
 	u8 spd_vendor_name[9];
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index e1786a6..8a215bc 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -296,6 +296,7 @@
 	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
 			mdss_res->irq_ena, mdss_res->irq_mask);
 
+	spin_lock(&mdss_lock);
 	if (!(mdss_res->irq_mask & ndx_bit)) {
 		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n",
 			hw->hw_ndx, mdss_res->mdp_irq_mask,
@@ -307,6 +308,7 @@
 			disable_irq_nosync(mdss_res->irq);
 		}
 	}
+	spin_unlock(&mdss_lock);
 }
 EXPORT_SYMBOL(mdss_disable_irq_nosync);
 
@@ -505,7 +507,16 @@
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
 }
 
-/* called from interrupt context */
+/**
+ * mdss_mdp_irq_disable_nosync() - disable mdp irq
+ * @intr_type:	mdp interrupt type
+ * @intf_num:	mdp interface num
+ *
+ * This function is called from interrupt context. mdp_lock is already
+ * held upstream (in mdss_irq_handler), therefore taking spin_lock(&mdp_lock)
+ * is not allowed here.
+ */
 void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
 {
 	u32 irq;
@@ -1001,8 +1012,9 @@
 		writel_relaxed(1, offset + 16);
 	}
 
-	mdata->nmax_concurrent_ad_hw = (mdata->mdp_rev <= MDSS_MDP_HW_REV_102) ?
-									1 : 2;
+	mdata->nmax_concurrent_ad_hw =
+		(mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	pr_debug("MDP hw init done\n");
 
@@ -1578,6 +1590,61 @@
 	return 0;
 }
 
+static int  mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
+	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
+{
+	int rc = 0;
+	size_t len;
+	const u32 *arr;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		int i, j;
+
+		len /= sizeof(u32);
+		for (i = 0, j = 0; i < len; j++) {
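+		/* each pipe consumes 3 u32s: ctrl reg offset, ctrl bit, status bit */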
+			struct mdss_mdp_pipe *pipe = NULL;
+
+			if (j >= npipes) {
+				pr_err("invalid clk ctrl entries for prop: %s\n",
+					prop_name);
+				return -EINVAL;
+			}
+
+			pipe = &pipe_list[j];
+
+			pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
+			pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);
+
+			/* status register is next in line to ctrl register */
+			pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
+			pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);
+
+			pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
+				prop_name, j, pipe->clk_ctrl.reg_off,
+				pipe->clk_ctrl.bit_off);
+			pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
+				prop_name, j, pipe->clk_status.reg_off,
+				pipe->clk_status.bit_off);
+		}
+		if (j != npipes) {
+			pr_err("%s: %d entries found. required %d\n",
+				prop_name, j, npipes);
+			for (i = 0; i < npipes; i++) {
+				memset(&pipe_list[i].clk_ctrl, 0,
+					sizeof(pipe_list[i].clk_ctrl));
+				memset(&pipe_list[i].clk_status, 0,
+					sizeof(pipe_list[i].clk_status));
+			}
+			rc = -EINVAL;
+		}
+	} else {
+		pr_err("error mandatory property '%s' not found\n", prop_name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
 
 static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
 {
@@ -1761,6 +1828,25 @@
 		setup_cnt += mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES;
 	}
 
+	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
+		"qcom,mdss-pipe-vig-clk-ctrl-offsets", mdata->vig_pipes,
+		mdata->nvig_pipes);
+	if (rc)
+		goto parse_fail;
+
+	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
+		"qcom,mdss-pipe-rgb-clk-ctrl-offsets", mdata->rgb_pipes,
+		mdata->nrgb_pipes);
+	if (rc)
+		goto parse_fail;
+
+	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
+		"qcom,mdss-pipe-dma-clk-ctrl-offsets", mdata->dma_pipes,
+		mdata->ndma_pipes);
+	if (rc)
+		goto parse_fail;
+
+
 	goto parse_done;
 
 parse_fail:
@@ -2405,8 +2491,10 @@
 		pr_debug("Enable MDP FS\n");
 		if (!mdata->fs_ena) {
 			regulator_enable(mdata->fs);
-			mdss_mdp_cx_ctrl(mdata, true);
-			mdss_mdp_batfet_ctrl(mdata, true);
+			if (!mdata->ulps) {
+				mdss_mdp_cx_ctrl(mdata, true);
+				mdss_mdp_batfet_ctrl(mdata, true);
+			}
 		}
 		mdata->fs_ena = true;
 	} else {
@@ -2414,13 +2502,41 @@
 		mdss_iommu_dettach(mdata);
 		if (mdata->fs_ena) {
 			regulator_disable(mdata->fs);
-			mdss_mdp_cx_ctrl(mdata, false);
-			mdss_mdp_batfet_ctrl(mdata, false);
+			if (!mdata->ulps) {
+				mdss_mdp_cx_ctrl(mdata, false);
+				mdss_mdp_batfet_ctrl(mdata, false);
+			}
 		}
 		mdata->fs_ena = false;
 	}
 }
 
+/**
+ * mdss_mdp_footswitch_ctrl_ulps() - MDSS GDSC control with ULPS feature
+ * @on: 1 to turn on footswitch, 0 to turn off footswitch
+ * @dev: framebuffer device node
+ *
+ * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
+ * mode displays with Ultra-Low Power State (ULPS) feature enabled. Upon
+ * subsequent frame update, MDSS GDSC needs to be turned back on and hw state
+ * needs to be restored.
+ */
+void mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_debug("called on=%d\n", on);
+	if (on) {
+		pm_runtime_get_sync(dev);
+		mdss_iommu_attach(mdata);
+		mdss_hw_init(mdata);
+		mdata->ulps = false;
+	} else {
+		mdata->ulps = true;
+		pm_runtime_put_sync(dev);
+	}
+}
+
 static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
 {
 	mdata->suspend_fs_ena = mdata->fs_ena;
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 3afb27e..adee7b50 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -349,6 +349,11 @@
 	DECLARE_BITMAP(fixed, MAX_DRV_SUP_MMB_BLKS);
 };
 
+struct mdss_mdp_shared_reg_ctrl {
+	u32 reg_off;
+	u32 bit_off;
+};
+
 struct mdss_mdp_pipe {
 	u32 num;
 	u32 type;
@@ -356,6 +361,9 @@
 	char __iomem *base;
 	u32 ftch_id;
 	u32 xin_id;
+	struct mdss_mdp_shared_reg_ctrl clk_ctrl;
+	struct mdss_mdp_shared_reg_ctrl clk_status;
+
 	atomic_t ref_cnt;
 	u32 play_cnt;
 	int pid;
@@ -452,6 +460,16 @@
 };
 
 #define is_vig_pipe(_pipe_id_) ((_pipe_id_) <= MDSS_MDP_SSPP_VIG2)
+
+static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
+	struct mdss_mdp_ctl *ctl)
+{
+	if (ctl && ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+		return ctl->mixer_right->ctl;
+
+	return NULL;
+}
+
 static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
 				      u32 reg, u32 val)
 {
@@ -533,7 +551,8 @@
 		struct mdss_mdp_pipe **left_plist, int left_cnt,
 		struct mdss_mdp_pipe **right_plist, int right_cnt);
 int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
-	struct mdss_mdp_perf_params *perf, struct mdss_mdp_img_rect *roi);
+	struct mdss_mdp_perf_params *perf, struct mdss_mdp_img_rect *roi,
+	bool apply_fudge);
 int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event);
 void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
 	struct notifier_block *notifier);
@@ -629,10 +648,10 @@
 int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets,
 		u32 *wb_offsets, u32 len);
 
+int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe);
 int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe);
 int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
 			     struct mdss_mdp_data *src_data);
-int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe);
 
 int mdss_mdp_data_check(struct mdss_mdp_data *data,
 			struct mdss_mdp_plane_sizes *ps);
@@ -681,6 +700,8 @@
 
 int mdss_mdp_wb_set_secure(struct msm_fb_data_type *mfd, int enable);
 int mdss_mdp_wb_get_secure(struct msm_fb_data_type *mfd, uint8_t *enable);
+void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev);
 
 int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe);
 #define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index b445afa..2c1beab 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -303,6 +303,7 @@
  * @pipe:	Source pipe struct containing updated pipe params
  * @perf:	Structure containing values that should be updated for
  *		performance tuning
+ * @apply_fudge:	Boolean to determine if mdp clock fudge is applicable
  *
  * Function calculates the minimum required performance calculations in order
  * to avoid MDP underflow. The calculations are based on the way MDP
@@ -310,7 +311,8 @@
  * (MDP clock requirement) based on frame size and scaling requirements.
  */
 int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
-	struct mdss_mdp_perf_params *perf, struct mdss_mdp_img_rect *roi)
+	struct mdss_mdp_perf_params *perf, struct mdss_mdp_img_rect *roi,
+	bool apply_fudge)
 {
 	struct mdss_mdp_mixer *mixer;
 	int fps = DEFAULT_FRAME_RATE;
@@ -384,7 +386,10 @@
 		perf->bw_overlap = (quota / dst.h) * v_total;
 	}
 
-	perf->mdp_clk_rate = mdss_mdp_clk_fudge_factor(mixer, rate);
+	if (apply_fudge)
+		perf->mdp_clk_rate = mdss_mdp_clk_fudge_factor(mixer, rate);
+	else
+		perf->mdp_clk_rate = rate;
 
 	prefill_params.smp_bytes = mdss_mdp_smp_get_size(pipe);
 	prefill_params.xres = xres;
@@ -440,6 +445,8 @@
 	u64 bw_overlap[MDSS_MDP_MAX_STAGE] = { 0 };
 	u32 v_region[MDSS_MDP_MAX_STAGE * 2] = { 0 };
 	u32 prefill_bytes = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool apply_fudge = true;
 
 	BUG_ON(num_pipes > MDSS_MDP_MAX_STAGE);
 
@@ -469,13 +476,36 @@
 	memset(bw_overlap, 0, sizeof(u64) * MDSS_MDP_MAX_STAGE);
 	memset(v_region, 0, sizeof(u32) * MDSS_MDP_MAX_STAGE * 2);
 
+	/*
+	 * Apply this logic only for 8x26 to reduce clock rate
+	 * for single video playback use case
+	 */
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_101)
+		 && mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+		u32 npipes = 0;
+		for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
+			pipe = mixer->stage_pipe[i];
+			if (pipe) {
+				if (npipes) {
+					apply_fudge = true;
+					break;
+				}
+				npipes++;
+				apply_fudge = !(pipe->src_fmt->is_yuv)
+					|| !(pipe->flags
+					& MDP_SOURCE_ROTATED_90);
+			}
+		}
+	}
+
 	for (i = 0; i < num_pipes; i++) {
 		struct mdss_mdp_perf_params tmp;
 		pipe = pipe_list[i];
 		if (pipe == NULL)
 			continue;
 
-		if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi))
+		if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi,
+			apply_fudge))
 			continue;
 		prefill_bytes += tmp.prefill_bytes;
 		bw_overlap[i] = tmp.bw_overlap;
@@ -622,9 +652,6 @@
 	}
 
 	perf->bw_ctl = max(perf->bw_prefill, perf->bw_overlap);
-
-	if (ctl->is_video_mode)
-		perf->bw_ctl = IB_FUDGE_FACTOR(perf->bw_ctl);
 }
 
 int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
@@ -667,6 +694,9 @@
 			left_plist, (left_plist ? MDSS_MDP_MAX_STAGE : 0),
 			right_plist, (right_plist ? MDSS_MDP_MAX_STAGE : 0));
 
+	if (ctl->is_video_mode)
+		perf->bw_ctl = IB_FUDGE_FACTOR(perf->bw_ctl);
+
 	pr_debug("ctl=%d clk_rate=%u\n", ctl->num, perf->mdp_clk_rate);
 	pr_debug("bw_overlap=%llu bw_prefill=%llu prefill_byptes=%d\n",
 		 perf->bw_overlap, perf->bw_prefill, perf->prefill_bytes);
@@ -1133,15 +1163,6 @@
 	return 0;
 }
 
-static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
-		struct mdss_mdp_ctl *ctl)
-{
-	if (ctl && ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
-		return ctl->mixer_right->ctl;
-
-	return NULL;
-}
-
 int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
 {
 	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
@@ -1588,6 +1609,25 @@
 	return rc;
 }
 
+/*
+ * mdss_mdp_ctl_restore() - restore mdp ctl path
+ * @ctl: mdp controller.
+ *
+ * This function is called whenever MDP comes out of a power collapse as
+ * a result of a screen update when DSI ULPS mode is enabled. It restores
+ * the MDP controller's software state to the hardware registers.
+ */
+void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(ctl->mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
+	writel_relaxed(temp, ctl->mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+}
+
 static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
 {
 	struct mdss_mdp_mixer *mixer;
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index bff56d2..4b9ea20 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -21,10 +21,6 @@
 #define ENHIST_LUT_ENTRIES 256
 #define HIST_V_SIZE	256
 
-#define MDSS_MDP_HW_REV_100		0x10000000
-#define MDSS_MDP_HW_REV_102		0x10020000
-#define MDSS_MDP_HW_REV_103		0x10030000
-
 #define MDSS_MDP_FETCH_CONFIG_RESET_VALUE	0x00000087
 
 #define MDSS_REG_HW_VERSION				0x0
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index 79bdee2..96da27e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -27,6 +27,7 @@
 #define KOFF_TIMEOUT msecs_to_jiffies(84)
 
 #define STOP_TIMEOUT msecs_to_jiffies(16 * (VSYNC_EXPIRE_TICK + 2))
+#define ULPS_ENTER_TIME msecs_to_jiffies(100)
 
 struct mdss_mdp_cmd_ctx {
 	struct mdss_mdp_ctl *ctl;
@@ -43,6 +44,7 @@
 	struct mutex clk_mtx;
 	spinlock_t clk_lock;
 	struct work_struct clk_work;
+	struct delayed_work ulps_work;
 	struct work_struct pp_done_work;
 	atomic_t pp_done_cnt;
 
@@ -53,6 +55,7 @@
 	u16 start_threshold;
 	u32 vclk_line;	/* vsync clock per line */
 	struct mdss_panel_recovery recovery;
+	bool ulps;
 };
 
 struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
@@ -197,12 +200,29 @@
 {
 	unsigned long flags;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!ctx->panel_on)
+		return;
+
 	mutex_lock(&ctx->clk_mtx);
 	if (!ctx->clk_enabled) {
 		ctx->clk_enabled = 1;
+		if (cancel_delayed_work_sync(&ctx->ulps_work))
+			pr_debug("deleted pending ulps work\n");
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+		if (ctx->ulps) {
+			if (mdss_mdp_cmd_tearcheck_setup(ctx->ctl, 1))
+				pr_warn("tearcheck setup failed\n");
+			mdss_mdp_ctl_intf_event(ctx->ctl,
+				MDSS_EVENT_DSI_ULPS_CTRL, (void *)0);
+			ctx->ulps = false;
+		}
+
 		mdss_mdp_ctl_intf_event
 			(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)1);
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
 		mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);
 	}
 	spin_lock_irqsave(&ctx->clk_lock, flags);
@@ -231,6 +251,8 @@
 		mdss_mdp_ctl_intf_event
 			(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+		if (ctx->panel_on)
+			schedule_delayed_work(&ctx->ulps_work, ULPS_ENTER_TIME);
 	}
 	mutex_unlock(&ctx->clk_mtx);
 }
@@ -261,6 +283,10 @@
 	if (!ctx->vsync_enabled) {
 		if (ctx->rdptr_enabled)
 			ctx->rdptr_enabled--;
+
+		/* keep clk on during kickoff */
+		if (ctx->rdptr_enabled == 0 && ctx->koff_cnt)
+			ctx->rdptr_enabled++;
 	}
 
 	if (ctx->rdptr_enabled == 0) {
@@ -365,11 +391,44 @@
 	mdss_mdp_cmd_clk_off(ctx);
 }
 
+static void __mdss_mdp_cmd_ulps_work(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(dw, struct mdss_mdp_cmd_ctx, ulps_work);
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mutex_lock(&ctx->clk_mtx);
+	if (ctx->clk_enabled) {
+		mutex_unlock(&ctx->clk_mtx);
+		pr_warn("Cannot enter ulps mode if DSI clocks are on\n");
+		return;
+	}
+	mutex_unlock(&ctx->clk_mtx);
+
+	if (!ctx->panel_on) {
+		pr_err("Panel is off. Skipping ULPS configuration\n");
+		return;
+	}
+
+	if (!mdss_mdp_ctl_intf_event(ctx->ctl, MDSS_EVENT_DSI_ULPS_CTRL,
+		(void *)1)) {
+		ctx->ulps = true;
+		ctx->ctl->play_cnt = 0;
+		mdss_mdp_footswitch_ctrl_ulps(0, &ctx->ctl->mfd->pdev->dev);
+	}
+}
+
 static int mdss_mdp_cmd_add_vsync_handler(struct mdss_mdp_ctl *ctl,
 		struct mdss_mdp_vsync_handler *handle)
 {
 	struct mdss_mdp_cmd_ctx *ctx;
 	unsigned long flags;
+	bool enable_rdptr = false;
 
 	ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
 	if (!ctx) {
@@ -381,12 +440,14 @@
 	if (!handle->enabled) {
 		handle->enabled = true;
 		list_add(&handle->list, &ctx->vsync_handlers);
-		if (!handle->cmd_post_flush)
-			ctx->vsync_enabled = 1;
+
+		enable_rdptr = !handle->cmd_post_flush;
+		if (enable_rdptr)
+			ctx->vsync_enabled++;
 	}
 	spin_unlock_irqrestore(&ctx->clk_lock, flags);
 
-	if (!handle->cmd_post_flush)
+	if (enable_rdptr)
 		mdss_mdp_cmd_clk_on(ctx);
 
 	return 0;
@@ -395,11 +456,8 @@
 static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
 		struct mdss_mdp_vsync_handler *handle)
 {
-
 	struct mdss_mdp_cmd_ctx *ctx;
 	unsigned long flags;
-	struct mdss_mdp_vsync_handler *tmp;
-	int num_rdptr_vsync = 0;
 
 	ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
 	if (!ctx) {
@@ -407,19 +465,17 @@
 		return -ENODEV;
 	}
 
-
 	spin_lock_irqsave(&ctx->clk_lock, flags);
 	if (handle->enabled) {
 		handle->enabled = false;
 		list_del_init(&handle->list);
-	}
-	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
-		if (!tmp->cmd_post_flush)
-			num_rdptr_vsync++;
-	}
-	if (!num_rdptr_vsync) {
-		ctx->vsync_enabled = 0;
-		ctx->rdptr_enabled = VSYNC_EXPIRE_TICK;
+
+		if (!handle->cmd_post_flush) {
+			if (ctx->vsync_enabled)
+				ctx->vsync_enabled--;
+			else
+				WARN(1, "unbalanced vsync disable");
+		}
 	}
 	spin_unlock_irqrestore(&ctx->clk_lock, flags);
 	return 0;
@@ -515,22 +571,22 @@
 		WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
 	}
 
-	mdss_mdp_cmd_set_partial_roi(ctl);
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	ctx->koff_cnt++;
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
 
 	mdss_mdp_cmd_clk_on(ctx);
 
+	mdss_mdp_cmd_set_partial_roi(ctl);
+
 	/*
 	 * tx dcs command if had any
 	 */
 	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_CMDLIST_KOFF,
 						(void *)&ctx->recovery);
-
 	INIT_COMPLETION(ctx->pp_comp);
 	mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
-	spin_lock_irqsave(&ctx->clk_lock, flags);
-	ctx->koff_cnt++;
-	spin_unlock_irqrestore(&ctx->clk_lock, flags);
 	mb();
 
 	return 0;
@@ -583,11 +639,14 @@
 	if (cancel_work_sync(&ctx->clk_work))
 		pr_debug("no pending clk work\n");
 
+	if (cancel_delayed_work_sync(&ctx->ulps_work))
+		pr_debug("deleted pending ulps work\n");
+
+	ctx->panel_on = 0;
 	mdss_mdp_cmd_clk_off(ctx);
 
 	flush_work(&ctx->pp_done_work);
 
-	ctx->panel_on = 0;
 
 	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
 				   NULL, NULL);
@@ -653,6 +712,7 @@
 	spin_lock_init(&ctx->clk_lock);
 	mutex_init(&ctx->clk_mtx);
 	INIT_WORK(&ctx->clk_work, clk_ctrl_work);
+	INIT_DELAYED_WORK(&ctx->ulps_work, __mdss_mdp_cmd_ulps_work);
 	INIT_WORK(&ctx->pp_done_work, pingpong_done_work);
 	atomic_set(&ctx->pp_done_cnt, 0);
 	INIT_LIST_HEAD(&ctx->vsync_handlers);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 55a4a4d..f8bdc04 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -298,6 +298,7 @@
 {
 	struct mdss_mdp_video_ctx *ctx;
 	struct mdss_mdp_vsync_handler *tmp, *handle;
+	struct mdss_mdp_ctl *sctl;
 	int rc;
 	u32 frame_rate = 0;
 
@@ -335,6 +336,10 @@
 
 		mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
 			ctl->intf_num);
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl)
+			mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
+				sctl->intf_num);
 	}
 
 	list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
@@ -641,6 +646,7 @@
 static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *sctl;
 	int rc;
 
 	pr_debug("kickoff ctl=%d\n", ctl->num);
@@ -674,6 +680,11 @@
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 		mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num);
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl)
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
+				sctl->intf_num);
+
 		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
 		wmb();
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 4a71a84..7628380 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -268,7 +268,7 @@
 	int rc;
 
 	for (;;) {
-		rc = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL);
+		rc = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL, true);
 
 		if (!rc && (perf.mdp_clk_rate <= mdata->max_mdp_clk_rate))
 			break;
@@ -321,10 +321,6 @@
 				rc, src, pipe->dst.h);
 		return rc;
 	}
-	pipe->scale.init_phase_x[0] = (pipe->scale.phase_step_x[0] -
-					(1 << PHASE_STEP_SHIFT)) / 2;
-	pipe->scale.init_phase_y[0] = (pipe->scale.phase_step_y[0] -
-					(1 << PHASE_STEP_SHIFT)) / 2;
 	return rc;
 }
 
@@ -770,17 +766,16 @@
 	LIST_HEAD(destroy_pipes);
 
 	mutex_lock(&mfd->lock);
-	__mdss_mdp_overlay_free_list_purge(mfd);
-
 	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup,
 				cleanup_list) {
 		list_move(&pipe->cleanup_list, &destroy_pipes);
+
+		/* make sure pipe fetch has been halted before freeing buffer */
 		mdss_mdp_pipe_fetch_halt(pipe);
-		mdss_mdp_overlay_free_buf(&pipe->back_buf);
-		__mdss_mdp_overlay_free_list_add(mfd, &pipe->front_buf);
-		pipe->mfd = NULL;
 	}
 
+	__mdss_mdp_overlay_free_list_purge(mfd);
+
 	list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
 		if (pipe->back_buf.num_planes) {
 			/* make back buffer active */
@@ -788,9 +783,20 @@
 			swap(pipe->back_buf, pipe->front_buf);
 		}
 	}
-	mutex_unlock(&mfd->lock);
-	list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
+
+	list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list) {
+		/*
+		 * In case of secure UI, the buffer needs to be released as
+		 * soon as the session is closed.
+		 */
+		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+			mdss_mdp_overlay_free_buf(&pipe->front_buf);
+		else
+			__mdss_mdp_overlay_free_list_add(mfd, &pipe->front_buf);
+		mdss_mdp_overlay_free_buf(&pipe->back_buf);
 		mdss_mdp_pipe_destroy(pipe);
+	}
+	mutex_unlock(&mfd->lock);
 }
 
 static void __mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
@@ -847,6 +853,11 @@
 	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
 
 	if (ctl->power_on) {
+		if (mdp5_data->mdata->ulps) {
+			mdss_mdp_footswitch_ctrl_ulps(1, &mfd->pdev->dev);
+			mdss_mdp_ctl_restore(ctl);
+		}
+
 		if (!mdp5_data->mdata->batfet)
 			mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
 		if (!mfd->panel_info->cont_splash_enabled)
@@ -969,6 +980,11 @@
 	int ret = 0;
 	int sd_in_pipe = 0;
 
+	if (!ctl) {
+		pr_warn("kickoff on fb=%d without a ctl attached\n", mfd->index);
+		return ret;
+	}
+
 	if (ctl->shared_lock)
 		mutex_lock(ctl->shared_lock);
 
@@ -1042,11 +1058,13 @@
 			pipe->mixer = mdss_mdp_mixer_get(tmp,
 					MDSS_MDP_MIXER_MUX_DEFAULT);
 		}
+
+		/* ensure pipes are always reconfigured after power off/on */
+		if (ctl->play_cnt == 0)
+			pipe->params_changed++;
+
 		if (pipe->back_buf.num_planes) {
 			buf = &pipe->back_buf;
-		} else if (ctl->play_cnt == 0 && pipe->front_buf.num_planes) {
-			pipe->params_changed++;
-			buf = &pipe->front_buf;
 		} else if (!pipe->params_changed) {
 			continue;
 		} else if (pipe->front_buf.num_planes) {
@@ -1767,7 +1785,7 @@
 	vsync_ticks = ktime_to_ns(mdp5_data->vsync_time);
 
 	pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
-	ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_ticks);
+	ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
 
 	return ret;
 }
@@ -2270,10 +2288,12 @@
 		struct mdp_overlay_list *ovlist,
 		struct mdp_overlay *overlays)
 {
+	struct mdss_mdp_pipe *right_plist[MDSS_MDP_MAX_STAGE] = { 0 };
+	struct mdss_mdp_pipe *left_plist[MDSS_MDP_MAX_STAGE] = { 0 };
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_mdp_pipe *pipe;
 	struct mdp_overlay *req;
-	int ret = 0;
+	int ret = 0, left_cnt = 0, right_cnt = 0;
 	int i;
 	u32 new_reqs = 0;
 
@@ -2302,8 +2322,29 @@
 		/* keep track of the new overlays to unset in case of errors */
 		if (pipe->play_cnt == 0)
 			new_reqs |= pipe->ndx;
+
+		if (pipe->flags & MDSS_MDP_RIGHT_MIXER) {
+			if (right_cnt >= MDSS_MDP_MAX_STAGE) {
+				pr_err("too many pipes on right mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			right_plist[right_cnt] = pipe;
+			right_cnt++;
+		} else {
+			if (left_cnt >= MDSS_MDP_MAX_STAGE) {
+				pr_err("too many pipes on left mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			left_plist[left_cnt] = pipe;
+			left_cnt++;
+		}
 	}
 
+	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+			right_plist, right_cnt);
+
 validate_exit:
 	if (IS_ERR_VALUE(ret))
 		mdss_mdp_overlay_release(mfd, new_reqs);
@@ -2663,6 +2704,56 @@
 	return 0;
 }
 
+static int __mdss_mdp_ctl_handoff(struct mdss_mdp_ctl *ctl,
+	struct mdss_data_type *mdata)
+{
+	int rc = 0;
+	int i, j;
+	u32 mixercfg;
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	if (!ctl || !mdata)
+		return -EINVAL;
+
+	for (i = 0; i < mdata->nmixers_intf; i++) {
+		mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
+		pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);
+
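+		/* each SSPP has a 3-bit stage field in the CTL_LAYER value */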
+		j = MDSS_MDP_SSPP_VIG0;
+		for (; j < MDSS_MDP_MAX_SSPP && mixercfg; j++) {
+			u32 cfg = j * 3;
+			if ((j == MDSS_MDP_SSPP_VIG3) ||
+			    (j == MDSS_MDP_SSPP_RGB3)) {
+				/* Add 2 to account for Cursor & Border bits */
+				cfg += 2;
+			}
+			if (mixercfg & (0x7 << cfg)) {
+				pr_debug("Pipe %d staged\n", j);
+				pipe = mdss_mdp_pipe_search(mdata, BIT(j));
+				if (!pipe) {
+					pr_warn("Invalid pipe %d staged\n", j);
+					continue;
+				}
+
+				rc = mdss_mdp_pipe_handoff(pipe);
+				if (rc) {
+					pr_err("Failed to handoff pipe%d\n",
+						pipe->num);
+					goto exit;
+				}
+
+				rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
+				if (rc) {
+					pr_err("failed to handoff mix%d\n", i);
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return rc;
+}
+
 /**
  * mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
  * @mfd: Msm frame buffer structure associated with the fb device.
@@ -2678,10 +2769,8 @@
 	int rc = 0;
 	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
-	int i, j;
-	u32 reg;
-	struct mdss_mdp_pipe *pipe = NULL;
 	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_ctl *sctl = NULL;
 
 	if (!mdp5_data->ctl) {
 		ctl = __mdss_mdp_overlay_ctl_init(mfd);
@@ -2705,38 +2794,23 @@
 	ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC);
 	pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);
 
-	for (i = 0; i < mdata->nmixers_intf; i++) {
-		reg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
-		pr_debug("for lm%d reg = 0x%09x\n", i, reg);
-		for (j = MDSS_MDP_SSPP_VIG0; j < MDSS_MDP_MAX_SSPP; j++) {
-			u32 cfg = j * 3;
-			if ((j == MDSS_MDP_SSPP_VIG3) ||
-				(j == MDSS_MDP_SSPP_RGB3)) {
-				/* Add 2 to account for Cursor & Border bits */
-				cfg += 2;
-			}
-			if (reg & (0x7 << cfg)) {
-				pr_debug("Pipe %d staged\n", j);
-				pipe = mdss_mdp_pipe_search(mdata, BIT(j));
-				if (!pipe) {
-					pr_warn("Invalid pipe %d staged\n", j);
-					continue;
-				}
+	rc = __mdss_mdp_ctl_handoff(ctl, mdata);
+	if (rc) {
+		pr_err("primary ctl handoff failed. rc=%d\n", rc);
+		goto error;
+	}
 
-				rc = mdss_mdp_pipe_handoff(pipe);
-				if (rc) {
-					pr_err("Failed to handoff pipe num %d\n"
-						, pipe->num);
-					goto error;
-				}
-
-				rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
-				if (rc) {
-					pr_err("failed to handoff mixer num %d\n"
-						, i);
-					goto error;
-				}
-			}
+	if (mfd->split_display) {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (!sctl) {
+			pr_err("cannot get secondary ctl. fail the handoff\n");
+			rc = -EPERM;
+			goto error;
+		}
+		rc = __mdss_mdp_ctl_handoff(sctl, mdata);
+		if (rc) {
+			pr_err("secondary ctl handoff failed. rc=%d\n", rc);
+			goto error;
 		}
 	}
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index b6f9b17..50bee17 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -28,6 +28,12 @@
 
 #define PIPE_HALT_TIMEOUT_US	0x4000
 
+/* following offsets are relative to ctrl register bit offset */
+#define CLK_FORCE_ON_OFFSET	0x0
+#define CLK_FORCE_OFF_OFFSET	0x1
+/* following offsets are relative to status register bit offset */
+#define CLK_STATUS_OFFSET	0x0
+
 static DEFINE_MUTEX(mdss_mdp_sspp_lock);
 static DEFINE_MUTEX(mdss_mdp_smp_lock);
 
@@ -210,7 +216,7 @@
 	struct mdss_mdp_plane_sizes ps;
 	int i;
 	int rc = 0, rot_mode = 0, wb_mixer = 0;
-	u32 nlines, format;
+	u32 nlines, format, seg_w;
 	u16 width;
 
 	width = pipe->src.w >> pipe->horz_deci;
@@ -222,19 +228,17 @@
 			return rc;
 		/*
 		 * Override fetch strides with SMP buffer size for both the
-		 * planes
+		 * planes. BWC line buffer needs to be divided into 16
+		 * segments and every segment is aligned to format
+		 * specific RAU size
 		 */
+		seg_w = DIV_ROUND_UP(pipe->src.w, 16);
 		if (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
-			/*
-			 * BWC line buffer needs to be divided into 16
-			 * segments and every segment is aligned to format
-			 * specific RAU size
-			 */
-			ps.ystride[0] = ALIGN(pipe->src.w / 16 , 32) * 16 *
-				ps.rau_h[0] * pipe->src_fmt->bpp;
+			ps.ystride[0] = ALIGN(seg_w, 32) * 16 * ps.rau_h[0] *
+					pipe->src_fmt->bpp;
 			ps.ystride[1] = 0;
 		} else {
-			u32 bwc_width = ALIGN(pipe->src.w / 16, 64) * 16;
+			u32 bwc_width = ALIGN(seg_w, 64) * 16;
 			ps.ystride[0] = bwc_width * ps.rau_h[0];
 			ps.ystride[1] = bwc_width * ps.rau_h[1];
 			/*
@@ -545,7 +549,7 @@
 	}
 
 	if (pipe && mdss_mdp_pipe_fetch_halt(pipe)) {
-		pr_err("%d failed because vbif client is in bad state\n",
+		pr_err("%d failed because pipe is in bad state\n",
 			pipe->num);
 		atomic_dec(&pipe->ref_cnt);
 		return NULL;
@@ -677,6 +681,7 @@
 
 	if (pipe->play_cnt) {
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		mdss_mdp_pipe_fetch_halt(pipe);
 		mdss_mdp_pipe_sspp_term(pipe);
 		mdss_mdp_smp_free(pipe);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
@@ -692,6 +697,53 @@
 	return 0;
 }
 
+static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe,
+	bool ignore_force_on)
+{
+	u32 reg_val;
+	u32 vbif_idle_mask, forced_on_mask, clk_status_idle_mask;
+	bool is_idle = false, is_forced_on;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+	forced_on_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_ON_OFFSET);
+	reg_val = readl_relaxed(mdata->mdp_base + pipe->clk_ctrl.reg_off);
+	is_forced_on = (reg_val & forced_on_mask) ? true : false;
+
+	pr_debug("pipe#:%d clk_ctrl: 0x%x forced_on_mask: 0x%x\n", pipe->num,
+		reg_val, forced_on_mask);
+	/* if forced on then no need to check status */
+	if (!is_forced_on) {
+		clk_status_idle_mask =
+			BIT(pipe->clk_status.bit_off + CLK_STATUS_OFFSET);
+		reg_val = readl_relaxed(mdata->mdp_base +
+			pipe->clk_status.reg_off);
+
+		if (reg_val & clk_status_idle_mask)
+			is_idle = false;
+
+		pr_debug("pipe#:%d clk_status:0x%x clk_status_idle_mask:0x%x\n",
+			pipe->num, reg_val, clk_status_idle_mask);
+	}
+
+	if (!ignore_force_on && (is_forced_on || !is_idle))
+		goto exit;
+
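+	/* per-xin idle status is in the upper 16 bits of HALT_CTRL1 */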
+	vbif_idle_mask = BIT(pipe->xin_id + 16);
+	reg_val = readl_relaxed(mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL1);
+
+	if (reg_val & vbif_idle_mask)
+		is_idle = true;
+
+	pr_debug("pipe#:%d XIN_HALT_CTRL1: 0x%x\n", pipe->num, reg_val);
+
+exit:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	return is_idle;
+}
+
 /**
  * mdss_mdp_pipe_fetch_halt() - Halt VBIF client corresponding to specified pipe
  * @pipe: pointer to the pipe data structure which needs to be halted.
@@ -711,18 +763,15 @@
 	u32 reg_val, idle_mask, status;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-
-	idle_mask = BIT(pipe->xin_id + 16);
-	reg_val = readl_relaxed(mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL1);
-
-	is_idle = (reg_val & idle_mask) ? true : false;
+	is_idle = mdss_mdp_is_pipe_idle(pipe, true);
 	if (!is_idle) {
-		pr_debug("%pS: pipe%d is not idle. xin_id=%d halt_ctrl1=0x%x\n",
-			__builtin_return_address(0), pipe->num, pipe->xin_id,
-			reg_val);
+		pr_err("%pS: pipe%d is not idle. xin_id=%d\n",
+			__builtin_return_address(0), pipe->num, pipe->xin_id);
 
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 		mutex_lock(&mdata->reg_lock);
+		idle_mask = BIT(pipe->xin_id + 16);
+
 		reg_val = readl_relaxed(mdata->vbif_base +
 			MMSS_VBIF_XIN_HALT_CTRL0);
 		writel_relaxed(reg_val | BIT(pipe->xin_id),
@@ -743,9 +792,10 @@
 			MMSS_VBIF_XIN_HALT_CTRL0);
 		writel_relaxed(reg_val & ~BIT(pipe->xin_id),
 			mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL0);
+
 		mutex_unlock(&mdata->reg_lock);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	}
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
 	return rc;
 }
@@ -1065,7 +1115,7 @@
 static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
 {
 	int ret;
-	u32 secure, format;
+	u32 secure, format, unpack;
 
 	pr_debug("solid fill setup on pnum=%d\n", pipe->num);
 
@@ -1078,9 +1128,13 @@
 	format = MDSS_MDP_FMT_SOLID_FILL;
 	secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
 
+	/* support ARGB color format only */
+	unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
+		(C1_B_Cb << 8) | (C0_G_Y << 0);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, format);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR,
 		pipe->bg_color);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
 
 	return 0;
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 5c9ad9c..37b71c7 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1306,19 +1306,19 @@
 		goto error;
 	}
 
+	mutex_lock(&hist_info->hist_mutex);
+	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	if (hist_info->col_en) {
 		*op |= op_flags;
-		mutex_lock(&hist_info->hist_mutex);
-		spin_lock_irqsave(&hist_info->hist_lock, flag);
 		col_state = hist_info->col_state;
 		if (col_state == HIST_IDLE) {
 			/* Kick off collection */
 			writel_relaxed(1, base + kick_base);
 			hist_info->col_state = HIST_START;
 		}
-		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
-		mutex_unlock(&hist_info->hist_mutex);
 	}
+	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+	mutex_unlock(&hist_info->hist_mutex);
 	ret = 0;
 error:
 	return ret;
@@ -1625,7 +1625,7 @@
  */
 int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
 {
-	u32 flags = 0, disp_num, bl;
+	u32 flags = 0, disp_num, bl, ret = 0;
 	struct pp_sts_type pp_sts;
 	struct mdss_ad_info *ad;
 	struct mdss_data_type *mdata = ctl->mdata;
@@ -1636,7 +1636,9 @@
 	disp_num = ctl->mfd->index;
 
 	if (dspp_num < mdata->nad_cfgs) {
-		ad = &mdata->ad_cfgs[dspp_num];
+		ret = mdss_mdp_get_ad(ctl->mfd, &ad);
+		if (ret)
+			return ret;
 
 		if (PP_AD_STATE_CFG & ad->state)
 			pp_ad_cfg_write(&mdata->ad_off[dspp_num], ad);
@@ -1748,6 +1750,8 @@
 					&mdss_pp_res->dspp_hist[i].hist_mutex);
 				spin_lock_init(
 					&mdss_pp_res->dspp_hist[i].hist_lock);
+				init_completion(
+					&mdss_pp_res->dspp_hist[i].comp);
 			}
 		}
 	}
@@ -1756,6 +1760,7 @@
 		for (i = 0; i < mdata->nvig_pipes; i++) {
 			mutex_init(&vig[i].pp_res.hist.hist_mutex);
 			spin_lock_init(&vig[i].pp_res.hist.hist_lock);
+			init_completion(&vig[i].pp_res.hist.comp);
 		}
 		if (!mdata->pp_bus_hdl) {
 			pp_bus_pdata = &mdp_pp_bus_scale_table;
@@ -2885,22 +2890,23 @@
 
 	mutex_lock(&hist_info->hist_mutex);
 	/* check if it is idle */
+	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	if (hist_info->col_en) {
+		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 		pr_info("%s Hist collection has already been enabled %d",
 			__func__, (u32) ctl_base);
 		ret = -EINVAL;
 		goto exit;
 	}
-	hist_info->frame_cnt = req->frame_cnt;
-	init_completion(&hist_info->comp);
-	hist_info->hist_cnt_read = 0;
-	hist_info->hist_cnt_sent = 0;
-	hist_info->hist_cnt_time = 0;
-	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	hist_info->read_request = 0;
 	hist_info->col_state = HIST_RESET;
 	hist_info->col_en = true;
 	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+	hist_info->frame_cnt = req->frame_cnt;
+	INIT_COMPLETION(hist_info->comp);
+	hist_info->hist_cnt_read = 0;
+	hist_info->hist_cnt_sent = 0;
+	hist_info->hist_cnt_time = 0;
 	mdss_mdp_hist_intr_req(&mdata->hist_intr, 3 << shift_bit, true);
 	writel_relaxed(req->frame_cnt, ctl_base + 8);
 	/* Kick out reset start */
@@ -3011,17 +3017,18 @@
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 
 	mutex_lock(&hist_info->hist_mutex);
+	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	if (hist_info->col_en == false) {
+		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 		pr_debug("Histogram already disabled (%d)", (u32) ctl_base);
 		ret = -EINVAL;
 		goto exit;
 	}
-	complete_all(&hist_info->comp);
-	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	hist_info->col_en = false;
 	hist_info->col_state = HIST_UNKNOWN;
 	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 	mdss_mdp_hist_intr_req(&mdata->hist_intr, done_bit, false);
+	complete_all(&hist_info->comp);
 	writel_relaxed(BIT(1), ctl_base);/* cancel */
 	ret = 0;
 exit:
@@ -3262,12 +3269,13 @@
 	struct mdss_mdp_pipe *pipe;
 
 	mutex_lock(&hist_info->hist_mutex);
+	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	if ((hist_info->col_en == 0) ||
 			(hist_info->col_state == HIST_UNKNOWN)) {
+		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 		ret = -EINVAL;
 		goto hist_collect_exit;
 	}
-	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	/* wait for hist done if cache has no data */
 	if (hist_info->col_state != HIST_READY) {
 		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
@@ -3283,9 +3291,9 @@
 				&(hist_info->comp), timeout);
 
 		mutex_lock(&hist_info->hist_mutex);
+		spin_lock_irqsave(&hist_info->hist_lock, flag);
 		if (wait_ret == 0) {
 			ret = -ETIMEDOUT;
-			spin_lock_irqsave(&hist_info->hist_lock, flag);
 			pr_debug("bin collection timedout, state %d",
 					hist_info->col_state);
 			/*
@@ -3300,37 +3308,33 @@
 			 */
 			hist_info->hist_cnt_time++;
 			hist_info->col_state = HIST_READY;
-			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 		} else if (wait_ret < 0) {
+			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 			ret = -EINTR;
 			pr_debug("%s: bin collection interrupted",
 					__func__);
 			goto hist_collect_exit;
 		}
-		if (hist_info->col_state != HIST_READY) {
+		if (hist_info->col_state != HIST_READY &&
+				hist_info->col_state != HIST_UNKNOWN) {
 			ret = -ENODATA;
-			spin_lock_irqsave(&hist_info->hist_lock, flag);
 			hist_info->col_state = HIST_READY;
-			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 			pr_debug("%s: state is not ready: %d",
 					__func__, hist_info->col_state);
 		}
-	} else {
-		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 	}
-	spin_lock_irqsave(&hist_info->hist_lock, flag);
 	if (hist_info->col_state == HIST_READY) {
+		hist_info->col_state = HIST_IDLE;
 		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 		v_base = ctl_base + 0x1C;
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 		sum = pp_hist_read(v_base, hist_info);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-		spin_lock_irqsave(&hist_info->hist_lock, flag);
 		if (expect_sum && sum != expect_sum)
 			ret = -ENODATA;
-		hist_info->col_state = HIST_IDLE;
+	} else {
+		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 	}
-	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
 hist_collect_exit:
 	mutex_unlock(&hist_info->hist_mutex);
 	return ret;
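
The histogram hunks above converge on one locking discipline: take hist_mutex for the slow path, then read or update col_en and col_state only while hist_lock is held, and drop the spinlock before any call that can sleep. A minimal sketch of that pattern, assuming the driver's histogram bookkeeping struct (pp_hist_col_info); the helper name is illustrative, not part of the patch:

static int pp_hist_set_state(struct pp_hist_col_info *hist_info,
			     u32 new_state)
{
	unsigned long flag;
	int ret = 0;

	mutex_lock(&hist_info->hist_mutex);
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	if (!hist_info->col_en)
		ret = -EINVAL;	/* collection not running */
	else
		hist_info->col_state = new_state;
	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
	mutex_unlock(&hist_info->hist_mutex);
	return ret;
}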
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 274c523..135a00a 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -127,6 +127,11 @@
 				 - 1 clock enable
  * @MDSS_EVENT_ENABLE_PARTIAL_UPDATE: Event to update ROI of the panel.
  * @MDSS_EVENT_DSI_CMDLIST_KOFF: acquire dsi_mdp_busy lock before kickoff.
+ * @MDSS_EVENT_DSI_ULPS_CTRL:	Event to configure Ultra Low Power State
+ *				(ULPS) on the DSI data and clock lanes. The
+ *				event arguments can have one of these values:
+ *				- 0: Disable ULPS mode
+ *				- 1: Enable ULPS mode
  */
 enum mdss_intf_events {
 	MDSS_EVENT_RESET = 1,
@@ -145,6 +150,7 @@
 	MDSS_EVENT_PANEL_CLK_CTRL,
 	MDSS_EVENT_DSI_CMDLIST_KOFF,
 	MDSS_EVENT_ENABLE_PARTIAL_UPDATE,
+	MDSS_EVENT_DSI_ULPS_CTRL,
 };
 
 struct lcd_panel_info {
@@ -300,6 +306,7 @@
 	int pwm_period;
 	u32 mode_gpio_state;
 	bool dynamic_fps;
+	bool ulps_feature_enabled;
 	char dfps_update;
 	int new_fps;
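
A hedged sketch of how a DSI controller driver would service the new event; mdss_dsi_event_handler() and mdss_dsi_ulps_config() are illustrative names, only the event code and its 0/1 argument come from this patch:

static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
				  int event, void *arg)
{
	int rc = 0;

	switch (event) {
	case MDSS_EVENT_DSI_ULPS_CTRL:
		/* arg is 1 to enter ULPS, 0 to exit, per the comment above */
		rc = mdss_dsi_ulps_config(pdata, (int)(unsigned long)arg);
		break;
	default:
		break;
	}

	return rc;
}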
 
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
index 6ebcdf6..834ef66 100644
--- a/drivers/video/msm/mdss/msm_mdss_io_8974.c
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -64,6 +64,16 @@
 		goto mdss_dsi_clk_err;
 	}
 
+	if (ctrl_pdata->panel_data.panel_info.type == MIPI_CMD_PANEL) {
+		ctrl_pdata->mmss_misc_ahb_clk = clk_get(dev, "core_mmss_clk");
+		if (IS_ERR(ctrl_pdata->mmss_misc_ahb_clk)) {
+			rc = PTR_ERR(ctrl_pdata->mmss_misc_ahb_clk);
+			pr_err("%s: Unable to get mmss misc ahb clk. rc=%d\n",
+				__func__, rc);
+			goto mdss_dsi_clk_err;
+		}
+	}
+
 	ctrl_pdata->byte_clk = clk_get(dev, "byte_clk");
 	if (IS_ERR(ctrl_pdata->byte_clk)) {
 		rc = PTR_ERR(ctrl_pdata->byte_clk);
@@ -105,6 +115,8 @@
 		clk_put(ctrl_pdata->esc_clk);
 	if (ctrl_pdata->pixel_clk)
 		clk_put(ctrl_pdata->pixel_clk);
+	if (ctrl_pdata->mmss_misc_ahb_clk)
+		clk_put(ctrl_pdata->mmss_misc_ahb_clk);
 	if (ctrl_pdata->axi_clk)
 		clk_put(ctrl_pdata->axi_clk);
 	if (ctrl_pdata->ahb_clk)
@@ -247,7 +259,7 @@
 	return 0;
 }
 
-int mdss_dsi_bus_clk_start(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static int mdss_dsi_bus_clk_start(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
 	int rc = 0;
 
@@ -275,12 +287,26 @@
 		goto error;
 	}
 
+	if (ctrl_pdata->mmss_misc_ahb_clk) {
+		rc = clk_prepare_enable(ctrl_pdata->mmss_misc_ahb_clk);
+		if (rc) {
+			pr_err("%s: failed to enable mmss misc ahb clk. rc=%d\n",
+				__func__, rc);
+			clk_disable_unprepare(ctrl_pdata->axi_clk);
+			clk_disable_unprepare(ctrl_pdata->ahb_clk);
+			clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
+			goto error;
+		}
+	}
+
 error:
 	return rc;
 }
 
-void mdss_dsi_bus_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static void mdss_dsi_bus_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
+	if (ctrl_pdata->mmss_misc_ahb_clk)
+		clk_disable_unprepare(ctrl_pdata->mmss_misc_ahb_clk);
 	clk_disable_unprepare(ctrl_pdata->axi_clk);
 	clk_disable_unprepare(ctrl_pdata->ahb_clk);
 	clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
@@ -382,11 +408,6 @@
 
 	pr_debug("%s: ndx=%d\n", __func__, ctrl_pdata->ndx);
 
-	if (ctrl_pdata->mdss_dsi_clk_on) {
-		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
-		return 0;
-	}
-
 	rc = clk_enable(ctrl_pdata->esc_clk);
 	if (rc) {
 		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -405,8 +426,6 @@
 		goto pixel_clk_err;
 	}
 
-	ctrl_pdata->mdss_dsi_clk_on = 1;
-
 	return rc;
 
 pixel_clk_err:
@@ -426,19 +445,12 @@
 
 	pr_debug("%s: ndx=%d\n", __func__, ctrl_pdata->ndx);
 
-	if (ctrl_pdata->mdss_dsi_clk_on == 0) {
-		pr_info("%s: mdss_dsi_clks already OFF\n", __func__);
-		return;
-	}
-
 	clk_disable(ctrl_pdata->esc_clk);
 	clk_disable(ctrl_pdata->pixel_clk);
 	clk_disable(ctrl_pdata->byte_clk);
-
-	ctrl_pdata->mdss_dsi_clk_on = 0;
 }
 
-int mdss_dsi_link_clk_start(struct mdss_dsi_ctrl_pdata *ctrl)
+static int mdss_dsi_link_clk_start(struct mdss_dsi_ctrl_pdata *ctrl)
 {
 	int rc = 0;
 
@@ -468,80 +480,191 @@
 	return rc;
 }
 
-void mdss_dsi_link_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl)
+static void mdss_dsi_link_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl)
 {
 	mdss_dsi_link_clk_disable(ctrl);
 	mdss_dsi_link_clk_unprepare(ctrl);
 }
 
-static void mdss_dsi_clk_ctrl_sub(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+static int __mdss_dsi_update_clk_cnt(u32 *clk_cnt, int enable)
 {
 	int changed = 0;
 
 	if (enable) {
-		if (ctrl->clk_cnt_sub == 0)
+		if (*clk_cnt == 0)
 			changed++;
-		ctrl->clk_cnt_sub++;
+		(*clk_cnt)++;
 	} else {
-		if (ctrl->clk_cnt_sub) {
-			ctrl->clk_cnt_sub--;
-			if (ctrl->clk_cnt_sub == 0)
+		if (*clk_cnt != 0) {
+			(*clk_cnt)--;
+			if (*clk_cnt == 0)
 				changed++;
 		} else {
-			pr_debug("%s: Can not be turned off\n", __func__);
+			pr_debug("%s: clk cnt already zero\n", __func__);
 		}
 	}
 
-	pr_debug("%s: ndx=%d clk_cnt_sub=%d changed=%d enable=%d\n",
-		__func__, ctrl->ndx, ctrl->clk_cnt_sub, changed, enable);
-	if (changed) {
-		if (enable) {
-			if (mdss_dsi_bus_clk_start(ctrl) == 0)
-				mdss_dsi_link_clk_start(ctrl);
-		} else {
-			mdss_dsi_link_clk_stop(ctrl);
-			mdss_dsi_bus_clk_stop(ctrl);
+	return changed;
+}
+
+static int mdss_dsi_clk_ctrl_sub(struct mdss_dsi_ctrl_pdata *ctrl,
+	u8 clk_type, int enable)
+{
+	int rc = 0;
+
+	pr_debug("%s: ndx=%d clk_type=%08x enable=%d\n", __func__,
+		ctrl->ndx, clk_type, enable);
+
+	if (enable) {
+		if (clk_type & DSI_BUS_CLKS) {
+			rc = mdss_dsi_bus_clk_start(ctrl);
+			if (rc) {
+				pr_err("Failed to start bus clocks. rc=%d\n",
+					rc);
+				goto error;
+			}
 		}
+		if (clk_type & DSI_LINK_CLKS) {
+			rc = mdss_dsi_link_clk_start(ctrl);
+			if (rc) {
+				pr_err("Failed to start link clocks. rc=%d\n",
+					rc);
+				if (clk_type & DSI_BUS_CLKS)
+					mdss_dsi_bus_clk_stop(ctrl);
+				goto error;
+			}
+		}
+	} else {
+		if (clk_type & DSI_LINK_CLKS)
+			mdss_dsi_link_clk_stop(ctrl);
+		if (clk_type & DSI_BUS_CLKS)
+			mdss_dsi_bus_clk_stop(ctrl);
 	}
+
+error:
+	return rc;
 }
 
 static DEFINE_MUTEX(dsi_clk_lock); /* per system */
 
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl, u8 clk_type)
 {
-	int changed = 0;
-	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+	bool bus_enabled = true;
+	bool link_enabled = true;
 
 	mutex_lock(&dsi_clk_lock);
-	if (enable) {
-		if (ctrl->clk_cnt == 0)
-			changed++;
-		ctrl->clk_cnt++;
-	} else {
-		if (ctrl->clk_cnt) {
-			ctrl->clk_cnt--;
-			if (ctrl->clk_cnt == 0)
-				changed++;
-		} else {
-			pr_debug("%s: Can not be turned off\n", __func__);
-		}
+	if (clk_type & DSI_BUS_CLKS)
+		bus_enabled = ctrl->bus_clk_cnt ? true : false;
+	if (clk_type & DSI_LINK_CLKS)
+		link_enabled = ctrl->link_clk_cnt ? true : false;
+	mutex_unlock(&dsi_clk_lock);
+
+	return bus_enabled && link_enabled;
+}
+
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
+	u8 clk_type, int enable)
+{
+	int rc = 0;
+	int changed = 0, m_changed = 0;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid arg\n", __func__);
+		return -EINVAL;
 	}
 
-	pr_debug("%s: ndx=%d clk_cnt=%d changed=%d enable=%d\n",
-		__func__, ctrl->ndx, ctrl->clk_cnt, changed, enable);
-	if (ctrl->flags & DSI_FLAG_CLOCK_MASTER)
-		sctrl = mdss_dsi_ctrl_slave(ctrl);
+	/*
+	 * In broadcast mode, we need to enable clocks for the
+	 * master controller as well when enabling clocks for the
+	 * slave controller
+	 */
+	if (mdss_dsi_is_slave_ctrl(ctrl)) {
+		mctrl = mdss_dsi_get_master_ctrl();
+		if (!mctrl)
+			pr_warn("%s: Unable to get master control\n", __func__);
+	}
+
+	pr_debug("%s++: ndx=%d clk_type=%d bus_clk_cnt=%d link_clk_cnt=%d\n",
+		__func__, ctrl->ndx, clk_type, ctrl->bus_clk_cnt,
+		ctrl->link_clk_cnt);
+	pr_debug("%s++: mctrl=%s m_bus_clk_cnt=%d m_link_clk_cnt=%d, enable=%d\n",
+		__func__, mctrl ? "yes" : "no", mctrl ? mctrl->bus_clk_cnt : -1,
+		mctrl ? mctrl->link_clk_cnt : -1, enable);
+
+	mutex_lock(&dsi_clk_lock);
+	if (clk_type & DSI_BUS_CLKS) {
+		changed = __mdss_dsi_update_clk_cnt(&ctrl->bus_clk_cnt,
+			enable);
+		if (changed && mctrl)
+			m_changed = __mdss_dsi_update_clk_cnt(
+				&mctrl->bus_clk_cnt, enable);
+	}
+
+	if (clk_type & DSI_LINK_CLKS) {
+		changed += __mdss_dsi_update_clk_cnt(&ctrl->link_clk_cnt,
+			enable);
+		if (changed && mctrl)
+			m_changed += __mdss_dsi_update_clk_cnt(
+				&mctrl->link_clk_cnt, enable);
+	}
 
 	if (changed) {
-		if (enable && sctrl)
-			mdss_dsi_clk_ctrl_sub(sctrl, enable);
+		if (enable && m_changed) {
+			rc = mdss_dsi_clk_ctrl_sub(mctrl, clk_type, enable);
+			if (rc) {
+				pr_err("Failed to start mctrl clocks. rc=%d\n",
+					rc);
+				goto error_mctrl_start;
+			}
+		}
 
-		mdss_dsi_clk_ctrl_sub(ctrl, enable);
+		rc = mdss_dsi_clk_ctrl_sub(ctrl, clk_type, enable);
+		if (rc) {
+			pr_err("Failed to %s ctrl clocks. rc=%d\n",
+				(enable ? "start" : "stop"), rc);
+			goto error_ctrl;
+		}
 
-		if (!enable && sctrl)
-			mdss_dsi_clk_ctrl_sub(sctrl, enable);
+		if (!enable && m_changed) {
+			rc = mdss_dsi_clk_ctrl_sub(mctrl, clk_type, enable);
+			if (rc) {
+				pr_err("Failed to stop mctrl clocks. rc=%d\n",
+					rc);
+				goto error_mctrl_stop;
+			}
+		}
 	}
+	goto no_error;
+
+error_mctrl_stop:
+	mdss_dsi_clk_ctrl_sub(ctrl, clk_type, enable ? 0 : 1);
+error_ctrl:
+	if (enable && m_changed)
+		mdss_dsi_clk_ctrl_sub(mctrl, clk_type, 0);
+error_mctrl_start:
+	if (clk_type & DSI_BUS_CLKS) {
+		if (mctrl)
+			__mdss_dsi_update_clk_cnt(&mctrl->bus_clk_cnt,
+				enable ? 0 : 1);
+		__mdss_dsi_update_clk_cnt(&ctrl->bus_clk_cnt, enable ? 0 : 1);
+	}
+	if (clk_type & DSI_LINK_CLKS) {
+		if (mctrl)
+			__mdss_dsi_update_clk_cnt(&mctrl->link_clk_cnt,
+				enable ? 0 : 1);
+		__mdss_dsi_update_clk_cnt(&ctrl->link_clk_cnt, enable ? 0 : 1);
+	}
+
+no_error:
 	mutex_unlock(&dsi_clk_lock);
+	pr_debug("%s--: ndx=%d clk_type=%d bus_clk_cnt=%d link_clk_cnt=%d changed=%d\n",
+		__func__, ctrl->ndx, clk_type, ctrl->bus_clk_cnt,
+		ctrl->link_clk_cnt, changed);
+	pr_debug("%s--: mctrl=%s m_bus_clk_cnt=%d m_link_clk_cnt=%d, m_changed=%d, enable=%d\n",
+		__func__, mctrl ? "yes" : "no", mctrl ? mctrl->bus_clk_cnt : -1,
+		mctrl ? mctrl->link_clk_cnt : -1, m_changed, enable);
+
+	return rc;
 }
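
A typical caller of the reworked API, shown as a sketch: bus clocks alone are enough for AHB register access, while actual DSI traffic needs bus and link clocks together. The surrounding function is hypothetical; only mdss_dsi_clk_ctrl() and the DSI_BUS_CLKS/DSI_LINK_CLKS types come from this patch:

static int dsi_reg_access_example(struct mdss_dsi_ctrl_pdata *ctrl)
{
	int rc;

	rc = mdss_dsi_clk_ctrl(ctrl, DSI_BUS_CLKS, 1);
	if (rc)
		return rc;

	/* ... AHB register reads/writes go here ... */

	return mdss_dsi_clk_ctrl(ctrl, DSI_BUS_CLKS, 0);
}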
 
 void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base)
@@ -558,29 +681,39 @@
 
 void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl)
 {
-	static struct mdss_dsi_ctrl_pdata *left_ctrl;
+	struct mdss_dsi_ctrl_pdata *ctrl0 = NULL;
 
 	if (ctrl == NULL) {
 		pr_err("%s: Invalid input data\n", __func__);
 		return;
 	}
 
-	if (left_ctrl &&
-			(ctrl->panel_data.panel_info.pdest == DISPLAY_1))
+	/*
+	 * In dual-dsi configuration, the phy should be disabled for the
+	 * first controller only when the second controller is disabled.
+	 * This is true regardless of whether broadcast mode is enabled
+	 * or not.
+	 */
+	if ((ctrl->ndx == DSI_CTRL_0) &&
+		mdss_dsi_get_ctrl_by_index(DSI_CTRL_1)) {
+		pr_debug("%s: Dual dsi detected. skipping config for ctrl%d\n",
+			__func__, ctrl->ndx);
 		return;
-
-	if (left_ctrl &&
-			(ctrl->panel_data.panel_info.pdest
-			 ==
-			 DISPLAY_2)) {
-		MIPI_OUTP(left_ctrl->ctrl_base + 0x0470,
-				0x000);
-		MIPI_OUTP(left_ctrl->ctrl_base + 0x0598,
-				0x000);
 	}
 
-	MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x000);
-	MIPI_OUTP(ctrl->ctrl_base + 0x0598, 0x000);
+	if (ctrl->ndx == DSI_CTRL_1) {
+		ctrl0 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_0);
+		if (ctrl0) {
+			MIPI_OUTP(ctrl0->phy_io.base + 0x0170, 0x000);
+			MIPI_OUTP(ctrl0->phy_io.base + 0x0298, 0x000);
+		} else {
+			pr_warn("%s: Unable to get control%d\n",
+				__func__, DSI_CTRL_0);
+		}
+	}
+
+	MIPI_OUTP(ctrl->phy_io.base + 0x0170, 0x000);
+	MIPI_OUTP(ctrl->phy_io.base + 0x0298, 0x000);
 
 	/*
 	 * Wait for the registers writes to complete in order to
@@ -593,7 +726,7 @@
 {
 	struct mdss_dsi_phy_ctrl *pd;
 	int i, off, ln, offset;
-	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL, *temp_ctrl = NULL;
 
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
@@ -601,67 +734,75 @@
 		pr_err("%s: Invalid input data\n", __func__);
 		return;
 	}
+	temp_ctrl = ctrl_pdata;
 
 	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
 
 	/* Strength ctrl 0 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0484, pd->strength[0]);
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0184, pd->strength[0]);
 
-	/* phy regulator ctrl settings. Both the DSI controller
-	   have one regulator */
-	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
-		off = 0x0580;
-	else
-		off = 0x0580 - 0x600;
+	/*
+	 * Phy regulator ctrl settings.
+	 * In dual dsi configuration, the second controller also uses
+	 * the regulators of the first controller, irrespective of whether
+	 * broadcast mode is enabled or not.
+	 */
+	if (ctrl_pdata->ndx == DSI_CTRL_1) {
+		temp_ctrl = mdss_dsi_get_ctrl_by_index(DSI_CTRL_0);
+		if (!temp_ctrl) {
+			pr_err("%s: Unable to get master ctrl\n", __func__);
+			return;
+		}
+	}
 
 	/* Regulator ctrl 0 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 0), 0x0);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x280, 0x0);
 	/* Regulator ctrl - CAL_PWR_CFG */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 6), pd->regulator[6]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x298, pd->regulator[6]);
 
 	/* Regulator ctrl - TEST */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 5), pd->regulator[5]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x294, pd->regulator[5]);
 	/* Regulator ctrl 3 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 3), pd->regulator[3]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x28c, pd->regulator[3]);
 	/* Regulator ctrl 2 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 2), pd->regulator[2]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x288, pd->regulator[2]);
 	/* Regulator ctrl 1 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 1), pd->regulator[1]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x284, pd->regulator[1]);
 	/* Regulator ctrl 0 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 0), pd->regulator[0]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x280, pd->regulator[0]);
 	/* Regulator ctrl 4 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 4), pd->regulator[4]);
+	MIPI_OUTP((temp_ctrl->phy_io.base) + 0x290, pd->regulator[4]);
 
 	/* LDO ctrl 0 */
 	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x4dc, 0x00);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x00);
 	else
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x4dc, 0x00);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x00);
 
-	off = 0x0440;	/* phy timing ctrl 0 - 11 */
+	off = 0x0140;	/* phy timing ctrl 0 - 11 */
 	for (i = 0; i < 12; i++) {
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + off, pd->timing[i]);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + off, pd->timing[i]);
 		wmb();
 		off += 4;
 	}
 
 	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_1 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0474, 0x00);
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0174, 0x00);
 	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0470, 0x5f);
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0170, 0x5f);
 	wmb();
 
 	/* Strength ctrl 1 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0488, pd->strength[1]);
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0188, pd->strength[1]);
 	wmb();
 
 	/* 4 lanes + clk lane configuration */
 	/* lane config n * (0 - 4) & DataPath setup */
 	for (ln = 0; ln < 5; ln++) {
-		off = 0x0300 + (ln * 0x40);
+		off = (ln * 0x40);
 		for (i = 0; i < 9; i++) {
 			offset = i + (ln * 9);
-			MIPI_OUTP((ctrl_pdata->ctrl_base) + off,
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + off,
 							pd->lanecfg[offset]);
 			wmb();
 			off += 4;
@@ -669,19 +810,19 @@
 	}
 
 	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0470, 0x5f);
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0170, 0x5f);
 	wmb();
 
 	/* DSI_0_PHY_DSIPHY_GLBL_TEST_CTRL */
 	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04d4, 0x01);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x01);
 	else
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04d4, 0x00);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x00);
 	wmb();
 
-	off = 0x04b4;	/* phy BIST ctrl 0 - 5 */
+	off = 0x01b4;	/* phy BIST ctrl 0 - 5 */
 	for (i = 0; i < 6; i++) {
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + off, pd->bistctrl[i]);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + off, pd->bistctrl[i]);
 		wmb();
 		off += 4;
 	}
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 70b3eef..3ab25c3 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -51,10 +51,10 @@
  * bound (greatest lower bound)
  */
 #define DEVFREQ_FLAG_LEAST_UPPER_BOUND		0x1
-#define DEVFREQ_FLAG_WAKEUP_MAXFREQ		0x2
 
-#define DEVFREQ_FLAG_FAST_HINT	0x2
-#define DEVFREQ_FLAG_SLOW_HINT	0x4
+#define DEVFREQ_FLAG_FAST_HINT			0x2
+#define DEVFREQ_FLAG_SLOW_HINT			0x4
+#define DEVFREQ_FLAG_WAKEUP_MAXFREQ		0x8
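
The reshuffle matters because the old DEVFREQ_FLAG_WAKEUP_MAXFREQ value collided with DEVFREQ_FLAG_FAST_HINT; a short worked comparison (not part of the patch):

/*
 * Before: WAKEUP_MAXFREQ = 0x2 and FAST_HINT = 0x2, so a governor testing
 *         (flags & DEVFREQ_FLAG_FAST_HINT) also fired for a plain
 *         wakeup-to-max request.
 * After:  LEAST_UPPER_BOUND = 0x1, FAST_HINT = 0x2, SLOW_HINT = 0x4 and
 *         WAKEUP_MAXFREQ = 0x8 are four distinct bits that can be OR'd
 *         together and tested independently.
 */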
 
 /**
  * struct devfreq_governor_data - mapping to per device governor data
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 272fe77..8b604e3 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -94,6 +94,7 @@
 	u8			raw_erased_mem_count;	/* 181 */
 	u8			raw_ext_csd_structure;	/* 194 */
 	u8			raw_card_type;		/* 196 */
+	u8			raw_drive_strength;	/* 197 */
 	u8			out_of_int_time;	/* 198 */
 	u8			raw_s_a_timeout;		/* 217 */
 	u8			raw_hc_erase_gap_size;	/* 221 */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bff056d..e53c350 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -197,6 +197,12 @@
 	void *handler_priv;
 };
 
+enum dev_state {
+	DEV_SUSPENDING = 1,
+	DEV_SUSPENDED,
+	DEV_RESUMED,
+};
+
 struct mmc_host {
 	struct device		*parent;
 	struct device		class_dev;
@@ -421,6 +427,7 @@
 		struct delayed_work work;
 		enum mmc_load	state;
 	} clk_scaling;
+	enum dev_state dev_status;
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 764beec..b626915 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -303,6 +303,7 @@
 #define EXT_CSD_REV			192	/* RO */
 #define EXT_CSD_STRUCTURE		194	/* RO */
 #define EXT_CSD_CARD_TYPE		196	/* RO */
+#define EXT_CSD_DRIVE_STRENGTH		197	/* RO */
 #define EXT_CSD_OUT_OF_INTERRUPT_TIME	198	/* RO */
 #define EXT_CSD_PART_SWITCH_TIME        199     /* RO */
 #define EXT_CSD_PWR_CL_52_195		200	/* RO */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 08ca341..65a3fe1 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -160,6 +160,11 @@
  */
 #define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 9)
 
+/*
+ * Some SDHC controllers are unable to handle data-end bit error in
+ * 1-bit mode of SDIO.
+ */
+#define SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR             (1 << 10)
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 
@@ -252,6 +257,9 @@
 	struct mutex ios_mutex;
 	enum sdhci_power_policy power_policy;
 
+	bool irq_enabled; /* host irq status flag */
+	bool async_int_supp;  /* can receive async int when clks are off */
+	bool disable_sdio_irq_deferred; /* status of disabling sdio irq */
 	u32 auto_cmd_err_sts;
 	unsigned long private[0] ____cacheline_aligned;
 };
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
index b2229d3..9368d8f 100644
--- a/include/linux/msm_ipa.h
+++ b/include/linux/msm_ipa.h
@@ -64,6 +64,11 @@
 #define IPA_RESOURCE_NAME_MAX 20
 
 /**
+ * max number of interface properties
+ */
+#define IPA_NUM_PROPS_MAX 20
+
+/**
  * size of the mac address
  */
 #define IPA_MAC_ADDR_SIZE  6
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index e332368..3ec92e6 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -85,6 +85,30 @@
 #define MDP_IMGTYPE2_START 0x10000
 #define MSMFB_DRIVER_VERSION	0xF9E8D701
 
+/* HW Revisions for different MDSS targets */
+#define MDSS_GET_MAJOR(rev)		((rev) >> 28)
+#define MDSS_GET_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define MDSS_GET_STEP(rev)		((rev) & 0xFFFF)
+#define MDSS_GET_MAJOR_MINOR(rev)	((rev) >> 16)
+
+#define IS_MDSS_MAJOR_MINOR_SAME(rev1, rev2)	\
+	(MDSS_GET_MAJOR_MINOR((rev1)) == MDSS_GET_MAJOR_MINOR((rev2)))
+
+#define MDSS_MDP_REV(major, minor, step)	\
+	((((major) & 0x000F) << 28) |		\
+	 (((minor) & 0x0FFF) << 16) |		\
+	 ((step)   & 0xFFFF))
+
+#define MDSS_MDP_HW_REV_100	MDSS_MDP_REV(1, 0, 0) /* 8974 v1.0 */
+#define MDSS_MDP_HW_REV_101	MDSS_MDP_REV(1, 1, 0) /* 8x26 v1.0 */
+#define MDSS_MDP_HW_REV_101_1	MDSS_MDP_REV(1, 1, 1) /* 8x26 v2.0, 8926 v1.0 */
+#define MDSS_MDP_HW_REV_101_2	MDSS_MDP_REV(1, 1, 2) /* 8926 v2.0 */
+#define MDSS_MDP_HW_REV_102	MDSS_MDP_REV(1, 2, 0) /* 8974 v2.0 */
+#define MDSS_MDP_HW_REV_102_1	MDSS_MDP_REV(1, 2, 1) /* 8974 v3.0 (Pro) */
+#define MDSS_MDP_HW_REV_103	MDSS_MDP_REV(1, 3, 0) /* 8084 v1.0 */
+#define MDSS_MDP_HW_REV_103_1	MDSS_MDP_REV(1, 3, 1) /* 8084 v1.1 */
+#define MDSS_MDP_HW_REV_200	MDSS_MDP_REV(2, 0, 0) /* 8092 v1.0 */
+
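A worked example of the encoding, using the 8974 v3.0 entry (all values follow directly from the macros above):

/*
 * MDSS_MDP_HW_REV_102_1 = MDSS_MDP_REV(1, 2, 1)
 *                       = (1 << 28) | (2 << 16) | 1 = 0x10020001
 * MDSS_GET_MAJOR(0x10020001)       = 1
 * MDSS_GET_MINOR(0x10020001)       = 2
 * MDSS_GET_STEP(0x10020001)        = 1
 * MDSS_GET_MAJOR_MINOR(0x10020001) = 0x1002, so 8974 v2.0 (0x10020000) and
 * v3.0 (0x10020001) compare equal under IS_MDSS_MAJOR_MINOR_SAME().
 */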
 enum {
 	NOTIFY_UPDATE_START,
 	NOTIFY_UPDATE_STOP,
diff --git a/include/linux/qpnp-revid.h b/include/linux/qpnp-revid.h
index 3cf9f1c..3d271f0 100644
--- a/include/linux/qpnp-revid.h
+++ b/include/linux/qpnp-revid.h
@@ -13,6 +13,13 @@
 #ifndef __QPNP_REVID
 #define __QPNP_REVID
 
+#define PM8226_V2P2_REV1	0x00
+#define PM8226_V2P2_REV2	0x00
+#define PM8226_V2P2_REV3	0x02
+#define PM8226_V2P2_REV4	0x02
+#define PM8226_V2P2_TYPE	0x51
+#define PM8226_V2P2_SUBTYPE	0x04
+
 #define PM8226_V2P1_REV1	0x00
 #define PM8226_V2P1_REV2	0x00
 #define PM8226_V2P1_REV3	0x01
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 13eb461..7ba4148 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -20,6 +20,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/qpnp-revid.h>
 /**
  * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
  */
@@ -1002,15 +1003,27 @@
  * @chan_prop - Represent the channel properties of the ADC.
  */
 struct qpnp_adc_amux_properties {
-	uint32_t			amux_channel;
-	uint32_t			decimation;
-	uint32_t			mode_sel;
-	uint32_t			hw_settle_time;
-	uint32_t			fast_avg_setup;
-	enum qpnp_vadc_trigger		trigger_channel;
+	uint32_t				amux_channel;
+	uint32_t				decimation;
+	uint32_t				mode_sel;
+	uint32_t				hw_settle_time;
+	uint32_t				fast_avg_setup;
+	enum qpnp_vadc_trigger			trigger_channel;
 	struct qpnp_vadc_chan_properties	chan_prop[0];
 };
 
+/* SW indexes for PMIC type and version used by QPNP VADC and IADC */
+#define QPNP_REV_ID_8941_3_1	1
+#define QPNP_REV_ID_8026_1_0	2
+#define QPNP_REV_ID_8026_2_0	3
+#define QPNP_REV_ID_8110_1_0	4
+#define QPNP_REV_ID_8026_2_1	5
+#define QPNP_REV_ID_8110_2_0	6
+#define QPNP_REV_ID_8026_2_2	7
+#define QPNP_REV_ID_8941_3_0	8
+#define QPNP_REV_ID_8941_2_0	9
+
 /* Public API */
 #if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)				\
 			|| defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
@@ -1366,9 +1379,16 @@
  * qpnp_vadc_sns_comp_result() - Compensate vbatt readings based on temperature
  * @dev:	Structure device for qpnp vadc
  * @result:	Voltage in uV that needs compensation.
+ * @is_pon_ocv: Whether the reading is from a power on OCV or not
  */
 int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
-						int64_t *result);
+					int64_t *result, bool is_pon_ocv);
+/**
+ * qpnp_adc_get_revid_version() - Obtain the PMIC number and revision.
+ * @dev:	Structure device node.
+ * returns internal mapped PMIC number and revision id.
+ */
+int qpnp_adc_get_revid_version(struct device *dev);
 #else
 static inline int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
 				uint32_t channel,
@@ -1482,6 +1502,8 @@
 static inline int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
 						int64_t *result)
 { return -ENXIO; }
+static inline int qpnp_adc_get_revid_version(struct device *dev)
+{ return -ENXIO; }
 #endif
 
 /* Public API */
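
A sketch of a consumer keying behaviour off the mapped revision; the wrapper function and the specific cases are illustrative, only qpnp_adc_get_revid_version() and the QPNP_REV_ID_* values come from this patch:

static bool example_is_pm8026_v2(struct device *dev)
{
	switch (qpnp_adc_get_revid_version(dev)) {
	case QPNP_REV_ID_8026_2_0:
	case QPNP_REV_ID_8026_2_1:
	case QPNP_REV_ID_8026_2_2:
		return true;
	default:
		return false;
	}
}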
diff --git a/include/linux/slimbus/slimbus.h b/include/linux/slimbus/slimbus.h
index 56a3a5f..7a7b3eb 100644
--- a/include/linux/slimbus/slimbus.h
+++ b/include/linux/slimbus/slimbus.h
@@ -525,6 +525,8 @@
  * @port_xfer_status: Called by framework when client calls get_xfer_status
  *	API. Returns how much buffer is actually processed and the port
  *	errors (e.g. overflow/underflow) if any.
+ * @xfer_user_msg: Send user message to specified logical address. Underlying
+ *	controller has to support sending user messages. Returns error if any.
  */
 struct slim_controller {
 	struct device		dev;
@@ -567,10 +569,13 @@
 	int			(*framer_handover)(struct slim_controller *ctrl,
 				struct slim_framer *new_framer);
 	int			(*port_xfer)(struct slim_controller *ctrl,
-				u8 pn, u8 *iobuf, u32 len,
+				u8 pn, phys_addr_t iobuf, u32 len,
 				struct completion *comp);
 	enum slim_port_err	(*port_xfer_status)(struct slim_controller *ctr,
-				u8 pn, u8 **done_buf, u32 *done_len);
+				u8 pn, phys_addr_t *done_buf, u32 *done_len);
+	int			(*xfer_user_msg)(struct slim_controller *ctrl,
+				u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len);
 };
 #define to_slim_controller(d) container_of(d, struct slim_controller, dev)
 
@@ -744,6 +749,20 @@
 extern int slim_xfer_msg(struct slim_controller *ctrl,
 			struct slim_device *sbdev, struct slim_ele_access *msg,
 			u16 mc, u8 *rbuf, const u8 *wbuf, u8 len);
+
+/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+extern int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len);
 /* end of message apis */
 
 /* Port management for manager device APIs */
@@ -783,8 +802,8 @@
  * Client will call slim_port_get_xfer_status to get error and/or number of
  * bytes transferred if used asynchronously.
  */
-extern int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
-				struct completion *comp);
+extern int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf,
+				u32 len, struct completion *comp);
 
 /*
  * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
@@ -806,7 +825,7 @@
  * processed from the multiple transfers.
  */
 extern enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb,
-			u32 ph, u8 **done_buf, u32 *done_len);
+			u32 ph, phys_addr_t *done_buf, u32 *done_len);
 
 /*
  * slim_connect_src: Connect source port to channel.
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index fa702ae..9d13091 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -588,6 +588,26 @@
 	list_del(&t->transfer_list);
 }
 
+/**
+ * spi_message_init_with_transfers - Initialize spi_message and append transfers
+ * @m: spi_message to be initialized
+ * @xfers: An array of spi transfers
+ * @num_xfers: Number of items in the xfer array
+ *
+ * This function initializes the given spi_message and adds each spi_transfer in
+ * the given array to the message.
+ */
+static inline void
+spi_message_init_with_transfers(struct spi_message *m,
+struct spi_transfer *xfers, unsigned int num_xfers)
+{
+	unsigned int i;
+
+	spi_message_init(m);
+	for (i = 0; i < num_xfers; ++i)
+		spi_message_add_tail(&xfers[i], m);
+}
+
 /* It's fine to embed message and transaction structures in other data
  * structures so long as you don't free them while they're in use.
  */
@@ -680,6 +700,30 @@
 	return spi_sync(spi, &m);
 }
 
+/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * It returns zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+	unsigned int num_xfers)
+{
+	struct spi_message msg;
+
+	spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+	return spi_sync(spi, &msg);
+}
+
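A usage sketch for the new helpers: one transfer array covering a write-then-read exchange. The device pointer, opcode and buffer sizes are illustrative:

static int example_read_id(struct spi_device *spi)
{
	u8 cmd = 0x9f;		/* hypothetical opcode */
	u8 id[3];
	struct spi_transfer xfers[] = {
		{ .tx_buf = &cmd, .len = 1, },
		{ .rx_buf = id,   .len = sizeof(id), },
	};

	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}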
 /* this copies txbuf and rxbuf data; for small transfers only! */
 extern int spi_write_then_read(struct spi_device *spi,
 		const void *txbuf, unsigned n_tx,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index dc15221..78ae909 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -92,7 +92,16 @@
 #  ifdef CONFIG_TICK_ONESHOT
 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #  endif
+#else
+static inline struct tick_device *tick_get_broadcast_device(void)
+{
+	return NULL;
+}
 
+static inline struct cpumask *tick_get_broadcast_mask(void)
+{
+	return NULL;
+}
 # endif /* BROADCAST */
 
 # ifdef CONFIG_TICK_ONESHOT
@@ -109,6 +118,10 @@
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 static inline void tick_check_idle(int cpu) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
+static inline struct cpumask *tick_get_broadcast_oneshot_mask(void)
+{
+	return NULL;
+}
 # endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index d16448a..195800f 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -523,6 +523,7 @@
 	bool use_sec_phy;
 	bool no_selective_suspend;
 	int resume_gpio;
+	bool is_uicc;
 };
 
 /**
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 81d5b9c..f446f51 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -708,6 +708,7 @@
 #define V4L2_QCOM_BUF_INPUT_UNSUPPORTED 0x200000
 #define V4L2_QCOM_BUF_FLAG_EOS          0x2000
 #define V4L2_QCOM_BUF_FLAG_READONLY     0x400000
+#define V4L2_MSM_BUF_FLAG_MBAFF         0x800000
 
 /*
  *	O V E R L A Y   P R E V I E W
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 3ba0abe..3828221 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -1,3 +1,14 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 #ifndef __MSMB_ISP__
 #define __MSMB_ISP__
 
@@ -302,15 +313,16 @@
 	ISP_WM_BUS_OVERFLOW = 4,
 	ISP_STATS_OVERFLOW  = 5,
 	ISP_CAMIF_ERROR     = 6,
-	ISP_SOF             = 7,
-	ISP_EOF             = 8,
-	ISP_EVENT_MAX       = 9
+	ISP_BUF_DONE        = 9,
+	ISP_EVENT_MAX       = 10
 };
 
 #define ISP_EVENT_OFFSET          8
 #define ISP_EVENT_BASE            (V4L2_EVENT_PRIVATE_START)
 #define ISP_BUF_EVENT_BASE        (ISP_EVENT_BASE + (1 << ISP_EVENT_OFFSET))
 #define ISP_STATS_EVENT_BASE      (ISP_EVENT_BASE + (2 << ISP_EVENT_OFFSET))
+#define ISP_SOF_EVENT_BASE        (ISP_EVENT_BASE + (3 << ISP_EVENT_OFFSET))
+#define ISP_EOF_EVENT_BASE        (ISP_EVENT_BASE + (4 << ISP_EVENT_OFFSET))
 #define ISP_EVENT_REG_UPDATE      (ISP_EVENT_BASE + ISP_REG_UPDATE)
 #define ISP_EVENT_START_ACK       (ISP_EVENT_BASE + ISP_START_ACK)
 #define ISP_EVENT_STOP_ACK        (ISP_EVENT_BASE + ISP_STOP_ACK)
@@ -318,8 +330,9 @@
 #define ISP_EVENT_WM_BUS_OVERFLOW (ISP_EVENT_BASE + ISP_WM_BUS_OVERFLOW)
 #define ISP_EVENT_STATS_OVERFLOW  (ISP_EVENT_BASE + ISP_STATS_OVERFLOW)
 #define ISP_EVENT_CAMIF_ERROR     (ISP_EVENT_BASE + ISP_CAMIF_ERROR)
-#define ISP_EVENT_SOF             (ISP_EVENT_BASE + ISP_SOF)
-#define ISP_EVENT_EOF             (ISP_EVENT_BASE + ISP_EOF)
+#define ISP_EVENT_SOF             (ISP_SOF_EVENT_BASE)
+#define ISP_EVENT_EOF             (ISP_EOF_EVENT_BASE)
+#define ISP_EVENT_BUF_DONE        (ISP_EVENT_BASE + ISP_BUF_DONE)
 #define ISP_EVENT_BUF_DIVERT      (ISP_BUF_EVENT_BASE)
 #define ISP_EVENT_STATS_NOTIFY    (ISP_STATS_EVENT_BASE)
 #define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9f0d486..ecaef21 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3224,8 +3224,8 @@
 static inline struct sk_buff *
 cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
 {
-	return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
-					  NL80211_ATTR_TESTDATA, approxlen);
+	return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
+					  NL80211_ATTR_VENDOR_DATA, approxlen);
 }
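
A hedged sketch of a vendor command handler replying through the corrected helper. The attribute number and payload are illustrative, and this assumes cfg80211_vendor_cmd_reply() is available alongside the vendor command support this fix targets:

#define EXAMPLE_VENDOR_ATTR_FW_VERSION	1	/* driver-defined attribute */

static int example_vendor_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
			      const void *data, int data_len)
{
	struct sk_buff *skb;
	u32 fw_ver = 0x010203;	/* example payload */

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(fw_ver) + 8);
	if (!skb)
		return -ENOMEM;

	if (nla_put_u32(skb, EXAMPLE_VENDOR_ATTR_FW_VERSION, fw_ver)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	return cfg80211_vendor_cmd_reply(skb);
}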
 
 /**
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 50660b3..dce56a6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -131,6 +131,8 @@
 						 * most likely due to retrans in 3WHS.
 						 */
 
+#define TCP_DELACK_SEG          1       /* Number of full MSS to receive before ACKing, RFC 2581 */
+
 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 					                 * for local resources.
 					                 */
@@ -253,6 +255,10 @@
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
 
+/* sysctl variables for controlling various tcp parameters */
+extern int sysctl_tcp_delack_seg;
+extern int sysctl_tcp_use_userconfig;
+
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
@@ -346,6 +352,10 @@
 			       struct pipe_inode_info *pipe, size_t len,
 			       unsigned int flags);
 
+extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *, int,
+				void __user *, size_t *, loff_t *);
+extern int tcp_proc_delayed_ack_control(struct ctl_table *, int,
+				void __user *, size_t *, loff_t *);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
diff --git a/include/sound/Kbuild b/include/sound/Kbuild
index 60847b0..aeccfed 100644
--- a/include/sound/Kbuild
+++ b/include/sound/Kbuild
@@ -13,3 +13,4 @@
 header-y += compress_offload.h
 header-y += lsm_params.h
 header-y += voice_params.h
+header-y += voice_svc.h
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 69944a6..1c6ea04 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -579,6 +579,15 @@
 	/* Clients must set this field to zero.*/
 } __packed;
 
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2 (0x00010DD8)
+
+struct asm_aac_stereo_mix_coeff_selection_param_v2 {
+	struct apr_hdr          hdr;
+	u32                     param_id;
+	u32                     param_size;
+	u32                     aac_stereo_mix_coeff_flag;
+} __packed;
+
 /* Allows a client to connect the desired stream to
  * the desired AFE port through the stream router
  *
diff --git a/include/sound/voice_svc.h b/include/sound/voice_svc.h
new file mode 100644
index 0000000..7045018
--- /dev/null
+++ b/include/sound/voice_svc.h
@@ -0,0 +1,46 @@
+#ifndef __VOICE_SVC_H__
+#define __VOICE_SVC_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define VOICE_SVC_DRIVER_NAME "voice_svc"
+
+#define VOICE_SVC_MVM_STR "MVM"
+#define VOICE_SVC_CVS_STR "CVS"
+#define MAX_APR_SERVICE_NAME_LEN  64
+
+struct voice_svc_register {
+	char svc_name[MAX_APR_SERVICE_NAME_LEN];
+	__u32 src_port;
+	__u8 reg_flag;
+};
+
+struct voice_svc_cmd_response {
+	__u32 src_port;
+	__u32 dest_port;
+	__u32 token;
+	__u32 opcode;
+	__u32 payload_size;
+	__u8 payload[0];
+};
+
+struct voice_svc_cmd_request {
+	char svc_name[MAX_APR_SERVICE_NAME_LEN];
+	__u32 src_port;
+	__u32 dest_port;
+	__u32 token;
+	__u32 opcode;
+	__u32 payload_size;
+	__u8 payload[0];
+};
+
+#define VOICE_SVC_MAGIC 'N'
+
+#define SNDRV_VOICE_SVC_REGISTER_SVC	_IOWR(VOICE_SVC_MAGIC, \
+					0x01, struct voice_svc_register)
+#define SNDRV_VOICE_SVC_CMD_RESPONSE	_IOWR(VOICE_SVC_MAGIC, \
+					0x02, struct voice_svc_cmd_response)
+#define SNDRV_VOICE_SVC_CMD_REQUEST	_IOWR(VOICE_SVC_MAGIC, \
+					0x03, struct voice_svc_cmd_request)
+#endif
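
A user-space sketch of the new ioctl interface; the device node path is assumed from VOICE_SVC_DRIVER_NAME and the reg_flag semantics are assumed (non-zero meaning register), so both may differ on a given target:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/voice_svc.h>

static int example_register_mvm(void)
{
	struct voice_svc_register reg;
	int fd, rc;

	fd = open("/dev/voice_svc", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;

	memset(&reg, 0, sizeof(reg));
	strncpy(reg.svc_name, VOICE_SVC_MVM_STR, sizeof(reg.svc_name) - 1);
	reg.src_port = 1;	/* illustrative port */
	reg.reg_flag = 1;	/* assumed: non-zero = register */

	rc = ioctl(fd, SNDRV_VOICE_SVC_REGISTER_SVC, &reg);
	close(fd);
	return rc;
}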
diff --git a/include/trace/events/ice40.h b/include/trace/events/ice40.h
new file mode 100644
index 0000000..c0649a8
--- /dev/null
+++ b/include/trace/events/ice40.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ice40
+
+#if !defined(_TRACE_ICE40_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ICE40_H
+
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+
+TRACE_EVENT(ice40_reg_write,
+
+	TP_PROTO(u8 addr, u8 val, u8 cmd0, u8 cmd1, int ret),
+
+	TP_ARGS(addr, val, cmd0, cmd1, ret),
+
+	TP_STRUCT__entry(
+		__field(u8, addr)
+		__field(u8, val)
+		__field(u8, cmd0)
+		__field(u8, cmd1)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->addr = addr;
+		__entry->val = val;
+		__entry->cmd0 = cmd0;
+		__entry->cmd1 = cmd1;
+		__entry->ret = ret;
+	),
+
+	TP_printk("addr = %x val = %x cmd0 = %x cmd1 = %x ret = %d",
+			__entry->addr, __entry->val, __entry->cmd0,
+			__entry->cmd1, __entry->ret)
+);
+
+TRACE_EVENT(ice40_reg_read,
+
+	TP_PROTO(u8 addr, u8 cmd0, int ret),
+
+	TP_ARGS(addr, cmd0, ret),
+
+	TP_STRUCT__entry(
+		__field(u8, addr)
+		__field(u8, cmd0)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->addr = addr;
+		__entry->cmd0 = cmd0;
+		__entry->ret = ret;
+	),
+
+	TP_printk("addr = %x cmd0 = %x ret = %x", __entry->addr,
+			__entry->cmd0, __entry->ret)
+);
+
+TRACE_EVENT(ice40_hub_control,
+
+	TP_PROTO(u16 req, u16 val, u16 index, u16 len, int ret),
+
+	TP_ARGS(req, val, index, len, ret),
+
+	TP_STRUCT__entry(
+		__field(u16, req)
+		__field(u16, val)
+		__field(u16, index)
+		__field(u16, len)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->req = req;
+		__entry->val = val;
+		__entry->index = index;
+		__entry->len = len;
+		__entry->ret = ret;
+	),
+
+	TP_printk("req = %x val = %x index = %x len = %x ret = %d",
+			__entry->req, __entry->val, __entry->index,
+			__entry->len, __entry->ret)
+);
+
+TRACE_EVENT(ice40_ep0,
+
+	TP_PROTO(const char *state),
+
+	TP_ARGS(state),
+
+	TP_STRUCT__entry(
+		__string(state, state)
+	),
+
+	TP_fast_assign(
+		__assign_str(state, state);
+	),
+
+	TP_printk("ep0 state: %s", __get_str(state))
+);
+
+TRACE_EVENT(ice40_urb_enqueue,
+
+	TP_PROTO(struct urb *urb),
+
+	TP_ARGS(urb),
+
+	TP_STRUCT__entry(
+		__field(u16, epnum)
+		__field(u8, dir)
+		__field(u8, type)
+		__field(u32, len)
+	),
+
+	TP_fast_assign(
+		__entry->epnum = usb_pipeendpoint(urb->pipe);
+		__entry->dir = usb_urb_dir_in(urb);
+		__entry->type = usb_pipebulk(urb->pipe);
+		__entry->len = urb->transfer_buffer_length;
+	),
+
+	TP_printk("URB_LOG: E: ep %d %s %s len %d", __entry->epnum,
+			__entry->dir ? "In" : "Out",
+			__entry->type ? "Bulk" : "ctrl",
+			__entry->len)
+);
+
+TRACE_EVENT(ice40_urb_dequeue,
+
+	TP_PROTO(struct urb *urb),
+
+	TP_ARGS(urb),
+
+	TP_STRUCT__entry(
+		__field(u16, epnum)
+		__field(u8, dir)
+		__field(u8, type)
+		__field(u32, len)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->epnum = usb_pipeendpoint(urb->pipe);
+		__entry->dir = usb_urb_dir_in(urb);
+		__entry->type = usb_pipebulk(urb->pipe);
+		__entry->len = urb->transfer_buffer_length;
+		__entry->reason = urb->unlinked;
+	),
+
+	TP_printk("URB_LOG: D: ep %d %s %s len %d reason %d",
+			__entry->epnum,
+			__entry->dir ? "In" : "Out",
+			__entry->type ? "Bulk" : "ctrl",
+			__entry->len, __entry->reason)
+);
+
+TRACE_EVENT(ice40_urb_done,
+
+	TP_PROTO(struct urb *urb, int result),
+
+	TP_ARGS(urb, result),
+
+	TP_STRUCT__entry(
+		__field(int, result)
+		__field(u16, epnum)
+		__field(u8, dir)
+		__field(u8, type)
+		__field(u32, len)
+		__field(u32, actual)
+	),
+
+	TP_fast_assign(
+		__entry->result = result;
+		__entry->epnum = usb_pipeendpoint(urb->pipe);
+		__entry->dir = usb_urb_dir_in(urb);
+		__entry->type = usb_pipebulk(urb->pipe);
+		__entry->len = urb->transfer_buffer_length;
+		__entry->actual = urb->actual_length;
+	),
+
+	TP_printk("URB_LOG: C: ep %d %s %s len %d actual %d result %d",
+			__entry->epnum, __entry->dir ? "In" : "Out",
+			__entry->type ? "Bulk" : "ctrl", __entry->len,
+			__entry->actual, __entry->result)
+);
+
+TRACE_EVENT(ice40_bus_suspend,
+
+	TP_PROTO(u8 status),
+
+	TP_ARGS(status),
+
+	TP_STRUCT__entry(
+		__field(u8, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = status;
+	),
+
+	TP_printk("bus_suspend status %d", __entry->status)
+);
+
+TRACE_EVENT(ice40_bus_resume,
+
+	TP_PROTO(u8 status),
+
+	TP_ARGS(status),
+
+	TP_STRUCT__entry(
+		__field(u8, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = status;
+	),
+
+	TP_printk("bus_resume status %d", __entry->status)
+);
+
+TRACE_EVENT(ice40_setup,
+
+	TP_PROTO(const char *token, int ret),
+
+	TP_ARGS(token, ret),
+
+	TP_STRUCT__entry(
+		__string(token, token)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__assign_str(token, token);
+		__entry->ret = ret;
+	),
+
+	TP_printk("Trace: SETUP %s ret %d",
+		__get_str(token), __entry->ret)
+);
+
+TRACE_EVENT(ice40_in,
+
+	TP_PROTO(u16 ep, const char *token, u8 len, u8 expected, int ret),
+
+	TP_ARGS(ep, token, len, expected, ret),
+
+	TP_STRUCT__entry(
+		__field(u16, ep)
+		__string(token, token)
+		__field(u8, len)
+		__field(u8, expected)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->ep = ep;
+		__assign_str(token, token);
+		__entry->len = len;
+		__entry->expected = expected;
+		__entry->ret = ret;
+	),
+
+	TP_printk("Trace: %d IN %s len %d expected %d ret %d",
+			__entry->ep, __get_str(token),
+			__entry->len, __entry->expected,
+			__entry->ret)
+);
+
+TRACE_EVENT(ice40_out,
+
+	TP_PROTO(u16 ep, const char *token, u8 len, int ret),
+
+	TP_ARGS(ep, token, len, ret),
+
+	TP_STRUCT__entry(
+		__field(u16, ep)
+		__string(token, token)
+		__field(u8, len)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->ep = ep;
+		__assign_str(token, token);
+		__entry->len = len;
+		__entry->ret = ret;
+	),
+
+	TP_printk("Trace: %d OUT %s len %d ret %d",
+			__entry->ep, __get_str(token),
+			__entry->len, __entry->ret)
+);
+#endif /* if !defined(_TRACE_ICE40_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
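
These tracepoints still need the standard one-time instantiation in the driver before they can fire; a sketch of that boilerplate and of one call site, where the SPI helper is hypothetical:

/* in exactly one ice40 driver .c file */
#define CREATE_TRACE_POINTS
#include <trace/events/ice40.h>

static int example_ice40_reg_write(u8 addr, u8 val, u8 cmd0, u8 cmd1)
{
	int ret = example_spi_write(addr, val, cmd0, cmd1); /* hypothetical */

	trace_ice40_reg_write(addr, val, cmd0, cmd1, ret);
	return ret;
}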
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 700d2ae..f320017 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -384,7 +384,7 @@
 void tracing_off(void)
 {
 	if (global_trace.buffer)
-		ring_buffer_record_on(global_trace.buffer);
+		ring_buffer_record_off(global_trace.buffer);
 	/*
 	 * This flag is only looked at when buffers haven't been
 	 * allocated yet. We don't really care about the race
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0218f4b..fd699ca 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -177,7 +177,7 @@
 	struct gen_pool_chunk *chunk;
 	int nbits = size >> pool->min_alloc_order;
 	int nbytes = sizeof(struct gen_pool_chunk) +
-				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+				BITS_TO_LONGS(nbits) * sizeof(long);
 
 	if (nbytes <= PAGE_SIZE)
 		chunk = kmalloc_node(nbytes, __GFP_ZERO, nid);
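
A worked size comparison showing why the change matters, for a 64-bit kernel and nbits = 100:

/*
 * old: (100 + BITS_PER_BYTE - 1) / BITS_PER_BYTE        = 13 bytes
 * new: BITS_TO_LONGS(100) * sizeof(long) = 2 * 8 bytes  = 16 bytes
 *
 * The chunk bitmap is manipulated with word-sized bitops, so touching bit 99
 * accesses the whole second long (bytes 8-15); the old 13-byte allocation
 * left that word short.
 */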
diff --git a/mm/compaction.c b/mm/compaction.c
index 673142d..35bb243 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -243,7 +243,6 @@
 {
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor, *valid_page = NULL;
-	unsigned long nr_strict_required = end_pfn - blockpfn;
 	unsigned long flags;
 	bool locked = false;
 
@@ -256,11 +255,12 @@
 
 		nr_scanned++;
 		if (!pfn_valid_within(blockpfn))
-			continue;
+			goto isolate_fail;
+
 		if (!valid_page)
 			valid_page = page;
 		if (!PageBuddy(page))
-			continue;
+			goto isolate_fail;
 
 		/*
 		 * The zone lock must be held to isolate freepages.
@@ -281,12 +281,10 @@
 
 		/* Recheck this is a buddy page under lock */
 		if (!PageBuddy(page))
-			continue;
+			goto isolate_fail;
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
-		if (!isolated && strict)
-			break;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -297,7 +295,13 @@
 		if (isolated) {
 			blockpfn += isolated - 1;
 			cursor += isolated - 1;
+			continue;
 		}
+
+isolate_fail:
+		if (strict)
+			break;
+
 	}
 
 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -307,7 +311,7 @@
 	 * pages requested were isolated. If there were any failures, 0 is
 	 * returned and CMA will fail.
 	 */
-	if (strict && nr_strict_required > total_isolated)
+	if (strict && blockpfn < end_pfn)
 		total_isolated = 0;
 
 	if (locked)
diff --git a/mm/ksm.c b/mm/ksm.c
index 47c8853..fa73fc6 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -189,6 +189,9 @@
 /* Milliseconds ksmd should sleep between batches */
 static unsigned int ksm_thread_sleep_millisecs = 20;
 
+/* Boolean to indicate whether to use deferred timer or not */
+static bool use_deferred_timer;
+
 #define KSM_RUN_STOP	0
 #define KSM_RUN_MERGE	1
 #define KSM_RUN_UNMERGE	2
@@ -1427,6 +1430,41 @@
 	}
 }
 
+static void process_timeout(unsigned long __data)
+{
+	wake_up_process((struct task_struct *)__data);
+}
+
+static signed long __sched deferred_schedule_timeout(signed long timeout)
+{
+	struct timer_list timer;
+	unsigned long expire;
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	if (timeout < 0) {
+		pr_err("schedule_timeout: wrong timeout value %lx\n",
+							timeout);
+		__set_current_state(TASK_RUNNING);
+		goto out;
+	}
+
+	expire = timeout + jiffies;
+
+	setup_deferrable_timer_on_stack(&timer, process_timeout,
+			(unsigned long)current);
+	mod_timer(&timer, expire);
+	schedule();
+	del_singleshot_timer_sync(&timer);
+
+	/* Remove the timer from the object tracker */
+	destroy_timer_on_stack(&timer);
+
+	timeout = expire - jiffies;
+
+out:
+	return timeout < 0 ? 0 : timeout;
+}
+
 static int ksmd_should_run(void)
 {
 	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
@@ -1446,7 +1484,11 @@
 		try_to_freeze();
 
 		if (ksmd_should_run()) {
-			schedule_timeout_interruptible(
+			if (use_deferred_timer)
+				deferred_schedule_timeout(
+				msecs_to_jiffies(ksm_thread_sleep_millisecs));
+			else
+				schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
 			wait_event_freezable(ksm_thread_wait,
@@ -1926,6 +1968,26 @@
 }
 KSM_ATTR(run);
 
+static ssize_t deferred_timer_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, 8, "%d\n", use_deferred_timer);
+}
+
+static ssize_t deferred_timer_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t count)
+{
+	unsigned long enable;
+	int err;
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err || enable > 1)
+		return -EINVAL;
+	use_deferred_timer = enable;
+
+	return count;
+}
+KSM_ATTR(deferred_timer);
+
 static ssize_t pages_shared_show(struct kobject *kobj,
 				 struct kobj_attribute *attr, char *buf)
 {
@@ -1980,6 +2042,7 @@
 	&pages_unshared_attr.attr,
 	&pages_volatile_attr.attr,
 	&full_scans_attr.attr,
+	&deferred_timer_attr.attr,
 	NULL,
 };
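
The new attribute is exposed through the existing ksm sysfs directory; a small user-space sketch enabling it, assuming the usual /sys/kernel/mm/ksm/ path:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/ksm/deferred_timer", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}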
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7f38d35..a8d7ed0 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -896,13 +896,13 @@
 		sin6->sin6_port = 0;
 		sin6->sin6_addr = ip6->saddr;
 
+		sin6->sin6_flowinfo = 0;
 		if (np->sndflow)
 			sin6->sin6_flowinfo =
 				*(__be32 *)ip6 & IPV6_FLOWINFO_MASK;
 
-		if (__ipv6_addr_needs_scope_id(
-		    ipv6_addr_type(&sin6->sin6_addr)))
-			sin6->sin6_scope_id = IP6CB(skb)->iif;
+		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+							  IP6CB(skb)->iif);
 
 		if (inet6_sk(sk)->rxopt.all)
 			pingv6_ops.datagram_recv_ctl(sk, msg, skb);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724d..6bd622f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,6 +36,10 @@
 static int ip_ttl_max = 255;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
 
 /* Update system visible IP port range */
 static void set_local_port_range(int range[2])
@@ -699,6 +703,25 @@
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero
 	},
+	{
+		.procname	= "tcp_delack_seg",
+		.data		= &sysctl_tcp_delack_seg,
+		.maxlen		= sizeof(sysctl_tcp_delack_seg),
+		.mode		= 0644,
+		.proc_handler	= tcp_proc_delayed_ack_control,
+		.extra1		= &tcp_delack_seg_min,
+		.extra2		= &tcp_delack_seg_max,
+	},
+	{
+		.procname	= "tcp_use_userconfig",
+		.data		= &sysctl_tcp_use_userconfig,
+		.maxlen		= sizeof(sysctl_tcp_use_userconfig),
+		.mode		= 0644,
+		.proc_handler	= tcp_use_userconfig_sysctl_handler,
+		.extra1		= &tcp_use_userconfig_min,
+		.extra2		= &tcp_use_userconfig_max,
+	},
 	{ }
 };
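
A user-space sketch driving the two new knobs through procfs; the paths follow the .procname entries above and the values are examples only:

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* opt in to user-configured delayed ACKs, then ACK every 10 segments */
	if (write_sysctl("/proc/sys/net/ipv4/tcp_use_userconfig", "1"))
		return 1;
	return write_sysctl("/proc/sys/net/ipv4/tcp_delack_seg", "10") ? 1 : 0;
}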
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 74a286c..706899e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -294,6 +294,12 @@
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
 atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 EXPORT_SYMBOL(tcp_memory_allocated);
 
@@ -1213,8 +1219,11 @@
 		   /* Delayed ACKs frequently hit locked sockets during bulk
 		    * receive. */
 		if (icsk->icsk_ack.blocked ||
-		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
-		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+		    /* Once-per-sysctl_tcp_delack_seg segments
+		     * ACK was not sent by tcp_input.c
+		     */
+		    tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+						sysctl_tcp_delack_seg ||
 		    /*
 		     * If this read emptied read buffer, we send ACK, if
 		     * connection is not bidirectional, user drained
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 257b617..7c3612b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5047,7 +5047,8 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	    /* More than one full frame received... */
-	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+	if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+					sysctl_tcp_delack_seg &&
 	     /* ... and right edge of window advances far enough.
 	      * (tcp_recvmsg() will send ACK otherwise). Or...
 	      */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 34d4a02..d1b4792 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -34,7 +34,39 @@
 
 static void tcp_write_timer(unsigned long);
 static void tcp_delack_timer(unsigned long);
-static void tcp_keepalive_timer (unsigned long data);
+static void tcp_keepalive_timer(unsigned long data);
+
+/* Function to reset tcp_ack related sysctls when master control is reset */
+void set_tcp_default(void)
+{
+	sysctl_tcp_delack_seg = TCP_DELACK_SEG;
+}
+
+/* sysctl handler for the delayed-ACK segment threshold (tcp_delack_seg) */
+int tcp_proc_delayed_ack_control(ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	/* The ret value will be 0 if the input validation is successful
+	 * and the values are written to sysctl table. If not, the stack
+	 * will continue to work with currently configured values
+	 */
+	return ret;
+}
+
+/* sysctl handler for the tcp_ack master control (tcp_use_userconfig) */
+int tcp_use_userconfig_sysctl_handler(ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	if (write && ret == 0) {
+		if (!sysctl_tcp_use_userconfig)
+			set_tcp_default();
+	}
+	return ret;
+}
 
 void tcp_init_xmit_timers(struct sock *sk)
 {
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index c27f165..f6c74c9 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -18,7 +18,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country AL:
@@ -42,7 +42,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country AS:
@@ -64,7 +64,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country AW:
@@ -139,7 +139,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country BS:
@@ -163,7 +163,7 @@
 	(2402 - 2472 @ 40), (N/A, 27)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country CH: DFS-ETSI
@@ -196,14 +196,14 @@
 	(2402 - 2472 @ 40), (N/A, 27)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country CR:
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 20), (3, 17)
 	(5250 - 5330 @ 20), (3, 24), DFS
-	(5490 - 5710 @ 20), (3, 24), DFS
+	(5490 - 5730 @ 20), (3, 24), DFS
 	(5735 - 5835 @ 20), (3, 30)
 
 country CY: DFS-ETSI
@@ -271,7 +271,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 20), (3, 17)
 	(5250 - 5330 @ 20), (3, 24), DFS
-	(5490 - 5710 @ 20), (3, 24), DFS
+	(5490 - 5730 @ 20), (3, 24), DFS
 	(5735 - 5835 @ 20), (3, 30)
 
 country EE: DFS-ETSI
@@ -342,7 +342,7 @@
 	(2402 - 2472 @ 40), (3, 30)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country GP:
@@ -375,21 +375,21 @@
 	(2402 - 2472 @ 40), (3, 30)
 	(5170 - 5250 @ 20), (6, 17)
 	(5250 - 5330 @ 20), (6, 24), DFS
-	(5490 - 5710 @ 20), (6, 24), DFS
+	(5490 - 5730 @ 20), (6, 24), DFS
 	(5735 - 5835 @ 20), (6, 30)
 
 country HN:
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country HK:
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country HR: DFS-ETSI
@@ -462,7 +462,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country JP:
@@ -495,7 +495,7 @@
         (2402 - 2472 @ 40), (N/A, 27)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country KP:
@@ -537,7 +537,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 20), (3, 17)
 	(5250 - 5330 @ 20), (3, 20), DFS
-	(5490 - 5710 @ 20), (3, 20), DFS
+	(5490 - 5730 @ 20), (3, 20), DFS
 	(5735 - 5835 @ 20), (3, 30)
 
 country LT: DFS-ETSI
@@ -634,7 +634,7 @@
 	(2402 - 2472 @ 40), (3, 27)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country MW:
@@ -681,7 +681,7 @@
 	(2402 - 2482 @ 40), (N/A, 30)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country OM:
@@ -700,7 +700,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (6, 20)
 	(5250 - 5330 @ 80), (6, 20), DFS
-	(5490 - 5710 @ 80), (6, 27), DFS
+	(5490 - 5730 @ 80), (6, 27), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country PF:
@@ -720,7 +720,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country PK:
@@ -747,7 +747,7 @@
 	(2402 - 2472 @ 40), (3, 30)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country PY:
@@ -819,7 +819,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (6, 17)
 	(5250 - 5330 @ 80), (6, 24), DFS
-	(5490 - 5710 @ 80), (6, 24), DFS
+	(5490 - 5730 @ 80), (6, 24), DFS
 	(5735 - 5835 @ 80), (6, 30)
 
 country SI: DFS-ETSI
@@ -856,21 +856,21 @@
 country TW:
 	(2402 - 2472 @ 40), (3, 27)
 	(5270 - 5330 @ 40), (6, 17), DFS
-	(5490 - 5710 @ 80), (6, 30), DFS
+	(5490 - 5730 @ 80), (6, 30), DFS
 	(5735 - 5815 @ 80), (6, 30)
 
 country TH:
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country TT:
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 40), (3, 17)
 	(5250 - 5330 @ 40), (3, 20), DFS
-	(5490 - 5710 @ 40), (3, 20), DFS
+	(5490 - 5730 @ 40), (3, 20), DFS
 	(5735 - 5835 @ 40), (3, 30)
 
 country TN:
@@ -914,8 +914,8 @@
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
 	(5490 - 5600 @ 80), (3, 24), DFS
-	(5650 - 5710 @ 40), (3, 24), DFS
-	(5710 - 5835 @ 80), (3, 30)
+	(5650 - 5730 @ 40), (3, 24), DFS
+	(5735 - 5835 @ 80), (3, 30)
 	# 60g band
 	# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
 	# channels 1,2,3, EIRP=40dBm(43dBm peak)
@@ -935,14 +935,14 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 40), (3, 17)
 	(5250 - 5330 @ 40), (3, 20), DFS
-	(5490 - 5710 @ 40), (3, 20), DFS
+	(5490 - 5730 @ 40), (3, 20), DFS
 	(5735 - 5835 @ 40), (3, 30)
 
 country UZ:
 	(2402 - 2472 @ 40), (3, 27)
 	(5170 - 5250 @ 40), (3, 17)
 	(5250 - 5330 @ 40), (3, 20), DFS
-	(5490 - 5710 @ 40), (3, 20), DFS
+	(5490 - 5730 @ 40), (3, 20), DFS
 	(5735 - 5835 @ 40), (3, 30)
 
 country VE:
@@ -956,7 +956,7 @@
 	(2402 - 2482 @ 40), (N/A, 20)
 	(5170 - 5250 @ 80), (3, 17)
 	(5250 - 5330 @ 80), (3, 24), DFS
-	(5490 - 5710 @ 80), (3, 24), DFS
+	(5490 - 5730 @ 80), (3, 24), DFS
 	(5735 - 5835 @ 80), (3, 30)
 
 country VI:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e90ef68..b73cfe5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5431,6 +5431,8 @@
 	return err;
 }
 
+#endif
+
 struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
 					   enum nl80211_commands cmd,
 					   enum nl80211_attrs attr,
@@ -5478,7 +5480,7 @@
 			nl80211_testmode_mcgrp.id, gfp);
 }
 EXPORT_SYMBOL(__cfg80211_send_event_skb);
-#endif
+
 
 static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index b28b7eb..c868a74 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3816,6 +3816,11 @@
 	struct lsm_network_audit net = {0,};
 	u32 tsid = task_sid(task);
 
+	if (unlikely(!sksec)) {
+		pr_warn("SELinux: sksec is NULL, socket is already freed\n");
+		return -EINVAL;
+	}
+
 	if (sksec->sid == SECINITSID_KERNEL)
 		return 0;
 
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 452bbab..c73b2c8 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -1200,6 +1200,13 @@
 			  MSM8X10_WCD_A_CDC_TX2_VOL_CTL_GAIN,
 			  -84, 40, digital_gain),
 
+	SOC_SINGLE_TLV("ADC1 Volume", MSM8X10_WCD_A_TX_1_EN, 2,
+					19, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", MSM8X10_WCD_A_TX_2_EN, 2,
+					19, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", MSM8X10_WCD_A_TX_3_EN, 2,
+					19, 0, analog_gain),
+
 	SOC_SINGLE_S8_TLV("IIR1 INP1 Volume",
 			  MSM8X10_WCD_A_CDC_IIR1_GAIN_B1_CTL,
 			  -84, 40, digital_gain),
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index d84ba90..95f2041 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -43,6 +43,8 @@
 #define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
 
 #define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
+#define TAPAN_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 20))
+#define TAPAN_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */
 
 #define TAPAN_VDD_CX_OPTIMAL_UA 10000
 #define TAPAN_VDD_CX_SLEEP_UA 2000
@@ -3236,6 +3238,8 @@
 }
 
 #define TAPAN_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+#define TAPAN_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+				  SNDRV_PCM_FMTBIT_S24_LE)
 static int tapan_write(struct snd_soc_codec *codec, unsigned int reg,
 	unsigned int value)
 {
@@ -3643,6 +3647,68 @@
 	return 0;
 }
 
+static void tapan_set_rxsb_port_format(struct snd_pcm_hw_params *params,
+				       struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_codec_dai_data *cdc_dai;
+	struct wcd9xxx_ch *ch;
+	int port;
+	u8 bit_sel;
+	u16 sb_ctl_reg, field_shift;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		bit_sel = 0x2;
+		tapan_p->dai[dai->id].bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bit_sel = 0x0;
+		tapan_p->dai[dai->id].bit_width = 24;
+		break;
+	default:
+		dev_err(codec->dev, "Invalid format %x\n",
+			params_format(params));
+		return;
+	}
+
+	cdc_dai = &tapan_p->dai[dai->id];
+
+	list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
+		port = wcd9xxx_get_slave_port(ch->ch_num);
+
+		if (IS_ERR_VALUE(port) ||
+		    !TAPAN_VALIDATE_RX_SBPORT_RANGE(port)) {
+			dev_warn(codec->dev,
+				 "%s: invalid port ID %d returned for RX DAI\n",
+				 __func__, port);
+			return;
+		}
+
+		port = TAPAN_CONVERT_RX_SBPORT_ID(port);
+
+		if (port <= 3) {
+			sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B1_CTL;
+			field_shift = port << 1;
+		} else if (port <= 4) {
+			sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B2_CTL;
+			field_shift = (port - 4) << 1;
+		} else { /* should not happen */
+			dev_warn(codec->dev,
+				 "%s: bad port ID %d\n", __func__, port);
+			return;
+		}
+
+		dev_dbg(codec->dev,
+			"%s: sb_ctl_reg %x field_shift %x bit_sel %x\n",
+			__func__, sb_ctl_reg, field_shift, bit_sel);
+		snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift,
+				    bit_sel << field_shift);
+	}
+}
+
+
 static int tapan_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
@@ -3755,29 +3821,7 @@
 			snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_I2S_CTL,
 					    0x03, (rx_fs_rate >> 0x05));
 		} else {
-			switch (params_format(params)) {
-			case SNDRV_PCM_FORMAT_S16_LE:
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B1_CTL,
-					0xFF, 0xAA);
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B2_CTL,
-					0xFF, 0x2A);
-				tapan->dai[dai->id].bit_width = 16;
-				break;
-			case SNDRV_PCM_FORMAT_S24_LE:
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B1_CTL,
-					0xFF, 0x00);
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B2_CTL,
-					0xFF, 0x00);
-				tapan->dai[dai->id].bit_width = 24;
-				break;
-			default:
-				dev_err(codec->dev, "Invalid format\n");
-				break;
-			}
+			tapan_set_rxsb_port_format(params, dai);
 			tapan->dai[dai->id].rate   = params_rate(params);
 		}
 		break;
@@ -3894,7 +3938,7 @@
 		.playback = {
 			.stream_name = "AIF1 Playback",
 			.rates = WCD9306_RATES,
-			.formats = TAPAN_FORMATS,
+			.formats = TAPAN_FORMATS_S16_S24_LE,
 			.rate_max = 192000,
 			.rate_min = 8000,
 			.channels_min = 1,
@@ -3922,7 +3966,7 @@
 		.playback = {
 			.stream_name = "AIF2 Playback",
 			.rates = WCD9306_RATES,
-			.formats = TAPAN_FORMATS,
+			.formats = TAPAN_FORMATS_S16_S24_LE,
 			.rate_min = 8000,
 			.rate_max = 192000,
 			.channels_min = 1,
@@ -3964,7 +4008,7 @@
 		.playback = {
 			.stream_name = "AIF3 Playback",
 			.rates = WCD9306_RATES,
-			.formats = TAPAN_FORMATS,
+			.formats = TAPAN_FORMATS_S16_S24_LE,
 			.rate_min = 8000,
 			.rate_max = 192000,
 			.channels_min = 1,
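
The tapan_set_rxsb_port_format() helper added earlier in this file packs one
2-bit format field per RX port into the two RX_SB connection registers. A
standalone sketch of that mapping, assuming the same register macros and that
SLIMbus RX1 is port ID 16 (illustrative helper, not part of the patch):

	/* Map a SLIMbus RX port ID to its format-control register and the
	 * bit offset of its 2-bit field, mirroring tapan_set_rxsb_port_format().
	 */
	static void tapan_rx_port_to_field(int slim_port, u16 *reg, u16 *shift)
	{
		int port = slim_port - 16;	/* RX1 port ID is 16 */

		if (port <= 3) {
			*reg = TAPAN_A_CDC_CONN_RX_SB_B1_CTL;	/* RX1..RX4 */
			*shift = port << 1;			/* 2 bits per port */
		} else {
			*reg = TAPAN_A_CDC_CONN_RX_SB_B2_CTL;	/* RX5 */
			*shift = (port - 4) << 1;
		}
	}

Writing bit_sel 0x2 into each field of B1_CTL reproduces the 0xAA pattern the
removed S16_LE branch used to program, while 0x0 per field matches the old
S24_LE writes of 0x00.
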
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 5dedec8..7b1a04e 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -1652,6 +1652,21 @@
 	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
 };
 
+static const char * const iir_inp2_text[] = {
+	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const iir_inp3_text[] = {
+	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const iir_inp4_text[] = {
+	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
 static const struct soc_enum rx_mix1_inp1_chain_enum =
 	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text);
 
@@ -1800,6 +1815,24 @@
 static const struct soc_enum iir2_inp1_mux_enum =
 	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
 
+static const struct soc_enum iir1_inp2_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text);
+
+static const struct soc_enum iir2_inp2_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text);
+
+static const struct soc_enum iir1_inp3_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text);
+
+static const struct soc_enum iir2_inp3_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text);
+
+static const struct soc_enum iir1_inp4_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text);
+
+static const struct soc_enum iir2_inp4_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text);
+
 static const struct snd_kcontrol_new rx_mix1_inp1_mux =
 	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
 
@@ -2025,6 +2058,24 @@
 static const struct snd_kcontrol_new iir2_inp1_mux =
 	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
 
+static const struct snd_kcontrol_new iir1_inp2_mux =
+	SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp2_mux =
+	SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp3_mux =
+	SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp3_mux =
+	SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp4_mux =
+	SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp4_mux =
+	SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
+
 static const struct snd_kcontrol_new anc1_mux =
 	SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
 
@@ -4012,6 +4063,120 @@
 	{"IIR2 INP1 MUX", "RX6", "SLIM RX6"},
 	{"IIR2 INP1 MUX", "RX7", "SLIM RX7"},
 
+	{"IIR1", NULL, "IIR1 INP2 MUX"},
+	{"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR2", NULL, "IIR2 INP2 MUX"},
+	{"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
+	{"IIR2 INP2 MUX", "RX6", "SLIM RX6"},
+	{"IIR2 INP2 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR1", NULL, "IIR1 INP3 MUX"},
+	{"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR2", NULL, "IIR2 INP3 MUX"},
+	{"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
+	{"IIR2 INP3 MUX", "RX6", "SLIM RX6"},
+	{"IIR2 INP3 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR1", NULL, "IIR1 INP4 MUX"},
+	{"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP4 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP4 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR2", NULL, "IIR2 INP4 MUX"},
+	{"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
+	{"IIR2 INP4 MUX", "RX6", "SLIM RX6"},
+	{"IIR2 INP4 MUX", "RX7", "SLIM RX7"},
+
 	{"MIC BIAS1 Internal1", NULL, "LDO_H"},
 	{"MIC BIAS1 Internal2", NULL, "LDO_H"},
 	{"MIC BIAS1 External", NULL, "LDO_H"},
@@ -5667,12 +5832,36 @@
 		&iir1_inp1_mux,  taiko_codec_iir_mux_event,
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_MUX_E("IIR1 INP2 MUX", TAIKO_A_CDC_IIR1_GAIN_B2_CTL, 0, 0,
+		&iir1_inp2_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR1 INP3 MUX", TAIKO_A_CDC_IIR1_GAIN_B3_CTL, 0, 0,
+		&iir1_inp3_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR1 INP4 MUX", TAIKO_A_CDC_IIR1_GAIN_B4_CTL, 0, 0,
+		&iir1_inp4_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
 	SND_SOC_DAPM_MIXER("IIR1", TAIKO_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
 
 	SND_SOC_DAPM_MUX_E("IIR2 INP1 MUX", TAIKO_A_CDC_IIR2_GAIN_B1_CTL, 0, 0,
 		&iir2_inp1_mux,  taiko_codec_iir_mux_event,
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_MUX_E("IIR2 INP2 MUX", TAIKO_A_CDC_IIR2_GAIN_B2_CTL, 0, 0,
+		&iir2_inp2_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR2 INP3 MUX", TAIKO_A_CDC_IIR2_GAIN_B3_CTL, 0, 0,
+		&iir2_inp3_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR2 INP4 MUX", TAIKO_A_CDC_IIR2_GAIN_B4_CTL, 0, 0,
+		&iir2_inp4_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
 	SND_SOC_DAPM_MIXER("IIR2", TAIKO_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
 
 	/* AUX PGA */
@@ -6608,18 +6797,21 @@
 	snd_soc_card_change_online_state(codec->card, 1);
 
 	mutex_lock(&codec->mutex);
-	if (codec->reg_def_copy) {
-		pr_debug("%s: Update ASOC cache", __func__);
-		kfree(codec->reg_cache);
-		codec->reg_cache = kmemdup(codec->reg_def_copy,
-						codec->reg_size, GFP_KERNEL);
-	}
+
+	taiko_update_reg_defaults(codec);
+	if (wcd9xxx->mclk_rate == TAIKO_MCLK_CLK_12P288MHZ)
+		snd_soc_update_bits(codec, TAIKO_A_CHIP_CTL, 0x06, 0x0);
+	else if (wcd9xxx->mclk_rate == TAIKO_MCLK_CLK_9P6MHZ)
+		snd_soc_update_bits(codec, TAIKO_A_CHIP_CTL, 0x06, 0x2);
+	taiko_codec_init_reg(codec);
 
 	if (spkr_drv_wrnd == 1)
 		snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80, 0x80);
 
-	taiko_update_reg_defaults(codec);
-	taiko_codec_init_reg(codec);
+	codec->cache_sync = true;
+	snd_soc_cache_sync(codec);
+	codec->cache_sync = false;
+
 	ret = taiko_handle_pdata(taiko);
 	if (IS_ERR_VALUE(ret))
 		pr_err("%s: bad pdata\n", __func__);
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 2b43ab6..045a6d0 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -899,6 +899,30 @@
 		.ops = &msm_fe_dai_ops,
 		.name = "LSM8",
 	},
+	{
+		.playback = {
+			.stream_name = "VoWLAN Playback",
+			.aif_name = "VoWLAN_DL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.capture = {
+			.stream_name = "VoWLAN Capture",
+			.aif_name = "VoWLAN_UL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_dai_ops,
+		.name = "VoWLAN",
+	},
 };
 
 static __devinit int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index b512bb5..cc27fc0 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -32,6 +32,10 @@
 #include "../codecs/wcd9xxx-common.h"
 #include "../codecs/wcd9306.h"
 
+#define SAMPLING_RATE_48KHZ 48000
+#define SAMPLING_RATE_96KHZ 96000
+#define SAMPLING_RATE_192KHZ 192000
+
 #define DRV_NAME "msm8226-asoc-tapan"
 
 #define MSM_SLIM_0_RX_MAX_CHANNELS		2
@@ -154,6 +158,8 @@
 static int ext_spk_amp_gpio = -1;
 static int vdd_spkr_gpio = -1;
 static int msm_proxy_rx_ch = 2;
+
+static int slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
 static int slim0_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
 
 static inline int param_is_mask(int p)
@@ -388,6 +394,9 @@
 static const char *const slim0_tx_ch_text[] = {"One", "Two", "Three", "Four"};
 static const char *const proxy_rx_ch_text[] = {"One", "Two", "Three", "Four",
 	"Five", "Six", "Seven", "Eight"};
+static char const *rx_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *slim0_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+						  "KHZ_192"};
 
 static const struct soc_enum msm_enum[] = {
 	SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
@@ -399,6 +408,58 @@
 	SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
 };
 
+static int slim0_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int sample_rate_val = 0;
+
+	switch (slim0_rx_sample_rate) {
+	case SAMPLING_RATE_192KHZ:
+		sample_rate_val = 2;
+		break;
+
+	case SAMPLING_RATE_96KHZ:
+		sample_rate_val = 1;
+		break;
+
+	case SAMPLING_RATE_48KHZ:
+	default:
+		sample_rate_val = 0;
+		break;
+	}
+
+	ucontrol->value.integer.value[0] = sample_rate_val;
+	pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
+				slim0_rx_sample_rate);
+
+	return 0;
+}
+
+static int slim0_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: ucontrol value = %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 2:
+		slim0_rx_sample_rate = SAMPLING_RATE_192KHZ;
+		break;
+	case 1:
+		slim0_rx_sample_rate = SAMPLING_RATE_96KHZ;
+		break;
+	case 0:
+	default:
+		slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
+		break;
+	}
+
+	pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
+			slim0_rx_sample_rate);
+
+	return 0;
+}
+
 static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
@@ -725,8 +786,7 @@
 	pr_debug("%s()\n", __func__);
 	param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
 					slim0_rx_bit_format);
-
-	rate->min = rate->max = 48000;
+	rate->min = rate->max = slim0_rx_sample_rate;
 	channels->min = channels->max = msm_slim_0_rx_ch;
 
 	return 0;
@@ -780,6 +840,8 @@
 	SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
 	SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
 	SOC_ENUM_SINGLE_EXT(8, proxy_rx_ch_text),
+	SOC_ENUM_SINGLE_EXT(2, rx_bit_format_text),
+	SOC_ENUM_SINGLE_EXT(3, slim0_rx_sample_rate_text),
 };
 
 static const struct snd_kcontrol_new msm_snd_controls[] = {
@@ -795,7 +857,8 @@
 			msm_proxy_rx_ch_get, msm_proxy_rx_ch_put),
 	SOC_ENUM_EXT("SLIM_0_RX Format", msm_snd_enum[3],
 			slim0_rx_bit_format_get, slim0_rx_bit_format_put),
-
+	SOC_ENUM_EXT("SLIM_0_RX SampleRate", msm_snd_enum[4],
+			slim0_rx_sample_rate_get, slim0_rx_sample_rate_put),
 };
 
 static int msm_afe_set_config(struct snd_soc_codec *codec)
@@ -1544,7 +1607,7 @@
 		.codec_name = "snd-soc-dummy",
 		.be_id = MSM_FRONTEND_DAI_LSM8,
 	},
-	{
+	{/* hw:x,28 */
 		.name = "INT_HFP_BT Hostless",
 		.stream_name = "INT_HFP_BT Hostless",
 		.cpu_dai_name   = "INT_HFP_BT_HOSTLESS",
@@ -1559,7 +1622,7 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
-	{
+	{/* hw:x,29 */
 		.name = "MSM8226 HFP TX",
 		.stream_name = "MultiMedia6",
 		.cpu_dai_name = "MultiMedia6",
@@ -1575,6 +1638,21 @@
 		.ignore_pmdown_time = 1,
 		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
 	},
+	{/* hw:x,30 */
+		.name = "VoWLAN",
+		.stream_name = "VoWLAN",
+		.cpu_dai_name   = "VoWLAN",
+		.platform_name  = "msm-pcm-voice",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_id = MSM_FRONTEND_DAI_VOWLAN,
+	},
 	/* Backend BT/FM DAI Links */
 	{
 		.name = LPASS_BE_INT_BT_SCO_RX,
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index fe44a23..89df806 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -894,6 +894,21 @@
 		.codec_name = "snd-soc-dummy",
 		.be_id = MSM_FRONTEND_DAI_QCHAT,
 	},
+	{/* hw:x,15 */
+		.name = "MSM8X10 Media9",
+		.stream_name = "MultiMedia9",
+		.cpu_dai_name   = "MultiMedia9",
+		.platform_name  = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dai link has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA9
+	},
 	/* Backend I2S DAI Links */
 	{
 		.name = LPASS_BE_SEC_MI2S_RX,
@@ -1039,6 +1054,19 @@
 		.be_hw_params_fixup = msm_be_hw_params_fixup,
 		.ignore_suspend = 1,
 	},
+	/* Incall Music 2 BACK END DAI Link */
+	{
+		.name = LPASS_BE_VOICE2_PLAYBACK_TX,
+		.stream_name = "Voice2 Farend Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.32770",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
 };
 
 struct snd_soc_card snd_soc_card_msm8x10 = {
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 1434970..8b2c443 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -57,6 +57,7 @@
 
 struct msm_dai_q6_dai_data {
 	DECLARE_BITMAP(status_mask, STATUS_MAX);
+	DECLARE_BITMAP(hwfree_status, STATUS_MAX);
 	u32 rate;
 	u32 channels;
 	u32 bitwidth;
@@ -1510,6 +1511,11 @@
 			set_bit(STATUS_PORT_STARTED,
 				dai_data->status_mask);
 	}
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status)) {
+		set_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+		dev_dbg(dai->dev, "%s: set hwfree_status to started\n",
+				__func__);
+	}
 	return rc;
 }
 
@@ -1525,7 +1531,6 @@
 	struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
 	struct afe_param_id_i2s_cfg *i2s = &dai_data->port_config.i2s;
 
-
 	dai_data->channels = params_channels(params);
 	switch (dai_data->channels) {
 	case 8:
@@ -1602,10 +1607,14 @@
 	dai_data->port_config.i2s.i2s_cfg_minor_version =
 			AFE_API_VERSION_I2S_CONFIG;
 	dai_data->port_config.i2s.sample_rate = dai_data->rate;
-	if (test_bit(STATUS_PORT_STARTED,
-	    mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) ||
+	if ((test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) &&
 	    test_bit(STATUS_PORT_STARTED,
-	    mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+	    mi2s_dai_data->rx_dai.mi2s_dai_data.hwfree_status)) ||
+	    (test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask) &&
+	    test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->tx_dai.mi2s_dai_data.hwfree_status))) {
 		if ((mi2s_dai_data->tx_dai.mi2s_dai_data.rate !=
 		    mi2s_dai_data->rx_dai.mi2s_dai_data.rate) ||
 		   (mi2s_dai_data->rx_dai.mi2s_dai_data.bitwidth !=
@@ -1669,6 +1678,23 @@
 	return 0;
 }
 
+static int msm_dai_q6_mi2s_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status)) {
+		clear_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+		dev_dbg(dai->dev, "%s: clear hwfree_status\n", __func__);
+	}
+	return 0;
+}
+
 static void msm_dai_q6_mi2s_shutdown(struct snd_pcm_substream *substream,
 				     struct snd_soc_dai *dai)
 {
@@ -1696,12 +1722,15 @@
 			dev_err(dai->dev, "fail to close AFE port\n");
 		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
 	}
+	if (test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status))
+		clear_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
 }
 
 static struct snd_soc_dai_ops msm_dai_q6_mi2s_ops = {
 	.startup	= msm_dai_q6_mi2s_startup,
 	.prepare	= msm_dai_q6_mi2s_prepare,
 	.hw_params	= msm_dai_q6_mi2s_hw_params,
+	.hw_free	= msm_dai_q6_mi2s_hw_free,
 	.set_fmt	= msm_dai_q6_mi2s_set_fmt,
 	.shutdown	= msm_dai_q6_mi2s_shutdown,
 };
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
index 63ac5d3..161904c 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -211,6 +211,14 @@
 		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
 		{-320, -320, 144}
 	},
+	{PROXY,	6, DOLBY_ENDP_EXT_SPEAKERS,
+		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
+		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
+		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
+		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
+		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
+		{-320, -320, 144}
+	},
 	{FM, 2, DOLBY_ENDP_HDMI,
 		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
 		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
@@ -409,7 +417,8 @@
 	for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
 		if (dolby_dap_endp_params[idx].device ==
 			dolby_dap_params_states.device) {
-			if (dolby_dap_params_states.device == AUX_DIGITAL) {
+			if (dolby_dap_params_states.device == AUX_DIGITAL ||
+			    dolby_dap_params_states.device == PROXY) {
 				if (dolby_dap_endp_params[idx].device_ch_caps ==
 					device_channels)
 					break;
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
index 4544fea..14586f4 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
@@ -249,7 +249,7 @@
 #define DOLBY_AUTO_ENDDEP_IDX			(MAX_DOLBY_PARAMS+4)
 
 #define TOTAL_LENGTH_DOLBY_PARAM		745
-#define NUM_DOLBY_ENDP_DEVICE			23
+#define NUM_DOLBY_ENDP_DEVICE			24
 #define DOLBY_VIS_PARAM_HEADER_SIZE		 25
 
 #define DOLBY_INVALID_PORT_ID			-1
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 33317fa..157d63a 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -688,6 +688,8 @@
 		session_id = voc_get_session_id(VOICE_SESSION_NAME);
 	else if (val == MSM_FRONTEND_DAI_VOLTE)
 		session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+	else if (val == MSM_FRONTEND_DAI_VOWLAN)
+		session_id = voc_get_session_id(VOWLAN_SESSION_NAME);
 	else if (val == MSM_FRONTEND_DAI_VOICE2)
 		session_id = voc_get_session_id(VOICE2_SESSION_NAME);
 	else if (val == MSM_FRONTEND_DAI_QCHAT)
@@ -723,9 +725,9 @@
 
 			if (voc_get_route_flag(session_id, RX_PATH) &&
 			   voc_get_route_flag(session_id, TX_PATH))
-				voc_enable_cvp(session_id);
+				voc_enable_device(session_id);
 		} else {
-			voc_disable_cvp(session_id);
+			voc_disable_device(session_id);
 		}
 	} else {
 		voc_set_route_flag(session_id, TX_PATH, set);
@@ -734,9 +736,9 @@
 				msm_bedais[reg].port_id, DEV_TX);
 			if (voc_get_route_flag(session_id, RX_PATH) &&
 			   voc_get_route_flag(session_id, TX_PATH))
-				voc_enable_cvp(session_id);
+				voc_enable_device(session_id);
 		} else {
-			voc_disable_cvp(session_id);
+			voc_disable_device(session_id);
 		}
 	}
 }
@@ -2110,6 +2112,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2131,6 +2136,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_I2S_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2152,6 +2160,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2176,6 +2187,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2200,6 +2214,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2224,6 +2241,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_MI2S_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_MI2S_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2248,6 +2268,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AFE_PCM_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2272,6 +2295,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AUXPCM_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2293,6 +2319,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
 	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -2314,6 +2343,9 @@
 	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_HDMI_RX,
 	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
@@ -2418,6 +2450,33 @@
 	msm_routing_put_voice_mixer),
 };
 
+static const struct snd_kcontrol_new tx_vowlan_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_VoWLAN", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoWLAN",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_VoWLAN", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("PRI_MI2S_TX_VoWLAN", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
 static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
 	SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
@@ -2611,6 +2670,11 @@
 	0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
 	msm_routing_put_fm_pcmrx_switch_mixer);
 
+static const struct snd_kcontrol_new pri_mi2s_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
 static const struct soc_enum lsm_mux_enum =
 	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mad_audio_mux_text), mad_audio_mux_text);
 
@@ -3151,6 +3215,8 @@
 	SND_SOC_DAPM_AIF_OUT("VOICE2_UL", "Voice2 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VoWLAN_DL", "VoWLAN Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VoWLAN_UL", "VoWLAN Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
 		0, 0, 0, 0),
@@ -3192,6 +3258,9 @@
 	SND_SOC_DAPM_AIF_OUT("PRI_MI2S_UL_HL",
 		"Primary MI2S_TX Hostless Capture",
 		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_MI2S_DL_HL",
+		"Primary MI2S_RX Hostless Playback",
+		0, 0, 0, 0),
 
 	SND_SOC_DAPM_AIF_OUT("MI2S_DL_HL", "MI2S_RX_HOSTLESS Playback",
 		0, 0, 0, 0),
@@ -3298,6 +3367,8 @@
 				&fm_switch_mixer_controls),
 	SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
 				&pcm_rx_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("PRI_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&pri_mi2s_rx_switch_mixer_controls),
 
 	/* Mux Definitions */
 	SND_SOC_DAPM_MUX("LSM1 MUX", SND_SOC_NOPM, 0, 0, &lsm1_mux),
@@ -3415,6 +3486,9 @@
 	SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer",
 				SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls,
 				ARRAY_SIZE(tx_volte_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoWLAN_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_vowlan_mixer_controls,
+				ARRAY_SIZE(tx_vowlan_mixer_controls)),
 	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
 	int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
 	SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -3696,6 +3770,7 @@
 	{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"PRI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"PRI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"PRI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"PRI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3704,6 +3779,7 @@
 	{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"SEC_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SEC_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"SEC_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"SEC_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3712,6 +3788,7 @@
 	{"SEC_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"SEC_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"SEC_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SEC_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"SEC_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"SEC_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"SEC_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3720,6 +3797,7 @@
 	{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"SLIM_0_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SLIM_0_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"SLIM_0_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"SLIM_0_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -3729,6 +3807,7 @@
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3737,6 +3816,7 @@
 	{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"AFE_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"AFE_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"AFE_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"AFE_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3745,6 +3825,7 @@
 	{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -3753,6 +3834,7 @@
 
 	{"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"SEC_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"SEC_AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"SEC_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -3762,6 +3844,7 @@
 	{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
 	{"HDMI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"HDMI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"HDMI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
 	{"HDMI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3772,6 +3855,7 @@
 	{"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
 	{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+	{"MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
 	{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
 	{"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
 	{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
@@ -3781,6 +3865,8 @@
 	{"VOC_EXT_EC MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"VOC_EXT_EC MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
 	{"CS-VOICE_UL1", NULL, "VOC_EXT_EC MUX"},
+	{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
 
 	{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
 	{"Voice_Tx Mixer", "PRI_MI2S_TX_Voice", "PRI_MI2S_TX"},
@@ -3809,6 +3895,16 @@
 	{"VoLTE_Tx Mixer", "SEC_AUX_PCM_TX_VoLTE", "SEC_AUX_PCM_TX"},
 	{"VoLTE_Tx Mixer", "MI2S_TX_VoLTE", "MI2S_TX"},
 	{"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
+
+	{"VoWLAN_Tx Mixer", "PRI_TX_VoWLAN", "PRI_I2S_TX"},
+	{"VoWLAN_Tx Mixer", "SLIM_0_TX_VoWLAN", "SLIMBUS_0_TX"},
+	{"VoWLAN_Tx Mixer", "INTERNAL_BT_SCO_TX_VoWLAN", "INT_BT_SCO_TX"},
+	{"VoWLAN_Tx Mixer", "AFE_PCM_TX_VoWLAN", "PCM_TX"},
+	{"VoWLAN_Tx Mixer", "AUX_PCM_TX_VoWLAN", "AUX_PCM_TX"},
+	{"VoWLAN_Tx Mixer", "SEC_AUX_PCM_TX_VoWLAN", "SEC_AUX_PCM_TX"},
+	{"VoWLAN_Tx Mixer", "MI2S_TX_VoWLAN", "MI2S_TX"},
+	{"VoWLAN_UL", NULL, "VoWLAN_Tx Mixer"},
+
 	{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
 	{"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"},
 	{"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"},
@@ -3914,6 +4010,8 @@
 	{"MI2S_UL_HL", NULL, "MI2S_TX"},
 	{"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
 	{"PCM_RX", NULL, "PCM_RX_DL_HL"},
+	{"PRI_MI2S_RX_DL_HL", "Switch", "PRI_MI2S_DL_HL"},
+	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_DL_HL"},
 	{"MI2S_UL_HL", NULL, "TERT_MI2S_TX"},
 	{"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"},
 	{"PRI_MI2S_UL_HL", NULL, "PRI_MI2S_TX"},
@@ -4171,7 +4269,7 @@
 				path_type,
 				bedai->sample_rate,
 				channels,
-				topology, false,
+				topology, fe_dai_perf_mode[i][session_type],
 				bits_per_sample);
 			}
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index f2b0436..87e44b2 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -89,6 +89,7 @@
 	MSM_FRONTEND_DAI_LSM6,
 	MSM_FRONTEND_DAI_LSM7,
 	MSM_FRONTEND_DAI_LSM8,
+	MSM_FRONTEND_DAI_VOWLAN,
 	MSM_FRONTEND_DAI_MAX,
 };
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index 1074d76..c54d03c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -75,6 +75,14 @@
 		return false;
 }
 
+static bool is_vowlan(struct msm_voice *pvowlan)
+{
+	if (pvowlan == &voice_info[VOWLAN_SESSION_INDEX])
+		return true;
+	else
+		return false;
+}
+
 static uint32_t get_session_id(struct msm_voice *pvoc)
 {
 	uint32_t session_id = 0;
@@ -85,6 +93,8 @@
 		session_id = voc_get_session_id(VOICE2_SESSION_NAME);
 	else if (is_qchat(pvoc))
 		session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+	else if (is_vowlan(pvoc))
+		session_id = voc_get_session_id(VOWLAN_SESSION_NAME);
 	else
 		session_id = voc_get_session_id(VOICE_SESSION_NAME);
 
@@ -134,6 +144,10 @@
 		voice = &voice_info[QCHAT_SESSION_INDEX];
 		pr_debug("%s: Open QCHAT Substream Id=%s\n",
 			 __func__, substream->pcm->id);
+	} else if (!strncmp("VoWLAN", substream->pcm->id, 6)) {
+		voice = &voice_info[VOWLAN_SESSION_INDEX];
+		pr_debug("%s: Open VoWLAN Substream Id=%s\n",
+			 __func__, substream->pcm->id);
 	} else {
 		voice = &voice_info[VOICE_SESSION_INDEX];
 		pr_debug("%s: Open VOICE Substream Id=%s\n",
@@ -391,12 +405,37 @@
 	pr_debug("%s: mute=%d session_id=%#x ramp_duration=%d\n", __func__,
 		mute, session_id, ramp_duration);
 
-	voc_set_tx_mute(session_id, TX_PATH, mute, ramp_duration);
+	ret = voc_set_tx_mute(session_id, TX_PATH, mute, ramp_duration);
 
 done:
 	return ret;
 }
 
+static int msm_voice_tx_device_mute_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	int mute = ucontrol->value.integer.value[0];
+	uint32_t session_id = ucontrol->value.integer.value[1];
+	int ramp_duration = ucontrol->value.integer.value[2];
+
+	if ((mute < 0) || (mute > 1) || (ramp_duration < 0) ||
+	    (ramp_duration > MAX_RAMP_DURATION)) {
+		pr_err(" %s Invalid arguments", __func__);
+
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: mute=%d session_id=%#x ramp_duration=%d\n", __func__,
+		 mute, session_id, ramp_duration);
+
+	ret = voc_set_device_mute(session_id, VSS_IVOLUME_DIRECTION_TX,
+				  mute, ramp_duration);
+
+done:
+	return ret;
+}
 
 static int msm_voice_rx_device_mute_put(struct snd_kcontrol *kcontrol,
 					struct snd_ctl_elem_value *ucontrol)
@@ -406,8 +445,8 @@
 	uint32_t session_id = ucontrol->value.integer.value[1];
 	int ramp_duration = ucontrol->value.integer.value[2];
 
-	if ((mute < 0) || (mute > 1) || (ramp_duration < 0)
-		|| (ramp_duration > MAX_RAMP_DURATION)) {
+	if ((mute < 0) || (mute > 1) || (ramp_duration < 0) ||
+	    (ramp_duration > MAX_RAMP_DURATION)) {
 		pr_err(" %s Invalid arguments", __func__);
 
 		ret = -EINVAL;
@@ -415,9 +454,10 @@
 	}
 
 	pr_debug("%s: mute=%d session_id=%#x ramp_duration=%d\n", __func__,
-		mute, session_id, ramp_duration);
+		 mute, session_id, ramp_duration);
 
-	voc_set_rx_device_mute(session_id, mute, ramp_duration);
+	voc_set_device_mute(session_id, VSS_IVOLUME_DIRECTION_RX,
+			    mute, ramp_duration);
 
 done:
 	return ret;
@@ -448,6 +488,7 @@
 	voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
 	voc_set_tty_mode(voc_get_session_id(VOICE2_SESSION_NAME), tty_mode);
 	voc_set_tty_mode(voc_get_session_id(VOLTE_SESSION_NAME), tty_mode);
+	voc_set_tty_mode(voc_get_session_id(VOWLAN_SESSION_NAME), tty_mode);
 
 	return 0;
 }
@@ -470,6 +511,8 @@
 static struct snd_kcontrol_new msm_voice_controls[] = {
 	SOC_SINGLE_MULTI_EXT("Voice Rx Device Mute", SND_SOC_NOPM, 0, VSID_MAX,
 				0, 3, NULL, msm_voice_rx_device_mute_put),
+	SOC_SINGLE_MULTI_EXT("Voice Tx Device Mute", SND_SOC_NOPM, 0, VSID_MAX,
+				0, 3, NULL, msm_voice_tx_device_mute_put),
 	SOC_SINGLE_MULTI_EXT("Voice Tx Mute", SND_SOC_NOPM, 0, VSID_MAX,
 				0, 3, NULL, msm_voice_mute_put),
 	SOC_SINGLE_MULTI_EXT("Voice Rx Gain", SND_SOC_NOPM, 0, VSID_MAX, 0, 3,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
index f199be6..62c5732 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
 	VOLTE_SESSION_INDEX,
 	VOICE2_SESSION_INDEX,
 	QCHAT_SESSION_INDEX,
+	VOWLAN_SESSION_INDEX,
 	VOICE_SESSION_INDEX_MAX,
 };
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index e3c8944..6b32064 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -47,8 +47,27 @@
 #define MODE_AMR_WB		0xD
 #define MODE_PCM		0xC
 #define MODE_4GV_NW		0xE
+#define MODE_G711		0xA
+#define MODE_G711A		0xF
 
-#define VOIP_MODE_MAX		MODE_4GV_NW
+enum msm_audio_g711a_frame_type {
+	MVS_G711A_SPEECH_GOOD,
+	MVS_G711A_SID,
+	MVS_G711A_NO_DATA,
+	MVS_G711A_ERASURE
+};
+
+enum msm_audio_g711a_mode {
+	MVS_G711A_MODE_MULAW,
+	MVS_G711A_MODE_ALAW
+};
+
+enum msm_audio_g711_mode {
+	MVS_G711_MODE_MULAW,
+	MVS_G711_MODE_ALAW
+};
+
+#define VOIP_MODE_MAX		MODE_G711A
 #define VOIP_RATE_MAX		23850
 
 enum format {
@@ -153,7 +172,7 @@
 	uint32_t evrc_max_rate;
 };
 
-static int voip_get_media_type(uint32_t mode,
+static int voip_get_media_type(uint32_t mode, uint32_t rate_type,
 				unsigned int samp_rate,
 				unsigned int *media_type);
 static int voip_get_rate_type(uint32_t mode,
@@ -358,6 +377,81 @@
 			list_add_tail(&buf_node->list, &prtd->out_queue);
 			break;
 		}
+		case MODE_G711:
+		case MODE_G711A:{
+			/* G711 frames are 10ms each, but the DSP works with
+			 * 20ms frames and sends two 10ms frames per buffer.
+			 * Extract the two frames and put them in separate
+			 * buffers.
+			 */
+			/* Remove the first DSP frame info header.
+			 * Header format: G711A
+			 * Bits 0-1: Frame type
+			 * Bits 2-3: Frame rate
+			 *
+			 * Header format: G711
+			 * Bits 2-3: Frame rate
+			 */
+			if (prtd->mode == MODE_G711A)
+				buf_node->frame.frm_hdr.frame_type =
+							(*voc_pkt) & 0x03;
+			buf_node->frame.frm_hdr.timestamp = timestamp;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+			/* There are two frames in the buffer. Length of the
+			 * first frame:
+			 */
+			buf_node->frame.pktlen = (pkt_len -
+						  2 * DSP_FRAME_HDR_LEN) / 2;
+
+			memcpy(&buf_node->frame.voc_pkt[0],
+			       voc_pkt,
+			       buf_node->frame.pktlen);
+			voc_pkt = voc_pkt + buf_node->frame.pktlen;
+
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+
+			/* Get another buffer from the free Q and fill in the
+			 * second frame.
+			 */
+			if (!list_empty(&prtd->free_out_queue)) {
+				buf_node =
+					list_first_entry(&prtd->free_out_queue,
+							 struct voip_buf_node,
+							 list);
+				list_del(&buf_node->list);
+
+				/* Remove the second DSP frame info header.
+				 * Header format:
+				 * Bits 0-1: Frame type
+				 * Bits 2-3: Frame rate
+				 */
+
+				if (prtd->mode == MODE_G711A)
+					buf_node->frame.frm_hdr.frame_type =
+							(*voc_pkt) & 0x03;
+				buf_node->frame.frm_hdr.timestamp = timestamp;
+				voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+				/* There are two frames in the buffer. Length
+				 * of the second frame:
+				 */
+				buf_node->frame.pktlen = (pkt_len -
+						2 * DSP_FRAME_HDR_LEN) / 2;
+
+				memcpy(&buf_node->frame.voc_pkt[0],
+				       voc_pkt,
+				       buf_node->frame.pktlen);
+
+				list_add_tail(&buf_node->list,
+					      &prtd->out_queue);
+			} else {
+				/* Drop the second frame */
+				pr_err("%s: UL data dropped, read is slow\n",
+				       __func__);
+			}
+			break;
+		}
 		default: {
 			buf_node->frame.frm_hdr.timestamp = timestamp;
 			buf_node->frame.pktlen = pkt_len;
@@ -389,6 +483,8 @@
 	unsigned long dsp_flags;
 	uint32_t rate_type;
 	uint32_t frame_rate;
+	u32 pkt_len;
+	u8 *voc_addr = NULL;
 
 	if (prtd->playback_substream == NULL)
 		return;
@@ -454,6 +550,70 @@
 			list_add_tail(&buf_node->list, &prtd->free_in_queue);
 			break;
 		}
+		case MODE_G711:
+		case MODE_G711A:{
+			/* G711 frames are 10ms each but the DSP expects 20ms
+			 * worth of data, so send two 10ms frames per buffer.
+			 */
+			/* Add the first DSP frame info header. Header format:
+			 * Bits 0-1: Frame type
+			 * Bits 2-3: Frame rate
+			 */
+			voc_addr = voc_pkt;
+			voc_pkt = voc_pkt + sizeof(uint32_t);
+
+			*voc_pkt = ((prtd->rate_type  & 0x0F) << 2) |
+				    (buf_node->frame.frm_hdr.frame_type & 0x03);
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+			pkt_len = buf_node->frame.pktlen + DSP_FRAME_HDR_LEN;
+
+			memcpy(voc_pkt,
+			       &buf_node->frame.voc_pkt[0],
+			       buf_node->frame.pktlen);
+			voc_pkt = voc_pkt + buf_node->frame.pktlen;
+
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+
+			if (!list_empty(&prtd->in_queue)) {
+				/* Get the second buffer. */
+				buf_node = list_first_entry(&prtd->in_queue,
+							struct voip_buf_node,
+							list);
+				list_del(&buf_node->list);
+
+				/* Add the second DSP frame info header.
+				 * Header format:
+				 * Bits 0-1: Frame type
+				 * Bits 2-3: Frame rate
+				 */
+				*voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+				(buf_node->frame.frm_hdr.frame_type & 0x03);
+				voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+				pkt_len = pkt_len + buf_node->frame.pktlen +
+					   DSP_FRAME_HDR_LEN;
+
+				memcpy(voc_pkt,
+				       &buf_node->frame.voc_pkt[0],
+				       buf_node->frame.pktlen);
+
+				list_add_tail(&buf_node->list,
+					      &prtd->free_in_queue);
+			} else {
+				/* Only 10ms worth of data is available, signal
+				 * erasure frame.
+				 */
+				*voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+					    (MVS_G711A_ERASURE & 0x03);
+
+				pkt_len = pkt_len + DSP_FRAME_HDR_LEN;
+				pr_debug("%s, Only 10ms read, erase 2nd frame\n",
+					 __func__);
+			}
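+			/* Write the total length (frames plus headers) into
+			 * the word reserved at the start of the buffer.
+			 */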
+			*((uint32_t *)voc_addr) = pkt_len;
+			break;
+		}
 		default: {
 			*((uint32_t *)voc_pkt) = buf_node->frame.pktlen;
 			voc_pkt = voc_pkt + sizeof(uint32_t);
@@ -829,10 +989,12 @@
         pr_debug("%s(): mode=%d, playback sample rate=%d, capture sample rate=%d\n",
                   __func__, prtd->mode, prtd->play_samp_rate, prtd->cap_samp_rate);
 
-	if ((runtime->format != FORMAT_S16_LE) && ((prtd->mode == MODE_PCM) ||
-	    (prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
+	if ((runtime->format != FORMAT_S16_LE &&
+	     runtime->format != FORMAT_SPECIAL) &&
+	    ((prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
 	    (prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) ||
-	    (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW))) {
+	    (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW) ||
+	    (prtd->mode == MODE_G711) || (prtd->mode == MODE_G711A))) {
 		pr_err("%s(): mode:%d and format:%u are not matched\n",
 			__func__, prtd->mode, (uint32_t)runtime->format);
 
@@ -840,21 +1002,19 @@
 		goto done;
 	}
 
-	ret = voip_get_media_type(prtd->mode,
-				  prtd->play_samp_rate,
-				  &media_type);
-	if (ret < 0) {
-		pr_err("%s(): fail at getting media_type, ret=%d\n",
-			__func__, ret);
+	if (runtime->format != FORMAT_S16_LE && (prtd->mode == MODE_PCM)) {
+		pr_err("%s(): mode:%d and format:%u are not matched\n",
+		       __func__, prtd->mode, runtime->format);
 
-		ret = -EINVAL;
+		ret = -EINVAL;
 		goto done;
 	}
-	pr_debug("%s(): media_type=%d\n", __func__, media_type);
 
 	if ((prtd->mode == MODE_PCM) ||
 	    (prtd->mode == MODE_AMR) ||
-	    (prtd->mode == MODE_AMR_WB)) {
+	    (prtd->mode == MODE_AMR_WB) ||
+	    (prtd->mode == MODE_G711) ||
+	    (prtd->mode == MODE_G711A)) {
 		ret = voip_get_rate_type(prtd->mode,
 					 prtd->rate,
 					 &rate_type);
@@ -909,6 +1069,19 @@
 		pr_debug("%s(): min rate=%d, max rate=%d\n",
 			  __func__, evrc_min_rate_type, evrc_max_rate_type);
 	}
+	ret = voip_get_media_type(prtd->mode,
+				  prtd->rate_type,
+				  prtd->play_samp_rate,
+				  &media_type);
+	if (ret < 0) {
+		pr_err("%s(): fail at getting media_type, ret=%d\n",
+		       __func__, ret);
+
+		ret = -EINVAL;
+		goto done;
+	}
+	pr_debug("%s(): media_type=%d\n", __func__, media_type);
+
 	if ((prtd->play_samp_rate == 8000) &&
 	    (prtd->cap_samp_rate == 8000))
 		voc_config_vocoder(media_type, rate_type,
@@ -1285,6 +1458,10 @@
 		}
 		break;
 	}
+	case MODE_G711:
+	case MODE_G711A:
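+		/* For G711, the requested rate carries the companding mode
+		 * (MVS_G711A_MODE_MULAW/ALAW) and is used as the rate type.
+		 */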
+		*rate_type = rate;
+		break;
 	default:
 		pr_err("wrong mode type.\n");
 		ret = -EINVAL;
@@ -1294,9 +1471,9 @@
 	return ret;
 }
 
-static int voip_get_media_type(uint32_t mode,
-				unsigned int samp_rate,
-				unsigned int *media_type)
+static int voip_get_media_type(uint32_t mode, uint32_t rate_type,
+			       unsigned int samp_rate,
+			       unsigned int *media_type)
 {
 	int ret = 0;
 
@@ -1327,6 +1504,13 @@
 	case MODE_4GV_NW: /* EVRC-NW */
 		*media_type = VSS_MEDIA_ID_4GV_NW_MODEM;
 		break;
+	case MODE_G711:
+	case MODE_G711A:
+		if (rate_type == MVS_G711A_MODE_MULAW)
+			*media_type = VSS_MEDIA_ID_G711_MULAW;
+		else
+			*media_type = VSS_MEDIA_ID_G711_ALAW;
+		break;
 	default:
 		pr_debug(" input mode is not supported\n");
 		ret = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 74b79dd..0d19657 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -393,67 +393,59 @@
 	struct list_head		*ptr, *next;
 	int				result;
 	int				size = 4096;
+
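+	/* The custom topology is sent only once; the flag is re-armed when
+	 * the ADSP restarts.
+	 */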
+	if (!set_custom_topology)
+		return;
+
 	get_asm_custom_topology(&cal_block);
 	if (cal_block.cal_size == 0) {
-		pr_debug("%s: no cal to send addr= 0x%x\n",
-				__func__, cal_block.cal_paddr);
-		goto done;
+		pr_debug("%s: no cal to send addr= 0x%pa\n",
+				__func__, &cal_block.cal_paddr);
+		return;
 	}
 
-	if (set_custom_topology) {
-		if (common_client.mmap_apr == NULL) {
-			common_client.mmap_apr = q6asm_mmap_apr_reg();
-			common_client.apr = common_client.mmap_apr;
-			if (common_client.mmap_apr == NULL) {
-				pr_err("%s: q6asm_mmap_apr_reg failed\n",
-					__func__);
-				result = -EPERM;
-				goto done;
-			}
-		}
-		/* Only call this once */
-		set_custom_topology = 0;
+	common_client.mmap_apr = q6asm_mmap_apr_reg();
+	common_client.apr = common_client.mmap_apr;
+	if (common_client.mmap_apr == NULL) {
+		pr_err("%s: q6asm_mmap_apr_reg failed\n",
+			__func__);
+		result = -EPERM;
+		goto mmap_fail;
+	}
+	/* Only call this once */
+	set_custom_topology = 0;
 
-		/* Use first asm buf to map memory */
-		if (common_client.port[IN].buf == NULL) {
-			pr_err("%s: common buf is NULL\n",
-				__func__);
-			goto done;
-		}
-		common_client.port[IN].buf->phys = cal_block.cal_paddr;
+	/* Use first asm buf to map memory */
+	if (common_client.port[IN].buf == NULL) {
+		pr_err("%s: common buf is NULL\n",
+			__func__);
+		goto err_map;
+	}
+	common_client.port[IN].buf->phys = cal_block.cal_paddr;
 
-		result = q6asm_memory_map_regions(&common_client,
-							IN, size, 1, 1);
-		if (result < 0) {
-			pr_err("%s: mmap did not work! addr = 0x%x, size = %d\n",
-				__func__, cal_block.cal_paddr,
-				cal_block.cal_size);
-			goto done;
-		}
+	result = q6asm_memory_map_regions(&common_client,
+						IN, size, 1, 1);
+	if (result < 0) {
+		pr_err("%s: mmap did not work! addr = 0x%pa, size = %zd\n",
+			__func__, &cal_block.cal_paddr,
+			cal_block.cal_size);
+		goto err_map;
+	}
 
-		list_for_each_safe(ptr, next,
-				&common_client.port[IN].mem_map_handle) {
-			buf_node = list_entry(ptr, struct asm_buffer_node,
-						list);
-			if (buf_node->buf_addr_lsw == cal_block.cal_paddr) {
-				topology_map_handle =  buf_node->mmap_hdl;
-				break;
-			}
-		}
-
-		result = q6asm_mmap_apr_dereg();
-		if (result < 0) {
-			pr_err("%s: q6asm_mmap_apr_dereg failed, err %d\n",
-				__func__, result);
-		} else {
-			common_client.mmap_apr = NULL;
+	list_for_each_safe(ptr, next,
+			&common_client.port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+					list);
+		if (buf_node->buf_addr_lsw == cal_block.cal_paddr) {
+			topology_map_handle =  buf_node->mmap_hdl;
+			break;
 		}
 	}
 
 	q6asm_add_hdr_custom_topology(ac, &asm_top.hdr,
 				      APR_PKT_SIZE(APR_HDR_SIZE,
 					sizeof(asm_top)), TRUE);
-
+	atomic_set(&ac->cmd_state, 1);
 	asm_top.hdr.opcode = ASM_CMD_ADD_TOPOLOGIES;
 	asm_top.payload_addr_lsw = cal_block.cal_paddr;
 	asm_top.payload_addr_msw = 0;
@@ -468,7 +460,7 @@
 	if (result < 0) {
 		pr_err("%s: Set topologies failed payload = 0x%x\n",
 			__func__, cal_block.cal_paddr);
-		goto done;
+		goto err_unmap;
 	}
 
 	result = wait_event_timeout(ac->cmd_wait,
@@ -476,10 +468,15 @@
 	if (!result) {
 		pr_err("%s: Set topologies failed after timedout payload = 0x%x\n",
 			__func__, cal_block.cal_paddr);
-		goto done;
+		goto err_unmap;
 	}
-
-done:
+	return;
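+	/* Unwind on error and re-arm set_custom_topology so a later session
+	 * can retry sending the topology.
+	 */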
+err_unmap:
+	q6asm_memory_unmap_regions(ac, IN);
+err_map:
+	q6asm_mmap_apr_dereg();
+	set_custom_topology = 1;
+mmap_fail:
 	return;
 }
 
@@ -768,6 +765,7 @@
 	apr_deregister(ac->apr);
 	ac->apr = NULL;
 	ac->mmap_apr = NULL;
+	rtac_set_asm_handle(ac->session, ac->apr);
 	q6asm_session_free(ac);
 	q6asm_mmap_apr_dereg();
 
@@ -857,7 +855,7 @@
 	if (ac->apr == NULL) {
 		pr_err("%s Registration with APR failed\n", __func__);
 		mutex_unlock(&session_lock);
-		goto fail;
+		goto fail_apr1;
 	}
 	ac->apr2 = apr_register("ADSP", "ASM", \
 				(apr_fn)q6asm_callback,\
@@ -867,16 +865,17 @@
 	if (ac->apr2 == NULL) {
 		pr_err("%s Registration with APR-2 failed\n", __func__);
 		mutex_unlock(&session_lock);
-		goto fail;
+		goto fail_apr2;
 	}
+
 	rtac_set_asm_handle(n, ac->apr);
 
 	pr_debug("%s Registering the common port with APR\n", __func__);
 	ac->mmap_apr = q6asm_mmap_apr_reg();
 	if (ac->mmap_apr == NULL) {
 		mutex_unlock(&session_lock);
-		goto fail;
-        }
+		goto fail_mmap;
+	}
 
 	init_waitqueue_head(&ac->cmd_wait);
 	init_waitqueue_head(&ac->time_wait);
@@ -899,9 +898,12 @@
 	mutex_unlock(&session_lock);
 
 	return ac;
-fail:
-	q6asm_audio_client_free(ac);
-	return NULL;
+fail_mmap:
+	apr_deregister(ac->apr2);
+fail_apr2:
+	apr_deregister(ac->apr);
+fail_apr1:
+	q6asm_session_free(ac);
 fail_session:
 	kfree(ac);
 	return NULL;
@@ -1115,12 +1117,20 @@
 	payload = data->payload;
 
 	if (data->opcode == RESET_EVENTS) {
+		struct audio_client *ac_mmap = (struct audio_client *)priv;
+		if (ac_mmap == NULL) {
+			pr_err("%s ac or priv NULL\n", __func__);
+			return -EINVAL;
+		}
 		pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
 				__func__,
 				data->reset_event,
 				data->reset_proc,
 				this_mmap.apr);
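+		/* Clear the cached mmap APR state so the common port is
+		 * re-registered after the ADSP comes back up.
+		 */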
+		atomic_set(&this_mmap.ref_cnt, 0);
 		apr_reset(this_mmap.apr);
+		this_mmap.apr = NULL;
+		ac_mmap->mmap_apr = NULL;
 		for (; i <= OUT; i++) {
 			list_for_each_safe(ptr, next,
 				&common_client.port[i].mem_map_handle) {
@@ -1135,7 +1145,6 @@
 			}
 			pr_debug("%s:Clearing custom topology\n", __func__);
 		}
-		this_mmap.apr = NULL;
 		reset_custom_topology_flags();
 		set_custom_topology = 1;
 		topology_map_handle = 0;
@@ -1626,7 +1635,6 @@
 	hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
 	if (cmd_flg) {
 		hdr->token = ac->session;
-		atomic_set(&ac->cmd_state, 1);
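+		/* cmd_state is no longer armed here; callers set it just
+		 * before issuing a blocking command.
+		 */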
 	}
 	hdr->pkt_size  = pkt_size;
 	mutex_unlock(&ac->cmd_lock);
@@ -1667,7 +1675,6 @@
 	hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
 	if (cmd_flg) {
 		hdr->token = ac->session;
-		atomic_set(&ac->cmd_state, 1);
 	}
 	hdr->pkt_size  = pkt_size;
 	return;
@@ -1711,7 +1718,6 @@
 	hdr->dest_port = 0;
 	if (cmd_flg) {
 		hdr->token = ((ac->session << 8) | 0x0001) ;
-		atomic_set(&ac->cmd_state, 1);
 	}
 	hdr->pkt_size  = pkt_size;
 	mutex_unlock(&ac->cmd_lock);
@@ -1728,7 +1734,6 @@
 	hdr->dest_port = 0;
 	if (cmd_flg) {
 		hdr->token = token;
-		atomic_set(&ac->cmd_state, 1);
 	}
 	hdr->pkt_size  = pkt_size;
 	return;
@@ -1748,6 +1753,7 @@
 	pr_debug("%s:session[%d]", __func__, ac->session);
 
 	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
 	/* Stream prio : High, provide meta info with encoded frames */
 	open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
@@ -1841,7 +1847,7 @@
 		format);
 
 	q6asm_stream_add_hdr(ac, &open.hdr, sizeof(open), TRUE, stream_id);
-
+	atomic_set(&ac->cmd_state, 1);
 	/*
 	 * Updated the token field with stream/session for compressed playback
 	 * Platform driver must know the the stream with which the command is
@@ -1959,6 +1965,7 @@
 
 	ac->io_mode |= NT_MODE;
 	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
 
 	open.mode_flags = BUFFER_META_ENABLE;
@@ -2068,6 +2075,7 @@
 	pr_debug("%s: session[%d]", __func__, ac->session);
 
 	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	open.hdr.opcode = ASM_STREAM_CMD_OPEN_LOOPBACK_V2;
 
 	open.mode_flags = 0;
@@ -2110,6 +2118,7 @@
 	}
 	pr_debug("%s session[%d]", __func__, ac->session);
 	q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
 	run.flags    = flags;
@@ -2147,7 +2156,7 @@
 	}
 	pr_debug("session[%d]", ac->session);
 	q6asm_stream_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE, stream_id);
-
+	atomic_set(&ac->cmd_state, 1);
 	run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
 	run.flags    = flags;
 	run.time_lsw = lsw_ts;
@@ -2189,6 +2198,7 @@
 		sample_rate, channels, bit_rate, mode, format);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
@@ -2229,6 +2239,7 @@
 	pr_debug("%s: Session %d, num_channels = %d\n",
 			 __func__, ac->session, num_channels);
 	q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP;
 	chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) -
@@ -2273,6 +2284,7 @@
 			 ac->session, rate, channels);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
 	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
@@ -2334,7 +2346,7 @@
 			 ac->session, rate, channels);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
-
+	atomic_set(&ac->cmd_state, 1);
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
 	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
@@ -2433,6 +2445,7 @@
 	pr_debug("%s: Session %d\n", __func__, ac->session);
 
 	q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	sbrps.encdec.param_id = ASM_PARAM_ID_AAC_SBR_PS_FLAG;
@@ -2474,6 +2487,7 @@
 			 __func__, ac->session, sce_left, sce_right);
 
 	q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	dual_mono.encdec.param_id = ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING;
@@ -2505,8 +2519,36 @@
 /* Support for selecting stereo mixing coefficients for B family not done */
 int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
 {
-	/* To Be Done */
+	struct asm_aac_stereo_mix_coeff_selection_param_v2 aac_mix_coeff;
+	int rc = 0;
+
+	q6asm_add_hdr(ac, &aac_mix_coeff.hdr, sizeof(aac_mix_coeff), TRUE);
+	atomic_set(&ac->cmd_state, 1);
+	aac_mix_coeff.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	aac_mix_coeff.param_id =
+		ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2;
+	aac_mix_coeff.param_size =
+		sizeof(struct asm_aac_stereo_mix_coeff_selection_param_v2);
+	aac_mix_coeff.aac_stereo_mix_coeff_flag = mix_coeff;
+	pr_debug("%s: mix_coeff = %u\n", __func__, mix_coeff);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &aac_mix_coeff);
+	if (rc < 0) {
+		pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+		(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout opcode[0x%x]\n",
+			__func__, aac_mix_coeff.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
 	return 0;
+fail_cmd:
+	return rc;
 }
 
 int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
@@ -2522,6 +2564,7 @@
 		reduced_rate_level, rate_modulation_cmd);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
 	enc_cfg.encdec.param_size = sizeof(struct asm_v13k_enc_cfg) -
@@ -2563,6 +2606,7 @@
 		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
 	enc_cfg.encdec.param_size = sizeof(struct asm_evrc_enc_cfg) -
@@ -2602,6 +2646,7 @@
 		__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
 	enc_cfg.encdec.param_size = sizeof(struct asm_amrnb_enc_cfg) -
@@ -2639,6 +2684,7 @@
 		__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
 	enc_cfg.encdec.param_size = sizeof(struct asm_amrwb_enc_cfg) -
@@ -2679,6 +2725,7 @@
 		channels);
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
 	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2739,6 +2786,7 @@
 		channels);
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
 	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2807,7 +2855,7 @@
 		cfg->sample_rate, cfg->ch_cfg);
 
 	q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
-
+	atomic_set(&ac->cmd_state, 1);
 	/*
 	 * Updated the token field with stream/session for compressed playback
 	 * Platform driver must know the the stream with which the command is
@@ -2883,6 +2931,7 @@
 		wma_cfg->ch_mask, wma_cfg->encode_opt);
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
 	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2928,6 +2977,7 @@
 		wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2);
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
 	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2975,6 +3025,7 @@
 		cfg->num_channels);
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
 	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -3006,6 +3057,7 @@
 	pr_debug("%s: session[%d]param_id[%d]param_value[%d]", __func__,
 			ac->session, param_id, param_value);
 	q6asm_add_hdr(ac, &ddp_cfg.hdr, sizeof(ddp_cfg), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	ddp_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
 	ddp_cfg.encdec.param_id = param_id;
 	ddp_cfg.encdec.param_size = sizeof(struct asm_dec_ddp_endp_param_v2) -
@@ -3066,6 +3118,7 @@
 							mmap_region_cmd;
 	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size,
 			TRUE, ((ac->session << 8) | dir));
+	atomic_set(&ac->cmd_state, 1);
 	mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
 	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
 	mmap_regions->num_regions = bufcnt & 0x00ff;
@@ -3129,7 +3182,7 @@
 	q6asm_add_mmaphdr(ac, &mem_unmap.hdr,
 			sizeof(struct avs_cmd_shared_mem_unmap_regions),
 			TRUE, ((ac->session << 8) | dir));
-
+	atomic_set(&ac->cmd_state, 1);
 	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
 	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
 		buf_node = list_entry(ptr, struct asm_buffer_node,
@@ -3227,6 +3280,7 @@
 							mmap_region_cmd;
 	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE,
 					((ac->session << 8) | dir));
+	atomic_set(&ac->cmd_state, 1);
 	pr_debug("mmap_region=0x%p token=0x%x\n",
 		mmap_regions, ((ac->session << 8) | dir));
 
@@ -3308,6 +3362,7 @@
 	cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
 	q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size,
 			TRUE, ((ac->session << 8) | dir));
+	atomic_set(&ac->cmd_state, 1);
 	port = &ac->port[dir];
 	buf_add = (uint32_t)port->buf->phys;
 	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
@@ -3365,6 +3420,7 @@
 
 	sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
 	q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	lrgain.param.data_payload_addr_lsw = 0;
 	lrgain.param.data_payload_addr_msw = 0;
@@ -3413,6 +3469,7 @@
 
 	sz = sizeof(struct asm_volume_ctrl_mute_config);
 	q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	mute.param.data_payload_addr_lsw = 0;
 	mute.param.data_payload_addr_msw = 0;
@@ -3460,6 +3517,7 @@
 
 	sz = sizeof(struct asm_volume_ctrl_master_gain);
 	q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	vol.param.data_payload_addr_lsw = 0;
 	vol.param.data_payload_addr_msw = 0;
@@ -3508,6 +3566,7 @@
 
 	sz = sizeof(struct asm_soft_pause_params);
 	q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 
 	softpause.param.data_payload_addr_lsw = 0;
@@ -3561,6 +3620,7 @@
 
 	sz = sizeof(struct asm_soft_step_volume_params);
 	q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	softvol.param.data_payload_addr_lsw = 0;
 	softvol.param.data_payload_addr_msw = 0;
@@ -3619,6 +3679,7 @@
 	sz = sizeof(struct asm_eq_params);
 	eq_params = (struct msm_audio_eq_stream_config *) eq_p;
 	q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	eq.param.data_payload_addr_lsw = 0;
@@ -3696,6 +3757,11 @@
 		mutex_lock(&port->lock);
 
 		dsp_buf = port->dsp_buf;
+		if (port->buf == NULL) {
+			pr_err("%s buf is NULL\n", __func__);
+			mutex_unlock(&port->lock);
+			return -EINVAL;
+		}
 		ab = &port->buf[dsp_buf];
 
 		pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
@@ -3821,7 +3887,6 @@
 
 	q6asm_stream_add_hdr_async(
 			ac, &write.hdr, sizeof(write), FALSE, ac->stream_id);
-
 	port = &ac->port[IN];
 	ab = &port->buf[port->dsp_buf];
 
@@ -4130,6 +4195,7 @@
 	q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
 			    sizeof(struct asm_stream_cmd_set_pp_params_v2) +
 			    params_length), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	payload_params.data_payload_addr_lsw = 0;
 	payload_params.data_payload_addr_msw = 0;
@@ -4170,7 +4236,7 @@
 		return -EINVAL;
 	}
 	q6asm_stream_add_hdr(ac, &hdr, sizeof(hdr), TRUE, stream_id);
-
+	atomic_set(&ac->cmd_state, 1);
 	/*
 	 * Updated the token field with stream/session for compressed playback
 	 * Platform driver must know the the stream with which the command is
@@ -4279,7 +4345,7 @@
 		return -EINVAL;
 	}
 	q6asm_stream_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE, stream_id);
-
+	atomic_set(&ac->cmd_state, 1);
 	/*
 	 * Updated the token field with stream/session for compressed playback
 	 * Platform driver must know the the stream with which the command is
@@ -4437,6 +4503,7 @@
 	pr_debug("%s:session[%d]enable[%d]\n", __func__,
 						ac->session, enable);
 	q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE);
+	atomic_set(&ac->cmd_state, 1);
 
 	tx_overflow.hdr.opcode = \
 			ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 61a262f..1ca9b1a 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -94,6 +94,8 @@
 static int voice_alloc_oob_mem_table(void);
 static int voice_alloc_and_map_cal_mem(struct voice_data *v);
 static int voice_alloc_and_map_oob_mem(struct voice_data *v);
+static int voc_disable_cvp(uint32_t session_id);
+static int voc_enable_cvp(uint32_t session_id);
 
 static struct voice_data *voice_get_session_by_idx(int idx);
 
@@ -141,6 +143,7 @@
 	case VOLTE_SESSION_VSID:
 	case VOIP_SESSION_VSID:
 	case QCHAT_SESSION_VSID:
+	case VOWLAN_SESSION_VSID:
 	case ALL_SESSION_VSID:
 		ret = true;
 		break;
@@ -234,6 +237,9 @@
 	} else if (session_id ==
 			common.voice[VOC_PATH_QCHAT_PASSIVE].session_id) {
 		session_name = QCHAT_SESSION_NAME;
+	} else if (session_id ==
+			common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id) {
+		session_name = VOWLAN_SESSION_NAME;
 	} else if (session_id == common.voice[VOC_PATH_FULL].session_id) {
 		session_name = VOIP_SESSION_NAME;
 	}
@@ -256,6 +262,9 @@
 		else if (!strncmp(name, "QCHAT session", 13))
 			session_id =
 			common.voice[VOC_PATH_QCHAT_PASSIVE].session_id;
+		else if (!strncmp(name, "VoWLAN session", 14))
+			session_id =
+			common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id;
 		else
 			session_id = common.voice[VOC_PATH_FULL].session_id;
 
@@ -291,6 +300,10 @@
 		v = &common.voice[VOC_PATH_QCHAT_PASSIVE];
 		break;
 
+	case VOWLAN_SESSION_VSID:
+		v = &common.voice[VOC_PATH_VOWLAN_PASSIVE];
+		break;
+
 	case ALL_SESSION_VSID:
 		break;
 
@@ -331,6 +344,10 @@
 		idx = VOC_PATH_QCHAT_PASSIVE;
 		break;
 
+	case VOWLAN_SESSION_VSID:
+		idx = VOC_PATH_VOWLAN_PASSIVE;
+		break;
+
 	case ALL_SESSION_VSID:
 		idx = MAX_VOC_SESSIONS - 1;
 		break;
@@ -375,6 +392,11 @@
 	return (session_id == common.voice[VOC_PATH_QCHAT_PASSIVE].session_id);
 }
 
+static bool is_vowlan_session(u32 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id);
+}
+
 static bool is_voc_state_active(int voc_state)
 {
 	if ((voc_state == VOC_RUN) ||
@@ -433,6 +455,7 @@
 	common.voice[VOC_PATH_VOICE2_PASSIVE].session_id = VOICE2_SESSION_VSID;
 	common.voice[VOC_PATH_FULL].session_id = VOIP_SESSION_VSID;
 	common.voice[VOC_PATH_QCHAT_PASSIVE].session_id = QCHAT_SESSION_VSID;
+	common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id = VOWLAN_SESSION_VSID;
 }
 
 static int voice_apr_register(void)
@@ -661,6 +684,10 @@
 				strlcpy(mvm_session_cmd.mvm_session.name,
 				QCHAT_SESSION_VSID_STR,
 				sizeof(mvm_session_cmd.mvm_session.name));
+			} else if (is_vowlan_session(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				VOWLAN_SESSION_VSID_STR,
+				sizeof(mvm_session_cmd.mvm_session.name));
 			} else {
 				strlcpy(mvm_session_cmd.mvm_session.name,
 				"default modem voice",
@@ -753,6 +780,10 @@
 				strlcpy(cvs_session_cmd.cvs_session.name,
 				QCHAT_SESSION_VSID_STR,
 				sizeof(cvs_session_cmd.cvs_session.name));
+			} else if (is_vowlan_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				VOWLAN_SESSION_VSID_STR,
+				sizeof(cvs_session_cmd.cvs_session.name));
 			} else {
 			strlcpy(cvs_session_cmd.cvs_session.name,
 				"default modem voice",
@@ -1052,39 +1083,37 @@
 	}
 	mvm_handle = voice_get_mvm_handle(v);
 
-	if (v->tty_mode) {
-		/* send tty mode cmd to mvm */
-		mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD(
-						APR_MSG_TYPE_SEQ_CMD,
-						APR_HDR_LEN(APR_HDR_SIZE),
-						APR_PKT_VER);
-		mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
-						sizeof(mvm_tty_mode_cmd) -
-						APR_HDR_SIZE);
-		pr_debug("%s: pkt size = %d\n",
-			 __func__, mvm_tty_mode_cmd.hdr.pkt_size);
-		mvm_tty_mode_cmd.hdr.src_port =
-				voice_get_idx_for_session(v->session_id);
-		mvm_tty_mode_cmd.hdr.dest_port = mvm_handle;
-		mvm_tty_mode_cmd.hdr.token = 0;
-		mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE;
-		mvm_tty_mode_cmd.tty_mode.mode = v->tty_mode;
-		pr_debug("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode);
+	/* send tty mode cmd to mvm */
+	mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD(
+					APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+	mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_tty_mode_cmd) -
+					APR_HDR_SIZE);
+	pr_debug("%s: pkt size = %d\n",
+		 __func__, mvm_tty_mode_cmd.hdr.pkt_size);
+	mvm_tty_mode_cmd.hdr.src_port =
+			voice_get_idx_for_session(v->session_id);
+	mvm_tty_mode_cmd.hdr.dest_port = mvm_handle;
+	mvm_tty_mode_cmd.hdr.token = 0;
+	mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE;
+	mvm_tty_mode_cmd.tty_mode.mode = v->tty_mode;
+	pr_debug("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode);
 
-		v->mvm_state = CMD_STATUS_FAIL;
-		ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_tty_mode_cmd);
-		if (ret < 0) {
-			pr_err("%s: Error %d sending SET_TTY_MODE\n",
-			       __func__, ret);
-			goto fail;
-		}
-		ret = wait_event_timeout(v->mvm_wait,
-					 (v->mvm_state == CMD_STATUS_SUCCESS),
-					 msecs_to_jiffies(TIMEOUT_MS));
-		if (!ret) {
-			pr_err("%s: wait_event timeout\n", __func__);
-			goto fail;
-		}
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_tty_mode_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_TTY_MODE\n",
+		       __func__, ret);
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
 	}
 	return 0;
 fail:
@@ -3137,8 +3166,7 @@
 		voice_send_netid_timing_cmd(v);
 	}
 
-	/* enable slowtalk if st_enable is set */
-	if (v->st_enable)
+	if (v->st_enable && !v->tty_mode)
 		voice_send_set_pp_enable_cmd(v,
 					     MODULE_ID_VOICE_MODULE_ST,
 					     v->st_enable);
@@ -4350,7 +4378,7 @@
 	return ret;
 }
 
-int voc_disable_cvp(uint32_t session_id)
+static int voc_disable_cvp(uint32_t session_id)
 {
 	struct voice_data *v = voice_get_session(session_id);
 	int ret = 0;
@@ -4385,7 +4413,7 @@
 	return ret;
 }
 
-int voc_enable_cvp(uint32_t session_id)
+static int voc_enable_cvp(uint32_t session_id)
 {
 	struct voice_data *v = voice_get_session(session_id);
 	int ret = 0;
@@ -4457,10 +4485,8 @@
 			goto fail;
 		}
 
-		/* Send tty mode if tty device is used */
 		voice_send_tty_mode_cmd(v);
-		/* enable slowtalk */
-		if (v->st_enable)
+		if (v->st_enable && !v->tty_mode)
 			voice_send_set_pp_enable_cmd(v,
 					     MODULE_ID_VOICE_MODULE_ST,
 					     v->st_enable);
@@ -4541,8 +4567,8 @@
 	return ret;
 }
 
-int voc_set_rx_device_mute(uint32_t session_id, uint32_t mute,
-					uint32_t ramp_duration)
+int voc_set_device_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
+			uint32_t ramp_duration)
 {
 	struct voice_data *v = NULL;
 	int ret = 0;
@@ -4552,16 +4578,23 @@
 	while (voice_itr_get_next_session(&itr, &v)) {
 		if (v != NULL) {
 			mutex_lock(&v->lock);
-			v->dev_rx.dev_mute = mute;
-			v->dev_rx.dev_mute_ramp_duration_ms =
+			if (dir == VSS_IVOLUME_DIRECTION_TX) {
+				v->dev_tx.dev_mute = mute;
+				v->dev_tx.dev_mute_ramp_duration_ms =
 							ramp_duration;
+			} else {
+				v->dev_rx.dev_mute = mute;
+				v->dev_rx.dev_mute_ramp_duration_ms =
+							ramp_duration;
+			}
+
 			if (((v->voc_state == VOC_RUN) ||
 				(v->voc_state == VOC_STANDBY)) &&
 				(v->lch_mode == 0))
 				ret = voice_send_device_mute_cmd(v,
-						VSS_IVOLUME_DIRECTION_RX,
-						v->dev_rx.dev_mute,
-						ramp_duration);
+							dir,
+							mute,
+							ramp_duration);
 			mutex_unlock(&v->lock);
 		} else {
 			pr_err("%s: invalid session_id 0x%x\n", __func__,
@@ -4653,8 +4686,8 @@
 				v->st_enable = enable;
 
 			if (v->voc_state == VOC_RUN) {
-				if (module_id ==
-				    MODULE_ID_VOICE_MODULE_ST)
+				if ((module_id == MODULE_ID_VOICE_MODULE_ST) &&
+				    (!v->tty_mode))
 					ret = voice_send_set_pp_enable_cmd(v,
 						MODULE_ID_VOICE_MODULE_ST,
 						enable);
@@ -4877,6 +4910,110 @@
 	return ret;
 }
 
+int voc_disable_device(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: voc state=%d\n", __func__, v->voc_state);
+
+	mutex_lock(&v->lock);
+	if (v->voc_state == VOC_RUN) {
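+		/* Pause the call and drop CVP calibration while the device
+		 * switch is in progress; voc_enable_device() restores them.
+		 */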
+		ret = voice_pause_voice_call(v);
+		if (ret < 0) {
+			pr_err("%s: Pause Voice Call failed for session 0x%x, err %d!\n",
+			       __func__, v->session_id, ret);
+			goto done;
+		}
+		rtac_remove_voice(voice_get_cvs_handle(v));
+		voice_send_cvp_deregister_vol_cal_cmd(v);
+		voice_send_cvp_deregister_cal_cmd(v);
+		voice_send_cvp_deregister_dev_cfg_cmd(v);
+
+		v->voc_state = VOC_CHANGE;
+	} else {
+		pr_debug("%s: called in voc state=%d, No_OP\n",
+			 __func__, v->voc_state);
+	}
+
+	if (common.ec_ref_ext)
+		voc_set_ext_ec_ref(AFE_PORT_INVALID, false);
+done:
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_enable_device(uint32_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: voc state=%d\n", __func__, v->voc_state);
+
+	mutex_lock(&v->lock);
+	if (v->voc_state == VOC_CHANGE) {
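+		/* Complete the device switch started by voc_disable_device():
+		 * re-apply TTY/slowtalk, send the new device configuration
+		 * and restart the stream.
+		 */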
+		ret = voice_send_tty_mode_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Sending TTY mode failed, ret=%d\n",
+			       __func__, ret);
+			/* Not a critical error, allow voice call to continue */
+		}
+
+		if (v->tty_mode) {
+			/* disable slowtalk */
+			voice_send_set_pp_enable_cmd(v,
+						     MODULE_ID_VOICE_MODULE_ST,
+						     0);
+		} else {
+			/* restore slowtalk */
+			voice_send_set_pp_enable_cmd(v,
+						     MODULE_ID_VOICE_MODULE_ST,
+						     v->st_enable);
+		}
+
+		ret = voice_send_set_device_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Set device failed, ret=%d\n",
+			       __func__, ret);
+			goto done;
+		}
+
+		voice_send_cvp_register_dev_cfg_cmd(v);
+		voice_send_cvp_register_cal_cmd(v);
+		voice_send_cvp_register_vol_cal_cmd(v);
+
+		rtac_add_voice(voice_get_cvs_handle(v),
+			       voice_get_cvp_handle(v),
+			       v->dev_rx.port_id, v->dev_tx.port_id,
+			       v->session_id);
+
+		ret = voice_send_start_voice_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: Fail in sending START_VOICE, ret=%d\n",
+			       __func__, ret);
+			goto done;
+		}
+		v->voc_state = VOC_RUN;
+	} else {
+		pr_debug("%s: called in voc state=%d, No_OP\n",
+			 __func__, v->voc_state);
+	}
+
+done:
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
 int voc_set_lch(uint32_t session_id, enum voice_lch_mode lch_mode)
 {
 	struct voice_data *v = voice_get_session(session_id);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 9efc9fc..c6f3482 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1319,7 +1319,7 @@
 	void *buf;
 };
 
-#define MAX_VOC_SESSIONS 5
+#define MAX_VOC_SESSIONS 6
 
 struct common_data {
 	/* these default values are for all devices */
@@ -1393,21 +1393,25 @@
 #define VOC_PATH_VOLTE_PASSIVE 2
 #define VOC_PATH_VOICE2_PASSIVE 3
 #define VOC_PATH_QCHAT_PASSIVE 4
+#define VOC_PATH_VOWLAN_PASSIVE 5
 
 #define MAX_SESSION_NAME_LEN 32
-#define VOICE_SESSION_NAME  "Voice session"
-#define VOIP_SESSION_NAME   "VoIP session"
-#define VOLTE_SESSION_NAME  "VoLTE session"
-#define VOICE2_SESSION_NAME "Voice2 session"
-#define QCHAT_SESSION_NAME  "QCHAT session"
+#define VOICE_SESSION_NAME   "Voice session"
+#define VOIP_SESSION_NAME    "VoIP session"
+#define VOLTE_SESSION_NAME   "VoLTE session"
+#define VOICE2_SESSION_NAME  "Voice2 session"
+#define QCHAT_SESSION_NAME   "QCHAT session"
+#define VOWLAN_SESSION_NAME  "VoWLAN session"
 
 #define VOICE2_SESSION_VSID_STR "10DC1000"
 #define QCHAT_SESSION_VSID_STR "10803000"
+#define VOWLAN_SESSION_VSID_STR "10002000"
 #define VOICE_SESSION_VSID  0x10C01000
 #define VOICE2_SESSION_VSID 0x10DC1000
 #define VOLTE_SESSION_VSID  0x10C02000
 #define VOIP_SESSION_VSID   0x10004000
 #define QCHAT_SESSION_VSID  0x10803000
+#define VOWLAN_SESSION_VSID 0x10002000
 #define ALL_SESSION_VSID    0xFFFFFFFF
 #define VSID_MAX            ALL_SESSION_VSID
 
@@ -1440,11 +1444,9 @@
 			uint32_t ramp_duration);
 int voc_set_tx_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
 		    uint32_t ramp_duration);
-int voc_set_rx_device_mute(uint32_t session_id, uint32_t mute,
-			   uint32_t ramp_duration);
+int voc_set_device_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
+			uint32_t ramp_duration);
 int voc_get_rx_device_mute(uint32_t session_id);
-int voc_disable_cvp(uint32_t session_id);
-int voc_enable_cvp(uint32_t session_id);
 int voc_set_route_flag(uint32_t session_id, uint8_t path_dir, uint8_t set);
 uint8_t voc_get_route_flag(uint32_t session_id, uint8_t path_dir);
 int voc_enable_dtmf_rx_detection(uint32_t session_id, uint32_t enable);
@@ -1466,5 +1468,7 @@
 int voice_get_idx_for_session(u32 session_id);
 int voc_set_ext_ec_ref(uint16_t port_id, bool state);
 int voc_update_amr_vocoder_rate(uint32_t session_id);
+int voc_disable_device(uint32_t session_id);
+int voc_enable_device(uint32_t session_id);
 
 #endif
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2f9e319..56efb97 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2308,6 +2308,7 @@
 		if (ret < 0) {
 			dev_err(dapm->dev, "Failed to add route %s->%s\n",
 				route->source, route->sink);
+			mutex_unlock(&dapm->card->dapm_mutex);
 			return ret;
 		}
 		route++;
@@ -3017,6 +3018,7 @@
 			dev_err(dapm->dev,
 				"ASoC: Failed to create DAPM control %s: %d\n",
 				widget->name, ret);
+			mutex_unlock(&dapm->card->dapm_mutex);
 			return ret;
 		}
 		widget++;